id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
15,275
import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom
import torch.nn as nn
import SimpleITK as sitk


def calculate_metric_percase(pred, gt):
    # NOTE(review): the body of this function is missing from the extracted
    # source (its def line is immediately followed by the next def). Given
    # the `medpy.metric` import it presumably computes per-class metrics
    # (e.g. Dice / HD95) for a binary pred/gt pair — confirm upstream.
    ...


def test_single_volume(image, label, net, classes, patch_size=[256, 256],
                       test_save_path=None, case=None, z_spacing=1):
    """Run `net` over one volume and return per-class metrics.

    `image` and `label` arrive as tensors with a leading batch dimension of
    one. A 3-D image is processed one axial slice at a time: each slice is
    zoomed to `patch_size` for inference and the argmax prediction is zoomed
    back to the slice's native size. A 2-D image is inferred in one shot.
    When `test_save_path` is given, the image, prediction, and label are
    written as NIfTI files named after `case`.

    Returns a list with one `calculate_metric_percase` result for every
    foreground class (1 .. classes-1).
    """
    image = image.squeeze(0).cpu().detach().numpy()
    label = label.squeeze(0).cpu().detach().numpy()
    if len(image.shape) == 3:
        prediction = np.zeros_like(label)
        for slice_idx in range(image.shape[0]):
            axial_slice = image[slice_idx, :, :]
            height, width = axial_slice.shape[0], axial_slice.shape[1]
            if height != patch_size[0] or width != patch_size[1]:
                # Cubic interpolation for the input image (previously order=0).
                axial_slice = zoom(
                    axial_slice,
                    (patch_size[0] / height, patch_size[1] / width),
                    order=3,
                )
            net_input = (
                torch.from_numpy(axial_slice).unsqueeze(0).unsqueeze(0).float().cuda()
            )
            net.eval()
            with torch.no_grad():
                outputs = net(net_input)
                out = torch.argmax(torch.softmax(outputs, dim=1), dim=1).squeeze(0)
                out = out.cpu().detach().numpy()
                if height != patch_size[0] or width != patch_size[1]:
                    # Nearest-neighbour so predicted label ids stay integral.
                    pred = zoom(
                        out, (height / patch_size[0], width / patch_size[1]), order=0
                    )
                else:
                    pred = out
                prediction[slice_idx] = pred
    else:
        net_input = torch.from_numpy(image).unsqueeze(0).unsqueeze(0).float().cuda()
        net.eval()
        with torch.no_grad():
            out = torch.argmax(torch.softmax(net(net_input), dim=1), dim=1).squeeze(0)
            prediction = out.cpu().detach().numpy()

    metric_list = []
    for cls in range(1, classes):
        metric_list.append(calculate_metric_percase(prediction == cls, label == cls))

    if test_save_path is not None:
        img_itk = sitk.GetImageFromArray(image.astype(np.float32))
        prd_itk = sitk.GetImageFromArray(prediction.astype(np.float32))
        lab_itk = sitk.GetImageFromArray(label.astype(np.float32))
        img_itk.SetSpacing((1, 1, z_spacing))
        prd_itk.SetSpacing((1, 1, z_spacing))
        lab_itk.SetSpacing((1, 1, z_spacing))
        sitk.WriteImage(prd_itk, test_save_path + '/' + case + "_pred.nii.gz")
        sitk.WriteImage(img_itk, test_save_path + '/' + case + "_img.nii.gz")
        sitk.WriteImage(lab_itk, test_save_path + '/' + case + "_gt.nii.gz")
    return metric_list
null
15,276
import argparse
import logging
import os
import random
import sys
import time

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from torch.nn.modules.loss import CrossEntropyLoss
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm

from utils import DiceLoss
from utils import test_single_volume

# NOTE(review): the extracted source also carried body-less signature stubs
# for DiceLoss / RandomGenerator / Synapse_dataset at this point; they are
# extraction artifacts of the real definitions in `utils` and
# `datasets.dataset_synapse` and are not reproduced here — confirm upstream.


def trainer_synapse(args, model, snapshot_path):
    """Train `model` on the Synapse dataset with a combined CE + Dice loss.

    Logs to `snapshot_path`/log.txt and TensorBoard, decays the learning
    rate polynomially per iteration, snapshots example images/predictions
    every 20 iterations, and saves checkpoints late in training.
    Returns the string "Training Finished!".
    """
    from datasets.dataset_synapse import Synapse_dataset, RandomGenerator

    logging.basicConfig(filename=snapshot_path + "/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s',
                        datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))

    base_lr = args.base_lr
    num_classes = args.num_classes
    # Effective batch size scales with the number of GPUs.
    batch_size = args.batch_size * args.n_gpu
    # max_iterations = args.max_iterations
    db_train = Synapse_dataset(
        base_dir=args.root_path, list_dir=args.list_dir, split="train",
        transform=transforms.Compose(
            [RandomGenerator(output_size=[args.img_size, args.img_size])]))
    print("The length of train set is: {}".format(len(db_train)))

    def worker_init_fn(worker_id):
        # Deterministic per-worker seeding for DataLoader workers.
        random.seed(args.seed + worker_id)

    trainloader = DataLoader(db_train, batch_size=batch_size, shuffle=True,
                             num_workers=8, pin_memory=True,
                             worker_init_fn=worker_init_fn)
    if args.n_gpu > 1:
        model = nn.DataParallel(model)
    model.train()

    ce_loss = CrossEntropyLoss()
    dice_loss = DiceLoss(num_classes)
    optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9,
                          weight_decay=0.0001)
    writer = SummaryWriter(snapshot_path + '/log')

    iter_num = 0
    max_epoch = args.max_epochs
    max_iterations = args.max_epochs * len(trainloader)
    # max_epoch = max_iterations // len(trainloader) + 1
    logging.info("{} iterations per epoch. {} max iterations ".format(
        len(trainloader), max_iterations))
    best_performance = 0.0
    iterator = tqdm(range(max_epoch), ncols=70)
    for epoch_num in iterator:
        for i_batch, sampled_batch in enumerate(trainloader):
            image_batch, label_batch = sampled_batch['image'], sampled_batch['label']
            image_batch, label_batch = image_batch.cuda(), label_batch.cuda()
            outputs = model(image_batch)
            loss_ce = ce_loss(outputs, label_batch[:].long())
            loss_dice = dice_loss(outputs, label_batch, softmax=True)
            # Fixed 0.4/0.6 weighting between cross-entropy and Dice.
            loss = 0.4 * loss_ce + 0.6 * loss_dice
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Polynomial (power 0.9) per-iteration learning-rate decay.
            lr_ = base_lr * (1.0 - iter_num / max_iterations) ** 0.9
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr_

            iter_num = iter_num + 1
            writer.add_scalar('info/lr', lr_, iter_num)
            writer.add_scalar('info/total_loss', loss, iter_num)
            writer.add_scalar('info/loss_ce', loss_ce, iter_num)
            logging.info('iteration %d : loss : %f, loss_ce: %f'
                         % (iter_num, loss.item(), loss_ce.item()))

            if iter_num % 20 == 0:
                # NOTE(review): indexes sample 1 of the batch, so this
                # assumes batch_size >= 2 — confirm caller guarantees it.
                image = image_batch[1, 0:1, :, :]
                image = (image - image.min()) / (image.max() - image.min())
                writer.add_image('train/Image', image, iter_num)
                outputs = torch.argmax(torch.softmax(outputs, dim=1), dim=1,
                                       keepdim=True)
                writer.add_image('train/Prediction', outputs[1, ...] * 50, iter_num)
                labs = label_batch[1, ...].unsqueeze(0) * 50
                writer.add_image('train/GroundTruth', labs, iter_num)

        save_interval = 50  # int(max_epoch/6)
        if epoch_num > int(max_epoch / 2) and (epoch_num + 1) % save_interval == 0:
            save_mode_path = os.path.join(
                snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
            torch.save(model.state_dict(), save_mode_path)
            logging.info("save model to {}".format(save_mode_path))

        if epoch_num >= max_epoch - 1:
            save_mode_path = os.path.join(
                snapshot_path, 'epoch_' + str(epoch_num) + '.pth')
            torch.save(model.state_dict(), save_mode_path)
            logging.info("save model to {}".format(save_mode_path))
            iterator.close()
            break

    writer.close()
    return "Training Finished!"
null
15,277
import os

import yaml
from yacs.config import CfgNode as CN

# ---------------------------------------------------------------------------
# Default configuration tree (yacs). `get_config` hands out clones of `_C`
# so the defaults themselves are never mutated by callers.
# ---------------------------------------------------------------------------
_C = CN()

# Base config file list
_C.BASE = ['']

# Data settings
_C.DATA = CN()
_C.DATA.BATCH_SIZE = 128
_C.DATA.DATA_PATH = ''
_C.DATA.DATASET = 'imagenet'
_C.DATA.IMG_SIZE = 224
_C.DATA.INTERPOLATION = 'bicubic'
_C.DATA.ZIP_MODE = False
_C.DATA.CACHE_MODE = 'part'
_C.DATA.PIN_MEMORY = True
_C.DATA.NUM_WORKERS = 8

# Model settings
_C.MODEL = CN()
_C.MODEL.TYPE = 'swin'
_C.MODEL.NAME = 'swin_tiny_patch4_window7_224'
_C.MODEL.PRETRAIN_CKPT = './pretrained_ckpt/swin_tiny_patch4_window7_224.pth'
_C.MODEL.RESUME = ''
_C.MODEL.NUM_CLASSES = 1000
_C.MODEL.DROP_RATE = 0.0
_C.MODEL.DROP_PATH_RATE = 0.1
_C.MODEL.LABEL_SMOOTHING = 0.1

# Swin Transformer parameters
_C.MODEL.SWIN = CN()
_C.MODEL.SWIN.PATCH_SIZE = 4
_C.MODEL.SWIN.IN_CHANS = 3
_C.MODEL.SWIN.EMBED_DIM = 96
_C.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]
_C.MODEL.SWIN.DECODER_DEPTHS = [2, 2, 6, 2]
_C.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]
_C.MODEL.SWIN.WINDOW_SIZE = 7
_C.MODEL.SWIN.MLP_RATIO = 4.
_C.MODEL.SWIN.QKV_BIAS = True
_C.MODEL.SWIN.QK_SCALE = None
_C.MODEL.SWIN.APE = False
_C.MODEL.SWIN.PATCH_NORM = True
_C.MODEL.SWIN.FINAL_UPSAMPLE = "expand_first"

# Training settings
_C.TRAIN = CN()
_C.TRAIN.START_EPOCH = 0
_C.TRAIN.EPOCHS = 300
_C.TRAIN.WARMUP_EPOCHS = 20
_C.TRAIN.WEIGHT_DECAY = 0.05
_C.TRAIN.BASE_LR = 5e-4
_C.TRAIN.WARMUP_LR = 5e-7
_C.TRAIN.MIN_LR = 5e-6
_C.TRAIN.CLIP_GRAD = 5.0
_C.TRAIN.AUTO_RESUME = True
_C.TRAIN.ACCUMULATION_STEPS = 0
_C.TRAIN.USE_CHECKPOINT = False

_C.TRAIN.LR_SCHEDULER = CN()
_C.TRAIN.LR_SCHEDULER.NAME = 'cosine'
_C.TRAIN.LR_SCHEDULER.DECAY_EPOCHS = 30
_C.TRAIN.LR_SCHEDULER.DECAY_RATE = 0.1

_C.TRAIN.OPTIMIZER = CN()
_C.TRAIN.OPTIMIZER.NAME = 'adamw'
_C.TRAIN.OPTIMIZER.EPS = 1e-8
_C.TRAIN.OPTIMIZER.BETAS = (0.9, 0.999)
_C.TRAIN.OPTIMIZER.MOMENTUM = 0.9

# Augmentation settings
_C.AUG = CN()
_C.AUG.COLOR_JITTER = 0.4
_C.AUG.AUTO_AUGMENT = 'rand-m9-mstd0.5-inc1'
_C.AUG.REPROB = 0.25
_C.AUG.REMODE = 'pixel'
_C.AUG.RECOUNT = 1
_C.AUG.MIXUP = 0.8
_C.AUG.CUTMIX = 1.0
_C.AUG.CUTMIX_MINMAX = None
_C.AUG.MIXUP_PROB = 1.0
_C.AUG.MIXUP_SWITCH_PROB = 0.5
_C.AUG.MIXUP_MODE = 'batch'

# Testing settings
_C.TEST = CN()
_C.TEST.CROP = True

# Misc settings
_C.AMP_OPT_LEVEL = ''
_C.OUTPUT = ''
_C.TAG = 'default'
_C.SAVE_FREQ = 1
_C.PRINT_FREQ = 10
_C.SEED = 0
_C.EVAL_MODE = False
_C.THROUGHPUT_MODE = False
_C.LOCAL_RANK = 0


def update_config(config, args):
    """Overlay parsed command-line `args` onto `config` in place.

    NOTE(review): `_update_config_from_file` is referenced but never defined
    in the extracted source — presumably it loads the YAML file named by
    `args.cfg` into `config`; confirm against the original repository.
    """
    _update_config_from_file(config, args.cfg)

    config.defrost()
    if args.opts:
        config.merge_from_list(args.opts)

    # merge from specific arguments
    if args.batch_size:
        config.DATA.BATCH_SIZE = args.batch_size
    if args.zip:
        config.DATA.ZIP_MODE = True
    if args.cache_mode:
        config.DATA.CACHE_MODE = args.cache_mode
    if args.resume:
        config.MODEL.RESUME = args.resume
    if args.accumulation_steps:
        config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps
    if args.use_checkpoint:
        config.TRAIN.USE_CHECKPOINT = True
    if args.amp_opt_level:
        config.AMP_OPT_LEVEL = args.amp_opt_level
    if args.tag:
        config.TAG = args.tag
    if args.eval:
        config.EVAL_MODE = True
    if args.throughput:
        config.THROUGHPUT_MODE = True

    config.freeze()


def get_config(args):
    """Get a yacs CfgNode object with default values."""
    # Return a clone so that the defaults will not be altered
    # This is for the "local variable" use pattern
    config = _C.clone()
    update_config(config, args)
    return config
Get a yacs CfgNode object with default values.
15,278
import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from einops import rearrange from timm.models.layers import DropPath, to_2tuple, trunc_normal_ The provided code snippet includes necessary dependencies for implementing the `window_partition` function. Write a Python function `def window_partition(x, window_size)` to solve the following problem: Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) Here is the function: def window_partition(x, window_size): """ Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C) """ B, H, W, C = x.shape x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) return windows
Args: x: (B, H, W, C) window_size (int): window size Returns: windows: (num_windows*B, window_size, window_size, C)
15,279
import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from einops import rearrange from timm.models.layers import DropPath, to_2tuple, trunc_normal_ The provided code snippet includes necessary dependencies for implementing the `window_reverse` function. Write a Python function `def window_reverse(windows, window_size, H, W)` to solve the following problem: Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) Here is the function: def window_reverse(windows, window_size, H, W): """ Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C) """ B = int(windows.shape[0] / (H * W / window_size / window_size)) x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) return x
Args: windows: (num_windows*B, window_size, window_size, C) window_size (int): Window size H (int): Height of image W (int): Width of image Returns: x: (B, H, W, C)
15,280
import os
import random

import h5py
import numpy as np
import torch
from scipy import ndimage
from scipy.ndimage.interpolation import zoom
from torch.utils.data import Dataset


def random_rot_flip(image, label):
    """Apply one random 90-degree rotation and one random flip to both arrays.

    The same transform is applied to `image` and `label` so they stay
    aligned: first a rotation by k*90 degrees (k drawn uniformly from 0..3),
    then a flip along a randomly chosen axis (0 or 1). Returns the
    transformed (image, label) pair; the flipped arrays are copied so they
    are contiguous.
    """
    # Draw the quarter-turn count first, then the flip axis — the order of
    # RNG consumption matters for reproducibility with seeded pipelines.
    quarter_turns = np.random.randint(0, 4)
    image, label = np.rot90(image, quarter_turns), np.rot90(label, quarter_turns)
    flip_axis = np.random.randint(0, 2)
    image = np.flip(image, axis=flip_axis).copy()
    label = np.flip(label, axis=flip_axis).copy()
    return image, label
null
15,281
import os
import random

import h5py
import numpy as np
import torch
from scipy import ndimage
from scipy.ndimage.interpolation import zoom
from torch.utils.data import Dataset


def random_rotate(image, label):
    """Rotate `image` and `label` by the same random angle.

    The angle is drawn uniformly from [-20, 20) degrees. Nearest-neighbour
    interpolation (order=0) keeps label ids intact, and reshape=False keeps
    the original array shape (corners are filled with the default cval of 0).
    Returns the rotated (image, label) pair.
    """
    angle = np.random.randint(-20, 20)
    rotated_image = ndimage.rotate(image, angle, order=0, reshape=False)
    rotated_label = ndimage.rotate(label, angle, order=0, reshape=False)
    return rotated_image, rotated_label
null
15,282
from pathlib import Path
from typing import List

import mkdocs_gen_files
import yaml

# Per-module mkdocstrings "selection" overrides, keyed by dotted module path.
SELECTIONS = {}

# Decorative images cycled through the generated reference pages.
animals = list(Path("docs/assets").glob("square_*.png"))


def indent(s, num_spaces):
    """
    Indent every line of s by num_spaces spaces
    """
    return ("\n" + " " * num_spaces).join(s.splitlines())


nav = mkdocs_gen_files.Nav()

# NOTE(review): as extracted, SUMMARY.md is written *before* generate_docs()
# populates `nav`, which would emit an empty literate nav. The original file
# likely placed this write after the generation calls — confirm upstream.
with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file:
    nav_file.writelines(nav.build_literate_nav())


def generate_docs(packages: List[str], directory: str):
    """Emit one mkdocstrings reference page per Python module.

    Walks every ``.py`` file under ``directory/<package>`` (skipping
    ``__init__`` / ``_bind_dependencies`` / ``_auto_attributes`` modules),
    writes a ``::: module.path`` stub page under ``reference/``, registers
    it in the module-level ``nav``, and decorates each page with one of the
    ``animals`` images, cycling through them.
    """
    total_pages = 0
    for package in packages:
        for path in sorted(Path(f"{directory}/{package}").glob("**/*.py")):
            if (
                path.name.startswith("__init__")
                or path.name.startswith("_bind_dependencies")
                or path.name.startswith("_auto_attributes")
            ):
                continue
            module_path = path.relative_to(directory).with_suffix("")
            doc_path = path.relative_to(directory).with_suffix(".md")
            full_doc_path = Path("reference", doc_path)
            parts = list(module_path.parts)
            parts[-1] = f"{parts[-1]}.py"
            nav[parts] = doc_path
            with mkdocs_gen_files.open(full_doc_path, "w") as fd:
                ident = ".".join(module_path.parts)
                print("::: " + ident, file=fd)
                if ident in SELECTIONS:
                    # NOTE(review): the leading whitespace of these literals
                    # was lost when the source was flattened; reconstructed
                    # as 4-space YAML nesting — confirm upstream.
                    print("    selection:", end="\n      ", file=fd)
                    print(indent(yaml.dump(SELECTIONS[ident]), 6), file=fd)
                # Build relative path to assets directory
                animal_path = ("../" * len(module_path.parts)) / (
                    animals[total_pages % len(animals)].relative_to("docs")
                )
                total_pages += 1
                print(
                    f"\n\n"
                    f'<div align="right">\n',
                    f'<img width="125" height="125" src="{animal_path}">\n',
                    f"</div>\n",
                    file=fd,
                )
            mkdocs_gen_files.set_edit_path(full_doc_path, path)
null
15,283
from dataclasses import dataclass
import sys
from typing import Iterable, List


# NOTE(review): the @dataclass decorator was lost when the source was
# flattened; the bare field annotations plus __post_init__ imply it —
# confirm against the original repository.
@dataclass
class Range:
    """
    Range of addresses.

    :ivar start: The start address of the Range
    :ivar end: The end address of the Range
    """

    start: int
    end: int

    MAX = sys.maxsize

    def __post_init__(self):
        # Zero-length ranges (start == end) are permitted.
        if self.start > self.end:
            raise ValueError("The start value must be less than or equal to the end value")

    def __iter__(self):
        """
        Make this class iterable, allowing to write `for i in Range(...)`
        and `if i in Range(...)`.
        """
        yield from range(self.start, self.end)

    def __contains__(self, value):
        """
        Make the check `value in Range(...)` constant time.
        """
        return self.contains_value(value)

    def length(self):
        """
        Compute the length of this range
        """
        return self.end - self.start

    def contains_value(self, value: int) -> bool:
        """
        Determine if the provided value is within the range (inclusive of
        the start, exclusive of the end).

        Note: you can use `value in r` alternatively to
        `r.contains_value(value)`.
        """
        return self.start <= value < self.end

    def within(self, range: "Range") -> bool:
        """
        Determine if this range is within the provided range
        """
        return self.start >= range.start and self.end <= range.end

    def overlaps(self, range: "Range") -> bool:
        """
        Determine if this range overlaps the provided range.
        """
        return range.start < self.end and range.end > self.start

    def intersect(self, range: "Range") -> "Range":
        """
        Compute the largest possible range that is within both this range
        and the provided range. It raises a ValueError if no such range
        exists.
        """
        lo = max(range.start, self.start)
        hi = min(range.end, self.end)
        if lo > hi:
            raise ValueError("There is no overlap between this range and the provided range ")
        return Range(lo, hi)

    def split(self, range: "Range") -> Iterable["Range"]:
        """
        Split the range into one or more ranges that do not overlap the
        provided range

        :param range:
        :return:
        """
        if range.start >= self.end or range.end <= self.start:
            # No overlap
            return (self,)
        elif self.within(range):
            # This range is covered by the provided range
            return tuple()
        elif range.start <= self.start and range.end < self.end:
            # Overlap on the right
            return (Range(range.end, self.end),)
        elif range.start > self.start and range.end >= self.end:
            # Overlap on the left
            return (Range(self.start, range.start),)
        elif range.within(self):
            # The provided range sits in the middle of this range
            return Range(self.start, range.start), Range(range.end, self.end)
        else:
            raise ValueError("Unreachable")

    def translate(self, offset: int) -> "Range":
        """
        Generate a new range based on this range translated by the provided
        offset
        """
        if offset == 0:
            return self
        if offset + self.start < 0:
            raise ValueError("The start of the translated range cannot be negative")
        return Range(self.start + offset, self.end + offset)

    def __repr__(self) -> str:
        return f"Range({hex(self.start)}, {hex(self.end)})"

    def __hash__(self):
        # Explicit hash so instances stay hashable alongside dataclass __eq__.
        return hash((self.start, self.end))

    # NOTE(review): from_size and merge_ranges are reconstructed here as
    # staticmethods — chunk_ranges below calls `Range.merge_ranges`, which
    # requires them inside the class; confirm upstream.
    @staticmethod
    def from_size(start: int, size: int) -> "Range":
        """Build a range covering `size` addresses starting at `start`."""
        return Range(start, start + size)

    @staticmethod
    def merge_ranges(ranges: Iterable["Range"]) -> List["Range"]:
        """
        Merge multiple Ranges into a minimal set of Ranges.

        Sweep-line over the range boundaries: each start contributes +1 and
        each end -1 to a counter of currently-open ranges; a merged range is
        emitted whenever the counter returns to zero. Only the boundary
        markers are iterated, not every address.

        :param ranges: unordered iterable of Range objects to merge
        """
        markers = []
        for r in ranges:
            markers.append((r.start, 1))
            markers.append((r.end, -1))
        # Sort by address; at equal addresses, starts (+1) come before ends
        # (-1) so back-to-back ranges merge instead of splitting.
        markers.sort(key=lambda m: (m[0], -m[1]))
        merged: List["Range"] = []
        open_count = 0
        span_start = 0
        for addr, delta in markers:
            if open_count == 0:
                # Must be at the start of a merged span.
                span_start = addr
            open_count += delta
            # The counter should never drop below zero.
            assert open_count >= 0, "Range bounds markers not ordered correctly"
            if open_count == 0:
                # Must be at the end of a merged span.
                merged.append(Range(span_start, addr))
        return merged


def chunk_ranges(ranges: List[Range], chunk_size: int) -> List[Range]:
    """
    Break a list of Ranges into equal sized regions of Ranges, assuming each
    range is evenly divisible by chunk_size.

    The inputs are first merged; each merged region is then cut into
    consecutive chunks of at most `chunk_size` addresses (the final chunk of
    a region is shorter when the region length is not an exact multiple).

    :param ranges:
    :param chunk_size:
    :return: equal sized regions of Ranges
    """
    chunked: List[Range] = []
    for region in Range.merge_ranges(ranges):
        for lo in range(region.start, region.end, chunk_size):
            chunked.append(Range(lo, min(lo + chunk_size, region.end)))
    return chunked
Break a list of Ranges into equal sized regions of Ranges, assuming each range is evenly divisible by chunk_size. :param ranges: :param chunk_size: :return: equal sized regions of Ranges
15,284
from dataclasses import dataclass
import sys
from typing import Iterable, List


# NOTE(review): the @dataclass decorator was lost when the source was
# flattened; the bare field annotations plus __post_init__ imply it —
# confirm against the original repository.
@dataclass
class Range:
    """
    Range of addresses.

    :ivar start: The start address of the Range
    :ivar end: The end address of the Range
    """

    start: int
    end: int

    MAX = sys.maxsize

    def __post_init__(self):
        # Zero-length ranges (start == end) are permitted.
        if self.start > self.end:
            raise ValueError("The start value must be less than or equal to the end value")

    def __iter__(self):
        """
        Make this class iterable, allowing to write `for i in Range(...)`
        and `if i in Range(...)`.
        """
        yield from range(self.start, self.end)

    def __contains__(self, value):
        """
        Make the check `value in Range(...)` constant time.
        """
        return self.contains_value(value)

    def length(self):
        """
        Compute the length of this range
        """
        return self.end - self.start

    def contains_value(self, value: int) -> bool:
        """
        Determine if the provided value is within the range (inclusive of
        the start, exclusive of the end).

        Note: you can use `value in r` alternatively to
        `r.contains_value(value)`.
        """
        return self.start <= value < self.end

    def within(self, range: "Range") -> bool:
        """
        Determine if this range is within the provided range
        """
        return self.start >= range.start and self.end <= range.end

    def overlaps(self, range: "Range") -> bool:
        """
        Determine if this range overlaps the provided range.
        """
        return range.start < self.end and range.end > self.start

    def intersect(self, range: "Range") -> "Range":
        """
        Compute the largest possible range that is within both this range
        and the provided range. It raises a ValueError if no such range
        exists.
        """
        start = max(range.start, self.start)
        end = min(range.end, self.end)
        if start > end:
            raise ValueError("There is no overlap between this range and the provided range ")
        return Range(start, end)

    def split(self, range: "Range") -> Iterable["Range"]:
        """
        Split the range into one or more ranges that do not overlap the
        provided range

        :param range:
        :return:
        """
        if range.start >= self.end or range.end <= self.start:
            # No overlap
            return (self,)
        elif self.within(range):
            # This range is covered by the provided range
            return tuple()
        elif range.start <= self.start and range.end < self.end:
            # Overlap on the right
            return (Range(range.end, self.end),)
        elif range.start > self.start and range.end >= self.end:
            # Overlap on the left
            return (Range(self.start, range.start),)
        elif range.within(self):
            # The provided range sits in the middle of this range
            return Range(self.start, range.start), Range(range.end, self.end)
        else:
            raise ValueError("Unreachable")

    def translate(self, offset: int) -> "Range":
        """
        Generate a new range based on this range translated by the provided
        offset
        """
        if offset == 0:
            return self
        if offset + self.start < 0:
            raise ValueError("The start of the translated range cannot be negative")
        return Range(self.start + offset, self.end + offset)

    def __repr__(self) -> str:
        return f"Range({hex(self.start)}, {hex(self.end)})"

    def __hash__(self):
        # Explicit hash so instances stay hashable alongside dataclass __eq__.
        return hash((self.start, self.end))

    @staticmethod
    def from_size(start: int, size: int) -> "Range":
        """Build a range covering `size` addresses starting at `start`."""
        return Range(start, start + size)

    @staticmethod
    def merge_ranges(ranges: Iterable["Range"]) -> List["Range"]:
        """
        Merge multiple Ranges into a minimal set of Ranges.

        Sweep-line over the range boundaries: each start contributes +1 and
        each end -1 to a counter of currently-open ranges; a merged range is
        emitted whenever the counter returns to zero. Only the boundary
        markers are iterated, not every address.

        :param ranges: unordered iterable of Range objects to merge
        """
        range_bounds_markers = []
        for r in ranges:
            range_bounds_markers.append((r.start, 1))
            range_bounds_markers.append((r.end, -1))
        # Sort markers by index, and use inverted second item in tuple (incr)
        # as tiebreaker: if start and end markers have the same index, the
        # start marker(s) should be counted first.
        range_bounds_markers.sort(key=lambda idx_incr: (idx_incr[0], -idx_incr[1]))
        merged_ranges = []
        current_overlapping_ranges = 0
        last_range_start = 0
        for idx, incr in range_bounds_markers:
            if current_overlapping_ranges == 0:
                # Must be at the start of a range
                last_range_start = idx
            current_overlapping_ranges += incr
            # The counter should never drop below zero
            assert current_overlapping_ranges >= 0, "Range bounds markers not ordered correctly"
            if current_overlapping_ranges == 0:
                # Must be at the end of a range
                merged_ranges.append(Range(last_range_start, idx))
        return merged_ranges


def remove_subranges(ranges: List[Range], to_remove: List[Range]) -> List[Range]:
    """
    Subtract one set of addresses from another, both expressed as a list of
    non-overlapping ranges.

    :param ranges: A list of non-overlapping ranges.
    :param to_remove: A list of non-overlapping ranges to be removed from the
        first argument.
    :return: A list of ranges covering the input ranges with the subranges
        removed.
    """
    if not ranges or not to_remove:
        return ranges
    # FIX: sort copies rather than the caller's lists — the original called
    # list.sort() on both arguments, mutating them as a side effect.
    ranges = sorted(ranges, key=lambda range_: range_.start)
    to_remove = sorted(to_remove, key=lambda range_: range_.start)
    i = 0
    j = 0
    ret = []
    current_range = ranges[i]
    current_to_remove = to_remove[j]
    # Two-pointer sweep over both sorted lists.
    while True:
        if current_range.start >= current_to_remove.end:
            # The removal range lies entirely behind us; advance it.
            j += 1
            if j < len(to_remove):
                current_to_remove = to_remove[j]
            else:
                # No removals left: keep the rest of the ranges untouched.
                ret.append(current_range)
                ret.extend(ranges[i + 1:])
                break
        elif current_range.end <= current_to_remove.start:
            # Current range ends before the next removal starts: keep whole.
            ret.append(current_range)
            i += 1
            if i < len(ranges):
                current_range = ranges[i]
            else:
                break
        elif current_range.start < current_to_remove.start:
            # Keep the prefix before the removal, continue with the overlap.
            ret.append(Range(current_range.start, current_to_remove.start))
            current_range = Range(current_to_remove.start, current_range.end)
        elif current_range.end > current_to_remove.end:
            # Drop the overlapped prefix, keep examining the suffix.
            current_range = Range(current_to_remove.end, current_range.end)
        else:
            # current_to_remove contains current_range: drop it entirely.
            i += 1
            if i >= len(ranges):
                break
            current_range = ranges[i]
    return ret
Subtract one set of addresses from another, both expressed as a list of non-overlapping ranges. :param ranges: A list of non-overlapping ranges. :param to_remove: A list of non-overlapping ranges to be removed from the first argument. :return: A list of ranges covering the input ranges with the subranges removed.
15,285
import setuptools
import pkg_resources
from setuptools.command.egg_info import egg_info

# Read the package license at import time so it can be embedded in the
# setup metadata. NOTE(review): requires a LICENSE file in the CWD.
with open("LICENSE") as f:
    license = "".join(["\n", f.read()])


def read_requirements(requirements_path):
    """Parse a pip-style requirements file into a list of requirement strings.

    Each parsed requirement is rendered back to its canonical string form
    via `str()`, suitable for setuptools' install_requires.
    """
    with open(requirements_path) as requirements_handle:
        parsed = pkg_resources.parse_requirements(requirements_handle)
        return [str(requirement) for requirement in parsed]
null
15,286
import os

# Minimal C program used to build demonstration binaries.
# NOTE(review): interior whitespace was lost when the source was flattened;
# the layout below is a reconstruction — the program text itself is intact.
HELLO_WORLD_SOURCE = r"""
#include <stdio.h>

int main() {
    printf("Hello, World!\n");
    return 0;
}
"""


def create_binary(c_program: str, executable_filename: str) -> None:
    """Compile `c_program` into a binary at `executable_filename`."""
    c_source_filename = f"{executable_filename}.c"
    with open(c_source_filename, "w") as source_file:
        source_file.write(c_program)
    # -no-pie is used to circumvent a current limitation of our Ghidra integration
    os.system(f"gcc -no-pie -o {executable_filename} {c_source_filename}")


def create_hello_world_binary() -> None:
    """Create a simple binary printing "Hello, World!\n" to stdout."""
    create_binary(HELLO_WORLD_SOURCE, "hello_world")
Create a simple binary printing "Hello, World!\n" to stdout.
15,287
import os


async def get_descendants_tags(resource):
    """Return an alphabetically sorted list of all the tags of the
    descendants of `resource`.

    Collects the union of `get_tags()` over every descendant returned by
    `resource.get_descendants()` and sorts the result by string form
    (tags themselves need not be strings).
    """
    collected = set()
    for descendant in await resource.get_descendants():
        collected.update(descendant.get_tags())
    return sorted(collected, key=str)
Return an alphabetically sorted list of all the tags of the descendants of `resource`.
15,288
import argparse
import os

from ofrak import OFRAK, OFRAKContext
from ofrak.core import (
    Elf,
    ElfProgramHeader,
    ElfProgramHeaderType,
    ElfProgramHeaderModifier,
    ElfProgramHeaderModifierConfig,
)
from ofrak_type.memory_permissions import MemoryPermissions


async def get_exec_load_program_header(elf_v: Elf) -> ElfProgramHeader:
    """Return the first executable LOAD program header in `elf_v`.

    Scans the ELF view's program headers in order and returns the first
    whose type is PT_LOAD and whose flags carry the execute bit.

    :raises RuntimeError: if no executable LOAD program header exists.
    """
    for header in await elf_v.get_program_headers():
        is_load = header.p_type == ElfProgramHeaderType.LOAD.value
        is_exec = bool(header.p_flags & MemoryPermissions.X.value)
        if is_load and is_exec:
            return header
    raise RuntimeError(f"Could not find executable LOAD program header in {elf_v}")
Return the first executable LOAD program header in `elf_view`.
15,289
import argparse import os from ofrak_patch_maker.toolchain.llvm_12 import LLVM_12_0_1_Toolchain import ofrak_ghidra from ofrak import OFRAK, OFRAKContext, Resource, ResourceFilter, ResourceAttributeValueFilter from ofrak.core import ( Allocatable, CodeRegion, ComplexBlock, Instruction, LiefAddSegmentConfig, LiefAddSegmentModifier, ElfProgramHeader, ) from ofrak.core.patch_maker.modifiers import ( PatchFromSourceModifier, PatchFromSourceModifierConfig, SourceBundle, ) from ofrak_patch_maker.toolchain.model import ( ToolchainConfig, BinFileType, CompilerOptimizationLevel, Segment, ) from ofrak_patch_maker.toolchain.utils import get_file_format from ofrak_type import Range from ofrak_type.memory_permissions import MemoryPermissions PAGE_ALIGN = 0x1000 GHIDRA_PIE_OFFSET = 0x100000 The provided code snippet includes necessary dependencies for implementing the `add_and_return_segment` function. Write a Python function `async def add_and_return_segment(elf_resource: Resource, vaddr: int, size: int) -> ElfProgramHeader` to solve the following problem: Add a segment to `elf_resource`, of size `size` at virtual address `vaddr`, returning this new segment resource after unpacking. Here is the function: async def add_and_return_segment(elf_resource: Resource, vaddr: int, size: int) -> ElfProgramHeader: """Add a segment to `elf_resource`, of size `size` at virtual address `vaddr`, returning this new segment resource after unpacking.""" config = LiefAddSegmentConfig(vaddr, PAGE_ALIGN, [0 for _ in range(size)], "rx") await elf_resource.run(LiefAddSegmentModifier, config) await elf_resource.unpack_recursively() # Get our newly added segment. First get all ElfProgramHeaders, then return the one # with our virtual address. 
file_segments = await elf_resource.get_descendants_as_view( ElfProgramHeader, r_filter=ResourceFilter(tags=(ElfProgramHeader,)) ) segment = [seg for seg in file_segments if seg.p_vaddr == vaddr].pop() # Carve out a child of the new segment where we can store the code for our new function. code_region = CodeRegion(segment.p_vaddr + GHIDRA_PIE_OFFSET, segment.p_filesz) code_region.resource = await elf_resource.create_child_from_view( code_region, data_range=Range(segment.p_offset, segment.p_offset + segment.p_filesz) ) elf_resource.add_tag(Allocatable) await elf_resource.save() return segment
Add a segment to `elf_resource`, of size `size` at virtual address `vaddr`, returning this new segment resource after unpacking.
15,290
import argparse import os from ofrak_patch_maker.toolchain.llvm_12 import LLVM_12_0_1_Toolchain import ofrak_ghidra from ofrak import OFRAK, OFRAKContext, Resource, ResourceFilter, ResourceAttributeValueFilter from ofrak.core import ( Allocatable, CodeRegion, ComplexBlock, Instruction, LiefAddSegmentConfig, LiefAddSegmentModifier, ElfProgramHeader, ) from ofrak.core.patch_maker.modifiers import ( PatchFromSourceModifier, PatchFromSourceModifierConfig, SourceBundle, ) from ofrak_patch_maker.toolchain.model import ( ToolchainConfig, BinFileType, CompilerOptimizationLevel, Segment, ) from ofrak_patch_maker.toolchain.utils import get_file_format from ofrak_type import Range from ofrak_type.memory_permissions import MemoryPermissions GHIDRA_PIE_OFFSET = 0x100000 The provided code snippet includes necessary dependencies for implementing the `call_new_segment_instead` function. Write a Python function `async def call_new_segment_instead(resource: Resource, new_segment: ElfProgramHeader)` to solve the following problem: Replace the original `call` instruction in main with a call to the start of `new_segment`. Here is the function: async def call_new_segment_instead(resource: Resource, new_segment: ElfProgramHeader): """Replace the original `call` instruction in main with a call to the start of `new_segment`.""" main_cb = await resource.get_only_descendant_as_view( v_type=ComplexBlock, r_filter=ResourceFilter( attribute_filters=(ResourceAttributeValueFilter(ComplexBlock.Symbol, "main"),) ), ) call_instruction = await main_cb.resource.get_only_descendant_as_view( v_type=Instruction, r_filter=ResourceFilter( attribute_filters=(ResourceAttributeValueFilter(Instruction.Mnemonic, "call"),) ), ) ghidra_new_segment_vaddr = new_segment.p_vaddr + GHIDRA_PIE_OFFSET await call_instruction.modify_assembly("call", f"0x{ghidra_new_segment_vaddr:x}")
Replace the original `call` instruction in main with a call to the start of `new_segment`.
15,291
import argparse import os from ofrak_patch_maker.toolchain.llvm_12 import LLVM_12_0_1_Toolchain import ofrak_ghidra from ofrak import OFRAK, OFRAKContext, Resource, ResourceFilter, ResourceAttributeValueFilter from ofrak.core import ( Allocatable, CodeRegion, ComplexBlock, Instruction, LiefAddSegmentConfig, LiefAddSegmentModifier, ElfProgramHeader, ) from ofrak.core.patch_maker.modifiers import ( PatchFromSourceModifier, PatchFromSourceModifierConfig, SourceBundle, ) from ofrak_patch_maker.toolchain.model import ( ToolchainConfig, BinFileType, CompilerOptimizationLevel, Segment, ) from ofrak_patch_maker.toolchain.utils import get_file_format from ofrak_type import Range from ofrak_type.memory_permissions import MemoryPermissions GHIDRA_PIE_OFFSET = 0x100000 class SourceBundle(Dict[str, Union[bytes, "SourceBundle"]]): """ Class used to store filesystem trees of source code as serializable in-memory trees, for transfer between components. """ def slurp(cls, path: str) -> "SourceBundle": """ Slurp up a path into a SourceBundle, recursively getting all files and directories and storing them as a tree in memory. :param path: :return: """ root, dirs, files = next(os.walk(path, topdown=True)) pairs: List[Tuple[str, Union[bytes, SourceBundle]]] = [] for file_name in files: file_path = os.path.join(root, file_name) with open(file_path, "rb") as f: file_contents = f.read() pairs.append((file_name, file_contents)) for dir_name in dirs: dir_path = os.path.join(root, dir_name) pairs.append((dir_name, SourceBundle.slurp(dir_path))) return cls(pairs) def dump(self, target_path: str): """ Dump a SourceBundle tree back into the local filesystem, at the given target path. 
:param target_path: :return: """ os.makedirs(target_path, exist_ok=True) for item_name, item_contents in self.items(): item_path = os.path.join(target_path, item_name) if type(item_contents) is bytes: # item is a file with open(item_path, "wb") as f: f.write(item_contents) else: # item is a directory cast(SourceBundle, item_contents).dump(item_path) class PatchFromSourceModifierConfig(ComponentConfig): """ :var source_code: Path to directory containing source code (ideally ONLY source code) :var source_patches: path of each source file to build and inject, with one or more segments defining where to inject one or more of the .text, .data, and .rodata from the build file :var toolchain_config: configuration for the [Toolchain][ofrak_patch_maker.toolchain.abstract.Toolchain] to use :var toolchain: the type of which [Toolchain][ofrak_patch_maker.toolchain.abstract.Toolchain] to use to build patch :var header_directories: (Optional) paths to directories to search for header files in :var patch_name: Optional name of patch """ source_code: SourceBundle source_patches: Dict[str, Tuple[Segment, ...]] toolchain_config: ToolchainConfig toolchain: Type[Toolchain] header_directories: Tuple[SourceBundle, ...] = () patch_name: Optional[str] = None class PatchFromSourceModifier(Modifier): """ Modifier exposing some basic source code patching capabilities. 
""" targets = (Program,) async def modify(self, resource: Resource, config: PatchFromSourceModifierConfig) -> None: if config.patch_name is None: patch_name = f"{resource.get_id().hex()}_patch" else: patch_name = config.patch_name build_tmp_dir = tempfile.mkdtemp() source_tmp_dir = tempfile.mkdtemp() config.source_code.dump(source_tmp_dir) header_dirs = [] for header_directory in config.header_directories: header_tmp_dir = tempfile.mkdtemp() header_directory.dump(header_tmp_dir) header_dirs.append(header_tmp_dir) absolute_source_list = [ os.path.join(source_tmp_dir, src_file) for src_file in config.source_patches.keys() ] program_attributes = await resource.analyze(ProgramAttributes) patch_maker = PatchMaker( toolchain=config.toolchain(program_attributes, config.toolchain_config), build_dir=build_tmp_dir, ) patch_bom = patch_maker.make_bom( name=patch_name, source_list=absolute_source_list, object_list=[], header_dirs=header_dirs, ) # Map each object file in the BOM to the segments associated with its source file patch_bom_segment_mapping = { patch_bom.object_map[os.path.join(source_tmp_dir, src_file)].path: src_segments for src_file, src_segments in config.source_patches.items() } target_program = await resource.view_as(Program) target_linkable_bom_info = await target_program.make_linkable_bom( patch_maker, build_tmp_dir, patch_bom.unresolved_symbols, ) # To support additional dynamic references in user space executables # Create and use a modifier that will: # 1. Extend .got, add new entry # 2. Extend .got.plt, add new stub code # 3. If the DSO is not already listed in the load list for executable it must be extended and added. # 4. 
Provide the additional .got and .got.plt symbols to make_fem now that we have the locations # NOTE: These external functions will probably be *UND* p = PatchRegionConfig(patch_bom.name + "_patch", patch_bom_segment_mapping) exec_path = os.path.join(build_tmp_dir, "output_exec") fem = patch_maker.make_fem( [(patch_bom, p), target_linkable_bom_info], exec_path, ) await resource.run( SegmentInjectorModifier, SegmentInjectorModifierConfig.from_fem(fem), ) # Refresh LinkableBinary with the LinkableSymbols used in this patch target_binary = await resource.view_as(LinkableBinary) await target_binary.define_linkable_symbols_from_patch( fem.executable.symbols, program_attributes ) async def patch_uppercase(resource: Resource, source_dir: str, new_segment: ElfProgramHeader): # The PatchMaker will need to know how to configure the build toolchain. tc_config = ToolchainConfig( file_format=BinFileType.ELF, force_inlines=True, relocatable=True, no_std_lib=True, no_jump_tables=True, no_bss_section=True, create_map_files=True, compiler_optimization_level=CompilerOptimizationLevel.SPACE, debug_info=False, check_overlap=False, ) # Tell the PatchMaker about the segment we added in the binary... text_segment_uppercase = Segment( segment_name=".text", vm_address=new_segment.p_vaddr + GHIDRA_PIE_OFFSET, offset=0, is_entry=False, length=new_segment.p_filesz, access_perms=MemoryPermissions.RX, ) # ... And that we want to put the compiled C patch there. uppercase_source: str = os.path.join(source_dir, "uppercase.c") segment_dict = { uppercase_source: (text_segment_uppercase,), } # Tell PatcherFromSourceModifier about the source files, toolchain, and patch name. 
patch_from_source_config = PatchFromSourceModifierConfig( SourceBundle.slurp(source_dir), segment_dict, tc_config, LLVM_12_0_1_Toolchain, patch_name="HELLO_WORLD", ) # Run PatchFromSourceModifier, which will analyze the target binary, run PatchMaker on our # patch, create a Batch of Objects and Metadata (BOM) for the patch, create a BOM from the # target binary for all unresolved symbols in the patch, make a Final Executable and Metadata # (FEM), and then inject our patch into the binary. await resource.run(PatchFromSourceModifier, patch_from_source_config)
null
15,292
import logging from typing import Iterable, Tuple, List from typing import Optional from warnings import warn from angr.knowledge_plugins.functions.function import Function as AngrFunction from archinfo.arch_arm import get_real_address_if_arm, is_arm_arch from ofrak.component.unpacker import UnpackerError from ofrak_type.architecture import InstructionSetMode from ofrak_type.range import Range from ofrak.core.basic_block import BasicBlock from ofrak.core.code_region import CodeRegionUnpacker, CodeRegion from ofrak.core.complex_block import ComplexBlock, ComplexBlockUnpacker from ofrak.core.data import DataWord from ofrak.resource import Resource from ofrak.service.resource_service_i import ResourceFilter from ofrak_angr.components.angr_analyzer import AngrAnalyzerConfig, AngrCodeRegionModifier from ofrak_angr.components.identifiers import AngrAnalysisResource from ofrak_angr.model import AngrAnalysis LOGGER = logging.getLogger(__name__) class UnpackerError(RuntimeError): """Base exception raised by unpackers.""" The provided code snippet includes necessary dependencies for implementing the `_get_bb_exit_addr_info` function. Write a Python function `def _get_bb_exit_addr_info( angr_analysis, angr_complex_block, angr_cb_basic_blocks, current_angr_bb, current_bb_idx, ) -> Tuple[bool, Optional[int]]` to solve the following problem: Get exit address info needed for BasicBlock creation: BasicBlock.is_exit_point, BasicBlock.exit_addr. Here is the function: def _get_bb_exit_addr_info( angr_analysis, angr_complex_block, angr_cb_basic_blocks, current_angr_bb, current_bb_idx, ) -> Tuple[bool, Optional[int]]: """ Get exit address info needed for BasicBlock creation: BasicBlock.is_exit_point, BasicBlock.exit_addr. 
""" # Fetch the exit point addr (if it exists) and sanity check the selection if current_angr_bb.codenode in angr_complex_block.endpoints: return True, None if current_bb_idx == len(angr_cb_basic_blocks) - 1: LOGGER.error( f"Exit point defined for BB 0x{current_angr_bb.addr:x} even though it is the last BB on the addr list" ) raise UnpackerError() # If no conditional branches taken, execution "falls through" to next basic block fallthrough_vaddr = get_real_address_if_arm( angr_analysis.project.arch, angr_cb_basic_blocks[current_bb_idx + 1].addr ) try: successor_vaddrs = [ get_real_address_if_arm(angr_analysis.project.arch, succ_codenode.addr) for succ_codenode, edge_info in angr_complex_block.graph.succ[ current_angr_bb.codenode ].items() ] except KeyError: LOGGER.warning( f"Cannot find any successors in angr for BB 0x{current_angr_bb.addr:x}, but since it " f"has a BB after it, assume that it still falls through to the next BB." ) return True, fallthrough_vaddr if fallthrough_vaddr in successor_vaddrs: # Basic block can fall through to next block, so the next block should be the exit addr return False, fallthrough_vaddr else: # Basic block can't fall through to next block, choose first succ as exit addr # For example: basic block ends in unconditional one-way branch (not a call) # If there are somehow multiple successors and the fallthrough block is not one of them, # choosing the first succ as the exit addr is arbitrary, but better choice is unclear. return False, successor_vaddrs[0]
Get exit address info needed for BasicBlock creation: BasicBlock.is_exit_point, BasicBlock.exit_addr.
15,293
import setuptools import pkg_resources from setuptools.command.egg_info import egg_info with open("README.md") as f: long_description = f.read() def read_requirements(requirements_path): with open(requirements_path) as requirements_handle: return [ str(requirement) for requirement in pkg_resources.parse_requirements(requirements_handle) ]
null
15,295
import os from dataclasses import dataclass from io import StringIO import yaml GHIDRA_CONFIG_PATH = os.path.join(os.path.dirname(__file__), "ofrak_ghidra.conf.yml") class OfrakGhidraConfig: ghidra_path: str ghidra_version: str ghidra_log_file: str ghidra_server_user: str ghidra_server_pass: str ghidra_analysis_host: str ghidra_analysis_port: int ghidra_repository_host: str ghidra_repository_port: int def config_help() -> str: return """ python -m ofrak_ghidra.config dump Dumps the current OFRAK Ghidra config as yaml to stdout. python -m ofrak_ghidra.config import <config-path> Loads a complete OFRAK Ghidra config from a path to a yaml file and saves it as the current Ghidra config. python -m ofrak_ghidra.config restore Restore the default OFRAK Ghidra settings. To change one or more of the options, the recommended process is: 1. Use `dump` to save the current settings to a temporary yaml file 2. Edit the temporary yaml file, changing settings as desired 3. Use `import` to then load that temporary yaml file The options are: ghidra_install: path: # Path to the root directory of the Ghidra install, e.g. /opt/rbs/ghidra_10.1.2_PUBLIC log_file: # Path to the file that Ghidra will use for its logs server: user: # User for the local Ghidra repository and server OFRAK will create pass: # Password for the local Ghidra repository and server OFRAK will create repository: host: # Host for the Ghidra repository OFRAK will create, e.g. localhost port: # Port for the Ghidra repository OFRAK will create analysis: host: # Host for the server OFRAK will create in a headless Ghidra instance, e.g. 
localhost port: # Host for the port OFRAK will create in a headless Ghidra instance """ def from_yaml(raw_yaml: str) -> "OfrakGhidraConfig": raw_config = yaml.safe_load(StringIO(raw_yaml)) return OfrakGhidraConfig( ghidra_path=raw_config["ghidra_install"]["path"], ghidra_version=raw_config["ghidra_install"]["version"], ghidra_log_file=raw_config["ghidra_install"]["log_file"], ghidra_server_user=raw_config["server"]["user"], ghidra_server_pass=raw_config["server"]["pass"], ghidra_repository_host=raw_config["server"]["repository"]["host"], ghidra_repository_port=int(raw_config["server"]["repository"]["port"]), ghidra_analysis_host=raw_config["server"]["analysis"]["host"], ghidra_analysis_port=int(raw_config["server"]["analysis"]["port"]), ) def to_yaml(self) -> str: raw_config = { "ghidra_install": { "path": self.ghidra_path, "log_file": self.ghidra_log_file, "version": self.ghidra_version, }, "server": { "user": self.ghidra_server_user, "pass": self.ghidra_server_pass, "repository": { "host": self.ghidra_repository_host, "port": self.ghidra_repository_port, }, "analysis": { "host": self.ghidra_analysis_host, "port": self.ghidra_analysis_port, }, }, } return yaml.safe_dump(raw_config) def restore_default_ghidra_config(): with open(DEFAULT_GHIDRA_CONFIG_PATH) as f: default_config = OfrakGhidraConfig.from_yaml(f.read()) save_ghidra_config(default_config) def load_ghidra_config() -> OfrakGhidraConfig: if not os.path.exists(GHIDRA_CONFIG_PATH): restore_default_ghidra_config() with open(GHIDRA_CONFIG_PATH) as f: return OfrakGhidraConfig.from_yaml(f.read())
null
15,296
import argparse from ofrak_ghidra.config.ofrak_ghidra_config import ( save_ghidra_config, OfrakGhidraConfig, load_ghidra_config, restore_default_ghidra_config, ) def _dump_config(args): print(load_ghidra_config().to_yaml())
null
15,297
import argparse from ofrak_ghidra.config.ofrak_ghidra_config import ( save_ghidra_config, OfrakGhidraConfig, load_ghidra_config, restore_default_ghidra_config, ) def _import_config(args): with open(args.config_path) as f: raw_new_config = f.read() new_config = OfrakGhidraConfig.from_yaml(raw_new_config) save_ghidra_config(new_config)
null
15,298
import argparse from ofrak_ghidra.config.ofrak_ghidra_config import ( save_ghidra_config, OfrakGhidraConfig, load_ghidra_config, restore_default_ghidra_config, ) def _restore_config(args): restore_default_ghidra_config()
null
15,299
import asyncio import os import re from collections import defaultdict from typing import Tuple, Dict, Union, List, Iterable from ofrak.core.architecture import ProgramAttributes from ofrak_type.architecture import InstructionSet, InstructionSetMode from ofrak.core.basic_block import BasicBlockUnpacker, BasicBlock from ofrak.core.instruction import Instruction from ofrak.resource import ResourceFactory, Resource from ofrak.service.component_locator_i import ComponentLocatorInterface from ofrak.service.data_service_i import DataServiceInterface from ofrak.service.resource_service_i import ResourceServiceInterface from ofrak_ghidra.components.blocks.unpackers import ( RE_STRIP_PRECEDING_ZERO, RE_CPY_TO_MOV, ) from ofrak_ghidra.constants import CORE_OFRAK_GHIDRA_SCRIPTS from ofrak_ghidra.ghidra_model import OfrakGhidraMixin, OfrakGhidraScript from ofrak_io.batch_manager import make_batch_manager class ProgramAttributes(ResourceAttributes, ArchInfo): """ Analyzer output containing architecture attributes of a program. """ The provided code snippet includes necessary dependencies for implementing the `_asm_fixups` function. Write a Python function `def _asm_fixups( base_mnemonic: str, base_operands: str, program_attrs: ProgramAttributes ) -> Tuple[str, str]` to solve the following problem: Fix up an assembly instruction from Ghidra, so that the toolchain can assemble it. :param base_mnemonic: original mnemonic from Ghidra :param base_operands: original operands from Ghidra :param program_attrs: ProgramAttributes for the binary analyzed in Ghidra :return: fixed up assembly instruction Here is the function: def _asm_fixups( base_mnemonic: str, base_operands: str, program_attrs: ProgramAttributes ) -> Tuple[str, str]: """ Fix up an assembly instruction from Ghidra, so that the toolchain can assemble it. 
:param base_mnemonic: original mnemonic from Ghidra :param base_operands: original operands from Ghidra :param program_attrs: ProgramAttributes for the binary analyzed in Ghidra :return: fixed up assembly instruction """ operands = base_operands.replace(",", ", ") operands = operands.replace("+ -", "- ") operands = re.sub(RE_STRIP_PRECEDING_ZERO, r"0x\1", operands) if program_attrs.isa is InstructionSet.ARM: # Convert the CPY Ghidra instruction to the more commonly used MOV instruction mnemonic = re.sub(RE_CPY_TO_MOV, "mov", base_mnemonic) elif program_attrs.isa is InstructionSet.M68K: # Convert the Ghidra assembly syntax (that corresponds to the manual's syntax) to AT&T syntax that the GNU toolchain uses mnemonic = base_mnemonic operands = operands.replace("sp", "%SP") operands = operands.replace("sr", "%SR") operands = operands.replace(" 0x", " #0x") operands = operands.replace(" -0x", " #-0x") for mnem in [ "moveq", "mov3q", "subq", "cmpi", "addq", "cmpi", "addi", "ori", "subi", "stop", ]: if mnem in mnemonic: operands = re.sub(r"^0x", r"#0x", operands) operand_list = re.split("(,)", operands) operands = "" for operand in operand_list: if not "0x" in operand: operand = re.sub(r"a([0-7])", r"%A\1", operand) operand = re.sub(r"d([0-7])[bw]?", r"%D\1", operand) operands += operand else: mnemonic = base_mnemonic return mnemonic, operands
Fix up an assembly instruction from Ghidra, so that the toolchain can assemble it. :param base_mnemonic: original mnemonic from Ghidra :param base_operands: original operands from Ghidra :param program_attrs: ProgramAttributes for the binary analyzed in Ghidra :return: fixed up assembly instruction
15,300
import argparse import os import stat import subprocess import sys from ofrak_ghidra.constants import ( GHIDRA_START_SERVER_SCRIPT, GHIDRA_PATH, CORE_OFRAK_GHIDRA_SCRIPTS, GHIDRA_USER, GHIDRA_PASS, GHIDRA_REPOSITORY_HOST, GHIDRA_REPOSITORY_PORT, ) def _run_ghidra_server(*args): if sys.platform == "linux" or sys.platform == "darwin": os.chmod( GHIDRA_START_SERVER_SCRIPT, os.stat(GHIDRA_START_SERVER_SCRIPT).st_mode | stat.S_IEXEC ) subprocess.call( [ GHIDRA_START_SERVER_SCRIPT, GHIDRA_PATH, CORE_OFRAK_GHIDRA_SCRIPTS, GHIDRA_USER, GHIDRA_PASS, GHIDRA_REPOSITORY_HOST, str(GHIDRA_REPOSITORY_PORT), ] ) else: raise NotImplementedError(f"Native OFRAK Ghidra server not supported for {sys.platform}!")
null
15,301
import argparse import os import stat import subprocess import sys from ofrak_ghidra.constants import ( GHIDRA_START_SERVER_SCRIPT, GHIDRA_PATH, CORE_OFRAK_GHIDRA_SCRIPTS, GHIDRA_USER, GHIDRA_PASS, GHIDRA_REPOSITORY_HOST, GHIDRA_REPOSITORY_PORT, ) def _stop_ghidra_server(*args): if sys.platform == "linux" or sys.platform == "darwin": subprocess.call([os.path.join(GHIDRA_PATH, "server", "ghidraSvr"), "stop"]) else: raise NotImplementedError(f"Native OFRAK Ghidra server not supported for {sys.platform}!")
null
15,303
import logging import re from dataclasses import dataclass from typing import Dict, Tuple, Optional, Iterable from capstone import ( Cs, CS_ARCH_ARM64, CS_ARCH_ARM, CS_ARCH_X86, CS_ARCH_PPC, CS_ARCH_MIPS, CS_MODE_BIG_ENDIAN, CS_MODE_LITTLE_ENDIAN, CS_MODE_THUMB, CS_MODE_ARM, CS_MODE_32, CS_MODE_64, CS_MODE_16, CsError, ) from ofrak_type.architecture import ( InstructionSet, SubInstructionSet, InstructionSetMode, ProcessorType, ) from ofrak.service.disassembler.disassembler_service_i import ( DisassemblerServiceInterface, DisassemblerServiceRequest, DisassemblyResult, RegisterUsageResult, DisassemblerRegisterUsageSupportError, DisassemblerArchSupportError, ) from ofrak_type.bit_width import BitWidth from ofrak_type.endianness import Endianness RE_REPRESENT_CONSTANTS_HEX = re.compile(r"(\W-?)([0-9]([^0-9x]|$))") RE_RENAME_FP_TO_R11 = re.compile(r"(\W?)fp(\W?)") def _asm_fixups(base_mnemonic: str, base_operands: str, isa: InstructionSet) -> Tuple[str, str]: operands = re.sub(RE_REPRESENT_CONSTANTS_HEX, r"\g<1>0x\g<2>", base_operands) if isa is InstructionSet.ARM: operands = re.sub(RE_RENAME_FP_TO_R11, r"\1r11\2", operands) mnemonic = base_mnemonic return mnemonic, operands
null
15,304
import logging import re from dataclasses import dataclass from typing import Dict, Tuple, Optional, Iterable from capstone import ( Cs, CS_ARCH_ARM64, CS_ARCH_ARM, CS_ARCH_X86, CS_ARCH_PPC, CS_ARCH_MIPS, CS_MODE_BIG_ENDIAN, CS_MODE_LITTLE_ENDIAN, CS_MODE_THUMB, CS_MODE_ARM, CS_MODE_32, CS_MODE_64, CS_MODE_16, CsError, ) from ofrak_type.architecture import ( InstructionSet, SubInstructionSet, InstructionSetMode, ProcessorType, ) from ofrak.service.disassembler.disassembler_service_i import ( DisassemblerServiceInterface, DisassemblerServiceRequest, DisassemblyResult, RegisterUsageResult, DisassemblerRegisterUsageSupportError, DisassemblerArchSupportError, ) from ofrak_type.bit_width import BitWidth from ofrak_type.endianness import Endianness class CapstoneDisassemblerType: isa: InstructionSet sub_isa: Optional[SubInstructionSet] bit_width: BitWidth endianness: Endianness processor: Optional[ProcessorType] mode: InstructionSetMode class DisassemblerServiceRequest: isa: InstructionSet sub_isa: Optional[SubInstructionSet] bit_width: BitWidth endianness: Endianness processor: Optional[ProcessorType] mode: InstructionSetMode data: bytes virtual_address: int def _get_cs_disam_type(request: DisassemblerServiceRequest) -> CapstoneDisassemblerType: return CapstoneDisassemblerType( request.isa, request.sub_isa, request.bit_width, request.endianness, request.processor, request.mode, )
null
15,305
import argparse from binaryninja.update import ( UpdateChannel, are_auto_updates_enabled, set_auto_updates_enabled, is_update_installation_pending, install_pending_update, ) from binaryninja import core_version def get_version(version_string: str): channel = list(UpdateChannel)[0] for version in channel.versions: if version.version == version_string: return version raise ValueError(f"Cannot find {version_string}")
null
15,306
from dataclasses import dataclass from enum import Enum from typing import List, Optional import argparse import os import subprocess import sys import pkg_resources import yaml class InstallTarget(Enum): INSTALL = "install" DEVELOP = "develop" class OfrakImageConfig: registry: str base_image_name: str image_name: str packages_paths: List[str] build_base: bool build_finish: bool # Whether to supply --no-cache to docker build commands no_cache: bool extra_build_args: Optional[List[str]] install_target: InstallTarget cache_from: List[str] entrypoint: Optional[str] def validate_serial_txt_existence(self): """ Check that the potential serial.txt file in `extra_build_args` exists on the filesystem. Otherwise, raise an explicit error message. """ if ( self.extra_build_args is not None and "id=serial,src=serial.txt" in self.extra_build_args and not os.path.exists("serial.txt") ): print( "Error: file serial.txt not found.\n" "You need a valid BinaryNinja license file, and to extract the serial number from that file " "into a file named serial.txt in this directory.\n" "Refer to the documentation for more details." 
) sys.exit(1) def parse_args() -> OfrakImageConfig: parser = argparse.ArgumentParser() parser.add_argument("--config", required=True) parser.add_argument("--base", action="store_true") parser.add_argument("--finish", action="store_true") parser.add_argument("--no-cache", action="store_true") parser.add_argument( "--target", choices=[InstallTarget.DEVELOP.value, InstallTarget.INSTALL.value], default=InstallTarget.DEVELOP.value, ) parser.add_argument("--cache-from", action="append") args = parser.parse_args() with open(args.config) as file_handle: config_dict = yaml.safe_load(file_handle) image_config = OfrakImageConfig( config_dict["registry"], config_dict["base_image_name"], config_dict["image_name"], config_dict["packages_paths"], args.base, args.finish, args.no_cache, config_dict.get("extra_build_args"), InstallTarget(args.target), args.cache_from, config_dict.get("entrypoint"), ) image_config.validate_serial_txt_existence() return image_config
null
15,307
from dataclasses import dataclass from enum import Enum from typing import List, Optional import argparse import os import subprocess import sys import pkg_resources import yaml def check_package_contents(package_path: str): required_contents = [ package_path, os.path.join(package_path, "Dockerstub"), os.path.join(package_path, "Makefile"), ] if not all([os.path.exists(content) for content in required_contents]): raise ValueError( f"Package or required files (Dockerstub, Makefile) do not exist for " f"{os.path.abspath(package_path)}" ) return
null
15,308
from dataclasses import dataclass from enum import Enum from typing import List, Optional import argparse import os import subprocess import sys import pkg_resources import yaml class InstallTarget(Enum): class OfrakImageConfig: def validate_serial_txt_existence(self): def create_dockerfile_base(config: OfrakImageConfig) -> str: dockerfile_base_parts = [ "# syntax = docker/dockerfile:1.3", ] # Support multi-stage builds for package_path in config.packages_paths: dockerstage_path = os.path.join(package_path, "Dockerstage") if not os.path.exists(dockerstage_path): continue with open(dockerstage_path) as file_handle: dockerstub = file_handle.read() # Cannot use ENV here because of multi-stage build FROM, so replace direclty in Docerkstage contents dockerstub = dockerstub.replace("$PACKAGE_DIR", package_path) dockerfile_base_parts += [f"### {dockerstage_path}", dockerstub] dockerfile_base_parts += [ "FROM python:3.8-bullseye@sha256:e1cd369204123e89646f8c001db830eddfe3e381bd5c837df00141be3bd754cb", "", ] requirement_suffixes = ["", "-non-pypi"] if config.install_target is InstallTarget.DEVELOP: requirement_suffixes += ["-docs", "-test"] for package_path in config.packages_paths: dockerstub_path = os.path.join(package_path, "Dockerstub") with open(dockerstub_path) as file_handle: dockerstub = file_handle.read() dockerfile_base_parts += [ f"### {dockerstub_path}", f"ENV PACKAGE_PATH={package_path}", dockerstub, ] # Collect python dependencies python_reqs = [] for suff in requirement_suffixes: requirements_path = os.path.join(package_path, f"requirements{suff}.txt") if not os.path.exists(requirements_path): continue with open(requirements_path) as requirements_handle: python_reqs += [ str(requirement) for requirement in pkg_resources.parse_requirements(requirements_handle) ] if python_reqs: dockerfile_base_parts += [ f"### Python dependencies from the {package_path} requirements file[s]", "RUN python3 -m pip install --upgrade pip &&\\", " python3 -m pip install '" + "' 
'".join(python_reqs) + "'", "", ] return "\n".join(dockerfile_base_parts)
null
15,309
from dataclasses import dataclass from enum import Enum from typing import List, Optional import argparse import os import subprocess import sys import pkg_resources import yaml GIT_COMMIT_HASH = ( subprocess.check_output(["git", "rev-parse", "--short=8", "HEAD"]).decode("ascii").strip() ) class OfrakImageConfig: registry: str base_image_name: str image_name: str packages_paths: List[str] build_base: bool build_finish: bool # Whether to supply --no-cache to docker build commands no_cache: bool extra_build_args: Optional[List[str]] install_target: InstallTarget cache_from: List[str] entrypoint: Optional[str] def validate_serial_txt_existence(self): """ Check that the potential serial.txt file in `extra_build_args` exists on the filesystem. Otherwise, raise an explicit error message. """ if ( self.extra_build_args is not None and "id=serial,src=serial.txt" in self.extra_build_args and not os.path.exists("serial.txt") ): print( "Error: file serial.txt not found.\n" "You need a valid BinaryNinja license file, and to extract the serial number from that file " "into a file named serial.txt in this directory.\n" "Refer to the documentation for more details." 
) sys.exit(1) def create_dockerfile_finish(config: OfrakImageConfig) -> str: full_base_image_name = "/".join((config.registry, config.base_image_name)) dockerfile_finish_parts = [ f"FROM {full_base_image_name}:{GIT_COMMIT_HASH}\n\n", f"ARG OFRAK_SRC_DIR=/\n", ] package_names = list() for package_path in config.packages_paths: package_name = os.path.basename(package_path) package_names.append(package_name) dockerfile_finish_parts.append(f"ADD {package_path} $OFRAK_SRC_DIR/{package_name}\n") dockerfile_finish_parts.append("\nWORKDIR /\n") dockerfile_finish_parts.append("ARG INSTALL_TARGET\n") develop_makefile = "\\n\\\n".join( [ "$INSTALL_TARGET:", "\\n\\\n".join( [f"\tmake -C {package_name} $INSTALL_TARGET" for package_name in package_names] ), "\\n", ] ) dockerfile_finish_parts.append(f'RUN printf "{develop_makefile}" >> Makefile\n') dockerfile_finish_parts.append("RUN make $INSTALL_TARGET\n\n") finish_makefile = "\\n\\\n".join( [ "test:", "\\n\\\n".join([f"\tmake -C {package_name} test" for package_name in package_names]), "\\n", ] ) dockerfile_finish_parts.append(f'RUN printf "{finish_makefile}" >> Makefile\n') if config.entrypoint is not None: dockerfile_finish_parts.append(f"ENTRYPOINT {config.entrypoint}") return "".join(dockerfile_finish_parts)
null
15,311
import asyncio import dataclasses import hashlib import logging from inspect import isawaitable from typing import ( BinaryIO, Iterable, List, Optional, Tuple, Type, TypeVar, cast, Union, Awaitable, Sequence, Callable, Set, Pattern, overload, ) from ofrak.component.interface import ComponentInterface from ofrak.model.component_model import ComponentContext, CC, ComponentRunResult from ofrak.model.data_model import DataPatch from ofrak.model.job_model import ( JobRunContext, ) from ofrak.model.job_request_model import ( JobAnalyzerRequest, JobComponentRequest, JobMultiComponentRequest, ) from ofrak.model.resource_model import ( ResourceAttributes, ResourceModel, MutableResourceModel, ResourceContext, Data, ) from ofrak.model.tag_model import ResourceTag from ofrak.model.viewable_tag_model import ( ViewableResourceTag, ResourceViewInterface, ResourceViewContext, ) from ofrak.service.data_service_i import DataServiceInterface from ofrak.service.dependency_handler import DependencyHandler from ofrak.service.id_service_i import IDServiceInterface from ofrak.service.job_service_i import JobServiceInterface from ofrak.service.resource_service_i import ( ResourceServiceInterface, ResourceFilter, ResourceSort, ) from ofrak_type.error import NotFoundError, InvalidStateError from ofrak_type.range import Range class ComponentContext: component_id: bytes component_version: int access_trackers: Dict[bytes, ComponentResourceAccessTracker] = field( default_factory=lambda: defaultdict(ComponentResourceAccessTracker) ) modification_trackers: Dict[bytes, ComponentResourceModificationTracker] = field( default_factory=lambda: defaultdict(ComponentResourceModificationTracker) ) resources_created: Set[bytes] = field(default_factory=set) resources_deleted: Set[bytes] = field(default_factory=set) def mark_resource_modified(self, r_id: bytes): # Creates a new tracker if none exists, and leaves tracker untouched if it already exists _ = self.modification_trackers[r_id] def 
get_modified_resource_ids(self, include_deleted=False) -> Set[bytes]: modified_resource_ids = set(self.modification_trackers.keys()) if not include_deleted: modified_resource_ids = modified_resource_ids.difference(self.resources_deleted) return modified_resource_ids class DataPatch: """ Representation of a binary patch to part of a resource's data. :ivar range: The slice of the binary blob to replace with new data (zero-length is allowed) :ivar data_id: ID of the binary blob to apply this path to :ivar data: The bytes to replace old data with (zero-length is allowed) """ range: Range data_id: bytes data: bytes def __repr__(self): return f"DataPatch({self.data_id.hex()}, {self.range}, {len(self.data)})" class MutableResourceModel(ResourceModel): __slots__ = "is_modified", "_diff", "is_deleted" def __init__( self, id: bytes, data_id: Optional[bytes] = None, parent_id: Optional[bytes] = None, tags: Optional[Set[ResourceTag]] = None, attributes: Optional[Dict[Type[ResourceAttributes], ResourceAttributes]] = None, data_dependencies: Optional[Dict[ResourceAttributeDependency, Set[Range]]] = None, attribute_dependencies: Optional[ Dict[Type[ResourceAttributes], Set[ResourceAttributeDependency]] ] = None, component_versions: Optional[Dict[bytes, int]] = None, components_by_attributes: Optional[ModelComponentsByAttributesType] = None, ): super().__init__( id, data_id, parent_id, tags, attributes, data_dependencies, attribute_dependencies, component_versions, components_by_attributes, ) self.is_modified = False self.is_deleted = False self._diff: Optional[ResourceModelDiff] = None def __hash__(self): return self.id.__hash__() def diff(self): if not self._diff: self._diff = ResourceModelDiff(self.id) return self._diff def diff(self, value): self._diff = value def from_model(model: ResourceModel): return MutableResourceModel( model.id, model.data_id, model.parent_id, model.tags, dict(model.attributes), ResourceModel._clone_data_dependencies(model.data_dependencies), 
ResourceModel._clone_dependencies(model.attribute_dependencies), dict(model.component_versions), dict(model.components_by_attributes), ) def add_tag(self, tag: ResourceTag) -> Set[ResourceTag]: if tag in self.tags: return set() self.is_modified = True self.tags.add(tag) self.diff.tags_added.add(tag) new_tags = {tag} for base_tag in tag.base_tags(): new_tags.update(self.add_tag(base_tag)) return new_tags def remove_tag(self, tag: ResourceTag): self.is_modified = True self.tags.remove(tag) self.diff.tags_removed.add(tag) def add_attributes(self, attributes: ResourceAttributes): attributes_type = type(attributes) prev_attributes = self.attributes.get(attributes_type) if prev_attributes is not None: if prev_attributes == attributes: return else: self.diff.attributes_removed.add(attributes_type) self.is_modified = True self.attributes[attributes_type] = attributes self.diff.attributes_added[attributes_type] = attributes def remove_attributes(self, attributes_type: Type[ResourceAttributes]): self.is_modified = True del self.attributes[attributes_type] self.diff.attributes_removed.add(attributes_type) def add_attribute_dependency( self, attribute_type: Type[ResourceAttributes], dependency: ResourceAttributeDependency ): self.is_modified = True self.attribute_dependencies[attribute_type].add(dependency) self.diff.attribute_dependencies_added.add((attribute_type, dependency)) def add_data_dependency(self, dependency: ResourceAttributeDependency, data_range: Range): self.is_modified = True self.data_dependencies[dependency].add(data_range) self.diff.data_dependencies_added.add((dependency, data_range)) def remove_dependency(self, dependency: ResourceAttributeDependency): if dependency in self.data_dependencies: self.is_modified = True del self.data_dependencies[dependency] self.diff.data_dependencies_removed.add(dependency) for attribute_type, attribute_dependencies in self.attribute_dependencies.items(): if dependency in attribute_dependencies: self.is_modified = True 
attribute_dependencies.remove(dependency) self.diff.attribute_dependencies_removed.add((attribute_type, dependency)) def add_component( self, component_id: bytes, version: int, ): """ Mark that a component has run on a resource. """ self.is_modified = True self.component_versions[component_id] = version self.diff.component_versions_added.add((component_id, version)) def add_component_for_attributes( self, component_id: bytes, version: int, attribute_type: Type[ResourceAttributes], ): """ Mark that a component has added attributes to a resource """ self.components_by_attributes[attribute_type] = (component_id, version) self.diff.attributes_component_added.add((attribute_type, component_id, version)) def remove_component( self, component_id: bytes, attribute_type: Optional[Type[ResourceAttributes]] ): if component_id in self.component_versions: self.is_modified = True del self.component_versions[component_id] self.diff.component_versions_removed.add(component_id) if attribute_type in self.components_by_attributes: self.is_modified = True del self.components_by_attributes[attribute_type] self.diff.attributes_component_removed.add(attribute_type) def reset(self, model: ResourceModel): self.id = model.id self.data_id = model.data_id self.parent_id = model.parent_id self.tags = model.tags self.attributes = dict(model.attributes) self.data_dependencies: Dict[ ResourceAttributeDependency, Set[Range] ] = ResourceModel._clone_data_dependencies(model.data_dependencies) self.attribute_dependencies: ModelAttributeDependenciesType = ( ResourceModel._clone_dependencies(model.attribute_dependencies) ) self.component_versions = dict(model.component_versions) self.components_by_attributes = dict(model.components_by_attributes) self.is_modified = False self.diff = ResourceModelDiff(self.id) def save(self): self.is_modified = False diff = self.diff self.diff = ResourceModelDiff(self.id) return diff class ResourceContext(ABC): """ Resource context. 
""" def __init__(self, resource_models: MutableMapping[bytes, MutableResourceModel]): self.resource_models = resource_models class ResourceViewContext: ViewByTag = MutableMapping[ViewableResourceTag, ResourceViewInterface] def __init__(self): self.views_by_resource: MutableMapping[bytes, ResourceViewContext.ViewByTag] = defaultdict( dict ) def has_view(self, resource_id: bytes, view_type: ViewableResourceTag) -> bool: return view_type in self.views_by_resource[resource_id] def get_view(self, resource_id: bytes, view_type: Type[RVI]) -> RVI: return self.views_by_resource[resource_id][view_type] # type: ignore def add_view(self, resource_id: bytes, view: ResourceViewInterface): self.views_by_resource[resource_id][type(view)] = view def update_views( self, modified: Iterable[bytes], deleted: Iterable[bytes], resource_context: ResourceContext ): for resource_id in modified: views_in_context = self.views_by_resource[resource_id] for view in views_in_context.values(): updated_model = resource_context.resource_models[resource_id] fresh_view = view.create(updated_model) for field in dataclasses.fields(fresh_view): if field.name == "_resource": continue setattr(view, field.name, getattr(fresh_view, field.name)) for resource_id in deleted: views_in_context = self.views_by_resource[resource_id] for view in views_in_context.values(): view.set_deleted() class DataServiceInterface(AbstractOfrakService, metaclass=ABCMeta): async def create_root(self, data_id: bytes, data: bytes) -> DataModel: """ Create a root data model with its own data bytes. :param data_id: Unique ID for the new data model :param data: Binary data belonging to the new data model :return: The new data model object :raises AlreadyExistError: if `data_id` is already associated with a model """ raise NotImplementedError() async def create_mapped( self, data_id: bytes, parent_id: bytes, range_in_parent: Range, ) -> DataModel: """ Create a new data model which is mapped into another data model. 
That is, it does not hold its own data, but defines its own data as a subsection of another model's data. The model it maps from (`parent_id`) may be a root model or another mapped model; if `parent_id` is another mapped node, the new mapped node created here will be mapped to the same root as `parent_id` at a range translated to be within `parent_id` as defined by `range_in_parent`. :param data_id: Unique ID for the new data model :param parent_id: ID of the data model to map the new model into :param range_in_parent: Range in `parent_id` which the new model will map :return: The new data model object :raises AlreadyExistError: if `data_id` is already associated with a model :raises NotFoundError: if `parent_id` is not associated with any known model """ raise NotImplementedError() async def get_by_id(self, data_id: bytes) -> DataModel: """ Get the data model object associated with the given ID. :param data_id: A unique ID for a data model :return: The model associated with `data_id` :raises NotFoundError: if `data_id` is not associated with any known model """ raise NotImplementedError() async def get_by_ids(self, data_ids: Iterable[bytes]) -> Iterable[DataModel]: """ Get the data models object associated with the given IDs. :param data_ids: Multiple unique IDs for data models :return: The models associated with each ID in `data_ids`, in the same order their IDs were provided :raises NotFoundError: if any ID in `data_ids` is not associated with any known model """ raise NotImplementedError() async def get_data_length(self, data_id: bytes) -> int: """ Return the length of a single data model. :param data_id: A unique ID for a data model :return: The length of the data included in the model :raises NotFoundError: if `data_id` is not associated with any known model """ raise NotImplementedError() async def get_data_range_within_root(self, data_id: bytes) -> Range: """ Get the range that a model maps in its root. 
If the model specified by `data_id` is itself a root, returns a range covering that whole root (i.e. Range(0, length)). :param data_id: A unique ID for a data model :return: Range that `data_id` maps in its root :raises NotFoundError: if `data_id` is not associated with any known model """ raise NotImplementedError() async def get_range_within_other(self, data_id: bytes, within_data_id: bytes) -> Range: """ Get the range representing the intersection between two data models, assuming they are both mapped into the same root data. Either of `data_id` or `within_data_id` may be roots, but they cannot both be roots (unless they are the same). :param data_id: A unique ID for a data model :param within_data_id: A unique ID for a data model :return: The range where `data_id`'s model intersects `within_data_id`'s model :raises NotFoundError: if `data_id` or `within_data_id` is not associated with any known model :raises ValueError: if `data_id` is not mapped into `within_data_id` or they do not share the same root """ raise NotImplementedError() async def get_data(self, data_id: bytes, data_range: Optional[Range] = None) -> bytes: """ Get the data (or section of data) of a model. The optional `data_range` parameter specifies which a range within `data_id`'s data to return; if this range actually falls outside the boundaries of `data_id`'s data, an empty bytestring is returned. :param data_id: A unique ID for a data model :param data_range: An optional range within the model's data to return :return: Bytes of data from the model associated with `data_id` - all bytes by default, a specific slice if `data_range` is provided, and empty bytes if `data_range` is provided but is outside the modeled data. 
:raises NotFoundError: if `data_id` is not associated with any known model """ raise NotImplementedError() async def apply_patches( self, patches: List[DataPatch], ) -> List[DataPatchesResult]: """ Modify the data of a number of models, modeled as a list of `DataPatch` structures each specifying: a target data model (by ID), new data, and a range to overwrite with the new data. The listed patches are applied in order, so that subsequent patches may effectively 'erase' an earlier patch. Patches may resize data if the new data is not the same size as the range it is overwriting. Such patches create additional restrictions: 1. If `patches` contains a patch that resizes a range of data, no subsequent patch in `patches` is allowed to modify that resized range. 2. Resizing patches are not allowed to overwrite ranges that contain the borders of any data models. For example, if model B maps Range(0, 6) of model A, a patch that resizes Range(4, 10) of model A is not allowed (whether it increases or decreases the size). :param patches: A list of patch data structures to be applied, in order :return: A list of data structures describing all modified ranges of each data model affected by `patches` :raises NotFoundError: if any data ID in the `patches` list is not associated with any known model :raises PatchOverlapError: if a patch targets a region of data which has already been modified by a patch which resized that region :raises PatchOverlapError: if a patch would resize a region of data which contains the start or end of one or more data models """ raise NotImplementedError() async def delete_models(self, data_ids: Iterable[bytes]) -> None: """ Delete one or more data models. If a root model is deleted, all models mapped into that root are also deleted. 
:param data_ids: Multiple unique IDs for data models :raises NotFoundError: if any ID in `data_ids` is not associated with any known model """ raise NotImplementedError() async def search( self, data_id: bytes, query: Pattern[bytes], start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[Tuple[int, bytes], ...]: ... async def search( self, data_id: bytes, query: bytes, start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[int, ...]: ... async def search(self, data_id, query, start=None, end=None, max_matches=None): """ Search for some data in one of the models. The query may be a regex pattern (a return value of `re.compile`). If the query is a regex pattern, returns a tuple of pairs with both the offset of the match and the contents of the match itself. If the query is plain bytes, a list of only the match offsets are returned. :param data_id: Data model to search :param query: Plain bytes to exactly match or a regex pattern to search for :param start: Start offset in the data model to begin searching :param end: End offset in the data model to stop searching :param max_matches: Maximum number of matches to return :return: A tuple of offsets matching a plain bytes query, or a list of (offset, match) pairs for a regex pattern query """ raise NotImplementedError() class DependencyHandler: """ Stateless handler for dealing with creating and invalidating dependencies. Intended for one-time use in a component, or possible re-use in a resource. 
""" def __init__( self, resource_service: ResourceServiceInterface, data_service: DataServiceInterface, component_context: ComponentContext, resource_context: ResourceContext, ): self._resource_service = resource_service self._data_service = data_service self._component_context = component_context self._resource_context = resource_context async def map_data_ids_to_resources( self, data_ids: Iterable[bytes] ) -> Dict[bytes, MutableResourceModel]: resources_by_data_id = dict() for resource_id, resource_m in self._resource_context.resource_models.items(): if resource_m.data_id is not None: resources_by_data_id[resource_m.data_id] = resource_m missing_data_ids = set() for data_id in data_ids: if data_id not in resources_by_data_id: missing_data_ids.add(data_id) missing_resources = await self._resource_service.get_by_data_ids(missing_data_ids) for missing_resource in missing_resources: missing_resource_data_id = cast(bytes, missing_resource.data_id) if missing_resource_data_id in resources_by_data_id: raise ValueError("Something is wrong in the implementation") mutable_resource = MutableResourceModel.from_model(missing_resource) resources_by_data_id[missing_resource_data_id] = mutable_resource self._resource_context.resource_models[missing_resource.id] = mutable_resource return resources_by_data_id async def handle_post_patch_dependencies( self, patch_results: List[DataPatchesResult] ) -> Set[MutableResourceModel]: modified_resources = set() # Create look up maps for resources and dependencies resources_by_data_id = await self.map_data_ids_to_resources( patch_result.data_id for patch_result in patch_results ) all_data_ids = [patch_result.data_id for patch_result in patch_results] models_by_data_id: Dict[bytes, DataModel] = { data_id: model for data_id, model in zip( all_data_ids, await self._data_service.get_by_ids(all_data_ids) ) } # Go through and update all models' Data for data_patch_result in patch_results: resource_m = 
resources_by_data_id[data_patch_result.data_id] data_m = models_by_data_id[data_patch_result.data_id] resource_m.add_attributes(Data(data_m.range.start, data_m.range.length())) modified_resources.add(resource_m) unhandled_dependencies: Set[ResourceAttributeDependency] = set() # Figure out which components results must be invalidated based on data changes for data_patch_result in patch_results: resource_m = resources_by_data_id[data_patch_result.data_id] removed_data_dependencies = set() # Iterate over the resource's data dependencies to find one that's affected by one of # the patch range for dependency, dependency_ranges in resource_m.data_dependencies.items(): # Iterate over the resource's data dependency ranges to find a range that overlaps # the patch range for dependency_range in dependency_ranges: for patch_range in data_patch_result.patches: if not dependency_range.overlaps(patch_range): continue LOGGER.debug( f"Invalidating results of {dependency.component_id!r} on resource " f"{dependency.dependent_resource_id.hex()} due to a data change on " f"resource {resource_m.id.hex()}" ) # That dependency is invalid, we can move on to the next dependency unhandled_dependencies.add(dependency) removed_data_dependencies.add(dependency) break # If a previous dependency range was found to affect the dependency, no need to # continue iterating the ranges if dependency in removed_data_dependencies: break for removed_data_dependency in removed_data_dependencies: resource_m.remove_dependency(removed_data_dependency) modified_resources.add(resource_m) # Recursively invalidate component results based on other components that were invalidated handled_dependencies: Set[ResourceAttributeDependency] = set() await self._invalidate_dependencies( handled_dependencies, unhandled_dependencies, modified_resources ) return modified_resources def create_component_dependencies( self, component_id: bytes, component_version: int, ): """ Register dependencies between the component and the 
resources it interacts with. This may not even be necessary since Resource.add_attributes does this anyway... """ self._validate_resource_context_complete() for resource_id in self._component_context.resources_created: new_resource_m = self._resource_context.resource_models[resource_id] for attributes in new_resource_m.attributes.keys(): new_resource_m.add_component_for_attributes( component_id, component_version, attributes ) def create_resource_dependencies( self, component_id: bytes, ): """ Register dependencies between a resource with some attributes and the resources which were accessed in the context where these attributes were added. When a component runs, this method is called to record what data was accessed by the component and what resource attributes from other resources were accessed within that component. These registered dependencies allow for OFRAK to not rerun analyzers when the resource and its dependencies have not changed. Whenever a [Modifier][ofrak.component.modifier.Modifier] is run, these resource attribute dependencies are invalidated so as to force analysis to be rerun. 
:param bytes component_id: """ self._validate_resource_context_complete() resource_dependencies = [] # Create dependency for each attribute on newly created resources for resource_id in self._component_context.resources_created: new_resource_m = self._resource_context.resource_models[resource_id] for attributes in new_resource_m.attributes.keys(): resource_dependencies.append( ResourceAttributeDependency( resource_id, component_id, attributes, ) ) # Create dependency for each new attribute on modified resources for resource_id in self._component_context.modification_trackers.keys(): modified_resource_m = self._resource_context.resource_models[resource_id] for attrs_added in modified_resource_m.diff.attributes_added.keys(): resource_dependencies.append( ResourceAttributeDependency( resource_id, component_id, attrs_added, ) ) # Add dependencies to all resources which were accessed for resource_id, access_tracker in self._component_context.access_trackers.items(): if resource_id in self._component_context.resources_created: # Avoid all the newly created components depending on each other continue accessed_resource_m = self._resource_context.resource_models[resource_id] merged_accessed_ranges = Range.merge_ranges(access_tracker.data_accessed) # Add attributes dependency on all accessed attributes for attributes_accessed in access_tracker.attributes_accessed: for resource_dependency in resource_dependencies: accessed_resource_m.add_attribute_dependency( attributes_accessed, resource_dependency ) # Add data dependency on all accessed data for accessed_range in merged_accessed_ranges: for resource_dependency in resource_dependencies: accessed_resource_m.add_data_dependency(resource_dependency, accessed_range) def _validate_resource_context_complete(self): for resource_id in self._component_context.resources_created: if resource_id not in self._resource_context.resource_models: raise ValueError( f"The resource model {resource_id.hex()} was created but it's not in the " 
f"resource context" ) async def _fetch_missing_resources(self, resource_ids: Iterable[bytes]): missing_resource_ids = set() # Fetch all the resources referred to by the unhandled dependencies for resource_id in resource_ids: if resource_id not in self._resource_context.resource_models: missing_resource_ids.add(resource_id) missing_resources = await self._resource_service.get_by_ids(missing_resource_ids) for missing_resource in missing_resources: self._resource_context.resource_models[ missing_resource.id ] = MutableResourceModel.from_model(missing_resource) async def _invalidate_dependencies( self, handled_dependencies: Set[ResourceAttributeDependency], unhandled_dependencies: Set[ResourceAttributeDependency], resources_modified: Set[MutableResourceModel], ): """ Invalidate the unhandled resource attribute dependencies. :param Set[ResourceAttributeDependency] handled_dependencies: A set of dependencies that have already been invalidated :param Set[ResourceAttributeDependency] unhandled_dependencies: A set of dependencies that should be invalidated """ if len(unhandled_dependencies) == 0: return dependent_resource_ids = { dependency.dependent_resource_id for dependency in unhandled_dependencies } deleted_dependent_ids = { r_id for r_id, currently_exists in zip( dependent_resource_ids, await self._resource_service.verify_ids_exist(dependent_resource_ids), ) if not currently_exists } await self._fetch_missing_resources( dependent_resource_ids.difference(deleted_dependent_ids) ) # Invalidate the resources' attributes referred to by the unhandled_dependencies next_unhandled_dependencies = set() for dependency in unhandled_dependencies: # It's possible that the attribute was already invalidated from an earlier run if dependency in handled_dependencies: continue # If the dependent resource was deleted, don't need to propagate dependency invalidation if dependency.dependent_resource_id in deleted_dependent_ids: handled_dependencies.add(dependency) continue try: resource_m 
= self._resource_context.resource_models[ dependency.dependent_resource_id ] except KeyError as e: missing_model = await self._resource_service.get_by_id( dependency.dependent_resource_id ) resource_m = MutableResourceModel.from_model(missing_model) # Invalidate the attributes on the resource handled_dependencies.add(dependency) # The component id is not necessarily present. It could have been invalidated already # by a previous patch that impacted other resources that this resource depends on. if resource_m.get_component_id_by_attributes(dependency.attributes): resource_m.remove_component(dependency.component_id, dependency.attributes) self._component_context.mark_resource_modified(resource_m.id) resources_modified.add(resource_m) # Find other dependencies to invalidate due to the invalidation of the attributes invalidated_dependencies = set() for next_dependency in resource_m.attribute_dependencies[dependency.attributes]: # Make sure the dependency wasn't already handled if next_dependency not in handled_dependencies: LOGGER.debug( f"Invalidating attributes {next_dependency.attributes.__name__} from " f"component {next_dependency.component_id!r} on resource " f"{next_dependency.dependent_resource_id.hex()} due to " f"attributes {dependency.attributes.__name__} on resource" f" {dependency.dependent_resource_id.hex()} being invalidated" ) invalidated_dependencies.add(next_dependency) for invalidated_dependency in invalidated_dependencies: resource_m.remove_dependency(invalidated_dependency) self._component_context.mark_resource_modified(resource_m.id) resources_modified.add(resource_m) next_unhandled_dependencies.update(invalidated_dependencies) await self._invalidate_dependencies( handled_dependencies, next_unhandled_dependencies, resources_modified, ) class ResourceServiceInterface(AbstractOfrakService, metaclass=ABCMeta): """ Stores [ResourceModels][ofrak.model.resource_model.ResourceModel] in a tree structure and provides methods to walk that tree according to 
    given [ResourceFilters][ofrak.service.resource_service_i.ResourceFilter].

    Resource instantiation is handled by the [ResourceFactory][ofrak.resource.ResourceFactory].
    """

    async def create(self, resource: ResourceModel) -> ResourceModel:
        """
        Add a [ResourceModel][ofrak.model.resource_model.ResourceModel] to the resource service
        database according to the given model. If the ``resource`` model says it has a parent,
        ``resource`` will be added as a child of that parent.

        :param resource: The resource model to add to the database

        :raises AlreadyExistError: If ``resource`` has an ID which already exists in the database
        :raises NotFoundError: If ``resource`` has a parent ID but no resource with that ID exists

        :return: The same model which was passed in, with no changes
        """
        raise NotImplementedError()

    async def get_root_resources(self) -> Iterable[ResourceModel]:
        """
        Get all of the root resources known to this resource service. Any resource created
        without a parent will be returned by this method.

        :return: All resources with no parents
        """
        raise NotImplementedError()

    async def verify_ids_exist(self, resource_ids: Iterable[bytes]) -> Iterable[bool]:
        """
        Check if a number of resource IDs exist in the resource store. This is useful for
        filtering out IDs of resources which have been deleted.

        :param resource_ids: Iterable of resource IDs to check for

        :return: A boolean for each resource ID, True if it exists in the store and False
        otherwise
        """
        raise NotImplementedError()

    async def get_by_data_ids(self, data_ids: Iterable[bytes]) -> Iterable[ResourceModel]:
        """
        Get the resource models with a given sequence of data IDs.

        :param data_ids: A list of valid data IDs

        :raises NotFoundError: If there is not a resource for all of the IDs in `data_ids`

        :return: A sequence of resource models each with one of the given data IDs, in the same
        order which `data_ids` was given in.
        """
        raise NotImplementedError()

    async def get_by_ids(self, resource_ids: Iterable[bytes]) -> Iterable[ResourceModel]:
        """
        Get the resource models with a given sequence of resource IDs.

        :param resource_ids: A list of valid resource IDs

        :raises NotFoundError: If there is not a resource for all of the IDs in `resource_ids`

        :return: A sequence of resource models each with one of the given resource IDs, in the
        same order which `resource_ids` was given in.
        """
        raise NotImplementedError()

    async def get_by_id(self, resource_id: bytes) -> ResourceModel:
        """
        Get the resource model with a given resource ID.

        :param resource_id: A valid resource ID

        :raises NotFoundError: If there is not a resource with resource ID `resource_id`

        :return: The resource model with ID matching `resource_id`
        """
        raise NotImplementedError()

    async def get_depths(self, resource_ids: Iterable[bytes]) -> Iterable[int]:
        """
        Get the depth of each resource in `resource_ids`.

        :param resource_ids: A list of valid resource IDs

        :raises NotFoundError: If there is not a resource for all of the IDs in `resource_ids`

        :return: A sequence of resource model depths, in the same order which `resource_ids`
        was given in.
        """
        raise NotImplementedError()

    async def get_ancestors_by_id(
        self,
        resource_id: bytes,
        max_count: int = -1,
        r_filter: Optional[ResourceFilter] = None,
    ) -> Iterable[ResourceModel]:
        """
        Get the resource models of the ancestors of a resource with a given ID. These ancestors
        may be filtered by an optional filter argument. A maximum count of ancestors may also be
        given, to cap the number of (filtered or unfiltered) ancestors returned.

        :param resource_id: ID of resource to get ancestors of
        :param max_count: Optional argument to cap the number of models returned; if set to -1
        (default) then any number of ancestors may be returned
        :param r_filter: Optional resource filter for the resource models returned; if set to
        `None`, all ancestors may be returned (the model for `resource_id` is excluded),
        otherwise all ancestors matching the filter may be returned (possibly including the
        model for `resource_id`), up to the maximum allowed by `max_count`

        :raises NotFoundError: If there is not a resource with resource ID `resource_id`

        :return: As many ancestors of `resource_id` matching `r_filter` as `max_count` allows,
        in order of reverse depth (deeper resources first, root last)
        """
        raise NotImplementedError()

    async def get_descendants_by_id(
        self,
        resource_id: bytes,
        max_count: int = -1,
        max_depth: int = -1,
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable[ResourceModel]:
        """
        Get the resource models of the descendants of a resource with a given ID. These
        descendants may be filtered by an optional filter argument. A maximum count of
        descendants may also be given, to cap the number of (filtered or unfiltered) descendants
        returned. A maximum depth may also be given, to limit how deep to search for
        descendants.

        :param resource_id: ID of resource to get descendants of
        :param max_count: Optional argument to cap the number of models returned; if set to -1
        (default) then any number of descendants may be returned
        :param max_depth: Optional argument to limit the depth to search for descendants; if set
        to -1 (default) then descendants of any depth may be returned; if set to 1, for example,
        only children of `resource_id` may be returned
        :param r_filter: Optional resource filter for the resource models returned; if set to
        `None` all descendants may be returned (the model for `resource_id` is excluded),
        otherwise all descendants matching the filter may be returned (possibly including the
        model for `resource_id`), up to the maximum allowed by `max_count`
        :param r_sort: Optional logic to order the returned descendants by the value of a
        specific attribute of each descendant

        :raises NotFoundError: If there is not a resource with resource ID `resource_id`

        :return: As many descendants of `resource_id` matching `r_filter` as `max_count` allows,
        in order specified by `r_sort`. If `r_sort` is None, no specific ordering is guaranteed.
        """
        raise NotImplementedError()

    async def get_siblings_by_id(
        self,
        resource_id: bytes,
        max_count: int = -1,
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable[ResourceModel]:
        """
        Get the resource models of the siblings of a resource with a given ID. These siblings
        may be filtered by an optional filter argument. A maximum count of siblings may also be
        given, to cap the number of (filtered or unfiltered) siblings returned.

        :param resource_id: ID of resource to get siblings of
        :param max_count: Optional argument to cap the number of models returned; if set to -1
        (default) then any number of siblings may be returned
        :param r_filter: Optional resource filter for the resource models returned; if set to
        None all siblings may be returned (the model for `resource_id` is excluded), otherwise
        all siblings matching the filter may be returned (possibly including the model for
        `resource_id`), up to the maximum allowed by `max_count`
        :param r_sort: Optional logic to order the returned siblings by the value of a specific
        attribute of each sibling

        :raises NotFoundError: If there is not a resource with resource ID `resource_id`
        :raises NotFoundError: If the resource with ID `resource_id` does not have siblings
        because it is a root

        :return: As many siblings of `resource_id` matching `r_filter` as `max_count` allows,
        in order specified by `r_sort`. If `r_sort` is None, no specific ordering is guaranteed.
        """
        raise NotImplementedError()

    async def update(self, resource_diff: ResourceModelDiff) -> ResourceModel:
        """
        Modify a stored resource model according to the differences in the given diff object.

        :param resource_diff: Diff object containing changes to a resource model, as well as the
        resource ID of the model to update

        :raises NotFoundError: If there is not a resource with resource ID matching the ID in
        `resource_diff`

        :return: The updated resource model (with changes applied)
        """
        raise NotImplementedError()

    async def update_many(
        self, resource_diffs: Iterable[ResourceModelDiff]
    ) -> Iterable[ResourceModel]:
        """
        Modify a stored resource model according to the differences in the given diff object.

        :param resource_diffs: Diff objects containing changes to resource models, as well as
        the resource ID of each model to update

        :raises NotFoundError: If there is not a resource with resource ID matching one of the
        IDs in `resource_diffs`

        :return: The updated resource models (with changes applied)
        """
        raise NotImplementedError()

    async def rebase_resource(self, resource_id: bytes, new_parent_id: bytes):
        """
        Move a resource which was a child to instead be a child of a different resource.

        :param resource_id: resource ID of the resource to rebase
        :param new_parent_id: resource ID of the new parent resource for `resource_id`

        :raises NotFoundError: If there is not a resource with resource ID `resource_id`
        :raises NotFoundError: If there is not a resource with resource ID `new_parent_id`
        """
        raise NotImplementedError()

    async def delete_resource(self, resource_id: bytes) -> Iterable[ResourceModel]:
        """
        Delete a resource by ID and all of its descendants, removing them from the database. If
        no resource for the given ID is found, it is assumed the resource has already been
        deleted (does not raise an error).

        :param resource_id: The ID of the resource to delete

        :return: all of the models that were deleted
        """
        raise NotImplementedError()

    async def delete_resources(self, resource_ids: Iterable[bytes]) -> Iterable[ResourceModel]:
        """
        Delete multiple resources by ID and all of their descendants, removing them from the
        database. If no resource for any given ID is found, it is assumed the resource has
        already been deleted (does not raise an error).

        :param resource_ids: The ID of the resources to delete

        :return: all of the models that were deleted
        """
        raise NotImplementedError()


async def save_resources(
    resources: Iterable["Resource"],
    resource_service: ResourceServiceInterface,
    data_service: DataServiceInterface,
    component_context: ComponentContext,
    resource_context: ResourceContext,
    resource_view_context: ResourceViewContext,
):
    """
    Flush the pending changes of ``resources`` to the resource and data services.

    Gathers deletions, data patches, and model updates from every resource, applies them in
    bulk (deletions first, then data patches, then model diffs), and finally refreshes any
    resource views affected by the changes.
    """
    dependency_handler = DependencyHandler(
        resource_service, data_service, component_context, resource_context
    )
    # Collect per-resource pending work: IDs to delete, raw data patches, and dirty models.
    resources_to_delete: List[bytes] = []
    patches_to_apply: List[DataPatch] = []
    resources_to_update: List[MutableResourceModel] = []
    for resource in resources:
        _resources_to_delete, _patches_to_apply, _resources_to_update = resource._save()
        resources_to_delete.extend(_resources_to_delete)
        patches_to_apply.extend(_patches_to_apply)
        resources_to_update.extend(_resources_to_update)

    # Deleting a resource also deletes its descendants; remove the data models of every
    # deleted resource that had one.
    deleted_descendants = await resource_service.delete_resources(resources_to_delete)
    data_ids_to_delete = [
        resource_m.data_id for resource_m in deleted_descendants if resource_m.data_id is not None
    ]
    await data_service.delete_models(data_ids_to_delete)
    patch_results = await data_service.apply_patches(patches_to_apply)
    # Patches may invalidate attributes of other resources; those models also need updating.
    resources_to_update.extend(
        await dependency_handler.handle_post_patch_dependencies(patch_results)
    )
    diffs = []
    updated_ids = []
    for resource_m in resources_to_update:
        diffs.append(resource_m.save())
        updated_ids.append(resource_m.id)
    await resource_service.update_many(diffs)
    resource_view_context.update_views(updated_ids, resources_to_delete, resource_context)
null
15,312
import asyncio import dataclasses import hashlib import logging from inspect import isawaitable from typing import ( BinaryIO, Iterable, List, Optional, Tuple, Type, TypeVar, cast, Union, Awaitable, Sequence, Callable, Set, Pattern, overload, ) from ofrak.component.interface import ComponentInterface from ofrak.model.component_model import ComponentContext, CC, ComponentRunResult from ofrak.model.data_model import DataPatch from ofrak.model.job_model import ( JobRunContext, ) from ofrak.model.job_request_model import ( JobAnalyzerRequest, JobComponentRequest, JobMultiComponentRequest, ) from ofrak.model.resource_model import ( ResourceAttributes, ResourceModel, MutableResourceModel, ResourceContext, Data, ) from ofrak.model.tag_model import ResourceTag from ofrak.model.viewable_tag_model import ( ViewableResourceTag, ResourceViewInterface, ResourceViewContext, ) from ofrak.service.data_service_i import DataServiceInterface from ofrak.service.dependency_handler import DependencyHandler from ofrak.service.id_service_i import IDServiceInterface from ofrak.service.job_service_i import JobServiceInterface from ofrak.service.resource_service_i import ( ResourceServiceInterface, ResourceFilter, ResourceSort, ) from ofrak_type.error import NotFoundError, InvalidStateError from ofrak_type.range import Range class Resource: """ Defines methods for interacting with the data and attributes of Resources, the main building block of OFRAK. 
""" __slots__ = ( "_job_id", "_job_context", "_component_context", "_resource_context", "_resource_view_context", "_resource", "_resource_factory", "_id_service", "_resource_service", "_data_service", "_job_service", "_dependency_handler", ) def __init__( self, job_id: bytes, resource: MutableResourceModel, resource_context: ResourceContext, resource_view_context: ResourceViewContext, job_context: Optional[JobRunContext], component_context: ComponentContext, resource_factory: "ResourceFactory", id_service: IDServiceInterface, data_service: DataServiceInterface, resource_service: ResourceServiceInterface, job_service: JobServiceInterface, ): self._job_id: bytes = job_id self._job_context: Optional[JobRunContext] = job_context self._component_context: ComponentContext = component_context self._resource_context: ResourceContext = resource_context self._resource_view_context: ResourceViewContext = resource_view_context self._resource: MutableResourceModel = resource self._resource_factory: "ResourceFactory" = resource_factory self._id_service: IDServiceInterface = id_service self._resource_service: ResourceServiceInterface = resource_service self._data_service: DataServiceInterface = data_service self._job_service: JobServiceInterface = job_service def get_id(self) -> bytes: """ :return: This resource's ID """ return self._resource.id def get_job_id(self) -> bytes: """ Each resource belongs to a specific "job." See [JobServiceInterface][ofrak.service.job_service_i.JobServiceInterface]. :return: The ID of the job this resource belongs to """ return self._job_id def get_data_id(self) -> Optional[bytes]: """ Each resource may have a data ID. This refers to a [DataModel][ofrak.model.data_model.DataModel] representing some chunk of raw binary data. 
:return: The data ID associated with this resource, if it exists """ return self._resource.data_id def get_resource_context(self) -> ResourceContext: return self._resource_context def get_resource_view_context(self) -> ResourceViewContext: return self._resource_view_context def get_component_context(self) -> ComponentContext: return self._component_context def get_job_context(self) -> Optional[JobRunContext]: return self._job_context def get_caption(self) -> str: return self._resource.caption def is_modified(self) -> bool: """ Check if the resource has been modified in this context and is considered "dirty". :return: `True` if the resource is modified, `False` otherwise """ return self._resource.is_modified def get_model(self) -> MutableResourceModel: """ Get the underlying [model][ofrak.model.resource_model.ResourceModel] of this resource. :return: """ return self._resource async def get_data(self, range: Optional[Range] = None) -> bytes: """ A resource often represents a chunk of underlying binary data. This method returns the entire chunk by default; this can be reduced by an optional parameter. :param range: A range within the resource's data, relative to the resource's data itself (e.g. Range(0, 10) returns the first 10 bytes of the chunk) :return: The full range or a partial range of this resource's bytes """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data from a resource with no data" ) data = await self._data_service.get_data(self._resource.data_id, range) if range is None: range = Range(0, len(data)) self._component_context.access_trackers[self._resource.id].data_accessed.add(range) return data async def get_data_length(self) -> int: """ :return: The length of the underlying binary data this resource represents """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data length from a " "resource with no data." 
) return await self._data_service.get_data_length(self._resource.data_id) async def get_data_range_within_parent(self) -> Range: """ If this resource is "mapped," i.e. its underlying data is defined as a range of its parent's underlying data, this method returns the range within the parent resource's data where this resource lies. If this resource is not mapped (it is root), it returns a range starting at 0 with length 0. :return: The range of the parent's data which this resource represents """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data range from a " "resource with no data." ) if self._resource.parent_id is None: return Range(0, 0) parent_models = list( await self._resource_service.get_ancestors_by_id(self._resource.id, max_count=1) ) if len(parent_models) != 1: raise NotFoundError(f"There is no parent for resource {self._resource.id.hex()}") parent_model = parent_models[0] parent_data_id = parent_model.data_id if parent_data_id is None: return Range(0, 0) try: return await self._data_service.get_range_within_other( self._resource.data_id, parent_data_id ) except ValueError: return Range(0, 0) async def get_data_range_within_root(self) -> Range: """ Does the same thing as `get_data_range_within_parent`, except the range is relative to the root. :return: The range of the root node's data which this resource represents """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data range from a " "resource with no data." ) return await self._data_service.get_data_range_within_root(self._resource.data_id) async def search_data( self, query: Pattern[bytes], start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[Tuple[int, bytes], ...]: ... async def search_data( self, query: bytes, start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[int, ...]: ... 
    async def search_data(self, query, start=None, end=None, max_matches=None):
        """
        Search for some data in this resource. The query may be a regex pattern (a return value
        of `re.compile`). If the query is a regex pattern, returns a tuple of pairs with both
        the offset of the match and the contents of the match itself. If the query is plain
        bytes, a list of only the match offsets are returned.

        :param query: Plain bytes to exactly match or a regex pattern to search for
        :param start: Start offset in the data model to begin searching
        :param end: End offset in the data model to stop searching
        :param max_matches: Maximum number of matches to return; unlimited if None

        :return: A tuple of offsets matching a plain bytes query, or a list of (offset, match)
        pairs for a regex pattern query
        """
        return await self._data_service.search(self.get_data_id(), query, start, end, max_matches)

    async def save(self):
        """
        If this resource has been modified, update the model stored in the resource service
        with the local changes.

        :raises NotFoundError: If the resource service does not have a model for this
        resource's ID
        """
        await save_resources(
            (self,),
            self._resource_service,
            self._data_service,
            self._component_context,
            self._resource_context,
            self._resource_view_context,
        )

    def _save(self) -> Tuple[List[bytes], List[DataPatch], List[MutableResourceModel]]:
        # Collect this resource's pending changes for the bulk flush in `save_resources`:
        # either its ID for deletion, or its queued data patches plus the dirty model.
        resources_to_delete: List[bytes] = []
        patches_to_apply: List[DataPatch] = []
        resources_to_update: List[MutableResourceModel] = []
        if self._resource.is_deleted:
            resources_to_delete.append(self._resource.id)
        elif self._resource.is_modified:
            modification_tracker = self._component_context.modification_trackers.get(
                self._resource.id
            )
            assert modification_tracker is not None, (
                f"Resource {self._resource.id.hex()} was "
                f"marked as modified but is missing a tracker!"
            )
            patches_to_apply.extend(modification_tracker.data_patches)
            resources_to_update.append(self._resource)
            # Patches are handed off to the caller; clear them so they are not applied twice.
            modification_tracker.data_patches.clear()
        return resources_to_delete, patches_to_apply, resources_to_update

    async def _fetch(self, resource: MutableResourceModel):
        """
        Update the local model with the latest version from the resource service. This will
        fail if this resource has been modified.

        :raises InvalidStateError: If the local resource model has been modified
        :raises NotFoundError: If the resource service does not have a model for this
        resource's ID
        """
        if resource.is_modified and not resource.is_deleted:
            raise InvalidStateError(
                f"Cannot fetch dirty resource {resource.id.hex()} (resource "
                f"{self.get_id().hex()} attempted fetch)"
            )
        try:
            fetched_resource = await self._resource_service.get_by_id(resource.id)
        except NotFoundError:
            # The resource no longer exists in the service; drop it from the local context.
            if (
                resource.id in self._component_context.modification_trackers
                and resource.id in self._resource_context.resource_models
            ):
                del self._resource_context.resource_models[resource.id]
            return
        resource.reset(fetched_resource)

    async def _fetch_resources(self, resource_ids: Iterable[bytes]):
        # Refresh every given resource that is already present in the local context,
        # fetching them concurrently.
        tasks = []
        for resource_id in resource_ids:
            context_resource = self._resource_context.resource_models.get(resource_id)
            if context_resource is not None:
                tasks.append(self._fetch(context_resource))
        await asyncio.gather(*tasks)

    async def _update_views(self, modified: Set[bytes], deleted: Set[bytes]):
        # Sync every cached view of a modified resource with its refreshed model, and mark
        # views of deleted resources as deleted.
        for resource_id in modified:
            views_in_context = self._resource_view_context.views_by_resource[resource_id]
            for view in views_in_context.values():
                if resource_id not in self._resource_context.resource_models:
                    await self._fetch(view.resource.get_model())  # type: ignore
                if resource_id not in self._resource_context.resource_models:
                    # Fetch found no model: the resource is gone.
                    view.set_deleted()
                    continue
                updated_model = self._resource_context.resource_models[resource_id]
                fresh_view = view.create(updated_model)
                # Copy every attribute of the fresh view onto the existing view object so
                # callers holding references observe the update.
                for field in dataclasses.fields(fresh_view):
                    if field.name == "_resource":
                        continue
                    setattr(view,
                            field.name, getattr(fresh_view, field.name))

        for resource_id in deleted:
            views_in_context = self._resource_view_context.views_by_resource[resource_id]
            for view in views_in_context.values():
                view.set_deleted()

    async def run(
        self,
        component_type: Type[ComponentInterface[CC]],
        config: CC = None,
    ) -> ComponentRunResult:
        """
        Run a single component. Runs even if the component has already been run on this
        resource.

        :param component_type: The component type (may be an interface) to get and run
        :param config: Optional config to pass to the component

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        job_context = self._job_context
        component_result = await self._job_service.run_component(
            JobComponentRequest(
                self._job_id,
                self._resource.id,
                component_type.get_id(),
                config,
            ),
            job_context,
        )
        # Deleted resources no longer need modification tracking.
        for deleted_id in component_result.resources_deleted:
            if deleted_id in self._component_context.modification_trackers:
                del self._component_context.modification_trackers[deleted_id]
        # Pull fresh models for everything the component touched, then sync cached views.
        await self._fetch_resources(component_result.resources_modified)
        await self._update_views(
            component_result.resources_modified, component_result.resources_deleted
        )
        return component_result

    async def auto_run(
        self,
        components: Iterable[Type[ComponentInterface]] = tuple(),
        blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(),
        all_unpackers: bool = False,
        all_identifiers: bool = False,
        all_analyzers: bool = False,
        all_packers: bool = False,
    ) -> ComponentRunResult:
        """
        Automatically run multiple components which may run on this resource. From an initial
        set of possible components to run, this set is searched for components for which the
        intersection of the component's targets and this resource's tags is not empty. Accepts
        several optional flags to expand or restrict the initial set of components.

        :param components: Components to explicitly add to the initial set of components
        :param blacklisted_components: Components to explicitly remove to the initial set of
        components
        :param all_unpackers: If true, all Unpackers are added to the initial set of components
        :param all_identifiers: If true, all Identifiers are added to the initial set of
        components
        :param all_analyzers: If true, all Analyzers are added to the initial set of components
        :param all_packers: If true, all Packers are added to the initial set of components

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        components_result = await self._job_service.run_components(
            JobMultiComponentRequest(
                self._job_id,
                self._resource.id,
                components_allowed=tuple(c.get_id() for c in components),
                components_disallowed=tuple(c.get_id() for c in blacklisted_components),
                all_unpackers=all_unpackers,
                all_identifiers=all_identifiers,
                all_analyzers=all_analyzers,
                all_packers=all_packers,
            )
        )
        # Deleted resources no longer need modification tracking.
        for deleted_id in components_result.resources_deleted:
            if deleted_id in self._component_context.modification_trackers:
                del self._component_context.modification_trackers[deleted_id]
        await self._fetch_resources(components_result.resources_modified)
        await self._update_views(
            components_result.resources_modified, components_result.resources_deleted
        )
        return components_result

    async def unpack(self) -> ComponentRunResult:
        """
        Unpack the resource.

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        return await self.auto_run(all_identifiers=True, all_unpackers=True)

    async def analyze(self, resource_attributes: Type[RA]) -> RA:
        """
        Analyze the resource for a specific resource attribute.
        :param Type[RA] resource_attributes: The attributes type to get; analyzers are run
        only if the attributes are not already present on this resource

        :return: The analyzed attributes instance
        """
        # Only run analyzers if the attributes are missing or out of date.
        attributes = self._check_attributes(resource_attributes)
        if attributes is None:
            await self._analyze_attributes((resource_attributes,))
            return self.get_attributes(resource_attributes)
        else:
            return attributes

    async def identify(self) -> ComponentRunResult:
        """
        Run all registered identifiers on the resource, tagging it with matching resource tags.
        """
        return await self.auto_run(all_identifiers=True)

    async def pack(self) -> ComponentRunResult:
        """
        Pack the resource.

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        return await self.auto_run(all_packers=True)

    async def auto_run_recursively(
        self,
        components: Iterable[Type[ComponentInterface]] = tuple(),
        blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(),
        blacklisted_tags: Iterable[ResourceTag] = tuple(),
        all_unpackers: bool = False,
        all_identifiers: bool = False,
        all_analyzers: bool = False,
    ) -> ComponentRunResult:
        """
        Automatically run multiple components which may run on this resource or its
        descendents. From an initial set of possible components to run, this set is searched
        for components for which the intersection of the component's targets and this
        resource's tags is not empty. Accepts several optional flags to expand or restrict the
        initial set of components. After each run, compatible components from the initial set
        are run on any resources which have had tags added (including newly created resources).
        This is repeated until no new tags are added.

        :param components: Components to explicitly add to the initial set of components
        :param blacklisted_components: Components to explicitly remove to the initial set of
        components
        :param blacklisted_tags: Resources with one of these tags are ignored during the
        recursive run
        :param all_unpackers: If true, all Unpackers are added to the initial set of components
        :param all_identifiers: If true, all Identifiers are added to the initial set of
        components
        :param all_analyzers: If true, all Analyzers are added to the initial set of components

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        components_result = await self._job_service.run_components_recursively(
            JobMultiComponentRequest(
                self._job_id,
                self._resource.id,
                components_allowed=tuple(c.get_id() for c in components),
                components_disallowed=tuple(c.get_id() for c in blacklisted_components),
                all_unpackers=all_unpackers,
                all_identifiers=all_identifiers,
                all_analyzers=all_analyzers,
                tags_ignored=tuple(blacklisted_tags),
            )
        )
        await self._fetch_resources(components_result.resources_modified)
        await self._update_views(
            components_result.resources_modified, components_result.resources_deleted
        )
        return components_result

    async def unpack_recursively(
        self,
        blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(),
        do_not_unpack: Iterable[ResourceTag] = tuple(),
    ) -> ComponentRunResult:
        """
        Automatically unpack this resource and recursively unpack all of its descendants.
        First this resource is unpacked; then, any resource which "valid" tags were added to
        will also be unpacked. New resources created with tags count as resources with new
        tags. A "valid" tag is a tag which is not explicitly ignored via the ``do_not_unpack``
        argument. The unpacking will only stop when no new "valid" tags have been added in the
        previous iteration. This can lead to a very long unpacking process if it is totally
        unconstrained.

        :param blacklisted_components: Components which are blocked from running during the
        recursive unpacking, on this resource or any descendants.
:param do_not_unpack: Do not unpack resources with this tag, and ignore these tags when checking if any new tags have been added in this iteration. :return: A ComponentRunResult containing information on resources affected by the component """ return await self.auto_run_recursively( all_identifiers=True, all_unpackers=True, blacklisted_components=blacklisted_components, blacklisted_tags=do_not_unpack, ) async def analyze_recursively(self) -> ComponentRunResult: return await self.auto_run_recursively(all_analyzers=True) async def pack_recursively(self) -> ComponentRunResult: """ Recursively pack the resource, starting with its descendants. """ return await self._job_service.pack_recursively(self._job_id, self._resource.id) async def write_to(self, destination: BinaryIO, pack: bool = True): """ Recursively repack resource and write data out to an arbitrary ``BinaryIO`` destination. :param destination: Destination for packed resource data :return: """ if pack is True: await self.pack_recursively() destination.write(await self.get_data()) async def _analyze_attributes(self, attribute_types: Tuple[Type[ResourceAttributes], ...]): job_context = self._job_context components_result = await self._job_service.run_analyzer_by_attribute( JobAnalyzerRequest( self._job_id, self._resource.id, attribute_types, tuple(self._resource.tags), ), job_context, ) # Update all the resources in the local context that were modified as part of the # analysis await self._fetch_resources(components_result.resources_modified) await self._update_views( components_result.resources_modified, components_result.resources_deleted ) return components_result async def _create_resource(self, resource_model: ResourceModel) -> "Resource": return await self._resource_factory.create( self._job_id, resource_model.id, self._resource_context, self._resource_view_context, self._component_context, self._job_context, ) async def _create_resources( self, resource_models: Iterable[ResourceModel] ) -> 
Iterable["Resource"]:
        # Bulk-create Resource wrappers for the given models, sharing this resource's
        # job, contexts, and services via the resource factory.
        return await self._resource_factory.create_many(
            self._job_id,
            [resource_model.id for resource_model in resource_models],
            self._resource_context,
            self._resource_view_context,
            self._component_context,
            self._job_context,
        )

    async def create_child(
        self,
        tags: Optional[Iterable[ResourceTag]] = None,
        attributes: Optional[Iterable[ResourceAttributes]] = None,
        data: Optional[bytes] = None,
        data_range: Optional[Range] = None,
    ) -> "Resource":
        """
        Create a new resource as a child of this resource. This method entirely defines the
        child's tags and attributes.

        This method also defines the child's data semantics: A child resource can either be
        defined in one of three ways:
        1) The resource contains no data ("Dataless" resource). Not used in practice.
        2) As mapping a range of its parent's data ("Mapped" resource). For example, an
        instruction maps a portion of its parent basic block.
        3) Defining its own new, independent data ("Unmapped" resource). For example, a file
        extracted from a zip archive is a child of the zip archive resource, but its data does
        not map to some specific range of that parent archive.

        By default a resource will be defined the third way (unmapped). To specify that the
        resource is a mapped resource, include the optional ``data_range`` parameter set to the
        range of the parent's data which the child maps. That is, `data_range=Range(0, 10)`
        creates a resource which maps the first 10 bytes of the parent.
        The optional ``data`` param defines whether to populate the new child's data. It can be
        used only if the data is unmapped. If the child is unmapped, the value of ``data`` still
        becomes that child's data, but the parent's data is unaffected. If ``data`` and
        ``data_range`` are both `None` (default), the new child is a dataless resource.

        The following table sums up the possible interactions between ``data`` and
        ``data_range``:

        |                           | ``data_range`` param not `None`       | ``data_range`` param `None`                  |
        |---------------------------|---------------------------------------|----------------------------------------------|
        | ``data`` param not `None` | Not allowed                           | Child unmapped, child's data set to ``data`` |
        | ``data`` param `None`     | Child mapped, parent's data untouched | Child is dataless                            |

        :param tags: [tags][ofrak.model.tag_model.ResourceTag] to add to the new child
        :param attributes: [attributes][ofrak.model.resource_model.ResourceAttributes] to add to
        the new child
        :param data: The binary data for the new child. If `None` and ``data_range`` is `None`,
        the resource has no data. Defaults to `None`.
        :param data_range: The range of the parent's data which the new child maps. If `None` (
        default), the child will not map the parent's data.

        :raises ValueError: If both ``data`` and ``data_range`` are provided, or if data-backed
        creation is requested on a parent with no data

        :return:
        """
        if data is not None and data_range is not None:
            raise ValueError(
                "Cannot create a child from both data and data_range. These parameters are "
                "mutually exclusive."
            )
        resource_id = self._id_service.generate_id()
        if data_range is not None:
            # Mapped child: its data model is a view onto a range of the parent's data.
            if self._resource.data_id is None:
                raise ValueError(
                    "Cannot create a child with mapped data from a parent that doesn't have data"
                )
            data_model_id = resource_id
            await self._data_service.create_mapped(
                data_model_id,
                self._resource.data_id,
                data_range,
            )
            data_attrs = Data(data_range.start, data_range.length())
            attributes = [data_attrs, *attributes] if attributes else [data_attrs]
        elif data is not None:
            # Unmapped child: gets its own independent root data model.
            if self._resource.data_id is None:
                raise ValueError(
                    "Cannot create a child with data from a parent that doesn't have data"
                )
            data_model_id = resource_id
            await self._data_service.create_root(data_model_id, data)
            data_attrs = Data(0, len(data))
            attributes = [data_attrs, *attributes] if attributes else [data_attrs]
        else:
            # Dataless child.
            data_model_id = None
        resource_model = ResourceModel.create(
            resource_id,
            data_model_id,
            self._resource.id,
            tags,
            attributes,
            self._component_context.component_id,
            self._component_context.component_version,
        )
        await self._resource_service.create(resource_model)
        if self._job_context:
            # Track the new tags so the job can dispatch components interested in them.
            resource_tracker = self._job_context.trackers[resource_model.id]
            resource_tracker.tags_added.update(resource_model.tags)
        self._component_context.mark_resource_modified(resource_id)
        self._component_context.resources_created.add(resource_model.id)
        created_resource = await self._create_resource(resource_model)
        return created_resource

    async def create_child_from_view(
        self,
        view: RV,
        data_range: Optional[Range] = None,
        data: Optional[bytes] = None,
        additional_tags: Iterable[ResourceTag] = (),
        additional_attributes: Iterable[ResourceAttributes] = (),
    ) -> "Resource":
        """
        Create a new resource as a child of this resource. The new resource will have tags and
        attributes as defined by the [view][ofrak.model.viewable_tag_model.ViewableResourceTag];
        in this way a view can act as a template to create a new resource.
The ``additional_tags`` and ``additional_attributes`` can also be used to add more tags and
        attributes beyond what the view contains.

        This method's ``data`` and ``data_range`` parameters have the same semantics as in
        `create_child`, in short:

        |                           | ``data_range`` param not `None`       | ``data_range`` param `None`                  |
        |---------------------------|---------------------------------------|----------------------------------------------|
        | ``data`` param not `None` | Not allowed                           | Child unmapped, child's data set to ``data`` |
        | ``data`` param `None`     | Child mapped, parent's data untouched | Child is dataless                            |

        See `create_child` documentation for details.

        :param view: A [resource view][ofrak.resource_view] to pull
        [tags][ofrak.model.tag_model.ResourceTag] and
        [attributes][ofrak.model.resource_model.ResourceAttributes] from to populate the new
        child
        :param data_range: The range of the parent's data which the new child maps. If `None` (
        default), the child will not map the parent's data.
        :param data: The binary data for the new child. If `None` and ``data_range`` is `None`,
        the resource has no data. Defaults to `None`.
        :param additional_tags: Any [tags][ofrak.model.tag_model.ResourceTag] for the child in
        addition to those from the ``view``
        :param additional_attributes: Any
        [attributes][ofrak.model.resource_model.ResourceAttributes] for the child in addition
        to those from the ``view``

        :return:
        """
        viewable_tag: ViewableResourceTag = type(view)
        new_resource = await self.create_child(
            tags=(viewable_tag, *additional_tags),
            attributes=(*view.get_attributes_instances().values(), *additional_attributes),
            data_range=data_range,
            data=data,
        )
        return new_resource

    def _view_as(self, viewable_tag: Type[RV]) -> Union[RV, Awaitable[RV]]:
        """
        Try to get a view without calling any analysis, to avoid as many unnecessary
        `asyncio.gather` calls as possible.

        Checks cached views first for view, and if not found, then checks if the attributes
        needed to create the view are already present and up-to-date, and only if both of those
        are not found does it return an awaitable.
        """
        if self._resource_view_context.has_view(self.get_id(), viewable_tag):
            # First early return: View already exists in cache
            return self._resource_view_context.get_view(self.get_id(), viewable_tag)

        if not issubclass(viewable_tag, ResourceViewInterface):
            raise ValueError(
                f"Cannot get view for resource {self.get_id().hex()} of a type "
                f"{viewable_tag.__name__} because it is not a subclass of ResourceView"
            )
        if not self.has_tag(viewable_tag):
            raise ValueError(
                f"Cannot get resource {self.get_id().hex()} as view "
                f"{viewable_tag.__name__} because the resource is not tagged as a "
                f"{viewable_tag.__name__}"
            )

        composed_attrs_types = viewable_tag.composed_attributes_types
        existing_attributes = [self._check_attributes(attrs_t) for attrs_t in composed_attrs_types]
        if all(existing_attributes):
            # Second early return: All attributes needed for view are present and up-to-date
            view = viewable_tag.create(self.get_model())
            view.resource = self  # type: ignore
            self._resource_view_context.add_view(self.get_id(), view)
            return cast(RV, view)

        # Only if analysis is absolutely necessary is an awaitable created and returned
        async def finish_view_creation(
            attrs_to_analyze: Tuple[Type[ResourceAttributes], ...]
        ) -> RV:
            await self._analyze_attributes(attrs_to_analyze)
            view = viewable_tag.create(self.get_model())
            view.resource = self  # type: ignore
            self._resource_view_context.add_view(self.get_id(), view)
            return cast(RV, view)

        return finish_view_creation(
            tuple(
                attrs_t
                for attrs_t, existing in zip(composed_attrs_types, existing_attributes)
                if not existing
            )
        )

    async def view_as(self, viewable_tag: Type[RV]) -> RV:
        """
        Provides a specific type of view instance for this resource.
The returned instance is an object which has some of the information from this same resource,
        however in a simpler interface. This resource instance will itself remain available
        through the view's ``.resource`` property.

        :param viewable_tag: A ViewableResourceTag, which this resource's model must already
        contain

        :raises ValueError: If the model does not contain this tag, or this tag is not a
        ViewableResourceTag

        :return:
        """
        view_or_create_view_task: Union[RV, Awaitable[RV]] = self._view_as(viewable_tag)
        if isawaitable(view_or_create_view_task):
            return await view_or_create_view_task
        else:
            return cast(RV, view_or_create_view_task)

    def add_view(self, view: ResourceViewInterface):
        """
        Add all the attributes composed in a view to this resource, and tag this resource with
        the view type. Calling this is the equivalent of making N ``add_attributes`` calls and
        one ``add_tag`` call (where N is the number of attributes the view is composed of).

        :param view: An instance of a view
        """
        for attributes in view.get_attributes_instances().values():  # type: ignore
            self.add_attributes(attributes)
        self.add_tag(type(view))

    def _set_modified(self):
        # Record this resource as modified in the current component context.
        self._component_context.mark_resource_modified(self._resource.id)

    def _add_tag(self, tag: ResourceTag):
        """
        Associate a tag with the resource. If the resource already has the provided tag, this
        has no effect. All parent classes of the provided tag that are tags themselves are also
        added.
        """
        if self._resource.has_tag(tag, False):
            return
        self._component_context.mark_resource_modified(self._resource.id)
        new_tags = self._resource.add_tag(tag)
        if self._job_context:
            resource_tracker = self._job_context.trackers[self._resource.id]
            resource_tracker.tags_added.update(new_tags)

    def add_tag(self, *tags: ResourceTag):
        """
        Associate multiple tags with the resource. If the resource already has one of the
        provided tags, that tag is not added. All parent classes of the provided tags that are
        tags themselves are also added.
        """
        for tag in tags:
            self._add_tag(tag)

    def get_tags(self, inherit: bool = True) -> Iterable[ResourceTag]:
        """
        Get a set of tags associated with the resource.
        """
        return self._resource.get_tags(inherit)

    def has_tag(self, tag: ResourceTag, inherit: bool = True) -> bool:
        """
        Determine if the resource is associated with the provided tag.
        """
        return self._resource.has_tag(tag, inherit)

    def remove_tag(self, tag: ResourceTag):
        # No-op if the tag is absent (checked with inheritance).
        if not self._resource.has_tag(tag):
            return
        self._set_modified()
        self._resource.remove_tag(tag)

    def get_most_specific_tags(self) -> Iterable[ResourceTag]:
        """
        Get all tags associated with the resource from which no other tags on that resource
        inherit. In other words, get the resource's tags that aren't subclassed by other tags
        on the resource.

        For example, for a resource tagged as `Elf`, the result would be just `[Elf]` instead
        of `[Elf, Program, GenericBinary]` that `Resource.get_tags` returns. This is because
        `Elf` inherits from `Program`, which inherits from `GenericBinary`. Even though the
        resource has all of those tags, the most derived class with no other derivatives is the
        "most specific."
        """
        return self._resource.get_most_specific_tags()

    def _check_attributes(self, attributes_type: Type[RA]) -> Optional[RA]:
        """
        Try to get the current attributes.

        TODO: Should we be using the version as well? The client wouldn't know the version of
         the component in a client-server environment. We could do that efficiently by adding a
         service method that list all available components (and their version)

        :param attributes_type: The type of attributes to check this resource for.

        :return: The requested attributes if they are present and up-to-date, otherwise return
        None.
        """
        attributes = self._resource.get_attributes(attributes_type)
        if attributes is not None:
            # Make sure that the attributes have not been invalidated
            component_id = self._resource.get_component_id_by_attributes(type(attributes))
            if component_id is not None:
                return attributes
        return None

    def _add_attributes(self, attributes: ResourceAttributes):
        existing_attributes = self._resource.get_attributes(type(attributes))
        if existing_attributes is not None and existing_attributes == attributes:
            # Identical value already present: avoid marking the resource as modified.
            return
        self._set_modified()
        self._resource.add_attributes(attributes)
        component_context = self._component_context
        self._resource.add_component_for_attributes(
            component_context.component_id, component_context.component_version, type(attributes)
        )

    def add_attributes(self, *attributes: ResourceAttributes):
        """
        Add the provided attributes to the resource. If the resource already has the provided
        attributes classes, they are replaced with the provided ones.
        """
        for attrs in attributes:
            self._add_attributes(attrs)

    def has_attributes(self, attributes_type: Type[ResourceAttributes]) -> bool:
        """
        Check if this resource has a value for the given attributes type.

        :param attributes_type:

        :return:
        """
        return self._resource.has_attributes(attributes_type)

    def get_attributes(self, attributes_type: Type[RA]) -> RA:
        """
        If this resource has attributes matching the given type, return the value of those
        attributes.

        :param attributes_type:

        :return:

        :raises NotFoundError: If this resource has no attributes matching the given type
        """
        attributes = self._resource.get_attributes(attributes_type)
        if attributes is None:
            raise NotFoundError(
                f"Cannot find attributes {attributes_type} for resource {self.get_id().hex()}"
            )
        # Record the access so dependency tracking knows these attributes were read.
        self._component_context.access_trackers[self._resource.id].attributes_accessed.add(
            attributes_type
        )
        return attributes

    def remove_attributes(self, attributes_type: Type[ResourceAttributes]):
        """
        Remove the value of a given attributes type from this resource, if there is such a
        value. If the resource does not have a value for the given attributes type, do nothing.

        :param attributes_type:

        :return:
        """
        if not self._resource.has_attributes(attributes_type):
            return
        self._set_modified()
        self._resource.remove_attributes(attributes_type)

    def add_component(
        self,
        component_id: bytes,
        version: int,
    ):
        """
        Mark that a component has run on this resource

        :param component_id: ID of the component which ran
        :param version: Version of the component which ran

        :return:
        """
        self._set_modified()
        self._resource.add_component(component_id, version)

    def add_component_for_attributes(
        self,
        component_id: bytes,
        version: int,
        attributes: Type[ResourceAttributes],
    ):
        """
        Mark that a component was responsible for adding some attributes to this resource.

        :param component_id: ID of the component which added the attributes
        :param version: version of the component which added the attributes
        :param attributes: The type of attributes which were added

        :return:
        """
        self._set_modified()
        self._resource.add_component_for_attributes(component_id, version, attributes)

    def remove_component(
        self,
        component_id: bytes,
        attributes: Optional[Type[ResourceAttributes]] = None,
    ):
        """
        Remove any information that this component ran on this resource and/or added a
        particular type of attributes to this resource

        :param component_id: ID of the component to remove information about
        :param attributes: The type of attributes to remove information about

        :return:
        """
        self._set_modified()
        self._resource.remove_component(component_id, attributes)

    def has_component_run(self, component_id: bytes, desired_version: Optional[int] = None) -> bool:
        """
        Check if a particular component has run on this resource

        :param component_id: ID of the component to check for
        :param desired_version: If this is not `None`, also check that a specific version of
        ``component`` ran. Defaults to ``None``.

        :return: `True` if a component matching ``component_id`` and ``desired_version`` ran on
        this resource, `False` otherwise.
If ``desired_version`` is `None`, only ``component_id`` must be matched to return `True`.
        """
        version = self._resource.get_component_version(component_id)
        if version is None:
            return False
        if desired_version is None:
            return True
        return version == desired_version

    def queue_patch(
        self,
        patch_range: Range,
        data: bytes,
    ):
        """
        Replace the data within the provided range with the provided data. This operation may
        shrink, expand or leave untouched the resource's data. Patches are queued up to be
        applied, and will only be applied to the resource's data after the component this was
        called from exits.

        :param patch_range: The range of binary data in this resource to replace
        :param data: The bytes to replace part of this resource's data with

        :return:

        :raises InvalidStateError: If this resource has no component context
        :raises ValueError: If this resource has no data
        """
        if not self._component_context:
            raise InvalidStateError(
                f"Cannot patch resource {self._resource.id.hex()} without a context"
            )
        if self._resource.data_id is None:
            raise ValueError("Cannot patch a resource with no data")
        self._component_context.modification_trackers[self._resource.id].data_patches.append(
            DataPatch(
                patch_range,
                self._resource.data_id,
                data,
            )
        )
        self._resource.is_modified = True

    async def get_parent_as_view(self, v_type: Type[RV]) -> RV:
        """
        Get the parent of this resource. The parent will be returned as an instance of the
        given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the parent as
        """
        parent_r = await self.get_parent()
        return await parent_r.view_as(v_type)

    async def get_parent(self) -> "Resource":
        """
        Get the parent of this resource.

        :raises NotFoundError: If this resource has no parent
        """
        models = list(
            await self._resource_service.get_ancestors_by_id(self._resource.id, max_count=1)
        )
        if len(models) != 1:
            raise NotFoundError(f"There is no parent for resource {self._resource.id.hex()}")
        return await self._create_resource(models[0])

    async def get_ancestors(
        self,
        r_filter: Optional[ResourceFilter] = None,
    ) -> Iterable["Resource"]:
        """
        Get all the ancestors of this resource. May optionally filter the ancestors so only
        those matching certain parameters are returned.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided filter
        """
        models = await self._resource_service.get_ancestors_by_id(
            self._resource.id, r_filter=r_filter
        )
        return await self._create_resources(models)

    async def get_only_ancestor_as_view(
        self,
        v_type: Type[RV],
        r_filter: ResourceFilter,
    ) -> RV:
        """
        Get the only ancestor of this resource which matches the given filter. The ancestor
        will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the ancestor as
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If no ancestor matches ``r_filter``
        """
        ancestor_r = await self.get_only_ancestor(r_filter)
        return await ancestor_r.view_as(v_type)

    async def get_only_ancestor(self, r_filter: ResourceFilter) -> "Resource":
        """
        Get the only ancestor of this resource which matches the given filter.

        NOTE(review): the query uses max_count=1, so if several ancestors match, the first one
        returned by the resource service is used rather than raising — confirm this is intended.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If no ancestor matches ``r_filter``
        """
        ancestors = list(
            await self._resource_service.get_ancestors_by_id(self._resource.id, 1, r_filter)
        )
        if len(ancestors) == 0:
            raise NotFoundError(
                f"There is no ancestor for resource {self._resource.id.hex()} matching the "
                f"provided filter"
            )
        return await self._create_resource(ancestors[0])

    async def get_descendants_as_view(
        self,
        v_type: Type[RV],
        max_depth: int = -1,
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable[RV]:
        """
        Get all the descendants of this resource. May optionally filter the descendants so only
        those matching certain parameters are returned. May optionally sort the descendants by
        an indexable attribute value key. The descendants will be returned as an instance of
        the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the descendants as
        :param max_depth: Maximum depth from this resource to search for descendants; if -1, no
        maximum depth
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes
        :param r_sort: Specifies which indexable attribute to use as the key to sort and the
        direction to sort

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided filter
        """
        descendants = await self.get_descendants(max_depth, r_filter, r_sort)
        views_or_tasks = [r._view_as(v_type) for r in descendants]
        # analysis tasks to generate views of resources which don't have attrs for the view already
        view_tasks: List[Awaitable[RV]] = []
        # each resources' already-existing views OR the index in `view_tasks` of the analysis task
        views_or_task_indexes: List[Union[int, RV]] = []

        for view_or_create_view_task in views_or_tasks:
            if isawaitable(view_or_create_view_task):
                views_or_task_indexes.append(len(view_tasks))
                view_tasks.append(view_or_create_view_task)
            else:
                views_or_task_indexes.append(cast(RV, view_or_create_view_task))

        if view_tasks:
            # Gather all pending analyses at once, then stitch results back in order.
            completed_views: Sequence[RV] = await asyncio.gather(*view_tasks)

            return [
                completed_views[v_or_i] if type(v_or_i) is int else cast(RV, v_or_i)
                for v_or_i in views_or_task_indexes
            ]
        else:
            # There are no tasks, so all needed views are already present
            return cast(List[RV], views_or_task_indexes)

    async def get_descendants(
        self,
        max_depth: int = -1,
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable["Resource"]:
        """
        Get all the descendants of this resource. May optionally filter the descendants so only
        those matching certain parameters are returned. May optionally sort the descendants by
        an indexable attribute value key.

        :param max_depth: Maximum depth from this resource to search for descendants; if -1, no
        maximum depth
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes
        :param r_sort: Specifies which indexable attribute to use as the key to sort and the
        direction to sort

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided filter
        """
        models = await self._resource_service.get_descendants_by_id(
            self._resource.id, max_depth=max_depth, r_filter=r_filter, r_sort=r_sort
        )
        return await self._create_resources(models)

    async def get_only_descendant_as_view(
        self,
        v_type: Type[RV],
        max_depth: int = -1,
        r_filter: Optional[ResourceFilter] = None,
    ) -> RV:
        """
        If a filter is provided, get the only descendant of this resource which matches the
        given filter. If a filter is not provided, gets the only descendant of this resource.
        The descendant will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the descendant as
        :param max_depth: Maximum depth from this resource to search for descendants; if -1, no
        maximum depth
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one descendant
        matches ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        descendant
        """
        descendant_r = await self.get_only_descendant(max_depth, r_filter)
        return await descendant_r.view_as(v_type)

    async def get_only_descendant(
        self,
        max_depth: int = -1,
        r_filter: Optional[ResourceFilter] = None,
    ) -> "Resource":
        """
        If a filter is provided, get the only descendant of this resource which matches the
        given filter. If a filter is not provided, gets the only descendant of this resource.

        :param max_depth: Maximum depth from this resource to search for descendants; if -1, no
        maximum depth
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one descendant
        matches ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        descendant
        """
        # max_count=2 is enough to distinguish "exactly one" from "more than one".
        models = list(
            await self._resource_service.get_descendants_by_id(
                self._resource.id,
                max_depth=max_depth,
                max_count=2,
                r_filter=r_filter,
            )
        )
        if len(models) == 0:
            raise NotFoundError(
                f"There is no descendant for resource {self._resource.id.hex()} matching "
                f"the provided filter {r_filter}"
            )
        if len(models) > 1:
            # TODO: Not the right kind of error
            raise NotFoundError(
                f"There are multiple descendants for resource {self._resource.id.hex()} "
                f"matching the provided filter"
            )
        return await self._create_resource(models[0])

    async def get_only_sibling_as_view(
        self,
        v_type: Type[RV],
        r_filter:
ResourceFilter = None,
    ) -> RV:
        """
        If a filter is provided, get the only sibling of this resource which matches the given
        filter. If a filter is not provided, gets the only sibling of this resource. The
        sibling will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the sibling as
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one sibling
        matches ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        siblings
        """
        sibling_r = await self.get_only_sibling(r_filter)
        return await sibling_r.view_as(v_type)

    async def get_only_sibling(self, r_filter: Optional[ResourceFilter] = None) -> "Resource":
        """
        If a filter is provided, get the only sibling of this resource which matches the given
        filter. If a filter is not provided, gets the only sibling of this resource.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one sibling
        matches ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        siblings
        """
        # max_count=2 is enough to distinguish "exactly one" from "more than one".
        models = list(
            await self._resource_service.get_siblings_by_id(
                self._resource.id,
                max_count=2,
                r_filter=r_filter,
            )
        )
        if len(models) == 0:
            raise NotFoundError(
                f"There is no sibling for resource {self._resource.id.hex()} matching "
                f"the provided filter"
            )
        if len(models) > 1:
            raise NotFoundError(
                f"There are multiple siblings for resource {self._resource.id.hex()} "
                f"matching the provided filter"
            )
        return await self._create_resource(models[0])

    async def get_children(
        self,
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable["Resource"]:
        """
        Get all the children of this resource. May optionally sort the children by an indexable
        attribute value key. May optionally filter the children so only those matching certain
        parameters are returned.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes
        :param r_sort: Specifies which indexable attribute to use as the key to sort and the
        direction to sort

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided filter
        """
        # Children are simply descendants at depth 1.
        return await self.get_descendants(1, r_filter, r_sort)

    async def get_children_as_view(
        self,
        v_type: Type[RV],
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable[RV]:
        """
        Get all the children of this resource. May optionally filter the children so only those
        matching certain parameters are returned. May optionally sort the children by an
        indexable attribute value key. The children will be returned as an instance of the
        given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the children as
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes
        :param r_sort: Specifies which indexable attribute to use as the key to sort and the
        direction to sort

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided filter
        """
        return await self.get_descendants_as_view(v_type, 1, r_filter, r_sort)

    async def get_only_child(self, r_filter: Optional[ResourceFilter] = None) -> "Resource":
        """
        If a filter is provided, get the only child of this resource which matches the given
        filter. If a filter is not provided, gets the only child of this resource.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one child matches
        ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        children
        """
        return await self.get_only_descendant(1, r_filter)

    async def get_only_child_as_view(self, v_type: Type[RV], r_filter: ResourceFilter = None) -> RV:
        """
        If a filter is provided, get the only child of this resource which matches the given
        filter. If a filter is not provided, gets the only child of this resource. The child
        will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].
:param v_type: The type of [view][ofrak.resource] to get the child as :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one child matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple children """ return await self.get_only_descendant_as_view(v_type, 1, r_filter) async def delete(self): """ Delete this resource and all of its descendants. :return: """ self._component_context.resources_deleted.add(self._resource.id) for child_r in await self.get_children(): await child_r.delete() self._resource.is_modified = True self._resource.is_deleted = True async def flush_data_to_disk(self, path: str, pack: bool = True): """ Recursively repack the resource and write its data out to a file on disk. If this is a dataless resource, creates an empty file. :param path: Path to the file to write out to. The file is created if it does not exist. """ if pack is True: await self.pack_recursively() data = await self.get_data() if data is not None: with open(path, "wb") as f: f.write(data) else: # Create empty file with open(path, "wb") as f: pass def __repr__(self): properties = [ f"resource_id={self._resource.id.hex()}", f"tag=[{','.join([tag.__name__ for tag in self._resource.tags])}]", ] if self._resource.data_id: properties.append(f"data={self._resource.data_id.hex()}") return f"{type(self).__name__}(" + ", ".join(properties) + f")" async def summarize(self) -> str: """ Create a string summary of this resource, including specific tags, attribute types, and the data offsets of this resource in the parent and root (if applicable). Not that this is not a complete string representation of the resource: not all tags are included, and only the types of attributes are included, not their values. It is a summary which gives a high level overview of the resource. 
""" return await _default_summarize_resource(self) async def summarize_tree( self, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, indent: str = "", summarize_resource_callback: Optional[Callable[["Resource"], Awaitable[str]]] = None, ) -> str: """ Create a string summary of this resource and its (optionally filtered and/or sorted) descendants. The summaries of each resource are the same as the result of [summarize][ofrak.resource.Resource.summarize], organized into a tree structure. If a filter parameter is provided, it is applied recursively: the children of this resource will be filtered, then only those children matching the filter be displayed, and then the same filter will be applied to their children, etc. For example, :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort """ SPACER_BLANK = " " SPACER_LINE = "───" if summarize_resource_callback is None: summarize_resource_callback = _default_summarize_resource children = cast( List[Resource], list(await self.get_children(r_filter=r_filter, r_sort=r_sort)) ) if children: if indent == "": tree_string = "┌" else: tree_string = "┬" else: tree_string = "─" tree_string += f"{await summarize_resource_callback(self)}\n" # All children but the last should display as a "fork" in the drop-down tree # After the last child, a vertical line should not be drawn as part of the indent # Both of those needs are handled here child_formatting: List[Tuple[str, str]] = [ ("├", indent + "│" + SPACER_BLANK) for _ in children[:-1] ] child_formatting.append(("└", indent + " " + SPACER_BLANK)) for child, (branch_symbol, child_indent) in zip(children, child_formatting): child_tree_string = await child.summarize_tree( r_filter=r_filter, r_sort=r_sort, indent=child_indent, summarize_resource_callback=summarize_resource_callback, 
) tree_string += f"{indent}{branch_symbol}{SPACER_LINE}{child_tree_string}" return tree_string async def _default_summarize_resource(resource: Resource) -> str: attributes_info = ", ".join(attrs_type.__name__ for attrs_type in resource._resource.attributes) if resource._resource.data_id: root_data_range = await resource.get_data_range_within_root() parent_data_range = await resource.get_data_range_within_parent() data = await resource.get_data() if len(data) <= 128: # Convert bytes to string to check .isprintable without doing .decode. Note that # not all ASCII is printable, so we have to check both decodable and printable raw_data_str = "".join(map(chr, data)) if raw_data_str.isascii() and raw_data_str.isprintable(): data_string = f'data_ascii="{data.decode("ascii")}"' else: data_string = f"data_hex={data.hex()}" else: sha256 = hashlib.sha256() sha256.update(data) data_string = f"data_hash={sha256.hexdigest()[:8]}" data_info = ( f", global_offset=({hex(root_data_range.start)}-{hex(root_data_range.end)})" f", parent_offset=({hex(parent_data_range.start)}-{hex(parent_data_range.end)})" f", {data_string}" ) else: data_info = "" return ( f"{resource.get_id().hex()}: [caption=({resource.get_caption()}), " f"attributes=({attributes_info}){data_info}]" )
null
15,313
import logging from dataclasses import dataclass from enum import Enum from hashlib import md5 from typing import Any, Callable, Dict, Generator, Iterable, Optional from ofrak.component.packer import Packer, PackerError from ofrak.component.unpacker import Unpacker, UnpackerError from ofrak.core.binary import GenericBinary from ofrak.core.ecc.abstract import EccAlgorithm, EccError from ofrak.model.resource_model import ResourceAttributes from ofrak.resource import Resource from ofrak.service.resource_service_i import ResourceFilter from ofrak.model.component_model import ComponentConfig from ofrak_type.error import NotFoundError from ofrak_type.range import Range class FlashFieldType(Enum): """ `DATA_SIZE` is the packed size of the DATA only (excluding `MAGIC`, `CHECKSUM`, `DELIMITER`, `ECC`, etc) `TOTAL_SIZE` is the size of the entire region (including all `DATA`, `MAGIC`, `CHECKSUM`, `DELIMITER`, `ECC`, etc) `ALIGNMENT` will pad with \x00 bytes by default """ DATA = 0 ECC = 1 ALIGNMENT = 2 MAGIC = 3 DATA_SIZE = 4 ECC_SIZE = 5 CHECKSUM = 6 DELIMITER = 7 TOTAL_SIZE = 8 class FlashAttributes(ResourceAttributes): """ FlashAttributes is for specifying everything about the specific model of flash. The intent is to expand to all common flash configurations. Every block has a format specifier to show where each field is in the block as well as the length. If there is no OOB data, data_block_format may take this form: data_block_format = [FlashField(field_type=FlashFieldType.DATA,size=block_size),] Important Notes: Assumes that the provided list for each block format is ordered. Only define a block_format if they are different from other block formats. 
- A current workaround is adding `FlashField(FlashFieldType.ALIGNMENT, 0)` Assumes that there are only one of each block format except the data_block_format """ data_block_format: Iterable[FlashField] header_block_format: Optional[Iterable[FlashField]] = None first_data_block_format: Optional[Iterable[FlashField]] = None last_data_block_format: Optional[Iterable[FlashField]] = None tail_block_format: Optional[Iterable[FlashField]] = None ecc_attributes: Optional[FlashEccAttributes] = None checksum_func: Optional[Callable[[Any], Any]] = None def get_block_formats(self) -> Iterable: return filter( None, [ self.header_block_format, self.first_data_block_format, self.data_block_format, self.last_data_block_format, self.tail_block_format, ], ) def get_block_size(self, block_format: Iterable[FlashField]) -> int: if block_format is not None: return sum(field.size for field in block_format) else: return 0 def get_oob_size_in_block(self, block_format: Iterable[FlashField]) -> int: if block_format is None: return 0 data_length = self.get_field_length_in_block( block_format=block_format, field_type=FlashFieldType.DATA ) return self.get_block_size(block_format=block_format) - data_length def get_field_in_block( self, block_format: Optional[Iterable[FlashField]], field_type: FlashFieldType ) -> Optional[FlashField]: if block_format is None: return None for field in block_format: if field.field_type is field_type: return field return None def get_field_range_in_block( self, block_format: Optional[Iterable[FlashField]], field_type: FlashFieldType ) -> Optional[Range]: offset = 0 if block_format is None: return None for field in block_format: if field.field_type is field_type: data_size = field.size return Range(offset, offset + data_size) # Add all data in fields that come before data offset += field.size return None def get_field_length_in_block( self, block_format: Iterable[FlashField], field_type: FlashFieldType ) -> int: if block_format is None: return 0 field_range = 
self.get_field_range_in_block( block_format=block_format, field_type=field_type ) if field_range is not None: return field_range.length() return 0 def get_field_data_in_block( self, block_format: Optional[Iterable[FlashField]], field_type: FlashFieldType, data: bytes, block_start_offset: int, ) -> Optional[bytes]: field_range = self.get_field_range_in_block( block_format=block_format, field_type=field_type ) if field_range is not None: return data[ block_start_offset + field_range.start : block_start_offset + field_range.end ] return None def get_num_data_blocks(self, data_len: int, includes_oob: bool = False) -> int: data_block_count = 0 data_count = 0 for c in self.get_block_formats(): if c != self.data_block_format: # Skip data block for now if includes_oob: block_data_len = self.get_block_size(c) else: block_data_len = self.get_field_length_in_block(c, FlashFieldType.DATA) if block_data_len is not None: data_count += block_data_len if includes_oob: block_data_len = self.get_block_size(self.data_block_format) else: block_data_len = self.get_field_length_in_block( self.data_block_format, FlashFieldType.DATA ) while data_count < data_len: # The rest of the blocks are data blocks data_count += block_data_len data_block_count += 1 return data_block_count def iterate_through_all_blocks( self, data_len: int, includes_oob: bool ) -> Generator[Iterable[FlashField], None, int]: count = 0 for c in self.get_block_formats(): num_blocks = ( self.get_num_data_blocks(data_len, includes_oob) if c is self.data_block_format else 1 ) for _ in range(0, num_blocks): yield c count += 1 return count def get_total_oob_size(self, data_len: int, includes_oob: bool = False) -> int: total_oob_size = 0 for c in self.get_block_formats(): block_oob_size = self.get_oob_size_in_block(c) if block_oob_size is not None: num_blocks = ( self.get_num_data_blocks(data_len, includes_oob) if c is self.data_block_format else 1 ) total_oob_size += block_oob_size * num_blocks return total_oob_size def 
get_total_field_size( self, data_len: int, field_type: FlashFieldType, includes_oob: bool = False ) -> int: total_field_size = 0 for c in self.get_block_formats(): block_field_size = self.get_field_length_in_block(c, field_type) if block_field_size is not None: num_blocks = ( self.get_num_data_blocks(data_len, includes_oob) if c is self.data_block_format else 1 ) total_field_size += block_field_size * num_blocks return total_field_size class UnpackerError(RuntimeError): """Base exception raised by unpackers.""" def _get_end_from_magic(attributes: FlashAttributes, start_index: int, data: bytes, data_len: int): search_offset = start_index search_field = FlashFieldType.MAGIC if ( attributes is None or attributes.tail_block_format is None or attributes.ecc_attributes is None ): raise UnpackerError("Tried to find magic without all attributes defined") search_key = attributes.ecc_attributes.ecc_magic if search_key is None: raise UnpackerError( "Tried to find magic in tail without providing attribute in FlashEccAttributes" ) while 0 <= search_offset <= data_len: search_offset = data.find(search_key, search_offset, data_len) if search_offset == -1: break field_range_in_block = attributes.get_field_range_in_block( attributes.tail_block_format, search_field ) if field_range_in_block is not None: tail_start_offset = search_offset - field_range_in_block.start tail_read_magic = attributes.get_field_data_in_block( attributes.tail_block_format, search_field, data, tail_start_offset ) if tail_read_magic == search_key: tail_block_size = attributes.get_block_size(attributes.tail_block_format) tail_end_offset = tail_start_offset + tail_block_size return tail_end_offset search_offset += 1 return data_len
null
15,314
import logging from dataclasses import dataclass from enum import Enum from hashlib import md5 from typing import Any, Callable, Dict, Generator, Iterable, Optional from ofrak.component.packer import Packer, PackerError from ofrak.component.unpacker import Unpacker, UnpackerError from ofrak.core.binary import GenericBinary from ofrak.core.ecc.abstract import EccAlgorithm, EccError from ofrak.model.resource_model import ResourceAttributes from ofrak.resource import Resource from ofrak.service.resource_service_i import ResourceFilter from ofrak.model.component_model import ComponentConfig from ofrak_type.error import NotFoundError from ofrak_type.range import Range class FlashFieldType(Enum): """ `DATA_SIZE` is the packed size of the DATA only (excluding `MAGIC`, `CHECKSUM`, `DELIMITER`, `ECC`, etc) `TOTAL_SIZE` is the size of the entire region (including all `DATA`, `MAGIC`, `CHECKSUM`, `DELIMITER`, `ECC`, etc) `ALIGNMENT` will pad with \x00 bytes by default """ DATA = 0 ECC = 1 ALIGNMENT = 2 MAGIC = 3 DATA_SIZE = 4 ECC_SIZE = 5 CHECKSUM = 6 DELIMITER = 7 TOTAL_SIZE = 8 class FlashAttributes(ResourceAttributes): """ FlashAttributes is for specifying everything about the specific model of flash. The intent is to expand to all common flash configurations. Every block has a format specifier to show where each field is in the block as well as the length. If there is no OOB data, data_block_format may take this form: data_block_format = [FlashField(field_type=FlashFieldType.DATA,size=block_size),] Important Notes: Assumes that the provided list for each block format is ordered. Only define a block_format if they are different from other block formats. 
- A current workaround is adding `FlashField(FlashFieldType.ALIGNMENT, 0)` Assumes that there are only one of each block format except the data_block_format """ data_block_format: Iterable[FlashField] header_block_format: Optional[Iterable[FlashField]] = None first_data_block_format: Optional[Iterable[FlashField]] = None last_data_block_format: Optional[Iterable[FlashField]] = None tail_block_format: Optional[Iterable[FlashField]] = None ecc_attributes: Optional[FlashEccAttributes] = None checksum_func: Optional[Callable[[Any], Any]] = None def get_block_formats(self) -> Iterable: return filter( None, [ self.header_block_format, self.first_data_block_format, self.data_block_format, self.last_data_block_format, self.tail_block_format, ], ) def get_block_size(self, block_format: Iterable[FlashField]) -> int: if block_format is not None: return sum(field.size for field in block_format) else: return 0 def get_oob_size_in_block(self, block_format: Iterable[FlashField]) -> int: if block_format is None: return 0 data_length = self.get_field_length_in_block( block_format=block_format, field_type=FlashFieldType.DATA ) return self.get_block_size(block_format=block_format) - data_length def get_field_in_block( self, block_format: Optional[Iterable[FlashField]], field_type: FlashFieldType ) -> Optional[FlashField]: if block_format is None: return None for field in block_format: if field.field_type is field_type: return field return None def get_field_range_in_block( self, block_format: Optional[Iterable[FlashField]], field_type: FlashFieldType ) -> Optional[Range]: offset = 0 if block_format is None: return None for field in block_format: if field.field_type is field_type: data_size = field.size return Range(offset, offset + data_size) # Add all data in fields that come before data offset += field.size return None def get_field_length_in_block( self, block_format: Iterable[FlashField], field_type: FlashFieldType ) -> int: if block_format is None: return 0 field_range = 
self.get_field_range_in_block( block_format=block_format, field_type=field_type ) if field_range is not None: return field_range.length() return 0 def get_field_data_in_block( self, block_format: Optional[Iterable[FlashField]], field_type: FlashFieldType, data: bytes, block_start_offset: int, ) -> Optional[bytes]: field_range = self.get_field_range_in_block( block_format=block_format, field_type=field_type ) if field_range is not None: return data[ block_start_offset + field_range.start : block_start_offset + field_range.end ] return None def get_num_data_blocks(self, data_len: int, includes_oob: bool = False) -> int: data_block_count = 0 data_count = 0 for c in self.get_block_formats(): if c != self.data_block_format: # Skip data block for now if includes_oob: block_data_len = self.get_block_size(c) else: block_data_len = self.get_field_length_in_block(c, FlashFieldType.DATA) if block_data_len is not None: data_count += block_data_len if includes_oob: block_data_len = self.get_block_size(self.data_block_format) else: block_data_len = self.get_field_length_in_block( self.data_block_format, FlashFieldType.DATA ) while data_count < data_len: # The rest of the blocks are data blocks data_count += block_data_len data_block_count += 1 return data_block_count def iterate_through_all_blocks( self, data_len: int, includes_oob: bool ) -> Generator[Iterable[FlashField], None, int]: count = 0 for c in self.get_block_formats(): num_blocks = ( self.get_num_data_blocks(data_len, includes_oob) if c is self.data_block_format else 1 ) for _ in range(0, num_blocks): yield c count += 1 return count def get_total_oob_size(self, data_len: int, includes_oob: bool = False) -> int: total_oob_size = 0 for c in self.get_block_formats(): block_oob_size = self.get_oob_size_in_block(c) if block_oob_size is not None: num_blocks = ( self.get_num_data_blocks(data_len, includes_oob) if c is self.data_block_format else 1 ) total_oob_size += block_oob_size * num_blocks return total_oob_size def 
get_total_field_size( self, data_len: int, field_type: FlashFieldType, includes_oob: bool = False ) -> int: total_field_size = 0 for c in self.get_block_formats(): block_field_size = self.get_field_length_in_block(c, field_type) if block_field_size is not None: num_blocks = ( self.get_num_data_blocks(data_len, includes_oob) if c is self.data_block_format else 1 ) total_field_size += block_field_size * num_blocks return total_field_size class UnpackerError(RuntimeError): """Base exception raised by unpackers.""" def _get_end_from_size( attributes: FlashAttributes, start_index: int, data: bytes, data_len: int, field_type: FlashFieldType, ): # Only calculates the size if it knows the total size, or data size if (field_type is not FlashFieldType.TOTAL_SIZE) and ( field_type is not FlashFieldType.DATA_SIZE ): return data_len if attributes.tail_block_format is None: raise UnpackerError("Tried to find end of resource without providing tail block format") tail_block_size = attributes.get_block_size(attributes.tail_block_format) cur_offset = start_index total_data_size = 0 read_offset = 0 for c in attributes.iterate_through_all_blocks(data_len - start_index, True): cur_block_size = attributes.get_block_size(c) # Treat every block as the tail, checking if it has the right field cur_block_size_field = attributes.get_field_data_in_block( attributes.tail_block_format, field_type, data, cur_offset ) if cur_block_size_field is not None: read_offset = int.from_bytes(cur_block_size_field, "big") end_rel_offset = (cur_offset - start_index) + tail_block_size if ( attributes.get_field_in_block( attributes.tail_block_format, FlashFieldType.TOTAL_SIZE ) is not None and read_offset == end_rel_offset ) or ( attributes.get_field_in_block( attributes.tail_block_format, FlashFieldType.DATA_SIZE ) is not None and read_offset == total_data_size ): return cur_offset + tail_block_size total_data_size += attributes.get_field_length_in_block(c, FlashFieldType.DATA) cur_offset += cur_block_size 
return data_len
null
15,315
import logging from dataclasses import dataclass from enum import Enum from hashlib import md5 from typing import Any, Callable, Dict, Generator, Iterable, Optional from ofrak.component.packer import Packer, PackerError from ofrak.component.unpacker import Unpacker, UnpackerError from ofrak.core.binary import GenericBinary from ofrak.core.ecc.abstract import EccAlgorithm, EccError from ofrak.model.resource_model import ResourceAttributes from ofrak.resource import Resource from ofrak.service.resource_service_i import ResourceFilter from ofrak.model.component_model import ComponentConfig from ofrak_type.error import NotFoundError from ofrak_type.range import Range DATA_HASHES: Dict[bytes, bytes] = dict() class FlashFieldType(Enum): """ `DATA_SIZE` is the packed size of the DATA only (excluding `MAGIC`, `CHECKSUM`, `DELIMITER`, `ECC`, etc) `TOTAL_SIZE` is the size of the entire region (including all `DATA`, `MAGIC`, `CHECKSUM`, `DELIMITER`, `ECC`, etc) `ALIGNMENT` will pad with \x00 bytes by default """ DATA = 0 ECC = 1 ALIGNMENT = 2 MAGIC = 3 DATA_SIZE = 4 ECC_SIZE = 5 CHECKSUM = 6 DELIMITER = 7 TOTAL_SIZE = 8 class FlashField: field_type: FlashFieldType size: int class FlashAttributes(ResourceAttributes): """ FlashAttributes is for specifying everything about the specific model of flash. The intent is to expand to all common flash configurations. Every block has a format specifier to show where each field is in the block as well as the length. If there is no OOB data, data_block_format may take this form: data_block_format = [FlashField(field_type=FlashFieldType.DATA,size=block_size),] Important Notes: Assumes that the provided list for each block format is ordered. Only define a block_format if they are different from other block formats. 
- A current workaround is adding `FlashField(FlashFieldType.ALIGNMENT, 0)` Assumes that there are only one of each block format except the data_block_format """ data_block_format: Iterable[FlashField] header_block_format: Optional[Iterable[FlashField]] = None first_data_block_format: Optional[Iterable[FlashField]] = None last_data_block_format: Optional[Iterable[FlashField]] = None tail_block_format: Optional[Iterable[FlashField]] = None ecc_attributes: Optional[FlashEccAttributes] = None checksum_func: Optional[Callable[[Any], Any]] = None def get_block_formats(self) -> Iterable: return filter( None, [ self.header_block_format, self.first_data_block_format, self.data_block_format, self.last_data_block_format, self.tail_block_format, ], ) def get_block_size(self, block_format: Iterable[FlashField]) -> int: if block_format is not None: return sum(field.size for field in block_format) else: return 0 def get_oob_size_in_block(self, block_format: Iterable[FlashField]) -> int: if block_format is None: return 0 data_length = self.get_field_length_in_block( block_format=block_format, field_type=FlashFieldType.DATA ) return self.get_block_size(block_format=block_format) - data_length def get_field_in_block( self, block_format: Optional[Iterable[FlashField]], field_type: FlashFieldType ) -> Optional[FlashField]: if block_format is None: return None for field in block_format: if field.field_type is field_type: return field return None def get_field_range_in_block( self, block_format: Optional[Iterable[FlashField]], field_type: FlashFieldType ) -> Optional[Range]: offset = 0 if block_format is None: return None for field in block_format: if field.field_type is field_type: data_size = field.size return Range(offset, offset + data_size) # Add all data in fields that come before data offset += field.size return None def get_field_length_in_block( self, block_format: Iterable[FlashField], field_type: FlashFieldType ) -> int: if block_format is None: return 0 field_range = 
self.get_field_range_in_block( block_format=block_format, field_type=field_type ) if field_range is not None: return field_range.length() return 0 def get_field_data_in_block( self, block_format: Optional[Iterable[FlashField]], field_type: FlashFieldType, data: bytes, block_start_offset: int, ) -> Optional[bytes]: field_range = self.get_field_range_in_block( block_format=block_format, field_type=field_type ) if field_range is not None: return data[ block_start_offset + field_range.start : block_start_offset + field_range.end ] return None def get_num_data_blocks(self, data_len: int, includes_oob: bool = False) -> int: data_block_count = 0 data_count = 0 for c in self.get_block_formats(): if c != self.data_block_format: # Skip data block for now if includes_oob: block_data_len = self.get_block_size(c) else: block_data_len = self.get_field_length_in_block(c, FlashFieldType.DATA) if block_data_len is not None: data_count += block_data_len if includes_oob: block_data_len = self.get_block_size(self.data_block_format) else: block_data_len = self.get_field_length_in_block( self.data_block_format, FlashFieldType.DATA ) while data_count < data_len: # The rest of the blocks are data blocks data_count += block_data_len data_block_count += 1 return data_block_count def iterate_through_all_blocks( self, data_len: int, includes_oob: bool ) -> Generator[Iterable[FlashField], None, int]: count = 0 for c in self.get_block_formats(): num_blocks = ( self.get_num_data_blocks(data_len, includes_oob) if c is self.data_block_format else 1 ) for _ in range(0, num_blocks): yield c count += 1 return count def get_total_oob_size(self, data_len: int, includes_oob: bool = False) -> int: total_oob_size = 0 for c in self.get_block_formats(): block_oob_size = self.get_oob_size_in_block(c) if block_oob_size is not None: num_blocks = ( self.get_num_data_blocks(data_len, includes_oob) if c is self.data_block_format else 1 ) total_oob_size += block_oob_size * num_blocks return total_oob_size def 
get_total_field_size( self, data_len: int, field_type: FlashFieldType, includes_oob: bool = False ) -> int: total_field_size = 0 for c in self.get_block_formats(): block_field_size = self.get_field_length_in_block(c, field_type) if block_field_size is not None: num_blocks = ( self.get_num_data_blocks(data_len, includes_oob) if c is self.data_block_format else 1 ) total_field_size += block_field_size * num_blocks return total_field_size class PackerError(RuntimeError): """Base exception raised by Packers.""" class UnpackerError(RuntimeError): """Base exception raised by unpackers.""" def _build_block( cur_block_type: Iterable[FlashField], attributes: FlashAttributes, block_data: bytes, original_data: bytes, ) -> bytes: # Update the checksum, even if its not used we use it for tracking if we need it to update ECC if attributes is None: raise UnpackerError("Cannot pack without providing FlashAttributes") data_hash = md5(block_data).digest() ecc_attr = attributes.ecc_attributes block = b"" if ecc_attr is not None: ecc_class = ecc_attr.ecc_class if ecc_class is None: raise UnpackerError("Cannot pack FlashLogicalDataResource without providing ECC class") for field in cur_block_type: if field is not None: f = field.field_type f_size = field.size if f is FlashFieldType.ALIGNMENT: block += b"\x00" * f_size elif f is FlashFieldType.CHECKSUM: if attributes.checksum_func is not None: block += attributes.checksum_func(original_data) elif f is FlashFieldType.DATA: expected_data_size = attributes.get_field_length_in_block( cur_block_type, FlashFieldType.DATA ) real_data_len = len(block_data) if real_data_len < expected_data_size: cur_block_data = bytearray(expected_data_size) cur_block_data[:real_data_len] = block_data block_data = cur_block_data block += block_data elif f is FlashFieldType.DATA_SIZE: block += len(original_data).to_bytes(f_size, "big") elif f is FlashFieldType.DELIMITER: if cur_block_type == attributes.header_block_format: delimiter = ecc_attr.head_delimiter elif 
cur_block_type == attributes.first_data_block_format: delimiter = ecc_attr.first_data_delimiter elif cur_block_type == attributes.data_block_format: delimiter = ecc_attr.data_delimiter elif cur_block_type == attributes.last_data_block_format: delimiter = ecc_attr.last_data_delimiter elif cur_block_type == attributes.tail_block_format: delimiter = ecc_attr.tail_delimiter else: raise PackerError( "Tried to add delimiter without specifying in FlashEccAttributes" ) if delimiter is not None: block += delimiter elif f is FlashFieldType.ECC: if data_hash in DATA_HASHES: ecc = DATA_HASHES[data_hash] else: # Assumes that all previously added data in the block should be included in the ECC # TODO: Support ECC that comes before data try: if ecc_attr.ecc_class is not None: ecc = ecc_attr.ecc_class.encode(block) except TypeError: raise PackerError( "Tried to encode ECC without specifying ecc_class in FlashEccAttributes" ) block += ecc elif f is FlashFieldType.ECC_SIZE: block_ecc_field = attributes.get_field_in_block( cur_block_type, FlashFieldType.ECC ) if block_ecc_field is not None: block += block_ecc_field.size.to_bytes(f_size, "big") elif f is FlashFieldType.TOTAL_SIZE: data_size = len(original_data) oob_size = attributes.get_total_oob_size(data_len=data_size) expected_data_size = attributes.get_total_field_size( data_len=data_size, field_type=FlashFieldType.DATA ) total_size = expected_data_size + oob_size block += (total_size).to_bytes(f_size, "big") elif f is FlashFieldType.MAGIC: if ecc_attr.ecc_magic is None: raise PackerError( "Tried to add Magic without specifying in FlashEccAttributes" ) block += ecc_attr.ecc_magic return block
null
15,316
import asyncio
import ctypes
import logging
import math
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures.process import BrokenProcessPool
from dataclasses import dataclass

from ofrak.component.analyzer import Analyzer
from ofrak.model.resource_model import ResourceAttributes
from ofrak.resource import Resource, ResourceFactory
from ofrak.service.data_service_i import DataServiceInterface
from ofrak.service.resource_service_i import ResourceServiceInterface

LOGGER = logging.getLogger(__name__)


def sample_entropy(
    data: bytes, resource_id: bytes, window_size=256, max_samples=2**20
) -> bytes:  # pragma: no cover
    """
    Return a list of entropy values where each value represents the Shannon entropy of the byte
    value distribution over a fixed-size, sliding window. If the entropy data is larger than a
    maximum size, summarize it by periodically sampling it.

    Shannon entropy represents how uniform a probability distribution is. Since more uniform
    implies less predictable (because the probability of any outcome is equally likely in a
    uniform distribution), a sample with higher entropy is "more random" than one with lower
    entropy. More here: <https://en.wikipedia.org/wiki/Entropy_(information_theory)>.

    :param data: raw bytes to compute entropy over
    :param resource_id: resource ID, used only for progress logging
    :param window_size: width of the sliding window, in bytes
    :param max_samples: maximum number of entropy values to return; larger results are sampled
    :return: one entropy byte (0-255) per window position, possibly downsampled
    """
    if len(data) < 256:
        # Too little data to fill even one default-size window; nothing meaningful to report.
        return b""

    def log_percent(percent):  # pragma: no cover
        LOGGER.info(f"Entropy calculation {percent}% complete for {resource_id.hex()}")

    # NOTE(review): entropy_func is expected to be bound elsewhere in this module (C extension
    # or pure-Python fallback) -- confirm against the full file.
    result = entropy_func(data, window_size, log_percent)

    if len(result) <= max_samples:
        return result

    # Sample the calculated array if it is too large: stride through it at a fixed
    # (fractional) interval so exactly max_samples values remain.
    skip = len(result) / max_samples
    return bytes(result[math.floor(i * skip)] for i in range(max_samples))
Return a list of entropy values where each value represents the Shannon entropy of the byte value distribution over a fixed-size, sliding window. If the entropy data is larger than a maximum size, summarize it by periodically sampling it. Shannon entropy represents how uniform a probability distribution is. Since more uniform implies less predictable (because the probability of any outcome is equally likely in a uniform distribution), a sample with higher entropy is "more random" than one with lower entropy. More here: <https://en.wikipedia.org/wiki/Entropy_(information_theory)>.
15,317
import asyncio import ctypes import logging import math from concurrent.futures import ProcessPoolExecutor from concurrent.futures.process import BrokenProcessPool from dataclasses import dataclass from ofrak.component.analyzer import Analyzer from ofrak.model.resource_model import ResourceAttributes from ofrak.resource import Resource, ResourceFactory from ofrak.service.data_service_i import DataServiceInterface from ofrak.service.resource_service_i import ResourceServiceInterface def sample_magnitude(data: bytes, max_samples=2**20) -> bytes: # pragma: no cover if len(data) < max_samples: # TODO: Should this be a shallow copy instead? return data else: skip = len(data) / max_samples return bytes(data[math.floor(i * skip)] for i in range(max_samples))
null
15,318
import logging
import math
from typing import Callable, List, Optional


def _shannon_entropy(distribution: List[int], window_size: int) -> float:
    """
    Return the Shannon entropy of the input probability distribution (represented as a
    histogram counting byte occurrences over a window of known size).

    Shannon entropy represents how uniform a probability distribution is. Since more uniform
    implies less predictable (because the probability of any outcome is equally likely in a
    uniform distribution), a sample with higher entropy is "more random" than one with lower
    entropy. More here: <https://en.wikipedia.org/wiki/Entropy_(information_theory)>.

    :param distribution: 256-entry histogram of byte occurrences within the window
    :param window_size: number of bytes counted in the histogram
    :return: normalized entropy in [0.0, 1.0]
    """
    result = 0.0
    for num_occurrences in distribution:
        probability = num_occurrences / window_size
        # Note that the zero check is required because the domain of log2 is the positive reals
        result += probability * math.log2(probability) if probability != 0.0 else 0.0
    # Normalize by the maximum achievable entropy so the result lands in [0.0, 1.0]
    return -result / math.log2(window_size)


def entropy_py(
    data: bytes, window_size: int, log_percent: Optional[Callable[[int], None]] = None
) -> bytes:
    """
    Return a list of entropy values where each value represents the Shannon entropy of the
    byte value distribution over a fixed-size, sliding window.

    :param data: raw bytes to compute entropy over
    :param window_size: width of the sliding window, in bytes
    :param log_percent: optional progress callback, invoked with a completion percentage
    :return: one byte (entropy scaled to 0-255) per window position; empty if data is
        shorter than the window
    """
    if log_percent is None:
        log_percent = lambda x: None
    else:
        # Sort of hacky way to know we are being called from the tests and don't need to log this
        logging.warning(
            f"Using the Python implementation of the Shannon entropy calculation! This is potentially "
            f"very slow, and is only used when the C extension cannot be built/found."
        )

    # Create a histogram, and populate it with initial values
    histogram = [0] * 256
    for b in data[:window_size]:
        histogram[b] += 1

    # Calculate the entropy using a sliding window
    entropy = [0] * (len(data) - window_size)
    last_percent_logged = 0
    for i in range(len(entropy)):
        entropy[i] = math.floor(255 * _shannon_entropy(histogram, window_size))
        # Slide the window one byte to the right: retire the leftmost byte, admit the next one
        histogram[data[i]] -= 1
        histogram[data[i + window_size]] += 1
        percent = int((i * 100) / len(data))
        if percent > last_percent_logged and percent % 10 == 0:
            log_percent(percent)
            last_percent_logged = percent
    return bytes(entropy)
Return a list of entropy values where each value represents the Shannon entropy of the byte value distribution over a fixed-size, sliding window.
15,319
import io import struct import zlib from dataclasses import dataclass from enum import Enum from typing import Optional, List from ofrak.component.identifier import Identifier from ofrak.component.analyzer import Analyzer from ofrak.component.modifier import Modifier from ofrak.component.packer import Packer from ofrak.component.unpacker import Unpacker, UnpackerError from ofrak.core.binary import GenericBinary from ofrak.core.filesystem import File from ofrak.model.component_model import ComponentConfig from ofrak.model.resource_model import ResourceAttributes from ofrak.model.viewable_tag_model import AttributesType from ofrak.resource import Resource from ofrak.resource_view import ResourceView from ofrak.service.resource_service_i import ResourceFilter from ofrak_io.deserializer import BinaryDeserializer from ofrak_type.endianness import Endianness from ofrak_type.range import Range import zlib The provided code snippet includes necessary dependencies for implementing the `openwrt_crc32` function. Write a Python function `def openwrt_crc32(data: bytes) -> int` to solve the following problem: Calculate CRC32 a-la OpenWrt. Original implementation: <https://git.archive.openwrt.org/?p=14.07/openwrt.git;a=blob;f=tools/firmware-utils/src/trx.c> Implements CRC-32 Ethernet which requires XOR'ing the zlib.crc32 result with 0xFFFFFFFF Here is the function: def openwrt_crc32(data: bytes) -> int: """ Calculate CRC32 a-la OpenWrt. Original implementation: <https://git.archive.openwrt.org/?p=14.07/openwrt.git;a=blob;f=tools/firmware-utils/src/trx.c> Implements CRC-32 Ethernet which requires XOR'ing the zlib.crc32 result with 0xFFFFFFFF """ return (zlib.crc32(data) & 0xFFFFFFFF) ^ 0xFFFFFFFF
Calculate CRC32 a-la OpenWrt. Original implementation: <https://git.archive.openwrt.org/?p=14.07/openwrt.git;a=blob;f=tools/firmware-utils/src/trx.c> Implements CRC-32 Ethernet which requires XOR'ing the zlib.crc32 result with 0xFFFFFFFF
15,320
import inspect import logging def _warn_user_no_xattr(function_name: str) -> None: LOGGER.warning( f"Function {function_name} not found. Library xattr is not available on Windows platforms. \ Extended attributes will not be properly handled while using OFRAK on this platform. \ If you require extended attributes, please use a platform that supports xattr." ) def listxattr(f, symlink=False): frame = inspect.currentframe() _warn_user_no_xattr(inspect.getframeinfo(frame).function) return tuple()
null
15,321
import inspect import logging def _warn_user_no_xattr(function_name: str) -> None: LOGGER.warning( f"Function {function_name} not found. Library xattr is not available on Windows platforms. \ Extended attributes will not be properly handled while using OFRAK on this platform. \ If you require extended attributes, please use a platform that supports xattr." ) def getxattr(f, attr, symlink=False): frame = inspect.currentframe() _warn_user_no_xattr(inspect.getframeinfo(frame).function) return b""
null
15,322
import inspect import logging def _warn_user_no_xattr(function_name: str) -> None: def setxattr(f, attr, value, options=0, symlink=False): frame = inspect.currentframe() _warn_user_no_xattr(inspect.getframeinfo(frame).function) return None
null
15,323
import inspect import logging def _warn_user_no_xattr(function_name: str) -> None: def removexattr(f, attr, symlink=False): frame = inspect.currentframe() _warn_user_no_xattr(inspect.getframeinfo(frame).function) return None
null
15,324
from collections import defaultdict from dataclasses import dataclass from itertools import chain from typing import List, Tuple, Dict, Optional, Iterable, Mapping from immutabledict import immutabledict from ofrak.core.binary import BinaryPatchModifier, BinaryPatchConfig from ofrak.component.analyzer import Analyzer from ofrak.component.modifier import Modifier, ModifierError from ofrak.core.memory_region import MemoryRegion from ofrak.model.component_model import ComponentConfig from ofrak.model.resource_model import index from ofrak.resource import Resource from ofrak.resource_view import ResourceView from ofrak.service.resource_service_i import ( ResourceFilter, ResourceAttributeRangeFilter, ResourceAttributeValueFilter, ResourceSort, ) from ofrak_patch_maker.model import AssembledObject, PatchRegionConfig, BOM from ofrak_patch_maker.toolchain.model import Segment from ofrak_type.memory_permissions import MemoryPermissions from ofrak_type.range import Range, remove_subranges class MemoryRegion(Addressable): """ Binary bytes that are addressable. :ivar virtual_address: the virtual address of the start of the memory region :ivar size: the size of the memory region """ size: int def Size(self) -> int: return self.size def EndVaddr(self) -> int: return self.size + self.VirtualAddress def end_vaddr(self) -> int: """ Get the virtual address of the end of the memory region. 
:returns: the virtual address directly after the memory region """ return self.virtual_address + self.size def vaddr_range(self) -> Range: return Range.from_size(self.virtual_address, self.size) def caption(cls, all_attributes) -> str: try: mem_region_attributes = all_attributes[AttributesType[MemoryRegion]] addressable_attributes = all_attributes[AttributesType[Addressable]] except KeyError: return super().caption(all_attributes) return ( f"{str(cls.__name__)}: " f"{hex(addressable_attributes.virtual_address)}-" f"{hex(addressable_attributes.virtual_address + mem_region_attributes.size)}" ) def contains(self, vaddr: int) -> bool: """ Does the memory region contain the given virtual address? :param vaddr: a virtual address :return: True if the memory region contains the given virtual address """ return self.virtual_address <= vaddr < self.end_vaddr() def get_offset_in_self(self, vaddr: int) -> int: """ Get the physical offset within the memory region that corresponds to the given virtual address. :param vaddr: a virtual address :return: an offset within the memory region """ if not self.contains(vaddr): raise ValueError( f"Memory region {hex(self.virtual_address)}-{hex(self.end_vaddr())} " f"does not contain vaddr {hex(vaddr)}" ) return vaddr - self.virtual_address async def create_child_region( self, child_mr: "MemoryRegion", additional_attributes: Iterable[ResourceAttributes] = (), ) -> Resource: """ Create a child memory region that is mapped into this memory region. 
:param child_mr: the child memory region :param additional_attributes: additional attributes passed to the child memory region :raises ValueError: if the child's end offset is larger than the memory region's size :return: the created child resource """ start_offset = self.get_offset_in_self(child_mr.virtual_address) end_offset = start_offset + child_mr.size if start_offset < 0: raise ValueError( f"New child has vaddr {hex(child_mr.virtual_address)} which is before" f" the proposed parent's vaddr {hex(self.virtual_address)}" ) if end_offset > self.size: raise ValueError( f"New child at {hex(child_mr.virtual_address)} is too large to fit in the proposed " f"parent - end vaddr {hex(child_mr.end_vaddr())} goes past the parent's end vaddr " f"{hex(self.end_vaddr())}." ) return await self.resource.create_child_from_view( child_mr, data_range=Range(start_offset, end_offset), additional_attributes=additional_attributes, ) def get_mem_region_with_vaddr_from_sorted(vaddr: int, sorted_regions: Iterable["MemoryRegion"]): """ Return the first [memory region][ofrak.core.memory_region.MemoryRegion] in the input iterable that contains vaddr. :param vaddr: Virtual address :param sorted_regions: Sorted iterable of memory regions to check in order for vaddr :raises NotFoundError: If vaddr is not in any element of the iterable :return: The first memory region in the sorted iterable containing vaddr """ for mem_view in sorted_regions: # the first region we find should be the largest mem_region_vaddr_range = Range( mem_view.virtual_address, mem_view.virtual_address + mem_view.size, ) if vaddr in mem_region_vaddr_range: return mem_view raise NotFoundError(f"Cannot find memory region matching {hex(vaddr)}") def __str__(self): if type(self) is MemoryRegion: return f"MemoryRegion({hex(self.virtual_address)}-{hex(self.end_vaddr())})" else: return super().__str__() def __hash__(self): """ Return the hash of the virtual address and size. !!! 
warning Two memory regions may have the same hash, even if they refer to different data! As long as the address and size are the same, two regions will have the same hash, since the resource is not part of the data that is hashed. Be careful about comparing memory regions that refer to different data! """ return hash((self.virtual_address, self.size)) class Resource: """ Defines methods for interacting with the data and attributes of Resources, the main building block of OFRAK. """ __slots__ = ( "_job_id", "_job_context", "_component_context", "_resource_context", "_resource_view_context", "_resource", "_resource_factory", "_id_service", "_resource_service", "_data_service", "_job_service", "_dependency_handler", ) def __init__( self, job_id: bytes, resource: MutableResourceModel, resource_context: ResourceContext, resource_view_context: ResourceViewContext, job_context: Optional[JobRunContext], component_context: ComponentContext, resource_factory: "ResourceFactory", id_service: IDServiceInterface, data_service: DataServiceInterface, resource_service: ResourceServiceInterface, job_service: JobServiceInterface, ): self._job_id: bytes = job_id self._job_context: Optional[JobRunContext] = job_context self._component_context: ComponentContext = component_context self._resource_context: ResourceContext = resource_context self._resource_view_context: ResourceViewContext = resource_view_context self._resource: MutableResourceModel = resource self._resource_factory: "ResourceFactory" = resource_factory self._id_service: IDServiceInterface = id_service self._resource_service: ResourceServiceInterface = resource_service self._data_service: DataServiceInterface = data_service self._job_service: JobServiceInterface = job_service def get_id(self) -> bytes: """ :return: This resource's ID """ return self._resource.id def get_job_id(self) -> bytes: """ Each resource belongs to a specific "job." See [JobServiceInterface][ofrak.service.job_service_i.JobServiceInterface]. 
:return: The ID of the job this resource belongs to """ return self._job_id def get_data_id(self) -> Optional[bytes]: """ Each resource may have a data ID. This refers to a [DataModel][ofrak.model.data_model.DataModel] representing some chunk of raw binary data. :return: The data ID associated with this resource, if it exists """ return self._resource.data_id def get_resource_context(self) -> ResourceContext: return self._resource_context def get_resource_view_context(self) -> ResourceViewContext: return self._resource_view_context def get_component_context(self) -> ComponentContext: return self._component_context def get_job_context(self) -> Optional[JobRunContext]: return self._job_context def get_caption(self) -> str: return self._resource.caption def is_modified(self) -> bool: """ Check if the resource has been modified in this context and is considered "dirty". :return: `True` if the resource is modified, `False` otherwise """ return self._resource.is_modified def get_model(self) -> MutableResourceModel: """ Get the underlying [model][ofrak.model.resource_model.ResourceModel] of this resource. :return: """ return self._resource async def get_data(self, range: Optional[Range] = None) -> bytes: """ A resource often represents a chunk of underlying binary data. This method returns the entire chunk by default; this can be reduced by an optional parameter. :param range: A range within the resource's data, relative to the resource's data itself (e.g. Range(0, 10) returns the first 10 bytes of the chunk) :return: The full range or a partial range of this resource's bytes """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. 
Cannot get data from a resource with no data" ) data = await self._data_service.get_data(self._resource.data_id, range) if range is None: range = Range(0, len(data)) self._component_context.access_trackers[self._resource.id].data_accessed.add(range) return data async def get_data_length(self) -> int: """ :return: The length of the underlying binary data this resource represents """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data length from a " "resource with no data." ) return await self._data_service.get_data_length(self._resource.data_id) async def get_data_range_within_parent(self) -> Range: """ If this resource is "mapped," i.e. its underlying data is defined as a range of its parent's underlying data, this method returns the range within the parent resource's data where this resource lies. If this resource is not mapped (it is root), it returns a range starting at 0 with length 0. :return: The range of the parent's data which this resource represents """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data range from a " "resource with no data." ) if self._resource.parent_id is None: return Range(0, 0) parent_models = list( await self._resource_service.get_ancestors_by_id(self._resource.id, max_count=1) ) if len(parent_models) != 1: raise NotFoundError(f"There is no parent for resource {self._resource.id.hex()}") parent_model = parent_models[0] parent_data_id = parent_model.data_id if parent_data_id is None: return Range(0, 0) try: return await self._data_service.get_range_within_other( self._resource.data_id, parent_data_id ) except ValueError: return Range(0, 0) async def get_data_range_within_root(self) -> Range: """ Does the same thing as `get_data_range_within_parent`, except the range is relative to the root. 
:return: The range of the root node's data which this resource represents """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data range from a " "resource with no data." ) return await self._data_service.get_data_range_within_root(self._resource.data_id) async def search_data( self, query: Pattern[bytes], start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[Tuple[int, bytes], ...]: ... async def search_data( self, query: bytes, start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[int, ...]: ... async def search_data(self, query, start=None, end=None, max_matches=None): """ Search for some data in this resource. The query may be a regex pattern (a return value of `re.compile`). If the query is a regex pattern, returns a tuple of pairs with both the offset of the match and the contents of the match itself. If the query is plain bytes, a list of only the match offsets are returned. :param query: Plain bytes to exactly match or a regex pattern to search for :param start: Start offset in the data model to begin searching :param end: End offset in the data model to stop searching :return: A tuple of offsets matching a plain bytes query, or a list of (offset, match) pairs for a regex pattern query """ return await self._data_service.search(self.get_data_id(), query, start, end, max_matches) async def save(self): """ If this resource has been modified, update the model stored in the resource service with the local changes. 
:raises NotFoundError: If the resource service does not have a model for this resource's ID """ await save_resources( (self,), self._resource_service, self._data_service, self._component_context, self._resource_context, self._resource_view_context, ) def _save(self) -> Tuple[List[bytes], List[DataPatch], List[MutableResourceModel]]: resources_to_delete: List[bytes] = [] patches_to_apply: List[DataPatch] = [] resources_to_update: List[MutableResourceModel] = [] if self._resource.is_deleted: resources_to_delete.append(self._resource.id) elif self._resource.is_modified: modification_tracker = self._component_context.modification_trackers.get( self._resource.id ) assert modification_tracker is not None, ( f"Resource {self._resource.id.hex()} was " f"marked as modified but is missing a tracker!" ) patches_to_apply.extend(modification_tracker.data_patches) resources_to_update.append(self._resource) modification_tracker.data_patches.clear() return resources_to_delete, patches_to_apply, resources_to_update async def _fetch(self, resource: MutableResourceModel): """ Update the local model with the latest version from the resource service. This will fail if this resource has been modified. 
:raises InvalidStateError: If the local resource model has been modified :raises NotFoundError: If the resource service does not have a model for this resource's ID """ if resource.is_modified and not resource.is_deleted: raise InvalidStateError( f"Cannot fetch dirty resource {resource.id.hex()} (resource " f"{self.get_id().hex()} attempted fetch)" ) try: fetched_resource = await self._resource_service.get_by_id(resource.id) except NotFoundError: if ( resource.id in self._component_context.modification_trackers and resource.id in self._resource_context.resource_models ): del self._resource_context.resource_models[resource.id] return resource.reset(fetched_resource) async def _fetch_resources(self, resource_ids: Iterable[bytes]): tasks = [] for resource_id in resource_ids: context_resource = self._resource_context.resource_models.get(resource_id) if context_resource is not None: tasks.append(self._fetch(context_resource)) await asyncio.gather(*tasks) async def _update_views(self, modified: Set[bytes], deleted: Set[bytes]): for resource_id in modified: views_in_context = self._resource_view_context.views_by_resource[resource_id] for view in views_in_context.values(): if resource_id not in self._resource_context.resource_models: await self._fetch(view.resource.get_model()) # type: ignore if resource_id not in self._resource_context.resource_models: view.set_deleted() continue updated_model = self._resource_context.resource_models[resource_id] fresh_view = view.create(updated_model) for field in dataclasses.fields(fresh_view): if field.name == "_resource": continue setattr(view, field.name, getattr(fresh_view, field.name)) for resource_id in deleted: views_in_context = self._resource_view_context.views_by_resource[resource_id] for view in views_in_context.values(): view.set_deleted() async def run( self, component_type: Type[ComponentInterface[CC]], config: CC = None, ) -> ComponentRunResult: """ Run a single component. 
Runs even if the component has already been run on this resource. :param component_type: The component type (may be an interface) to get and run :param config: Optional config to pass to the component :return: A ComponentRunResult containing information on resources affected by the component """ job_context = self._job_context component_result = await self._job_service.run_component( JobComponentRequest( self._job_id, self._resource.id, component_type.get_id(), config, ), job_context, ) for deleted_id in component_result.resources_deleted: if deleted_id in self._component_context.modification_trackers: del self._component_context.modification_trackers[deleted_id] await self._fetch_resources(component_result.resources_modified) await self._update_views( component_result.resources_modified, component_result.resources_deleted ) return component_result async def auto_run( self, components: Iterable[Type[ComponentInterface]] = tuple(), blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(), all_unpackers: bool = False, all_identifiers: bool = False, all_analyzers: bool = False, all_packers: bool = False, ) -> ComponentRunResult: """ Automatically run multiple components which may run on this resource. From an initial set of possible components to run, this set is searched for components for which the intersection of the component's targets and this resource's tags is not empty. Accepts several optional flags to expand or restrict the initial set of components. 
:param components: Components to explicitly add to the initial set of components :param blacklisted_components: Components to explicitly remove to the initial set of components :param all_unpackers: If true, all Unpackers are added to the initial set of components :param all_identifiers: If true, all Identifiers are added to the initial set of components :param all_analyzers: If true, all Analyzers are added to the initial set of components :return: A ComponentRunResult containing information on resources affected by the component """ components_result = await self._job_service.run_components( JobMultiComponentRequest( self._job_id, self._resource.id, components_allowed=tuple(c.get_id() for c in components), components_disallowed=tuple(c.get_id() for c in blacklisted_components), all_unpackers=all_unpackers, all_identifiers=all_identifiers, all_analyzers=all_analyzers, all_packers=all_packers, ) ) for deleted_id in components_result.resources_deleted: if deleted_id in self._component_context.modification_trackers: del self._component_context.modification_trackers[deleted_id] await self._fetch_resources(components_result.resources_modified) await self._update_views( components_result.resources_modified, components_result.resources_deleted ) return components_result async def unpack(self) -> ComponentRunResult: """ Unpack the resource. :return: A ComponentRunResult containing information on resources affected by the component """ return await self.auto_run(all_identifiers=True, all_unpackers=True) async def analyze(self, resource_attributes: Type[RA]) -> RA: """ Analyze the resource for a specific resource attribute. 
:param Type[RA] resource_attributes: :return: """ attributes = self._check_attributes(resource_attributes) if attributes is None: await self._analyze_attributes((resource_attributes,)) return self.get_attributes(resource_attributes) else: return attributes async def identify(self) -> ComponentRunResult: """ Run all registered identifiers on the resource, tagging it with matching resource tags. """ return await self.auto_run(all_identifiers=True) async def pack(self) -> ComponentRunResult: """ Pack the resource. :return: A ComponentRunResult containing information on resources affected by the component """ return await self.auto_run(all_packers=True) async def auto_run_recursively( self, components: Iterable[Type[ComponentInterface]] = tuple(), blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(), blacklisted_tags: Iterable[ResourceTag] = tuple(), all_unpackers: bool = False, all_identifiers: bool = False, all_analyzers: bool = False, ) -> ComponentRunResult: """ Automatically run multiple components which may run on this resource or its descendents. From an initial set of possible components to run, this set is searched for components for which the intersection of the component's targets and this resource's tags is not empty. Accepts several optional flags to expand or restrict the initial set of components. After each run, compatible components from the initial set are run on any resources which have had tags added (including newly created resources). This is repeated until no new tags are added. 
:param components: Components to explicitly add to the initial set of components :param blacklisted_components: Components to explicitly remove to the initial set of components :param all_unpackers: If true, all Unpackers are added to the initial set of components :param all_identifiers: If true, all Identifiers are added to the initial set of components :param all_analyzers: If true, all Analyzers are added to the initial set of components :return: A ComponentRunResult containing information on resources affected by the component """ components_result = await self._job_service.run_components_recursively( JobMultiComponentRequest( self._job_id, self._resource.id, components_allowed=tuple(c.get_id() for c in components), components_disallowed=tuple(c.get_id() for c in blacklisted_components), all_unpackers=all_unpackers, all_identifiers=all_identifiers, all_analyzers=all_analyzers, tags_ignored=tuple(blacklisted_tags), ) ) await self._fetch_resources(components_result.resources_modified) await self._update_views( components_result.resources_modified, components_result.resources_deleted ) return components_result async def unpack_recursively( self, blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(), do_not_unpack: Iterable[ResourceTag] = tuple(), ) -> ComponentRunResult: """ Automatically unpack this resource and recursively unpack all of its descendants. First this resource is unpacked; then, any resource which "valid" tags were added to will also be unpacked. New resources created with tags count as resources with new tags. A "valid" tag is a tag which is not explicitly ignored via the ``do_not_unpack`` argument. The unpacking will only stop when no new "valid" tags have been added in the previous iteration. This can lead to a very long unpacking process if it is totally unconstrained. :param blacklisted_components: Components which are blocked from running during the recursive unpacking, on this resource or any descendants. 
:param do_not_unpack: Do not unpack resources with this tag, and ignore these tags when checking if any new tags have been added in this iteration. :return: A ComponentRunResult containing information on resources affected by the component """ return await self.auto_run_recursively( all_identifiers=True, all_unpackers=True, blacklisted_components=blacklisted_components, blacklisted_tags=do_not_unpack, ) async def analyze_recursively(self) -> ComponentRunResult: return await self.auto_run_recursively(all_analyzers=True) async def pack_recursively(self) -> ComponentRunResult: """ Recursively pack the resource, starting with its descendants. """ return await self._job_service.pack_recursively(self._job_id, self._resource.id) async def write_to(self, destination: BinaryIO, pack: bool = True): """ Recursively repack resource and write data out to an arbitrary ``BinaryIO`` destination. :param destination: Destination for packed resource data :return: """ if pack is True: await self.pack_recursively() destination.write(await self.get_data()) async def _analyze_attributes(self, attribute_types: Tuple[Type[ResourceAttributes], ...]): job_context = self._job_context components_result = await self._job_service.run_analyzer_by_attribute( JobAnalyzerRequest( self._job_id, self._resource.id, attribute_types, tuple(self._resource.tags), ), job_context, ) # Update all the resources in the local context that were modified as part of the # analysis await self._fetch_resources(components_result.resources_modified) await self._update_views( components_result.resources_modified, components_result.resources_deleted ) return components_result async def _create_resource(self, resource_model: ResourceModel) -> "Resource": return await self._resource_factory.create( self._job_id, resource_model.id, self._resource_context, self._resource_view_context, self._component_context, self._job_context, ) async def _create_resources( self, resource_models: Iterable[ResourceModel] ) -> 
Iterable["Resource"]: return await self._resource_factory.create_many( self._job_id, [resource_model.id for resource_model in resource_models], self._resource_context, self._resource_view_context, self._component_context, self._job_context, ) async def create_child( self, tags: Iterable[ResourceTag] = None, attributes: Iterable[ResourceAttributes] = None, data: Optional[bytes] = None, data_range: Optional[Range] = None, ) -> "Resource": """ Create a new resource as a child of this resource. This method entirely defines the child's tags and attributes. This method also defines the child's data semantics: A child resource can either be defined in one of three ways: 1) The resource contains no data ("Dataless" resource). Not used in practice. 2) As mapping a range of its parent's data ("Mapped" resource). For example, an instruction maps a portion of its parent basic block. 3) Defining its own new, independent data ("Unmapped" resource). For example, a file extracted from a zip archive is a child of the zip archive resource, but its data does not map to some specific range of that parent archive. By default a resource will be defined the third way (unmapped). To specify that the resource is a mapped resource, include the optional ``data_range`` parameter set to the range of the parent's data which the child maps. That is, `data_range=Range(0, 10)` creates a resource which maps the first 10 bytes of the parent. The optional ``data`` param defines whether to populate the new child's data. It can be used only if the data is unmapped. If the child is unmapped, the value of ``data`` still becomes that child's data, but the parent's data is unaffected. If ``data`` and ``data_range`` are both `None` (default), the new child is a dataless resource. 
The following table sums up the possible interactions between ``data`` and ``data_range``: | | ``data_range`` param not `None` | ``data_range`` param `None` | |--------------------------|--------------------------------------------------------|----------------------------------------------| | ``data`` param not `None` | Not allowed | Child unmapped, child's data set to ``data`` | | ``data`` param `None` | Child mapped, parent's data untouched | Child is dataless | :param tags: [tags][ofrak.model.tag_model.ResourceTag] to add to the new child :param attributes: [attributes][ofrak.model.resource_model.ResourceAttributes] to add to the new child :param data: The binary data for the new child. If `None` and ``data_range`` is `None`, the resource has no data. Defaults to `None`. :param data_range: The range of the parent's data which the new child maps. If `None` ( default), the child will not map the parent's data. :return: """ if data is not None and data_range is not None: raise ValueError( "Cannot create a child from both data and data_range. These parameters are " "mutually exclusive." 
) resource_id = self._id_service.generate_id() if data_range is not None: if self._resource.data_id is None: raise ValueError( "Cannot create a child with mapped data from a parent that doesn't have data" ) data_model_id = resource_id await self._data_service.create_mapped( data_model_id, self._resource.data_id, data_range, ) data_attrs = Data(data_range.start, data_range.length()) attributes = [data_attrs, *attributes] if attributes else [data_attrs] elif data is not None: if self._resource.data_id is None: raise ValueError( "Cannot create a child with data from a parent that doesn't have data" ) data_model_id = resource_id await self._data_service.create_root(data_model_id, data) data_attrs = Data(0, len(data)) attributes = [data_attrs, *attributes] if attributes else [data_attrs] else: data_model_id = None resource_model = ResourceModel.create( resource_id, data_model_id, self._resource.id, tags, attributes, self._component_context.component_id, self._component_context.component_version, ) await self._resource_service.create(resource_model) if self._job_context: resource_tracker = self._job_context.trackers[resource_model.id] resource_tracker.tags_added.update(resource_model.tags) self._component_context.mark_resource_modified(resource_id) self._component_context.resources_created.add(resource_model.id) created_resource = await self._create_resource(resource_model) return created_resource async def create_child_from_view( self, view: RV, data_range: Optional[Range] = None, data: Optional[bytes] = None, additional_tags: Iterable[ResourceTag] = (), additional_attributes: Iterable[ResourceAttributes] = (), ) -> "Resource": """ Create a new resource as a child of this resource. The new resource will have tags and attributes as defined by the [view][ofrak.model.viewable_tag_model.ViewableResourceTag]; in this way a view can act as a template to create a new resource. 
The ``additional_tags`` and ``additional_attributes`` can also be used to add more tags and attributes beyond what the view contains. This method's ``data`` and ``data_range`` parameters have the same semantics as in `create_child`, in short: | | ``data_range`` param not `None` | ``data_range`` param `None` | |--------------------------|--------------------------------------------------------|----------------------------------------------| | ``data`` param not `None` | Child mapped, ``data`` patched into child (and parent) | Child unmapped, child's data set to ``data`` | | ``data`` param `None` | Child mapped, parent's data untouched | Child is dataless | See `create_child` documentation for details. :param view: A [resource view][ofrak.resource_view] to pull [tags][ofrak.model.tag_model.ResourceTag] and [attributes][ofrak.model.resource_model.ResourceAttributes] from to populate the new child :param data_range: The range of the parent's data which the new child maps. If `None` ( default), the child will not map the parent's data. :param data: The binary data for the new child. If `None` and ``data_range`` is `None`, the resource has no data. Defaults to `None`. :param additional_tags: Any [tags][ofrak.model.tag_model.ResourceTag] for the child in addition to those from the ``view`` :param additional_attributes: Any [attributes][ofrak.model.resource_model.ResourceAttributes] for the child in addition to those from the ``view`` :return: """ viewable_tag: ViewableResourceTag = type(view) new_resource = await self.create_child( tags=(viewable_tag, *additional_tags), attributes=(*view.get_attributes_instances().values(), *additional_attributes), data_range=data_range, data=data, ) return new_resource def _view_as(self, viewable_tag: Type[RV]) -> Union[RV, Awaitable[RV]]: """ Try to get a view without calling any analysis, to avoid as many unnecessary `asyncio.gather` calls as possible. 
Checks cached views first for view, and if not found, then checks if the attributes needed to create the view are already present and up-to-date, and only if both of those are not found does it return an awaitable. """ if self._resource_view_context.has_view(self.get_id(), viewable_tag): # First early return: View already exists in cache return self._resource_view_context.get_view(self.get_id(), viewable_tag) if not issubclass(viewable_tag, ResourceViewInterface): raise ValueError( f"Cannot get view for resource {self.get_id().hex()} of a type " f"{viewable_tag.__name__} because it is not a subclass of ResourceView" ) if not self.has_tag(viewable_tag): raise ValueError( f"Cannot get resource {self.get_id().hex()} as view " f"{viewable_tag.__name__} because the resource is not tagged as a " f"{viewable_tag.__name__}" ) composed_attrs_types = viewable_tag.composed_attributes_types existing_attributes = [self._check_attributes(attrs_t) for attrs_t in composed_attrs_types] if all(existing_attributes): # Second early return: All attributes needed for view are present and up-to-date view = viewable_tag.create(self.get_model()) view.resource = self # type: ignore self._resource_view_context.add_view(self.get_id(), view) return cast(RV, view) # Only if analysis is absolutely necessary is an awaitable created and returned async def finish_view_creation( attrs_to_analyze: Tuple[Type[ResourceAttributes], ...] ) -> RV: await self._analyze_attributes(attrs_to_analyze) view = viewable_tag.create(self.get_model()) view.resource = self # type: ignore self._resource_view_context.add_view(self.get_id(), view) return cast(RV, view) return finish_view_creation( tuple( attrs_t for attrs_t, existing in zip(composed_attrs_types, existing_attributes) if not existing ) ) async def view_as(self, viewable_tag: Type[RV]) -> RV: """ Provides a specific type of view instance for this resource. 
The returned instance is an object which has some of the information from this same resource, however in a simpler interface. This resource instance will itself remain available through the view's ``.resource`` property. :param viewable_tag: A ViewableResourceTag, which this resource's model must already contain :raises ValueError: If the model does not contain this tag, or this tag is not a ViewableResourceTag :return: """ view_or_create_view_task: Union[RV, Awaitable[RV]] = self._view_as(viewable_tag) if isawaitable(view_or_create_view_task): return await view_or_create_view_task else: return cast(RV, view_or_create_view_task) def add_view(self, view: ResourceViewInterface): """ Add all the attributes composed in a view to this resource, and tag this resource with the view type. Calling this is the equivalent of making N ``add_attributes`` calls and one ``add_tag`` call (where N is the number of attributes the view is composed of). :param view: An instance of a view """ for attributes in view.get_attributes_instances().values(): # type: ignore self.add_attributes(attributes) self.add_tag(type(view)) def _set_modified(self): self._component_context.mark_resource_modified(self._resource.id) def _add_tag(self, tag: ResourceTag): """ Associate a tag with the resource. If the resource already have the provided tag, it has no effects. All parent classes of the provided tag that are tags themselves are also added. """ if self._resource.has_tag(tag, False): return self._component_context.mark_resource_modified(self._resource.id) new_tags = self._resource.add_tag(tag) if self._job_context: resource_tracker = self._job_context.trackers[self._resource.id] resource_tracker.tags_added.update(new_tags) def add_tag(self, *tags: ResourceTag): """ Associate multiple tags with the resource. If the resource already have one of the provided tag, the tag is not added. All parent classes of the provided tag that are tags themselves are also added. 
""" for tag in tags: self._add_tag(tag) def get_tags(self, inherit: bool = True) -> Iterable[ResourceTag]: """ Get a set of tags associated with the resource. """ return self._resource.get_tags(inherit) def has_tag(self, tag: ResourceTag, inherit: bool = True) -> bool: """ Determine if the resource is associated with the provided tag. """ return self._resource.has_tag(tag, inherit) def remove_tag(self, tag: ResourceTag): if not self._resource.has_tag(tag): return self._set_modified() self._resource.remove_tag(tag) def get_most_specific_tags(self) -> Iterable[ResourceTag]: """ Get all tags associated with the resource from which no other tags on that resource inherit. In other words, get the resource's tags that aren't subclassed by other tags on the resource. For example, for a resource tagged as `Elf`, the result would be just `[Elf]` instead of `[Elf, Program, GenericBinary]` that `Resource.get_tags` returns. This is because `Elf` inherits from `Program`, which inherits from `GenericBinary`. Even though the resource has all of those tags, the most derived class with no other derivatives is the "most specific." """ return self._resource.get_most_specific_tags() def _check_attributes(self, attributes_type: Type[RA]) -> Optional[RA]: """ Try to get the current attributes. TODO: Should we be using the version as well? The client wouldn't know the version of the component in a client-server environment. We could do that efficiently by adding a service method that list all available components (and their version) :param attributes_type: The type of attributes to check this resource for. :return: The requested attributes if they are present and up-to-date, otherwise return None. 
""" attributes = self._resource.get_attributes(attributes_type) if attributes is not None: # Make sure that the attributes have not been invalidated component_id = self._resource.get_component_id_by_attributes(type(attributes)) if component_id is not None: return attributes return None def _add_attributes(self, attributes: ResourceAttributes): existing_attributes = self._resource.get_attributes(type(attributes)) if existing_attributes is not None and existing_attributes == attributes: return self._set_modified() self._resource.add_attributes(attributes) component_context = self._component_context self._resource.add_component_for_attributes( component_context.component_id, component_context.component_version, type(attributes) ) def add_attributes(self, *attributes: ResourceAttributes): """ Add the provided attributes to the resource. If the resource already have the provided attributes classes, they are replaced with the provided one. """ for attrs in attributes: self._add_attributes(attrs) def has_attributes(self, attributes_type: Type[ResourceAttributes]) -> bool: """ Check if this resource has a value for the given attributes type. :param attributes_type: :return: """ return self._resource.has_attributes(attributes_type) def get_attributes(self, attributes_type: Type[RA]) -> RA: """ If this resource has attributes matching the given type, return the value of those attributes. Otherwise returns `None`. :param attributes_type: :return: """ attributes = self._resource.get_attributes(attributes_type) if attributes is None: raise NotFoundError( f"Cannot find attributes {attributes_type} for resource {self.get_id().hex()}" ) self._component_context.access_trackers[self._resource.id].attributes_accessed.add( attributes_type ) return attributes def remove_attributes(self, attributes_type: Type[ResourceAttributes]): """ Remove the value of a given attributes type from this resource, if there is such a value. 
If the resource does not have a value for the given attributes type, do nothing. :param attributes_type: :return: """ if not self._resource.has_attributes(attributes_type): return self._set_modified() self._resource.remove_attributes(attributes_type) def add_component( self, component_id: bytes, version: int, ): """ Mark that a component has run on this resource :param component_id: ID of the component which ran :param version: Version of the component which ran :return: """ self._set_modified() self._resource.add_component(component_id, version) def add_component_for_attributes( self, component_id: bytes, version: int, attributes: Type[ResourceAttributes], ): """ Mark that a component was responsible for adding some attributes to this resource. :param component_id: ID of the component which added the attributes :param version: version of the component which added the attributes :param attributes: The type of attributes which were added :return: """ self._set_modified() self._resource.add_component_for_attributes(component_id, version, attributes) def remove_component( self, component_id: bytes, attributes: Optional[Type[ResourceAttributes]] = None, ): """ Remove any information that this component ran on this resource and/or added a particular type of attributes to this resource :param component_id: ID of the component to remove information about :param attributes: The type of attributes to remove information about :return: """ self._set_modified() self._resource.remove_component(component_id, attributes) def has_component_run(self, component_id: bytes, desired_version: Optional[int] = None) -> bool: """ Check if a particular component has run on this resource :param component_id: ID of the component to check for :param desired_version: If this is not `None`, also check that a specific version of ``component`` ran. Defaults to ``None``. :return: `True` if a component matching ``component_id`` and ``desired_version`` ran on this resource, `False` otherwise. 
If ``desired_version`` is `None`, only ``component_id`` must be matched to return `True`. """ version = self._resource.get_component_version(component_id) if version is None: return False if desired_version is None: return True return version == desired_version def queue_patch( self, patch_range: Range, data: bytes, ): """ Replace the data within the provided range with the provided data. This operation may shrink, expand or leave untouched the resource's data. Patches are queued up to be applied, and will only be applied to the resource's data after the component this was called from exits. :param patch_range: The range of binary data in this resource to replace :param data: The bytes to replace part of this resource's data with :return: """ if not self._component_context: raise InvalidStateError( f"Cannot patch resource {self._resource.id.hex()} without a context" ) if self._resource.data_id is None: raise ValueError("Cannot patch a resource with no data") self._component_context.modification_trackers[self._resource.id].data_patches.append( DataPatch( patch_range, self._resource.data_id, data, ) ) self._resource.is_modified = True async def get_parent_as_view(self, v_type: Type[RV]) -> RV: """ Get the parent of this resource. The parent will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param v_type: The type of [view][ofrak.resource] to get the parent as """ parent_r = await self.get_parent() return await parent_r.view_as(v_type) async def get_parent(self) -> "Resource": """ Get the parent of this resource. """ models = list( await self._resource_service.get_ancestors_by_id(self._resource.id, max_count=1) ) if len(models) != 1: raise NotFoundError(f"There is no parent for resource {self._resource.id.hex()}") return await self._create_resource(models[0]) async def get_ancestors( self, r_filter: ResourceFilter = None, ) -> Iterable["Resource"]: """ Get all the ancestors of this resource. 
May optionally filter the ancestors so only those matching certain parameters are returned. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ models = await self._resource_service.get_ancestors_by_id( self._resource.id, r_filter=r_filter ) return await self._create_resources(models) async def get_only_ancestor_as_view( self, v_type: Type[RV], r_filter: ResourceFilter, ) -> RV: """ Get the only ancestor of this resource which matches the given filter. The ancestor will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If more or fewer than one ancestor matches ``r_filter`` """ ancestor_r = await self.get_only_ancestor(r_filter) return await ancestor_r.view_as(v_type) async def get_only_ancestor(self, r_filter: ResourceFilter) -> "Resource": """ Get the only ancestor of this resource which matches the given filter. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: """ ancestors = list( await self._resource_service.get_ancestors_by_id(self._resource.id, 1, r_filter) ) if len(ancestors) == 0: raise NotFoundError( f"There is no ancestor for resource {self._resource.id.hex()} matching the " f"provided filter" ) return await self._create_resource(ancestors[0]) async def get_descendants_as_view( self, v_type: Type[RV], max_depth: int = -1, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable[RV]: """ Get all the descendants of this resource. 
May optionally filter the descendants so only those matching certain parameters are returned. May optionally sort the descendants by an indexable attribute value key. The descendants will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param v_type: The type of [view][ofrak.resource] to get the descendants as :param max_depth: Maximum depth from this resource to search for descendants; if -1, no maximum depth :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ descendants = await self.get_descendants(max_depth, r_filter, r_sort) views_or_tasks = [r._view_as(v_type) for r in descendants] # analysis tasks to generate views of resources which don't have attrs for the view already view_tasks: List[Awaitable[RV]] = [] # each resources' already-existing views OR the index in `view_tasks` of the analysis task views_or_task_indexes: List[Union[int, RV]] = [] for view_or_create_view_task in views_or_tasks: if isawaitable(view_or_create_view_task): views_or_task_indexes.append(len(view_tasks)) view_tasks.append(view_or_create_view_task) else: views_or_task_indexes.append(cast(RV, view_or_create_view_task)) if view_tasks: completed_views: Sequence[RV] = await asyncio.gather(*view_tasks) return [ completed_views[v_or_i] if type(v_or_i) is int else cast(RV, v_or_i) for v_or_i in views_or_task_indexes ] else: # There are no tasks, so all needed views are already present return cast(List[RV], views_or_task_indexes) async def get_descendants( self, max_depth: int = -1, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable["Resource"]: """ Get all the descendants of this resource. 
May optionally filter the descendants so only those matching certain parameters are returned. May optionally sort the descendants by an indexable attribute value key. :param max_depth: Maximum depth from this resource to search for descendants; if -1, no maximum depth :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ models = await self._resource_service.get_descendants_by_id( self._resource.id, max_depth=max_depth, r_filter=r_filter, r_sort=r_sort ) return await self._create_resources(models) async def get_only_descendant_as_view( self, v_type: Type[RV], max_depth: int = -1, r_filter: ResourceFilter = None, ) -> RV: """ If a filter is provided, get the only descendant of this resource which matches the given filter. If a filter is not provided, gets the only descendant of this resource. The descendant will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. 
:param v_type: The type of [view][ofrak.resource] to get the descendant as :param max_depth: Maximum depth from this resource to search for descendants; if -1, no maximum depth :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one descendant matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple descendant """ descendant_r = await self.get_only_descendant(max_depth, r_filter) return await descendant_r.view_as(v_type) async def get_only_descendant( self, max_depth: int = -1, r_filter: ResourceFilter = None, ) -> "Resource": """ If a filter is provided, get the only descendant of this resource which matches the given filter. If a filter is not provided, gets the only descendant of this resource. :param max_depth: Maximum depth from this resource to search for descendants; if -1, no maximum depth :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one descendant matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple descendant """ models = list( await self._resource_service.get_descendants_by_id( self._resource.id, max_depth=max_depth, max_count=2, r_filter=r_filter, ) ) if len(models) == 0: raise NotFoundError( f"There is no descendant for resource {self._resource.id.hex()} matching " f"the provided filter {r_filter}" ) if len(models) > 1: # TODO: Not the right kind of error raise NotFoundError( f"There are multiple descendants for resource {self._resource.id.hex()} " f"matching the provided filter" ) return await self._create_resource(models[0]) async def get_only_sibling_as_view( self, v_type: Type[RV], r_filter: 
ResourceFilter = None, ) -> RV: """ If a filter is provided, get the only sibling of this resource which matches the given filter. If a filter is not provided, gets the only sibling of this resource. The sibling will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param v_type: The type of [view][ofrak.resource] to get the sibling as :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one sibling matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple siblings """ sibling_r = await self.get_only_sibling(r_filter) return await sibling_r.view_as(v_type) async def get_only_sibling(self, r_filter: ResourceFilter = None) -> "Resource": """ If a filter is provided, get the only sibling of this resource which matches the given filter. If a filter is not provided, gets the only sibling of this resource. 
:param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one sibling matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple siblings """ models = list( await self._resource_service.get_siblings_by_id( self._resource.id, max_count=2, r_filter=r_filter, ) ) if len(models) == 0: raise NotFoundError( f"There is no sibling for resource {self._resource.id.hex()} matching " f"the provided filter" ) if len(models) > 1: raise NotFoundError( f"There are multiple siblings for resource {self._resource.id.hex()} " f"matching the provided filter" ) return await self._create_resource(models[0]) async def get_children( self, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable["Resource"]: """ Get all the children of this resource. May optionally sort the children by an indexable attribute value key. May optionally filter the children so only those matching certain parameters are returned. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ return await self.get_descendants(1, r_filter, r_sort) async def get_children_as_view( self, v_type: Type[RV], r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable[RV]: """ Get all the children of this resource. May optionally filter the children so only those matching certain parameters are returned. May optionally sort the children by an indexable attribute value key. 
The children will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param v_type: The type of [view][ofrak.resource] to get the children as :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ return await self.get_descendants_as_view(v_type, 1, r_filter, r_sort) async def get_only_child(self, r_filter: ResourceFilter = None) -> "Resource": """ If a filter is provided, get the only child of this resource which matches the given filter. If a filter is not provided, gets the only child of this resource. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one child matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple children """ return await self.get_only_descendant(1, r_filter) async def get_only_child_as_view(self, v_type: Type[RV], r_filter: ResourceFilter = None) -> RV: """ If a filter is provided, get the only child of this resource which matches the given filter. If a filter is not provided, gets the only child of this resource. The child will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. 
:param v_type: The type of [view][ofrak.resource] to get the child as :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one child matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple children """ return await self.get_only_descendant_as_view(v_type, 1, r_filter) async def delete(self): """ Delete this resource and all of its descendants. :return: """ self._component_context.resources_deleted.add(self._resource.id) for child_r in await self.get_children(): await child_r.delete() self._resource.is_modified = True self._resource.is_deleted = True async def flush_data_to_disk(self, path: str, pack: bool = True): """ Recursively repack the resource and write its data out to a file on disk. If this is a dataless resource, creates an empty file. :param path: Path to the file to write out to. The file is created if it does not exist. """ if pack is True: await self.pack_recursively() data = await self.get_data() if data is not None: with open(path, "wb") as f: f.write(data) else: # Create empty file with open(path, "wb") as f: pass def __repr__(self): properties = [ f"resource_id={self._resource.id.hex()}", f"tag=[{','.join([tag.__name__ for tag in self._resource.tags])}]", ] if self._resource.data_id: properties.append(f"data={self._resource.data_id.hex()}") return f"{type(self).__name__}(" + ", ".join(properties) + f")" async def summarize(self) -> str: """ Create a string summary of this resource, including specific tags, attribute types, and the data offsets of this resource in the parent and root (if applicable). Not that this is not a complete string representation of the resource: not all tags are included, and only the types of attributes are included, not their values. It is a summary which gives a high level overview of the resource. 
""" return await _default_summarize_resource(self) async def summarize_tree( self, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, indent: str = "", summarize_resource_callback: Optional[Callable[["Resource"], Awaitable[str]]] = None, ) -> str: """ Create a string summary of this resource and its (optionally filtered and/or sorted) descendants. The summaries of each resource are the same as the result of [summarize][ofrak.resource.Resource.summarize], organized into a tree structure. If a filter parameter is provided, it is applied recursively: the children of this resource will be filtered, then only those children matching the filter be displayed, and then the same filter will be applied to their children, etc. For example, :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort """ SPACER_BLANK = " " SPACER_LINE = "───" if summarize_resource_callback is None: summarize_resource_callback = _default_summarize_resource children = cast( List[Resource], list(await self.get_children(r_filter=r_filter, r_sort=r_sort)) ) if children: if indent == "": tree_string = "┌" else: tree_string = "┬" else: tree_string = "─" tree_string += f"{await summarize_resource_callback(self)}\n" # All children but the last should display as a "fork" in the drop-down tree # After the last child, a vertical line should not be drawn as part of the indent # Both of those needs are handled here child_formatting: List[Tuple[str, str]] = [ ("├", indent + "│" + SPACER_BLANK) for _ in children[:-1] ] child_formatting.append(("└", indent + " " + SPACER_BLANK)) for child, (branch_symbol, child_indent) in zip(children, child_formatting): child_tree_string = await child.summarize_tree( r_filter=r_filter, r_sort=r_sort, indent=child_indent, summarize_resource_callback=summarize_resource_callback, 
) tree_string += f"{indent}{branch_symbol}{SPACER_LINE}{child_tree_string}" return tree_string class ResourceAttributeRangeFilter(ResourceAttributeFilter): """ A resource's [index][ofrak.model.resource_model.index] value must be within a range. The range must be bounded with either a maximum or minimum. The maximum is exclusive, i.e. if the index value is equal to the max, then the resource is excluded. """ min: Any = None max: Any = None class ResourceFilter: include_self: bool = False tags: Optional[Iterable[ResourceTag]] = None tags_condition: ResourceFilterCondition = ResourceFilterCondition.AND attribute_filters: Optional[Iterable[ResourceAttributeFilter]] = None def with_tags(cls, *tags: ResourceTag): return ResourceFilter(tags=tags) async def _find_and_delete_overlapping_children(resource: Resource, freed_range: Range): # Note this filter calculation has the potential to be very expensive if, for instance, # the resource is an entire program segment... overlap_resources = list( await resource.get_children_as_view( MemoryRegion, r_filter=ResourceFilter( tags=(MemoryRegion,), attribute_filters=( ResourceAttributeRangeFilter(MemoryRegion.VirtualAddress, max=freed_range.end), ResourceAttributeRangeFilter(MemoryRegion.EndVaddr, min=freed_range.start + 1), ), ), ) ) for overlapping_child in overlap_resources: await overlapping_child.resource.delete() await overlapping_child.resource.save()
null
15,325
from collections import defaultdict
from dataclasses import dataclass
from itertools import chain
from typing import List, Tuple, Dict, Optional, Iterable, Mapping

from immutabledict import immutabledict

from ofrak.core.binary import BinaryPatchModifier, BinaryPatchConfig
from ofrak.component.analyzer import Analyzer
from ofrak.component.modifier import Modifier, ModifierError
from ofrak.core.memory_region import MemoryRegion
from ofrak.model.component_model import ComponentConfig
from ofrak.model.resource_model import index
from ofrak.resource import Resource
from ofrak.resource_view import ResourceView
from ofrak.service.resource_service_i import (
    ResourceFilter,
    ResourceAttributeRangeFilter,
    ResourceAttributeValueFilter,
    ResourceSort,
)
from ofrak_patch_maker.model import AssembledObject, PatchRegionConfig, BOM
from ofrak_patch_maker.toolchain.model import Segment
from ofrak_type.memory_permissions import MemoryPermissions
from ofrak_type.range import Range, remove_subranges


def _get_patch(freed_range: Range, stub: bytes, fill: bytes) -> bytes:
    """
    Build the bytes to patch over ``freed_range``: ``stub`` first, then ``fill`` repeated (and
    truncated) so the result is exactly ``freed_range.length()`` bytes long.

    :param freed_range: The range being overwritten; determines the total patch length
    :param stub: Bytes placed at the start of the patch
    :param fill: Repeating pad pattern for the space after ``stub``

    :raises ValueError: If ``stub`` is longer than ``freed_range``, or padding is needed but
    ``fill`` is empty
    """
    total_fill_length = freed_range.length() - len(stub)
    # Validate explicitly rather than via assert, which is stripped under `python -O`.
    if total_fill_length < 0:
        raise ValueError(
            f"Stub ({len(stub)} bytes) does not fit in freed range "
            f"({freed_range.length()} bytes)"
        )
    if total_fill_length == 0:
        return stub
    if not fill:
        raise ValueError("Cannot pad a non-empty remainder with an empty fill pattern")
    repeats, remainder = divmod(total_fill_length, len(fill))
    return b"".join([stub, fill * repeats, fill[:remainder]])
null
15,326
import os
import struct
from dataclasses import dataclass
from enum import Enum
from typing import Union, List, Tuple

import fdt

from ofrak.component.analyzer import Analyzer
from ofrak.component.identifier import Identifier
from ofrak.component.packer import Packer
from ofrak.component.unpacker import Unpacker
from ofrak.model.viewable_tag_model import AttributesType
from ofrak.resource import Resource
from ofrak.service.resource_service_i import ResourceFilter, ResourceSort
from ofrak.core import GenericBinary, MagicMimeIdentifier, MagicDescriptionIdentifier
from ofrak.model.component_model import ComponentConfig
from ofrak.model.resource_model import index
from ofrak_type.range import Range


class DtbPropertyType(Enum):
    # Value encodings a device tree blob (DTB) property can carry, mirroring
    # the item types of the ``fdt`` library.
    DtbPropNoValue = 0
    DtbInt = 1
    DtbStr = 2
    DtbBytes = 3
    DtbIntList = 4
    DtbStrList = 5


class DtbProperty(GenericBinary):
    """
    DTB Property
    """

    name: str
    p_type: DtbPropertyType

    def DtbPropertyName(self) -> str:
        # Accessor exposing the property name (used for indexed lookups).
        return self.name

    def caption(cls, attributes) -> str:
        # Include the property name in the caption when its attributes are
        # present; otherwise fall back to the generic caption.
        try:
            dtb_attributes = attributes[AttributesType[DtbProperty]]
        except KeyError:
            return super().caption(attributes)
        return f"{cls.__name__}: {dtb_attributes.name}"

    async def get_path(self):
        # Full device-tree path of this property: parent node's path plus the
        # property's own name.
        parent_node = await self.resource.get_parent_as_view(v_type=DtbNode)
        return os.path.join(await parent_node.get_path(), self.name)

    async def get_value(self) -> Union[str, List[str], int, List[int], bytes, bytearray, None]:
        # Decode the raw property bytes according to ``p_type``.
        if self.p_type is DtbPropertyType.DtbPropNoValue:
            return None
        elif self.p_type is DtbPropertyType.DtbBytes:
            return await self.resource.get_data()
        elif self.p_type is DtbPropertyType.DtbInt:
            # DTB scalars are 32-bit big-endian words.
            return struct.unpack(">I", await self.resource.get_data())[0]
        elif self.p_type is DtbPropertyType.DtbIntList:
            data = await self.resource.get_data()
            # Split into consecutive 4-byte chunks and decode each as a
            # big-endian 32-bit word.
            return [
                struct.unpack(">I", i)[0] for i in [data[j : j + 4] for j in range(0, len(data), 4)]
            ]
        elif self.p_type is DtbPropertyType.DtbStr:
            data = await self.resource.get_data()
            return data.decode("ascii")
        elif self.p_type is DtbPropertyType.DtbStrList:
            data = await self.resource.get_data()
            # String lists are NUL-separated in the serialized form.
            return [s.decode("ascii") for s in data.split(b"\x00")]
        else:
            raise TypeError(f"Unsupported type {self.p_type} for property {self.name}")

The provided code snippet includes necessary dependencies for implementing the `_prop_to_fdt` function. Write a Python function `async def _prop_to_fdt(p: DtbProperty) -> fdt.items.Property` to solve the following problem: Generates an fdt.items.property corresponding to a DtbProperty. :param p: :return: Here is the function:

async def _prop_to_fdt(p: DtbProperty) -> fdt.items.Property:
    """
    Generates an fdt.items.property corresponding to a DtbProperty.

    :param p: the DtbProperty to convert
    :return: the equivalent ``fdt.items.Property`` (or subclass) instance
    """
    value = await p.get_value()
    if p.p_type is DtbPropertyType.DtbPropNoValue:
        return fdt.items.Property(name=p.name)
    elif p.p_type is DtbPropertyType.DtbBytes:
        # Raw bytes are taken straight from the resource data rather than the
        # decoded value.
        return fdt.items.PropBytes(name=p.name, data=await p.resource.get_data())
    elif p.p_type is DtbPropertyType.DtbInt:
        return fdt.items.PropWords(p.name, value)
    elif p.p_type is DtbPropertyType.DtbIntList:
        # Word lists are passed as individual positional word arguments.
        return fdt.items.PropWords(p.name, *value)
    elif p.p_type is DtbPropertyType.DtbStr:
        return fdt.items.PropStrings(p.name, value)
    elif p.p_type is DtbPropertyType.DtbStrList:
        return fdt.items.PropStrings(p.name, *value)
    else:
        raise TypeError(f"Unsupported type {p.p_type} for property {p.name}")
Generates an fdt.items.Property corresponding to a DtbProperty. :param p: the DtbProperty to convert :return: the equivalent fdt.items.Property (or subclass) instance
15,327
import os
import struct
from dataclasses import dataclass
from enum import Enum
from typing import Union, List, Tuple

import fdt

from ofrak.component.analyzer import Analyzer
from ofrak.component.identifier import Identifier
from ofrak.component.packer import Packer
from ofrak.component.unpacker import Unpacker
from ofrak.model.viewable_tag_model import AttributesType
from ofrak.resource import Resource
from ofrak.service.resource_service_i import ResourceFilter, ResourceSort
from ofrak.core import GenericBinary, MagicMimeIdentifier, MagicDescriptionIdentifier
from ofrak.model.component_model import ComponentConfig
from ofrak.model.resource_model import index
from ofrak_type.range import Range


class DtbPropertyType(Enum):
    # Value encodings a device tree blob (DTB) property can carry, mirroring
    # the item types of the ``fdt`` library.
    DtbPropNoValue = 0
    DtbInt = 1
    DtbStr = 2
    DtbBytes = 3
    DtbIntList = 4
    DtbStrList = 5

The provided code snippet includes necessary dependencies for implementing the `_prop_from_fdt` function. Write a Python function `def _prop_from_fdt(p: fdt.items.Property) -> Tuple[DtbPropertyType, bytes]` to solve the following problem: Converts an fdt.items.property to its p_type and p_data values. :param p: :return: Here is the function:

def _prop_from_fdt(p: fdt.items.Property) -> Tuple[DtbPropertyType, bytes]:
    """
    Converts an fdt.items.property to its p_type and p_data values.

    :param p: the fdt property to convert
    :return: the property's DtbPropertyType and its serialized raw bytes
    """
    if type(p) is fdt.items.Property or len(p.data) == 0:
        # A bare Property (or one with no data words) carries no value.
        _p_type = DtbPropertyType.DtbPropNoValue
        _p_data = b""
    elif type(p) is fdt.items.PropBytes:
        _p_type = DtbPropertyType.DtbBytes
        _p_data = bytes(p.data)
    elif isinstance(p.value, int):
        # One word -> scalar int; several words -> int list. Either way the
        # serialized form is consecutive 32-bit big-endian words.
        if len(p.data) == 1:
            _p_type = DtbPropertyType.DtbInt
        else:
            _p_type = DtbPropertyType.DtbIntList
        _p_data = b"".join([struct.pack(">I", i) for i in p.data])
    elif isinstance(p.value, str):
        if len(p.data) == 1:
            _p_type = DtbPropertyType.DtbStr
            _p_data = b"".join([s.encode("ascii") for s in p.data])
        else:
            _p_type = DtbPropertyType.DtbStrList
            # String lists are serialized NUL-separated.
            _p_data = b"\0".join([s.encode("ascii") for s in p.data])
    else:
        raise TypeError(f"Unknown type for DTB Property: {p}")
    return _p_type, _p_data
Converts an fdt.items.Property to its p_type and p_data values. :param p: the fdt property to convert :return: a tuple of the property's DtbPropertyType and its serialized raw bytes
15,328
import asyncio
import tempfile
from concurrent.futures.process import ProcessPoolExecutor
from dataclasses import dataclass
from typing import Dict

from ofrak.resource import ResourceFactory, Resource
from ofrak.model.resource_model import ResourceAttributes
from ofrak.component.abstract import ComponentMissingDependencyError
from ofrak.component.analyzer import Analyzer

try:
    import binwalk

    BINWALK_INSTALLED = True
except ImportError:
    BINWALK_INSTALLED = False

from ofrak.core.binary import GenericBinary
from ofrak.core.filesystem import File
from ofrak.model.component_model import ComponentExternalTool
from ofrak.service.data_service_i import DataServiceInterface
from ofrak.service.resource_service_i import ResourceServiceInterface

try:
    import binwalk

    BINWALK_INSTALLED = True
except ImportError:
    BINWALK_INSTALLED = False


def _run_binwalk_on_file(filename):  # pragma: no cover
    """
    Signature-scan ``filename`` with binwalk, mapping each match offset to its
    description string (later matches at the same offset win, as before).
    """
    return {
        result.offset: result.description
        for module in binwalk.scan(filename, signature=True)
        for result in module.results
    }
null
15,329
import logging
import re
import sys
from dataclasses import dataclass
from typing import List, Union, Tuple, Any

from ofrak.component.abstract import ComponentMissingDependencyError
from ofrak.component.analyzer import Analyzer
from ofrak.component.identifier import Identifier
from ofrak.component.packer import Packer
from ofrak.component.unpacker import Unpacker
from ofrak.core.binary import GenericBinary, GenericText
from ofrak.core.program_section import ProgramSection
from ofrak.core.program import Program
from ofrak.model.component_model import ComponentExternalTool
from ofrak.resource import Resource
from ofrak.service.resource_service_i import ResourceFilter
from ofrak_type.range import Range


class IhexProgram(Program):
    # Overall address extent covered by the Intel HEX image.
    address_limits: Range
    # Declared execution start address, if any.
    start_addr: Union[None, int]
    # Address ranges of the individual data segments.
    segments: List[Range]


# External-tool descriptor for the bincopy dependency used to parse Intel HEX.
_BINCOPY_TOOL = ComponentExternalTool(
    "bincopy",
    "https://github.com/eerimoq/bincopy",
    "--help",
)


class ComponentMissingDependencyError(RuntimeError):
    """
    Raised when a component cannot run because an external tool it depends on
    is missing; the message embeds apt/brew installation hints when available.
    """

    def __init__(
        self,
        component: ComponentInterface,
        dependency: ComponentExternalTool,
    ):
        # Only include a package-manager hint when the dependency declares a
        # package name for that manager.
        if dependency.apt_package:
            apt_install_str = f"\n\tapt installation: apt install {dependency.apt_package}"
        else:
            apt_install_str = ""
        if dependency.brew_package:
            brew_install_str = f"\n\tbrew installation: brew install {dependency.brew_package}"
        else:
            brew_install_str = ""

        super().__init__(
            f"Missing {dependency.tool} tool needed for {type(component).__name__}!"
            f"{apt_install_str}"
            f"{brew_install_str}"
            f"\n\tSee {dependency.tool_homepage} for more info and installation help."
            f"\n\tAlternatively, OFRAK can ignore this component (and any others with missing "
            f"dependencies) so that they will never be run: OFRAK(..., exclude_components_missing_dependencies=True)"
        )
        self.component = component
        self.dependency = dependency


def _binfile_analysis(raw_ihex: bytes, component) -> Tuple[IhexProgram, Any]:
    # Parse raw Intel HEX text with bincopy and summarize it as an
    # IhexProgram, returning the parsed BinFile alongside for further use.
    # NOTE(review): BINCOPY_INSTALLED and BinFile are assumed to be defined at
    # module scope (e.g. by a guarded bincopy import) — not visible here.
    if not BINCOPY_INSTALLED:
        raise ComponentMissingDependencyError(component, _BINCOPY_TOOL)
    binfile = BinFile()
    binfile.add_ihex(raw_ihex.decode("utf-8"))

    ihex_program = IhexProgram(
        Range(binfile.minimum_address, binfile.maximum_address),
        binfile.execution_start_address,
        [Range(segment.minimum_address, segment.maximum_address) for segment in binfile.segments],
    )
    return ihex_program, binfile
null
15,330
import io import logging from typing import Optional, TypeVar from ofrak.component.analyzer import Analyzer from ofrak.core import NamedProgramSection from ofrak.core.architecture import ProgramAttributes from ofrak.core.elf.model import ( ElfSectionHeader, Elf, ElfHeader, ElfBasicHeader, ElfProgramHeader, ElfSegmentStructure, ElfSegment, ElfSectionStructure, ElfSection, ElfSymbol, ElfSymbolStructure, ElfRelaEntry, ElfDynamicEntry, ElfVirtualAddress, SECTION_NAME_PATTERN, ) from ofrak.core.memory_region import MemoryRegion from ofrak.model.component_model import ComponentConfig from ofrak.model.resource_model import ResourceAttributes from ofrak.model.viewable_tag_model import AttributesType from ofrak.resource import Resource from ofrak.service.resource_service_i import ResourceFilter from ofrak_io.deserializer import BinaryDeserializer class ElfBasicHeader(ResourceView): """ See "e_ident" in <https://man7.org/linux/man-pages/man5/elf.5.html> for details. """ ei_magic: bytes ei_class: int ei_data: int ei_version: int ei_osabi: int ei_abiversion: int ei_pad: bytes def get_endianness(self) -> Endianness: if self.ei_data == 1: return Endianness.LITTLE_ENDIAN elif self.ei_data == 2: return Endianness.BIG_ENDIAN else: raise ValueError("Invalid endianness value in the ELF header ") def get_bitwidth(self) -> BitWidth: if self.ei_class == 1: return BitWidth.BIT_32 elif self.ei_class == 2: return BitWidth.BIT_64 else: raise ValueError("Invalid bit width value in the ELF header ") class Elf(Program): """ An Executable and Linking Format (ELF) file. See <https://man7.org/linux/man-pages/man5/elf.5.html> for details. 
""" async def get_header(self) -> ElfHeader: return await self.resource.get_only_child_as_view( ElfHeader, ResourceFilter.with_tags(ElfHeader) ) async def get_basic_header(self) -> ElfBasicHeader: return await self.resource.get_only_child_as_view( ElfBasicHeader, ResourceFilter.with_tags(ElfBasicHeader) ) async def get_segments(self) -> Iterable[ElfSegment]: return await self.resource.get_children_as_view( ElfSegment, ResourceFilter(tags=(ElfSegment,)), ResourceSort(ElfSegmentStructure.SegmentIndex, ResourceSortDirection.ASCENDANT), ) async def get_sections(self) -> Iterable[ElfSection]: return await self.resource.get_children_as_view( ElfSection, ResourceFilter(tags=(ElfSection,)), ResourceSort(ElfSectionStructure.SectionIndex, ResourceSortDirection.ASCENDANT), ) async def get_section_by_index(self, index: int) -> ElfSection: return await self.resource.get_only_child_as_view( ElfSection, ResourceFilter( tags=(ElfSection,), attribute_filters=( ResourceAttributeValueFilter(ElfSectionStructure.SectionIndex, index), ), ), ) async def get_section_by_name(self, name: str) -> ElfSection: _ = await self.get_sections() # Forces analyzing name of all sections return await self.resource.get_only_child_as_view( ElfSection, ResourceFilter( tags=(ElfSection,), attribute_filters=(ResourceAttributeValueFilter(ElfSection.SectionName, name),), ), ) async def get_section_name_string_section(self) -> ElfSectionNameStringSection: return await self.resource.get_only_child_as_view( ElfSectionNameStringSection, ResourceFilter( tags=(ElfSectionNameStringSection,), ), ) async def get_string_section(self) -> ElfStringSection: for string_section in await self.resource.get_children_as_view( ElfStringSection, ResourceFilter( tags=(ElfStringSection,), ), ): if string_section.resource.has_tag(ElfSectionNameStringSection): continue section = await string_section.get_section() if section.name != ".strtab": continue return string_section raise ValueError("Could not find string section!") async def 
get_symbol_section(self) -> ElfSymbolSection: return await self.resource.get_only_child_as_view( ElfSymbolSection, ResourceFilter( tags=(ElfSymbolSection,), ), ) async def get_section_headers(self) -> Iterable[ElfSectionHeader]: return await self.resource.get_children_as_view( ElfSectionHeader, ResourceFilter(tags=(ElfSectionHeader,)), ResourceSort(ElfSectionStructure.SectionIndex, ResourceSortDirection.ASCENDANT), ) async def get_section_header_by_index(self, index: int) -> ElfSectionHeader: return await self.resource.get_only_child_as_view( ElfSectionHeader, ResourceFilter( tags=(ElfSectionHeader,), attribute_filters=( ResourceAttributeValueFilter(ElfSectionStructure.SectionIndex, index), ), ), ) async def get_program_headers(self) -> Iterable[ElfProgramHeader]: return await self.resource.get_children_as_view( ElfProgramHeader, ResourceFilter(tags=(ElfProgramHeader,)), ResourceSort(ElfProgramHeader.SegmentIndex, ResourceSortDirection.ASCENDANT), ) async def get_program_header_by_index(self, index: int) -> ElfProgramHeader: return await self.resource.get_only_child_as_view( ElfProgramHeader, ResourceFilter( tags=(ElfProgramHeader,), attribute_filters=( ResourceAttributeValueFilter(ElfProgramHeader.SegmentIndex, index), ), ), ) class Resource: """ Defines methods for interacting with the data and attributes of Resources, the main building block of OFRAK. 
""" __slots__ = ( "_job_id", "_job_context", "_component_context", "_resource_context", "_resource_view_context", "_resource", "_resource_factory", "_id_service", "_resource_service", "_data_service", "_job_service", "_dependency_handler", ) def __init__( self, job_id: bytes, resource: MutableResourceModel, resource_context: ResourceContext, resource_view_context: ResourceViewContext, job_context: Optional[JobRunContext], component_context: ComponentContext, resource_factory: "ResourceFactory", id_service: IDServiceInterface, data_service: DataServiceInterface, resource_service: ResourceServiceInterface, job_service: JobServiceInterface, ): self._job_id: bytes = job_id self._job_context: Optional[JobRunContext] = job_context self._component_context: ComponentContext = component_context self._resource_context: ResourceContext = resource_context self._resource_view_context: ResourceViewContext = resource_view_context self._resource: MutableResourceModel = resource self._resource_factory: "ResourceFactory" = resource_factory self._id_service: IDServiceInterface = id_service self._resource_service: ResourceServiceInterface = resource_service self._data_service: DataServiceInterface = data_service self._job_service: JobServiceInterface = job_service def get_id(self) -> bytes: """ :return: This resource's ID """ return self._resource.id def get_job_id(self) -> bytes: """ Each resource belongs to a specific "job." See [JobServiceInterface][ofrak.service.job_service_i.JobServiceInterface]. :return: The ID of the job this resource belongs to """ return self._job_id def get_data_id(self) -> Optional[bytes]: """ Each resource may have a data ID. This refers to a [DataModel][ofrak.model.data_model.DataModel] representing some chunk of raw binary data. 
:return: The data ID associated with this resource, if it exists """ return self._resource.data_id def get_resource_context(self) -> ResourceContext: return self._resource_context def get_resource_view_context(self) -> ResourceViewContext: return self._resource_view_context def get_component_context(self) -> ComponentContext: return self._component_context def get_job_context(self) -> Optional[JobRunContext]: return self._job_context def get_caption(self) -> str: return self._resource.caption def is_modified(self) -> bool: """ Check if the resource has been modified in this context and is considered "dirty". :return: `True` if the resource is modified, `False` otherwise """ return self._resource.is_modified def get_model(self) -> MutableResourceModel: """ Get the underlying [model][ofrak.model.resource_model.ResourceModel] of this resource. :return: """ return self._resource async def get_data(self, range: Optional[Range] = None) -> bytes: """ A resource often represents a chunk of underlying binary data. This method returns the entire chunk by default; this can be reduced by an optional parameter. :param range: A range within the resource's data, relative to the resource's data itself (e.g. Range(0, 10) returns the first 10 bytes of the chunk) :return: The full range or a partial range of this resource's bytes """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data from a resource with no data" ) data = await self._data_service.get_data(self._resource.data_id, range) if range is None: range = Range(0, len(data)) self._component_context.access_trackers[self._resource.id].data_accessed.add(range) return data async def get_data_length(self) -> int: """ :return: The length of the underlying binary data this resource represents """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data length from a " "resource with no data." 
) return await self._data_service.get_data_length(self._resource.data_id) async def get_data_range_within_parent(self) -> Range: """ If this resource is "mapped," i.e. its underlying data is defined as a range of its parent's underlying data, this method returns the range within the parent resource's data where this resource lies. If this resource is not mapped (it is root), it returns a range starting at 0 with length 0. :return: The range of the parent's data which this resource represents """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data range from a " "resource with no data." ) if self._resource.parent_id is None: return Range(0, 0) parent_models = list( await self._resource_service.get_ancestors_by_id(self._resource.id, max_count=1) ) if len(parent_models) != 1: raise NotFoundError(f"There is no parent for resource {self._resource.id.hex()}") parent_model = parent_models[0] parent_data_id = parent_model.data_id if parent_data_id is None: return Range(0, 0) try: return await self._data_service.get_range_within_other( self._resource.data_id, parent_data_id ) except ValueError: return Range(0, 0) async def get_data_range_within_root(self) -> Range: """ Does the same thing as `get_data_range_within_parent`, except the range is relative to the root. :return: The range of the root node's data which this resource represents """ if self._resource.data_id is None: raise ValueError( "Resource does not have a data_id. Cannot get data range from a " "resource with no data." ) return await self._data_service.get_data_range_within_root(self._resource.data_id) async def search_data( self, query: Pattern[bytes], start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[Tuple[int, bytes], ...]: ... async def search_data( self, query: bytes, start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[int, ...]: ... 
async def search_data(self, query, start=None, end=None, max_matches=None): """ Search for some data in this resource. The query may be a regex pattern (a return value of `re.compile`). If the query is a regex pattern, returns a tuple of pairs with both the offset of the match and the contents of the match itself. If the query is plain bytes, a list of only the match offsets are returned. :param query: Plain bytes to exactly match or a regex pattern to search for :param start: Start offset in the data model to begin searching :param end: End offset in the data model to stop searching :return: A tuple of offsets matching a plain bytes query, or a list of (offset, match) pairs for a regex pattern query """ return await self._data_service.search(self.get_data_id(), query, start, end, max_matches) async def save(self): """ If this resource has been modified, update the model stored in the resource service with the local changes. :raises NotFoundError: If the resource service does not have a model for this resource's ID """ await save_resources( (self,), self._resource_service, self._data_service, self._component_context, self._resource_context, self._resource_view_context, ) def _save(self) -> Tuple[List[bytes], List[DataPatch], List[MutableResourceModel]]: resources_to_delete: List[bytes] = [] patches_to_apply: List[DataPatch] = [] resources_to_update: List[MutableResourceModel] = [] if self._resource.is_deleted: resources_to_delete.append(self._resource.id) elif self._resource.is_modified: modification_tracker = self._component_context.modification_trackers.get( self._resource.id ) assert modification_tracker is not None, ( f"Resource {self._resource.id.hex()} was " f"marked as modified but is missing a tracker!" 
) patches_to_apply.extend(modification_tracker.data_patches) resources_to_update.append(self._resource) modification_tracker.data_patches.clear() return resources_to_delete, patches_to_apply, resources_to_update async def _fetch(self, resource: MutableResourceModel): """ Update the local model with the latest version from the resource service. This will fail if this resource has been modified. :raises InvalidStateError: If the local resource model has been modified :raises NotFoundError: If the resource service does not have a model for this resource's ID """ if resource.is_modified and not resource.is_deleted: raise InvalidStateError( f"Cannot fetch dirty resource {resource.id.hex()} (resource " f"{self.get_id().hex()} attempted fetch)" ) try: fetched_resource = await self._resource_service.get_by_id(resource.id) except NotFoundError: if ( resource.id in self._component_context.modification_trackers and resource.id in self._resource_context.resource_models ): del self._resource_context.resource_models[resource.id] return resource.reset(fetched_resource) async def _fetch_resources(self, resource_ids: Iterable[bytes]): tasks = [] for resource_id in resource_ids: context_resource = self._resource_context.resource_models.get(resource_id) if context_resource is not None: tasks.append(self._fetch(context_resource)) await asyncio.gather(*tasks) async def _update_views(self, modified: Set[bytes], deleted: Set[bytes]): for resource_id in modified: views_in_context = self._resource_view_context.views_by_resource[resource_id] for view in views_in_context.values(): if resource_id not in self._resource_context.resource_models: await self._fetch(view.resource.get_model()) # type: ignore if resource_id not in self._resource_context.resource_models: view.set_deleted() continue updated_model = self._resource_context.resource_models[resource_id] fresh_view = view.create(updated_model) for field in dataclasses.fields(fresh_view): if field.name == "_resource": continue setattr(view, 
field.name, getattr(fresh_view, field.name)) for resource_id in deleted: views_in_context = self._resource_view_context.views_by_resource[resource_id] for view in views_in_context.values(): view.set_deleted() async def run( self, component_type: Type[ComponentInterface[CC]], config: CC = None, ) -> ComponentRunResult: """ Run a single component. Runs even if the component has already been run on this resource. :param component_type: The component type (may be an interface) to get and run :param config: Optional config to pass to the component :return: A ComponentRunResult containing information on resources affected by the component """ job_context = self._job_context component_result = await self._job_service.run_component( JobComponentRequest( self._job_id, self._resource.id, component_type.get_id(), config, ), job_context, ) for deleted_id in component_result.resources_deleted: if deleted_id in self._component_context.modification_trackers: del self._component_context.modification_trackers[deleted_id] await self._fetch_resources(component_result.resources_modified) await self._update_views( component_result.resources_modified, component_result.resources_deleted ) return component_result async def auto_run( self, components: Iterable[Type[ComponentInterface]] = tuple(), blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(), all_unpackers: bool = False, all_identifiers: bool = False, all_analyzers: bool = False, all_packers: bool = False, ) -> ComponentRunResult: """ Automatically run multiple components which may run on this resource. From an initial set of possible components to run, this set is searched for components for which the intersection of the component's targets and this resource's tags is not empty. Accepts several optional flags to expand or restrict the initial set of components. 
:param components: Components to explicitly add to the initial set of components :param blacklisted_components: Components to explicitly remove to the initial set of components :param all_unpackers: If true, all Unpackers are added to the initial set of components :param all_identifiers: If true, all Identifiers are added to the initial set of components :param all_analyzers: If true, all Analyzers are added to the initial set of components :return: A ComponentRunResult containing information on resources affected by the component """ components_result = await self._job_service.run_components( JobMultiComponentRequest( self._job_id, self._resource.id, components_allowed=tuple(c.get_id() for c in components), components_disallowed=tuple(c.get_id() for c in blacklisted_components), all_unpackers=all_unpackers, all_identifiers=all_identifiers, all_analyzers=all_analyzers, all_packers=all_packers, ) ) for deleted_id in components_result.resources_deleted: if deleted_id in self._component_context.modification_trackers: del self._component_context.modification_trackers[deleted_id] await self._fetch_resources(components_result.resources_modified) await self._update_views( components_result.resources_modified, components_result.resources_deleted ) return components_result async def unpack(self) -> ComponentRunResult: """ Unpack the resource. :return: A ComponentRunResult containing information on resources affected by the component """ return await self.auto_run(all_identifiers=True, all_unpackers=True) async def analyze(self, resource_attributes: Type[RA]) -> RA: """ Analyze the resource for a specific resource attribute. 
:param Type[RA] resource_attributes: :return: """ attributes = self._check_attributes(resource_attributes) if attributes is None: await self._analyze_attributes((resource_attributes,)) return self.get_attributes(resource_attributes) else: return attributes async def identify(self) -> ComponentRunResult: """ Run all registered identifiers on the resource, tagging it with matching resource tags. """ return await self.auto_run(all_identifiers=True) async def pack(self) -> ComponentRunResult: """ Pack the resource. :return: A ComponentRunResult containing information on resources affected by the component """ return await self.auto_run(all_packers=True) async def auto_run_recursively( self, components: Iterable[Type[ComponentInterface]] = tuple(), blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(), blacklisted_tags: Iterable[ResourceTag] = tuple(), all_unpackers: bool = False, all_identifiers: bool = False, all_analyzers: bool = False, ) -> ComponentRunResult: """ Automatically run multiple components which may run on this resource or its descendents. From an initial set of possible components to run, this set is searched for components for which the intersection of the component's targets and this resource's tags is not empty. Accepts several optional flags to expand or restrict the initial set of components. After each run, compatible components from the initial set are run on any resources which have had tags added (including newly created resources). This is repeated until no new tags are added. 
        :param components: Components to explicitly add to the initial set of components
        :param blacklisted_components: Components to explicitly remove to the initial set of
        components
        :param all_unpackers: If true, all Unpackers are added to the initial set of components
        :param all_identifiers: If true, all Identifiers are added to the initial set of components
        :param all_analyzers: If true, all Analyzers are added to the initial set of components

        :return: A ComponentRunResult containing information on resources affected by the component
        """
        components_result = await self._job_service.run_components_recursively(
            JobMultiComponentRequest(
                self._job_id,
                self._resource.id,
                components_allowed=tuple(c.get_id() for c in components),
                components_disallowed=tuple(c.get_id() for c in blacklisted_components),
                all_unpackers=all_unpackers,
                all_identifiers=all_identifiers,
                all_analyzers=all_analyzers,
                tags_ignored=tuple(blacklisted_tags),
            )
        )
        # Sync the local resource context and cached views with whatever the components changed
        await self._fetch_resources(components_result.resources_modified)
        await self._update_views(
            components_result.resources_modified, components_result.resources_deleted
        )
        return components_result

    async def unpack_recursively(
        self,
        blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(),
        do_not_unpack: Iterable[ResourceTag] = tuple(),
    ) -> ComponentRunResult:
        """
        Automatically unpack this resource and recursively unpack all of its descendants.

        First this resource is unpacked; then, any resource which "valid" tags were added to
        will also be unpacked. New resources created with tags count as resources with new
        tags. A "valid" tag is a tag which is not explicitly ignored via the ``do_not_unpack``
        argument. The unpacking will only stop when no new "valid" tags have been added in the
        previous iteration. This can lead to a very long unpacking process if it is totally
        unconstrained.

        :param blacklisted_components: Components which are blocked from running during the
        recursive unpacking, on this resource or any descendants.
        :param do_not_unpack: Do not unpack resources with this tag, and ignore these tags
        when checking if any new tags have been added in this iteration.

        :return: A ComponentRunResult containing information on resources affected by the component
        """
        return await self.auto_run_recursively(
            all_identifiers=True,
            all_unpackers=True,
            blacklisted_components=blacklisted_components,
            blacklisted_tags=do_not_unpack,
        )

    async def analyze_recursively(self) -> ComponentRunResult:
        """
        Run all Analyzers on this resource and all of its descendants.

        :return: A ComponentRunResult containing information on resources affected by the component
        """
        return await self.auto_run_recursively(all_analyzers=True)

    async def pack_recursively(self) -> ComponentRunResult:
        """
        Recursively pack the resource, starting with its descendants.
        """
        return await self._job_service.pack_recursively(self._job_id, self._resource.id)

    async def write_to(self, destination: BinaryIO, pack: bool = True):
        """
        Recursively repack resource and write data out to an arbitrary ``BinaryIO``
        destination.

        :param destination: Destination for packed resource data
        :param pack: If True (default), recursively pack the resource before writing

        :return:
        """
        if pack is True:
            await self.pack_recursively()

        destination.write(await self.get_data())

    async def _analyze_attributes(self, attribute_types: Tuple[Type[ResourceAttributes], ...]):
        # Run whichever analyzers produce the requested attribute types on this resource,
        # then refresh the local context with everything the analysis modified.
        job_context = self._job_context
        components_result = await self._job_service.run_analyzer_by_attribute(
            JobAnalyzerRequest(
                self._job_id,
                self._resource.id,
                attribute_types,
                tuple(self._resource.tags),
            ),
            job_context,
        )
        # Update all the resources in the local context that were modified as part of the
        # analysis
        await self._fetch_resources(components_result.resources_modified)
        await self._update_views(
            components_result.resources_modified, components_result.resources_deleted
        )
        return components_result

    async def _create_resource(self, resource_model: ResourceModel) -> "Resource":
        # Wrap a single model in a Resource bound to this resource's contexts
        return await self._resource_factory.create(
            self._job_id,
            resource_model.id,
            self._resource_context,
            self._resource_view_context,
            self._component_context,
            self._job_context,
        )

    async def _create_resources(
        self, resource_models: Iterable[ResourceModel]
    ) -> Iterable["Resource"]:
        # Batch counterpart of _create_resource
        return await self._resource_factory.create_many(
            self._job_id,
            [resource_model.id for resource_model in resource_models],
            self._resource_context,
            self._resource_view_context,
            self._component_context,
            self._job_context,
        )

    async def create_child(
        self,
        tags: Optional[Iterable[ResourceTag]] = None,
        attributes: Optional[Iterable[ResourceAttributes]] = None,
        data: Optional[bytes] = None,
        data_range: Optional[Range] = None,
    ) -> "Resource":
        """
        Create a new resource as a child of this resource. This method entirely defines the
        child's tags and attributes. This method also defines the child's data semantics:

        A child resource can either be defined in one of three ways:
        1) The resource contains no data ("Dataless" resource). Not used in practice.
        2) As mapping a range of its parent's data ("Mapped" resource). For example, an
        instruction maps a portion of its parent basic block.
        3) Defining its own new, independent data ("Unmapped" resource). For example, a file
        extracted from a zip archive is a child of the zip archive resource, but its data
        does not map to some specific range of that parent archive.

        By default a resource will be defined the third way (unmapped). To specify that the
        resource is a mapped resource, include the optional ``data_range`` parameter set to
        the range of the parent's data which the child maps. That is, `data_range=Range(0,
        10)` creates a resource which maps the first 10 bytes of the parent.
        The optional ``data`` param defines whether to populate the new child's data. It can
        be used only if the data is unmapped. If the child is unmapped, the value of ``data``
        still becomes that child's data, but the parent's data is unaffected. If ``data`` and
        ``data_range`` are both `None` (default), the new child is a dataless resource.
        The following table sums up the possible interactions between ``data`` and
        ``data_range``:

        | | ``data_range`` param not `None` | ``data_range`` param `None` |
        |--------------------------|--------------------------------------------------------|----------------------------------------------|
        | ``data`` param not `None` | Not allowed | Child unmapped, child's data set to ``data`` |
        | ``data`` param `None` | Child mapped, parent's data untouched | Child is dataless |

        :param tags: [tags][ofrak.model.tag_model.ResourceTag] to add to the new child
        :param attributes: [attributes][ofrak.model.resource_model.ResourceAttributes] to add
        to the new child
        :param data: The binary data for the new child. If `None` and ``data_range`` is
        `None`, the resource has no data. Defaults to `None`.
        :param data_range: The range of the parent's data which the new child maps. If `None`
        (default), the child will not map the parent's data.

        :return:
        """
        if data is not None and data_range is not None:
            raise ValueError(
                "Cannot create a child from both data and data_range. These parameters are "
                "mutually exclusive."
            )
        resource_id = self._id_service.generate_id()
        if data_range is not None:
            # Mapped child: its data model points into a range of the parent's data
            if self._resource.data_id is None:
                raise ValueError(
                    "Cannot create a child with mapped data from a parent that doesn't have data"
                )
            data_model_id = resource_id
            await self._data_service.create_mapped(
                data_model_id,
                self._resource.data_id,
                data_range,
            )
            data_attrs = Data(data_range.start, data_range.length())
            attributes = [data_attrs, *attributes] if attributes else [data_attrs]
        elif data is not None:
            # Unmapped child: gets its own independent data root
            if self._resource.data_id is None:
                raise ValueError(
                    "Cannot create a child with data from a parent that doesn't have data"
                )
            data_model_id = resource_id
            await self._data_service.create_root(data_model_id, data)
            data_attrs = Data(0, len(data))
            attributes = [data_attrs, *attributes] if attributes else [data_attrs]
        else:
            # Dataless child
            data_model_id = None
        resource_model = ResourceModel.create(
            resource_id,
            data_model_id,
            self._resource.id,
            tags,
            attributes,
            self._component_context.component_id,
            self._component_context.component_version,
        )
        await self._resource_service.create(resource_model)
        if self._job_context:
            resource_tracker = self._job_context.trackers[resource_model.id]
            resource_tracker.tags_added.update(resource_model.tags)
        self._component_context.mark_resource_modified(resource_id)
        self._component_context.resources_created.add(resource_model.id)
        created_resource = await self._create_resource(resource_model)
        return created_resource

    async def create_child_from_view(
        self,
        view: RV,
        data_range: Optional[Range] = None,
        data: Optional[bytes] = None,
        additional_tags: Iterable[ResourceTag] = (),
        additional_attributes: Iterable[ResourceAttributes] = (),
    ) -> "Resource":
        """
        Create a new resource as a child of this resource. The new resource will have tags
        and attributes as defined by the
        [view][ofrak.model.viewable_tag_model.ViewableResourceTag]; in this way a view can
        act as a template to create a new resource.

        The ``additional_tags`` and ``additional_attributes`` can also be used to add more
        tags and attributes beyond what the view contains.

        This method's ``data`` and ``data_range`` parameters have the same semantics as in
        `create_child`, in short:

        | | ``data_range`` param not `None` | ``data_range`` param `None` |
        |--------------------------|--------------------------------------------------------|----------------------------------------------|
        | ``data`` param not `None` | Child mapped, ``data`` patched into child (and parent) | Child unmapped, child's data set to ``data`` |
        | ``data`` param `None` | Child mapped, parent's data untouched | Child is dataless |

        See `create_child` documentation for details.

        :param view: A [resource view][ofrak.resource_view] to pull
        [tags][ofrak.model.tag_model.ResourceTag] and
        [attributes][ofrak.model.resource_model.ResourceAttributes] from to populate the new
        child
        :param data_range: The range of the parent's data which the new child maps. If `None`
        (default), the child will not map the parent's data.
        :param data: The binary data for the new child. If `None` and ``data_range`` is
        `None`, the resource has no data. Defaults to `None`.
        :param additional_tags: Any [tags][ofrak.model.tag_model.ResourceTag] for the child
        in addition to those from the ``view``
        :param additional_attributes: Any
        [attributes][ofrak.model.resource_model.ResourceAttributes] for the child in addition
        to those from the ``view``

        :return:
        """
        viewable_tag: ViewableResourceTag = type(view)
        new_resource = await self.create_child(
            tags=(viewable_tag, *additional_tags),
            attributes=(*view.get_attributes_instances().values(), *additional_attributes),
            data_range=data_range,
            data=data,
        )
        return new_resource

    def _view_as(self, viewable_tag: Type[RV]) -> Union[RV, Awaitable[RV]]:
        """
        Try to get a view without calling any analysis, to avoid as many unnecessary
        `asyncio.gather` calls as possible.

        Checks cached views first for view, and if not found, then checks if the attributes
        needed to create the view are already present and up-to-date, and only if both of
        those are not found does it return an awaitable.
        """
        if self._resource_view_context.has_view(self.get_id(), viewable_tag):
            # First early return: View already exists in cache
            return self._resource_view_context.get_view(self.get_id(), viewable_tag)

        if not issubclass(viewable_tag, ResourceViewInterface):
            raise ValueError(
                f"Cannot get view for resource {self.get_id().hex()} of a type "
                f"{viewable_tag.__name__} because it is not a subclass of ResourceView"
            )
        if not self.has_tag(viewable_tag):
            raise ValueError(
                f"Cannot get resource {self.get_id().hex()} as view "
                f"{viewable_tag.__name__} because the resource is not tagged as a "
                f"{viewable_tag.__name__}"
            )

        composed_attrs_types = viewable_tag.composed_attributes_types
        existing_attributes = [self._check_attributes(attrs_t) for attrs_t in composed_attrs_types]
        if all(existing_attributes):
            # Second early return: All attributes needed for view are present and up-to-date
            view = viewable_tag.create(self.get_model())
            view.resource = self  # type: ignore
            self._resource_view_context.add_view(self.get_id(), view)
            return cast(RV, view)

        # Only if analysis is absolutely necessary is an awaitable created and returned
        async def finish_view_creation(
            attrs_to_analyze: Tuple[Type[ResourceAttributes], ...]
        ) -> RV:
            await self._analyze_attributes(attrs_to_analyze)
            view = viewable_tag.create(self.get_model())
            view.resource = self  # type: ignore
            self._resource_view_context.add_view(self.get_id(), view)
            return cast(RV, view)

        return finish_view_creation(
            tuple(
                attrs_t
                for attrs_t, existing in zip(composed_attrs_types, existing_attributes)
                if not existing
            )
        )

    async def view_as(self, viewable_tag: Type[RV]) -> RV:
        """
        Provides a specific type of view instance for this resource.
        The returned instance is an object which has some of the information from this same
        resource, however in a simpler interface. This resource instance will itself remain
        available through the view's ``.resource`` property.

        :param viewable_tag: A ViewableResourceTag, which this resource's model must already
        contain

        :raises ValueError: If the model does not contain this tag, or this tag is not a
        ViewableResourceTag

        :return:
        """
        view_or_create_view_task: Union[RV, Awaitable[RV]] = self._view_as(viewable_tag)
        if isawaitable(view_or_create_view_task):
            return await view_or_create_view_task
        else:
            return cast(RV, view_or_create_view_task)

    def add_view(self, view: ResourceViewInterface):
        """
        Add all the attributes composed in a view to this resource, and tag this resource
        with the view type. Calling this is the equivalent of making N ``add_attributes``
        calls and one ``add_tag`` call (where N is the number of attributes the view is
        composed of).

        :param view: An instance of a view
        """
        for attributes in view.get_attributes_instances().values():  # type: ignore
            self.add_attributes(attributes)
        self.add_tag(type(view))

    def _set_modified(self):
        # Record in the component context that this resource has been modified
        self._component_context.mark_resource_modified(self._resource.id)

    def _add_tag(self, tag: ResourceTag):
        """
        Associate a tag with the resource. If the resource already have the provided tag, it
        has no effects. All parent classes of the provided tag that are tags themselves are
        also added.
        """
        # inherit=False: only skip if the tag is literally present, not merely inherited
        if self._resource.has_tag(tag, False):
            return
        self._component_context.mark_resource_modified(self._resource.id)
        new_tags = self._resource.add_tag(tag)
        if self._job_context:
            resource_tracker = self._job_context.trackers[self._resource.id]
            resource_tracker.tags_added.update(new_tags)

    def add_tag(self, *tags: ResourceTag):
        """
        Associate multiple tags with the resource. If the resource already have one of the
        provided tag, the tag is not added. All parent classes of the provided tag that are
        tags themselves are also added.
        """
        for tag in tags:
            self._add_tag(tag)

    def get_tags(self, inherit: bool = True) -> Iterable[ResourceTag]:
        """
        Get a set of tags associated with the resource.
        """
        return self._resource.get_tags(inherit)

    def has_tag(self, tag: ResourceTag, inherit: bool = True) -> bool:
        """
        Determine if the resource is associated with the provided tag.
        """
        return self._resource.has_tag(tag, inherit)

    def remove_tag(self, tag: ResourceTag):
        # Removing a tag the resource does not have is a no-op
        if not self._resource.has_tag(tag):
            return
        self._set_modified()
        self._resource.remove_tag(tag)

    def get_most_specific_tags(self) -> Iterable[ResourceTag]:
        """
        Get all tags associated with the resource from which no other tags on that resource
        inherit. In other words, get the resource's tags that aren't subclassed by other tags
        on the resource.

        For example, for a resource tagged as `Elf`, the result would be just `[Elf]` instead
        of `[Elf, Program, GenericBinary]` that `Resource.get_tags` returns. This is because
        `Elf` inherits from `Program`, which inherits from `GenericBinary`. Even though the
        resource has all of those tags, the most derived class with no other derivatives is
        the "most specific."
        """
        return self._resource.get_most_specific_tags()

    def _check_attributes(self, attributes_type: Type[RA]) -> Optional[RA]:
        """
        Try to get the current attributes.
        TODO: Should we be using the version as well? The client wouldn't know the version of
        the component in a client-server environment. We could do that efficiently by adding
        a service method that list all available components (and their version)

        :param attributes_type: The type of attributes to check this resource for.

        :return: The requested attributes if they are present and up-to-date, otherwise
        return None.
        """
        attributes = self._resource.get_attributes(attributes_type)
        if attributes is not None:
            # Make sure that the attributes have not been invalidated
            component_id = self._resource.get_component_id_by_attributes(type(attributes))
            if component_id is not None:
                return attributes
        return None

    def _add_attributes(self, attributes: ResourceAttributes):
        existing_attributes = self._resource.get_attributes(type(attributes))
        if existing_attributes is not None and existing_attributes == attributes:
            # Value unchanged; avoid marking the resource as modified
            return
        self._set_modified()
        self._resource.add_attributes(attributes)
        component_context = self._component_context
        self._resource.add_component_for_attributes(
            component_context.component_id, component_context.component_version, type(attributes)
        )

    def add_attributes(self, *attributes: ResourceAttributes):
        """
        Add the provided attributes to the resource. If the resource already have the
        provided attributes classes, they are replaced with the provided one.
        """
        for attrs in attributes:
            self._add_attributes(attrs)

    def has_attributes(self, attributes_type: Type[ResourceAttributes]) -> bool:
        """
        Check if this resource has a value for the given attributes type.

        :param attributes_type:

        :return:
        """
        return self._resource.has_attributes(attributes_type)

    def get_attributes(self, attributes_type: Type[RA]) -> RA:
        """
        If this resource has attributes matching the given type, return the value of those
        attributes.

        :param attributes_type:

        :raises NotFoundError: If this resource has no attributes of the given type. (The
        previous docstring claimed `None` was returned, which the code below never does.)

        :return:
        """
        attributes = self._resource.get_attributes(attributes_type)
        if attributes is None:
            raise NotFoundError(
                f"Cannot find attributes {attributes_type} for resource {self.get_id().hex()}"
            )
        self._component_context.access_trackers[self._resource.id].attributes_accessed.add(
            attributes_type
        )
        return attributes

    def remove_attributes(self, attributes_type: Type[ResourceAttributes]):
        """
        Remove the value of a given attributes type from this resource, if there is such a
        value.

        If the resource does not have a value for the given attributes type, do nothing.

        :param attributes_type:

        :return:
        """
        if not self._resource.has_attributes(attributes_type):
            return
        self._set_modified()
        self._resource.remove_attributes(attributes_type)

    def add_component(
        self,
        component_id: bytes,
        version: int,
    ):
        """
        Mark that a component has run on this resource

        :param component_id: ID of the component which ran
        :param version: Version of the component which ran

        :return:
        """
        self._set_modified()
        self._resource.add_component(component_id, version)

    def add_component_for_attributes(
        self,
        component_id: bytes,
        version: int,
        attributes: Type[ResourceAttributes],
    ):
        """
        Mark that a component was responsible for adding some attributes to this resource.

        :param component_id: ID of the component which added the attributes
        :param version: version of the component which added the attributes
        :param attributes: The type of attributes which were added

        :return:
        """
        self._set_modified()
        self._resource.add_component_for_attributes(component_id, version, attributes)

    def remove_component(
        self,
        component_id: bytes,
        attributes: Optional[Type[ResourceAttributes]] = None,
    ):
        """
        Remove any information that this component ran on this resource and/or added a
        particular type of attributes to this resource

        :param component_id: ID of the component to remove information about
        :param attributes: The type of attributes to remove information about

        :return:
        """
        self._set_modified()
        self._resource.remove_component(component_id, attributes)

    def has_component_run(self, component_id: bytes, desired_version: Optional[int] = None) -> bool:
        """
        Check if a particular component has run on this resource

        :param component_id: ID of the component to check for
        :param desired_version: If this is not `None`, also check that a specific version of
        ``component`` ran. Defaults to ``None``.

        :return: `True` if a component matching ``component_id`` and ``desired_version`` ran
        on this resource, `False` otherwise.
        If ``desired_version`` is `None`, only ``component_id`` must be matched to return
        `True`.
        """
        version = self._resource.get_component_version(component_id)
        if version is None:
            return False
        if desired_version is None:
            return True
        return version == desired_version

    def queue_patch(
        self,
        patch_range: Range,
        data: bytes,
    ):
        """
        Replace the data within the provided range with the provided data. This operation may
        shrink, expand or leave untouched the resource's data. Patches are queued up to be
        applied, and will only be applied to the resource's data after the component this was
        called from exits.

        :param patch_range: The range of binary data in this resource to replace
        :param data: The bytes to replace part of this resource's data with

        :return:
        """
        if not self._component_context:
            raise InvalidStateError(
                f"Cannot patch resource {self._resource.id.hex()} without a context"
            )
        if self._resource.data_id is None:
            raise ValueError("Cannot patch a resource with no data")
        self._component_context.modification_trackers[self._resource.id].data_patches.append(
            DataPatch(
                patch_range,
                self._resource.data_id,
                data,
            )
        )
        self._resource.is_modified = True

    async def get_parent_as_view(self, v_type: Type[RV]) -> RV:
        """
        Get the parent of this resource. The parent will be returned as an instance of the
        given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the parent as
        """
        parent_r = await self.get_parent()
        return await parent_r.view_as(v_type)

    async def get_parent(self) -> "Resource":
        """
        Get the parent of this resource.
        """
        models = list(
            await self._resource_service.get_ancestors_by_id(self._resource.id, max_count=1)
        )
        if len(models) != 1:
            raise NotFoundError(f"There is no parent for resource {self._resource.id.hex()}")
        return await self._create_resource(models[0])

    async def get_ancestors(
        self,
        r_filter: Optional[ResourceFilter] = None,
    ) -> Iterable["Resource"]:
        """
        Get all the ancestors of this resource.

        May optionally filter the ancestors so only those matching certain parameters are
        returned.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided
        filter
        """
        models = await self._resource_service.get_ancestors_by_id(
            self._resource.id, r_filter=r_filter
        )
        return await self._create_resources(models)

    async def get_only_ancestor_as_view(
        self,
        v_type: Type[RV],
        r_filter: ResourceFilter,
    ) -> RV:
        """
        Get the only ancestor of this resource which matches the given filter. The ancestor
        will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If more or fewer than one ancestor matches ``r_filter``
        """
        ancestor_r = await self.get_only_ancestor(r_filter)
        return await ancestor_r.view_as(v_type)

    async def get_only_ancestor(self, r_filter: ResourceFilter) -> "Resource":
        """
        Get the only ancestor of this resource which matches the given filter.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:
        """
        # NOTE(review): max_count=1 returns the nearest matching ancestor only; unlike
        # get_only_descendant, a second match is not detected and does not raise — confirm
        # whether "only" is meant strictly here.
        ancestors = list(
            await self._resource_service.get_ancestors_by_id(self._resource.id, 1, r_filter)
        )
        if len(ancestors) == 0:
            raise NotFoundError(
                f"There is no ancestor for resource {self._resource.id.hex()} matching the "
                f"provided filter"
            )
        return await self._create_resource(ancestors[0])

    async def get_descendants_as_view(
        self,
        v_type: Type[RV],
        max_depth: int = -1,
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable[RV]:
        """
        Get all the descendants of this resource.

        May optionally filter the descendants so only those matching certain parameters are
        returned. May optionally sort the descendants by an indexable attribute value key.
        The descendants will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the descendants as
        :param max_depth: Maximum depth from this resource to search for descendants; if -1,
        no maximum depth
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes
        :param r_sort: Specifies which indexable attribute to use as the key to sort and the
        direction to sort

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided
        filter
        """
        descendants = await self.get_descendants(max_depth, r_filter, r_sort)
        views_or_tasks = [r._view_as(v_type) for r in descendants]
        # analysis tasks to generate views of resources which don't have attrs for the view already
        view_tasks: List[Awaitable[RV]] = []
        # each resources' already-existing views OR the index in `view_tasks` of the analysis task
        views_or_task_indexes: List[Union[int, RV]] = []

        for view_or_create_view_task in views_or_tasks:
            if isawaitable(view_or_create_view_task):
                views_or_task_indexes.append(len(view_tasks))
                view_tasks.append(view_or_create_view_task)
            else:
                views_or_task_indexes.append(cast(RV, view_or_create_view_task))

        if view_tasks:
            # Run all required analyses concurrently, then stitch results back in order
            completed_views: Sequence[RV] = await asyncio.gather(*view_tasks)
            return [
                completed_views[v_or_i] if type(v_or_i) is int else cast(RV, v_or_i)
                for v_or_i in views_or_task_indexes
            ]
        else:
            # There are no tasks, so all needed views are already present
            return cast(List[RV], views_or_task_indexes)

    async def get_descendants(
        self,
        max_depth: int = -1,
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable["Resource"]:
        """
        Get all the descendants of this resource.

        May optionally filter the descendants so only those matching certain parameters are
        returned. May optionally sort the descendants by an indexable attribute value key.

        :param max_depth: Maximum depth from this resource to search for descendants; if -1,
        no maximum depth
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes
        :param r_sort: Specifies which indexable attribute to use as the key to sort and the
        direction to sort

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided
        filter
        """
        models = await self._resource_service.get_descendants_by_id(
            self._resource.id, max_depth=max_depth, r_filter=r_filter, r_sort=r_sort
        )
        return await self._create_resources(models)

    async def get_only_descendant_as_view(
        self,
        v_type: Type[RV],
        max_depth: int = -1,
        r_filter: Optional[ResourceFilter] = None,
    ) -> RV:
        """
        If a filter is provided, get the only descendant of this resource which matches the
        given filter. If a filter is not provided, gets the only descendant of this resource.
        The descendant will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].
        :param v_type: The type of [view][ofrak.resource] to get the descendant as
        :param max_depth: Maximum depth from this resource to search for descendants; if -1,
        no maximum depth
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one descendant
        matches ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        descendant
        """
        descendant_r = await self.get_only_descendant(max_depth, r_filter)
        return await descendant_r.view_as(v_type)

    async def get_only_descendant(
        self,
        max_depth: int = -1,
        r_filter: Optional[ResourceFilter] = None,
    ) -> "Resource":
        """
        If a filter is provided, get the only descendant of this resource which matches the
        given filter. If a filter is not provided, gets the only descendant of this resource.

        :param max_depth: Maximum depth from this resource to search for descendants; if -1,
        no maximum depth
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one descendant
        matches ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        descendant
        """
        # max_count=2 is enough to distinguish "exactly one" from "more than one"
        models = list(
            await self._resource_service.get_descendants_by_id(
                self._resource.id,
                max_depth=max_depth,
                max_count=2,
                r_filter=r_filter,
            )
        )
        if len(models) == 0:
            raise NotFoundError(
                f"There is no descendant for resource {self._resource.id.hex()} matching "
                f"the provided filter {r_filter}"
            )
        if len(models) > 1:
            # TODO: Not the right kind of error
            raise NotFoundError(
                f"There are multiple descendants for resource {self._resource.id.hex()} "
                f"matching the provided filter"
            )
        return await self._create_resource(models[0])

    async def get_only_sibling_as_view(
        self,
        v_type: Type[RV],
        r_filter: Optional[ResourceFilter] = None,
    ) -> RV:
        """
        If a filter is provided, get the only sibling of this resource which matches the
        given filter. If a filter is not provided, gets the only sibling of this resource.
        The sibling will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the sibling as
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one sibling
        matches ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        siblings
        """
        sibling_r = await self.get_only_sibling(r_filter)
        return await sibling_r.view_as(v_type)

    async def get_only_sibling(self, r_filter: Optional[ResourceFilter] = None) -> "Resource":
        """
        If a filter is provided, get the only sibling of this resource which matches the
        given filter. If a filter is not provided, gets the only sibling of this resource.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one sibling
        matches ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        siblings
        """
        models = list(
            await self._resource_service.get_siblings_by_id(
                self._resource.id,
                max_count=2,
                r_filter=r_filter,
            )
        )
        if len(models) == 0:
            raise NotFoundError(
                f"There is no sibling for resource {self._resource.id.hex()} matching "
                f"the provided filter"
            )
        if len(models) > 1:
            raise NotFoundError(
                f"There are multiple siblings for resource {self._resource.id.hex()} "
                f"matching the provided filter"
            )
        return await self._create_resource(models[0])

    async def get_children(
        self,
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable["Resource"]:
        """
        Get all the children of this resource.

        May optionally sort the children by an indexable attribute value key. May optionally
        filter the children so only those matching certain parameters are returned.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes
        :param r_sort: Specifies which indexable attribute to use as the key to sort and the
        direction to sort

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided
        filter
        """
        # Children are descendants at depth exactly 1
        return await self.get_descendants(1, r_filter, r_sort)

    async def get_children_as_view(
        self,
        v_type: Type[RV],
        r_filter: Optional[ResourceFilter] = None,
        r_sort: Optional[ResourceSort] = None,
    ) -> Iterable[RV]:
        """
        Get all the children of this resource.

        May optionally filter the children so only those matching certain parameters are
        returned. May optionally sort the children by an indexable attribute value key. The
        children will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].

        :param v_type: The type of [view][ofrak.resource] to get the children as
        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes
        :param r_sort: Specifies which indexable attribute to use as the key to sort and the
        direction to sort

        :return:

        :raises NotFoundError: If a filter was provided and no resources match the provided
        filter
        """
        return await self.get_descendants_as_view(v_type, 1, r_filter, r_sort)

    async def get_only_child(self, r_filter: Optional[ResourceFilter] = None) -> "Resource":
        """
        If a filter is provided, get the only child of this resource which matches the given
        filter. If a filter is not provided, gets the only child of this resource.

        :param r_filter: Contains parameters which resources must match to be returned,
        including any tags it must have and/or values of indexable attributes

        :return:

        :raises NotFoundError: If a filter is provided and more or fewer than one child
        matches ``r_filter``
        :raises NotFoundError: If a filter is not provided and this resource has multiple
        children
        """
        return await self.get_only_descendant(1, r_filter)

    async def get_only_child_as_view(
        self, v_type: Type[RV], r_filter: Optional[ResourceFilter] = None
    ) -> RV:
        """
        If a filter is provided, get the only child of this resource which matches the given
        filter. If a filter is not provided, gets the only child of this resource. The child
        will be returned as an instance of the given
        [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag].
:param v_type: The type of [view][ofrak.resource] to get the child as :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one child matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple children """ return await self.get_only_descendant_as_view(v_type, 1, r_filter) async def delete(self): """ Delete this resource and all of its descendants. :return: """ self._component_context.resources_deleted.add(self._resource.id) for child_r in await self.get_children(): await child_r.delete() self._resource.is_modified = True self._resource.is_deleted = True async def flush_data_to_disk(self, path: str, pack: bool = True): """ Recursively repack the resource and write its data out to a file on disk. If this is a dataless resource, creates an empty file. :param path: Path to the file to write out to. The file is created if it does not exist. """ if pack is True: await self.pack_recursively() data = await self.get_data() if data is not None: with open(path, "wb") as f: f.write(data) else: # Create empty file with open(path, "wb") as f: pass def __repr__(self): properties = [ f"resource_id={self._resource.id.hex()}", f"tag=[{','.join([tag.__name__ for tag in self._resource.tags])}]", ] if self._resource.data_id: properties.append(f"data={self._resource.data_id.hex()}") return f"{type(self).__name__}(" + ", ".join(properties) + f")" async def summarize(self) -> str: """ Create a string summary of this resource, including specific tags, attribute types, and the data offsets of this resource in the parent and root (if applicable). Not that this is not a complete string representation of the resource: not all tags are included, and only the types of attributes are included, not their values. It is a summary which gives a high level overview of the resource. 
""" return await _default_summarize_resource(self) async def summarize_tree( self, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, indent: str = "", summarize_resource_callback: Optional[Callable[["Resource"], Awaitable[str]]] = None, ) -> str: """ Create a string summary of this resource and its (optionally filtered and/or sorted) descendants. The summaries of each resource are the same as the result of [summarize][ofrak.resource.Resource.summarize], organized into a tree structure. If a filter parameter is provided, it is applied recursively: the children of this resource will be filtered, then only those children matching the filter be displayed, and then the same filter will be applied to their children, etc. For example, :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort """ SPACER_BLANK = " " SPACER_LINE = "───" if summarize_resource_callback is None: summarize_resource_callback = _default_summarize_resource children = cast( List[Resource], list(await self.get_children(r_filter=r_filter, r_sort=r_sort)) ) if children: if indent == "": tree_string = "┌" else: tree_string = "┬" else: tree_string = "─" tree_string += f"{await summarize_resource_callback(self)}\n" # All children but the last should display as a "fork" in the drop-down tree # After the last child, a vertical line should not be drawn as part of the indent # Both of those needs are handled here child_formatting: List[Tuple[str, str]] = [ ("├", indent + "│" + SPACER_BLANK) for _ in children[:-1] ] child_formatting.append(("└", indent + " " + SPACER_BLANK)) for child, (branch_symbol, child_indent) in zip(children, child_formatting): child_tree_string = await child.summarize_tree( r_filter=r_filter, r_sort=r_sort, indent=child_indent, summarize_resource_callback=summarize_resource_callback, 
) tree_string += f"{indent}{branch_symbol}{SPACER_LINE}{child_tree_string}" return tree_string class ResourceFilter: include_self: bool = False tags: Optional[Iterable[ResourceTag]] = None tags_condition: ResourceFilterCondition = ResourceFilterCondition.AND attribute_filters: Optional[Iterable[ResourceAttributeFilter]] = None def with_tags(cls, *tags: ResourceTag): return ResourceFilter(tags=tags) async def _create_deserializer(resource: Resource) -> BinaryDeserializer: elf_r = await resource.get_only_ancestor(ResourceFilter(tags=(Elf,))) e_basic_header = await elf_r.get_only_child_as_view( ElfBasicHeader, ResourceFilter.with_tags(ElfBasicHeader) ) deserializer = BinaryDeserializer( io.BytesIO(await resource.get_data()), endianness=e_basic_header.get_endianness(), word_size=int(e_basic_header.get_bitwidth().get_word_size()), ) return deserializer
# NOTE(review): two stray artifact lines ("null" / "15331") were injected here by file
# corruption; they are not part of any module and have been removed.
import asyncio
from typing import Optional, Dict, Type, Tuple

from ofrak.model.tag_model import ResourceTag
from ofrak.component.unpacker import Unpacker
from ofrak.core.code_region import CodeRegion
from ofrak.model.resource_model import ResourceAttributes
from ofrak.model.viewable_tag_model import AttributesType
from ofrak.resource import Resource
from ofrak.service.resource_service_i import ResourceFilter
from ofrak.core.elf.model import (
    ElfHeader,
    ElfProgramHeader,
    ElfProgramHeaderType,
    ElfSegment,
    ElfSectionHeader,
    ElfSection,
    ElfStringSection,
    ElfSectionType,
    ElfSymbolSection,
    ElfSymbol,
    ElfBasicHeader,
    ElfSectionFlag,
    ElfSectionNameStringSection,
    Elf,
    ElfSegmentStructure,
    ElfSectionStructure,
    ElfRelaSection,
    ElfRelaEntry,
    ElfDynSymbolSection,
    ElfDynamicSection,
    ElfDynamicEntry,
    ElfFiniArraySection,
    ElfInitArraySection,
    ElfPointerArraySection,
    ElfSymbolStructure,
    ElfVirtualAddress,
)
from ofrak_type.bit_width import BitWidth
from ofrak_type.memory_permissions import MemoryPermissions
from ofrak_type.range import Range


# Metaclass for resource tags: every class created with this metaclass registers itself
# in ResourceTag.all_tags and records its depth in the tag hierarchy (_specificity).
class ResourceTag(type):
    # Registry of every tag class created with this metaclass.
    # NOTE(review): ``Set`` is not in the visible ``typing`` import above — presumably
    # lost in extraction; confirm against the original import block.
    all_tags: Set["ResourceTag"] = set()

    def __init__(cls, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Specificity is 1 + the max specificity of any direct base tag, so deeper
        # subclasses always compare as more specific than their ancestors.
        specificity = 0
        for base in cls.base_tags():
            specificity = max(specificity, base.tag_specificity())
        cls._specificity: int = specificity + 1
        ResourceTag.all_tags.add(cls)

    def tag_specificity(cls) -> int:
        """
        Indicates how specific an abstraction this tag is.

        :return: The number of classes in the inheritance hierarchy between this class and
        Resource
        """
        return cls._specificity

    def tag_classes(cls) -> Set["ResourceTag"]:
        """
        :return: Set of parent classes (including itself) that are a subclass of a
        _ResourceTag but not the _ResourceTag class.
""" parents = set() parents.add(cls) for base in cls.base_tags(): parents.update(base.tag_classes()) return parents def base_tags(cls) -> Set["ResourceTag"]: """ :return: All _ResourceTags which this tag inherits from directly (does not traverse all ancestors) """ base_tags = set() for base in cls.__bases__: if base is not cls and isinstance(base, ResourceTag) and base.tag_specificity() > 0: base_tags.add(base) return base_tags def sort_tags_into_tiers( tags: "Iterable[ResourceTag]", ) -> "Tuple[Tuple[ResourceTag, ...], ...]": """ Sort the given tags by specificity, and group all the ties together. :param tags: Tags to sort and group :return: Tuple of groups of tags with the same specificity, sorting all of these by the specificity value each group represents from least to greatest. """ levels: List[List[ResourceTag]] = [[], [], [], [], [], [], [], [], [], []] for t in tags: spec = t.tag_specificity() if spec > len(levels): levels.extend([] for _ in range(spec - len(levels))) levels[spec].append(t) return tuple(tuple(level) for level in reversed(levels) if len(level) > 0) def caption(cls, attributes) -> str: # pragma: no cover return str(cls.__name__) def __repr__(cls): return cls.__name__ class ResourceAttributes: DATACLASS_PARAMS = {"frozen": True, "eq": True} """ Wraps immutable attributes attached to a resource. While not enforced programmatically, only analyzers should add/replace attributes attached to a resource. Additionally, a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] instance also defines which component attached the attributes to a specific resource. 
""" def __str__(self): fields_str = ", ".join( f"{field.name}={str(getattr(self, field.name))}" for field in dataclasses.fields(self) ) return f"{self.__class__.__name__}({fields_str})" def get_indexable_attributes(cls) -> List[ResourceIndexedAttribute]: indexable_attributes = [] for name, attr in cls.__dict__.items(): if type(attr) is ResourceIndexedAttribute: indexable_attributes.append(attr) return indexable_attributes def replace_updated(resource_attributes: RA, updated_attributes: Any) -> RA: """ Replace the fields of `resource_attributes` with the updated values found in `updated_attributes`, returning a new object. The fields having non-`None` values in `updated_attributes` are considered to be updated and will be replaced in `resource_attributes` if they exist there. Both arguments must be `dataclass` instances. `updated_attributes` is typically a descendant of [ComponentConfig][ofrak.model.component_model.ComponentConfig]. !!! todo "To do" This currently assumes that values can't be updated to `None`, but that could happen. :raises TypeError: if any of `resource_attributes` or `updated_attributes` isn't a dataclass instance. """ for obj in (resource_attributes, updated_attributes): if not (dataclasses.is_dataclass(obj) and not isinstance(obj, type)): raise TypeError(f"{obj.__name__} must be a dataclass instance") updated_fields = { field: val for field, val in dataclasses.asdict(updated_attributes).items() if val is not None } updated_attributes = dataclasses.replace( resource_attributes, **updated_fields, ) return updated_attributes class Resource: """ Defines methods for interacting with the data and attributes of Resources, the main building block of OFRAK. 
""" __slots__ = ( "_job_id", "_job_context", "_component_context", "_resource_context", "_resource_view_context", "_resource", "_resource_factory", "_id_service", "_resource_service", "_data_service", "_job_service", "_dependency_handler", ) def __init__( self, job_id: bytes, resource: MutableResourceModel, resource_context: ResourceContext, resource_view_context: ResourceViewContext, job_context: Optional[JobRunContext], component_context: ComponentContext, resource_factory: "ResourceFactory", id_service: IDServiceInterface, data_service: DataServiceInterface, resource_service: ResourceServiceInterface, job_service: JobServiceInterface, ): self._job_id: bytes = job_id self._job_context: Optional[JobRunContext] = job_context self._component_context: ComponentContext = component_context self._resource_context: ResourceContext = resource_context self._resource_view_context: ResourceViewContext = resource_view_context self._resource: MutableResourceModel = resource self._resource_factory: "ResourceFactory" = resource_factory self._id_service: IDServiceInterface = id_service self._resource_service: ResourceServiceInterface = resource_service self._data_service: DataServiceInterface = data_service self._job_service: JobServiceInterface = job_service def get_id(self) -> bytes: """ :return: This resource's ID """ return self._resource.id def get_job_id(self) -> bytes: """ Each resource belongs to a specific "job." See [JobServiceInterface][ofrak.service.job_service_i.JobServiceInterface]. :return: The ID of the job this resource belongs to """ return self._job_id def get_data_id(self) -> Optional[bytes]: """ Each resource may have a data ID. This refers to a [DataModel][ofrak.model.data_model.DataModel] representing some chunk of raw binary data. 
        :return: The data ID associated with this resource, if it exists
        """
        return self._resource.data_id

    def get_resource_context(self) -> ResourceContext:
        return self._resource_context

    def get_resource_view_context(self) -> ResourceViewContext:
        return self._resource_view_context

    def get_component_context(self) -> ComponentContext:
        return self._component_context

    def get_job_context(self) -> Optional[JobRunContext]:
        return self._job_context

    def get_caption(self) -> str:
        return self._resource.caption

    def is_modified(self) -> bool:
        """
        Check if the resource has been modified in this context and is considered "dirty".

        :return: `True` if the resource is modified, `False` otherwise
        """
        return self._resource.is_modified

    def get_model(self) -> MutableResourceModel:
        """
        Get the underlying [model][ofrak.model.resource_model.ResourceModel] of this
        resource.

        :return:
        """
        return self._resource

    async def get_data(self, range: Optional[Range] = None) -> bytes:
        """
        A resource often represents a chunk of underlying binary data. This method returns
        the entire chunk by default; this can be reduced by an optional parameter.

        :param range: A range within the resource's data, relative to the resource's data
        itself (e.g. Range(0, 10) returns the first 10 bytes of the chunk)

        :return: The full range or a partial range of this resource's bytes
        """
        if self._resource.data_id is None:
            raise ValueError(
                "Resource does not have a data_id. Cannot get data from a resource with no data"
            )
        data = await self._data_service.get_data(self._resource.data_id, range)
        if range is None:
            range = Range(0, len(data))
        # Record the accessed range so the component context tracks this read.
        self._component_context.access_trackers[self._resource.id].data_accessed.add(range)
        return data

    async def get_data_length(self) -> int:
        """
        :return: The length of the underlying binary data this resource represents
        """
        if self._resource.data_id is None:
            raise ValueError(
                "Resource does not have a data_id. Cannot get data length from a "
                "resource with no data."
            )
        return await self._data_service.get_data_length(self._resource.data_id)

    async def get_data_range_within_parent(self) -> Range:
        """
        If this resource is "mapped," i.e. its underlying data is defined as a range of its
        parent's underlying data, this method returns the range within the parent
        resource's data where this resource lies. If this resource is not mapped (it is
        root), it returns a range starting at 0 with length 0.

        :return: The range of the parent's data which this resource represents
        """
        if self._resource.data_id is None:
            raise ValueError(
                "Resource does not have a data_id. Cannot get data range from a "
                "resource with no data."
            )
        if self._resource.parent_id is None:
            return Range(0, 0)
        parent_models = list(
            await self._resource_service.get_ancestors_by_id(self._resource.id, max_count=1)
        )
        if len(parent_models) != 1:
            raise NotFoundError(f"There is no parent for resource {self._resource.id.hex()}")
        parent_model = parent_models[0]
        parent_data_id = parent_model.data_id
        if parent_data_id is None:
            return Range(0, 0)
        try:
            return await self._data_service.get_range_within_other(
                self._resource.data_id, parent_data_id
            )
        except ValueError:
            # The data service signals "not mapped within the parent" with ValueError;
            # treat that the same as an unmapped resource.
            return Range(0, 0)

    async def get_data_range_within_root(self) -> Range:
        """
        Does the same thing as `get_data_range_within_parent`, except the range is
        relative to the root.

        :return: The range of the root node's data which this resource represents
        """
        if self._resource.data_id is None:
            raise ValueError(
                "Resource does not have a data_id. Cannot get data range from a "
                "resource with no data."
            )
        return await self._data_service.get_data_range_within_root(self._resource.data_id)

    # NOTE(review): the two stub signatures below are typing overloads of search_data;
    # the @overload decorators appear to have been lost in extraction — without them the
    # second ``def`` silently replaces the first. Confirm against the original source.
    async def search_data(
        self,
        query: Pattern[bytes],
        start: Optional[int] = None,
        end: Optional[int] = None,
        max_matches: Optional[int] = None,
    ) -> Tuple[Tuple[int, bytes], ...]:
        ...

    async def search_data(
        self,
        query: bytes,
        start: Optional[int] = None,
        end: Optional[int] = None,
        max_matches: Optional[int] = None,
    ) -> Tuple[int, ...]:
        ...
    async def search_data(self, query, start=None, end=None, max_matches=None):
        """
        Search for some data in this resource. The query may be a regex pattern (a return
        value of `re.compile`). If the query is a regex pattern, returns a tuple of pairs
        with both the offset of the match and the contents of the match itself. If the
        query is plain bytes, a list of only the match offsets are returned.

        :param query: Plain bytes to exactly match or a regex pattern to search for
        :param start: Start offset in the data model to begin searching
        :param end: End offset in the data model to stop searching
        :param max_matches: Maximum number of matches to return; `None` for no limit

        :return: A tuple of offsets matching a plain bytes query, or a list of (offset,
        match) pairs for a regex pattern query
        """
        return await self._data_service.search(self.get_data_id(), query, start, end, max_matches)

    async def save(self):
        """
        If this resource has been modified, update the model stored in the resource
        service with the local changes.

        :raises NotFoundError: If the resource service does not have a model for this
        resource's ID
        """
        await save_resources(
            (self,),
            self._resource_service,
            self._data_service,
            self._component_context,
            self._resource_context,
            self._resource_view_context,
        )

    def _save(self) -> Tuple[List[bytes], List[DataPatch], List[MutableResourceModel]]:
        # Collect this resource's pending changes for a batched save: deletions, queued
        # data patches, and the updated model itself.
        resources_to_delete: List[bytes] = []
        patches_to_apply: List[DataPatch] = []
        resources_to_update: List[MutableResourceModel] = []
        if self._resource.is_deleted:
            resources_to_delete.append(self._resource.id)
        elif self._resource.is_modified:
            modification_tracker = self._component_context.modification_trackers.get(
                self._resource.id
            )
            assert modification_tracker is not None, (
                f"Resource {self._resource.id.hex()} was "
                f"marked as modified but is missing a tracker!"
            )
            patches_to_apply.extend(modification_tracker.data_patches)
            resources_to_update.append(self._resource)
            # The caller now owns the patches; clear them so they are not applied twice.
            modification_tracker.data_patches.clear()
        return resources_to_delete, patches_to_apply, resources_to_update

    async def _fetch(self, resource: MutableResourceModel):
        """
        Update the local model with the latest version from the resource service. This
        will fail if this resource has been modified.

        :raises InvalidStateError: If the local resource model has been modified
        :raises NotFoundError: If the resource service does not have a model for this
        resource's ID
        """
        if resource.is_modified and not resource.is_deleted:
            raise InvalidStateError(
                f"Cannot fetch dirty resource {resource.id.hex()} (resource "
                f"{self.get_id().hex()} attempted fetch)"
            )
        try:
            fetched_resource = await self._resource_service.get_by_id(resource.id)
        except NotFoundError:
            # The resource no longer exists in the service; drop it from the local
            # context instead of propagating the error.
            if (
                resource.id in self._component_context.modification_trackers
                and resource.id in self._resource_context.resource_models
            ):
                del self._resource_context.resource_models[resource.id]
            return

        resource.reset(fetched_resource)

    async def _fetch_resources(self, resource_ids: Iterable[bytes]):
        # Concurrently re-fetch every listed resource that exists in the local context.
        tasks = []
        for resource_id in resource_ids:
            context_resource = self._resource_context.resource_models.get(resource_id)
            if context_resource is not None:
                tasks.append(self._fetch(context_resource))
        await asyncio.gather(*tasks)

    async def _update_views(self, modified: Set[bytes], deleted: Set[bytes]):
        # Refresh cached views of modified resources in place, and mark views of deleted
        # resources as deleted.
        for resource_id in modified:
            views_in_context = self._resource_view_context.views_by_resource[resource_id]
            for view in views_in_context.values():
                if resource_id not in self._resource_context.resource_models:
                    await self._fetch(view.resource.get_model())  # type: ignore
                if resource_id not in self._resource_context.resource_models:
                    # Fetch failed to restore the model: the resource was deleted.
                    view.set_deleted()
                    continue
                updated_model = self._resource_context.resource_models[resource_id]
                fresh_view = view.create(updated_model)
                # Copy every field except the Resource back-reference onto the cached view.
                for field in dataclasses.fields(fresh_view):
                    if field.name == "_resource":
                        continue
                    setattr(view,
                            field.name, getattr(fresh_view, field.name))
        for resource_id in deleted:
            views_in_context = self._resource_view_context.views_by_resource[resource_id]
            for view in views_in_context.values():
                view.set_deleted()

    async def run(
        self,
        component_type: Type[ComponentInterface[CC]],
        config: CC = None,
    ) -> ComponentRunResult:
        """
        Run a single component. Runs even if the component has already been run on this
        resource.

        :param component_type: The component type (may be an interface) to get and run
        :param config: Optional config to pass to the component

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        job_context = self._job_context
        component_result = await self._job_service.run_component(
            JobComponentRequest(
                self._job_id,
                self._resource.id,
                component_type.get_id(),
                config,
            ),
            job_context,
        )
        # Trackers for deleted resources are stale; drop them before refreshing state.
        for deleted_id in component_result.resources_deleted:
            if deleted_id in self._component_context.modification_trackers:
                del self._component_context.modification_trackers[deleted_id]
        await self._fetch_resources(component_result.resources_modified)
        await self._update_views(
            component_result.resources_modified, component_result.resources_deleted
        )
        return component_result

    async def auto_run(
        self,
        components: Iterable[Type[ComponentInterface]] = tuple(),
        blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(),
        all_unpackers: bool = False,
        all_identifiers: bool = False,
        all_analyzers: bool = False,
        all_packers: bool = False,
    ) -> ComponentRunResult:
        """
        Automatically run multiple components which may run on this resource.

        From an initial set of possible components to run, this set is searched for
        components for which the intersection of the component's targets and this
        resource's tags is not empty. Accepts several optional flags to expand or restrict
        the initial set of components.
        :param components: Components to explicitly add to the initial set of components
        :param blacklisted_components: Components to explicitly remove to the initial set
        of components
        :param all_unpackers: If true, all Unpackers are added to the initial set of
        components
        :param all_identifiers: If true, all Identifiers are added to the initial set of
        components
        :param all_analyzers: If true, all Analyzers are added to the initial set of
        components
        :param all_packers: If true, all Packers are added to the initial set of components

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        components_result = await self._job_service.run_components(
            JobMultiComponentRequest(
                self._job_id,
                self._resource.id,
                components_allowed=tuple(c.get_id() for c in components),
                components_disallowed=tuple(c.get_id() for c in blacklisted_components),
                all_unpackers=all_unpackers,
                all_identifiers=all_identifiers,
                all_analyzers=all_analyzers,
                all_packers=all_packers,
            )
        )
        # Trackers for deleted resources are stale; drop them before refreshing state.
        for deleted_id in components_result.resources_deleted:
            if deleted_id in self._component_context.modification_trackers:
                del self._component_context.modification_trackers[deleted_id]
        await self._fetch_resources(components_result.resources_modified)
        await self._update_views(
            components_result.resources_modified, components_result.resources_deleted
        )
        return components_result

    async def unpack(self) -> ComponentRunResult:
        """
        Unpack the resource.

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        return await self.auto_run(all_identifiers=True, all_unpackers=True)

    async def analyze(self, resource_attributes: Type[RA]) -> RA:
        """
        Analyze the resource for a specific resource attribute.
        :param Type[RA] resource_attributes: The type of attributes to analyze this
        resource for

        :return: The up-to-date attributes of the requested type
        """
        attributes = self._check_attributes(resource_attributes)
        if attributes is None:
            # Attributes missing or out of date; run the appropriate analyzer(s) first.
            await self._analyze_attributes((resource_attributes,))
            return self.get_attributes(resource_attributes)
        else:
            return attributes

    async def identify(self) -> ComponentRunResult:
        """
        Run all registered identifiers on the resource, tagging it with matching resource
        tags.
        """
        return await self.auto_run(all_identifiers=True)

    async def pack(self) -> ComponentRunResult:
        """
        Pack the resource.

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        return await self.auto_run(all_packers=True)

    async def auto_run_recursively(
        self,
        components: Iterable[Type[ComponentInterface]] = tuple(),
        blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(),
        blacklisted_tags: Iterable[ResourceTag] = tuple(),
        all_unpackers: bool = False,
        all_identifiers: bool = False,
        all_analyzers: bool = False,
    ) -> ComponentRunResult:
        """
        Automatically run multiple components which may run on this resource or its
        descendents.

        From an initial set of possible components to run, this set is searched for
        components for which the intersection of the component's targets and this
        resource's tags is not empty. Accepts several optional flags to expand or restrict
        the initial set of components.

        After each run, compatible components from the initial set are run on any
        resources which have had tags added (including newly created resources). This is
        repeated until no new tags are added.
        :param components: Components to explicitly add to the initial set of components
        :param blacklisted_components: Components to explicitly remove to the initial set
        of components
        :param blacklisted_tags: Resources with these tags are skipped by the recursion
        :param all_unpackers: If true, all Unpackers are added to the initial set of
        components
        :param all_identifiers: If true, all Identifiers are added to the initial set of
        components
        :param all_analyzers: If true, all Analyzers are added to the initial set of
        components

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        components_result = await self._job_service.run_components_recursively(
            JobMultiComponentRequest(
                self._job_id,
                self._resource.id,
                components_allowed=tuple(c.get_id() for c in components),
                components_disallowed=tuple(c.get_id() for c in blacklisted_components),
                all_unpackers=all_unpackers,
                all_identifiers=all_identifiers,
                all_analyzers=all_analyzers,
                tags_ignored=tuple(blacklisted_tags),
            )
        )
        await self._fetch_resources(components_result.resources_modified)
        await self._update_views(
            components_result.resources_modified, components_result.resources_deleted
        )
        return components_result

    async def unpack_recursively(
        self,
        blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(),
        do_not_unpack: Iterable[ResourceTag] = tuple(),
    ) -> ComponentRunResult:
        """
        Automatically unpack this resource and recursively unpack all of its descendants.
        First this resource is unpacked; then, any resource which "valid" tags were added
        to will also be unpacked. New resources created with tags count as resources with
        new tags. A "valid" tag is a tag which is not explicitly ignored via the
        ``do_not_unpack`` argument. The unpacking will only stop when no new "valid" tags
        have been added in the previous iteration. This can lead to a very long unpacking
        process if it is totally unconstrained.

        :param blacklisted_components: Components which are blocked from running during
        the recursive unpacking, on this resource or any descendants.
        :param do_not_unpack: Do not unpack resources with this tag, and ignore these tags
        when checking if any new tags have been added in this iteration.

        :return: A ComponentRunResult containing information on resources affected by the
        component
        """
        return await self.auto_run_recursively(
            all_identifiers=True,
            all_unpackers=True,
            blacklisted_components=blacklisted_components,
            blacklisted_tags=do_not_unpack,
        )

    async def analyze_recursively(self) -> ComponentRunResult:
        """Run all analyzers on this resource and all of its descendants."""
        return await self.auto_run_recursively(all_analyzers=True)

    async def pack_recursively(self) -> ComponentRunResult:
        """
        Recursively pack the resource, starting with its descendants.
        """
        return await self._job_service.pack_recursively(self._job_id, self._resource.id)

    async def write_to(self, destination: BinaryIO, pack: bool = True):
        """
        Recursively repack resource and write data out to an arbitrary ``BinaryIO``
        destination.

        :param destination: Destination for packed resource data
        :param pack: If true (default), recursively pack the resource before writing

        :return:
        """
        if pack is True:
            await self.pack_recursively()

        destination.write(await self.get_data())

    async def _analyze_attributes(self, attribute_types: Tuple[Type[ResourceAttributes], ...]):
        # Ask the job service to run whichever analyzers produce the requested attribute
        # types for this resource's current tags.
        job_context = self._job_context
        components_result = await self._job_service.run_analyzer_by_attribute(
            JobAnalyzerRequest(
                self._job_id,
                self._resource.id,
                attribute_types,
                tuple(self._resource.tags),
            ),
            job_context,
        )
        # Update all the resources in the local context that were modified as part of the
        # analysis
        await self._fetch_resources(components_result.resources_modified)
        await self._update_views(
            components_result.resources_modified, components_result.resources_deleted
        )
        return components_result

    async def _create_resource(self, resource_model: ResourceModel) -> "Resource":
        # Wrap an already-persisted model in a Resource bound to this resource's contexts.
        return await self._resource_factory.create(
            self._job_id,
            resource_model.id,
            self._resource_context,
            self._resource_view_context,
            self._component_context,
            self._job_context,
        )

    async def _create_resources(
        self, resource_models: Iterable[ResourceModel]
    ) -> 
Iterable["Resource"]: return await self._resource_factory.create_many( self._job_id, [resource_model.id for resource_model in resource_models], self._resource_context, self._resource_view_context, self._component_context, self._job_context, ) async def create_child( self, tags: Iterable[ResourceTag] = None, attributes: Iterable[ResourceAttributes] = None, data: Optional[bytes] = None, data_range: Optional[Range] = None, ) -> "Resource": """ Create a new resource as a child of this resource. This method entirely defines the child's tags and attributes. This method also defines the child's data semantics: A child resource can either be defined in one of three ways: 1) The resource contains no data ("Dataless" resource). Not used in practice. 2) As mapping a range of its parent's data ("Mapped" resource). For example, an instruction maps a portion of its parent basic block. 3) Defining its own new, independent data ("Unmapped" resource). For example, a file extracted from a zip archive is a child of the zip archive resource, but its data does not map to some specific range of that parent archive. By default a resource will be defined the third way (unmapped). To specify that the resource is a mapped resource, include the optional ``data_range`` parameter set to the range of the parent's data which the child maps. That is, `data_range=Range(0, 10)` creates a resource which maps the first 10 bytes of the parent. The optional ``data`` param defines whether to populate the new child's data. It can be used only if the data is unmapped. If the child is unmapped, the value of ``data`` still becomes that child's data, but the parent's data is unaffected. If ``data`` and ``data_range`` are both `None` (default), the new child is a dataless resource. 
        The following table sums up the possible interactions between ``data`` and
        ``data_range``:

        |                           | ``data_range`` param not `None`       | ``data_range`` param `None`                  |
        |---------------------------|---------------------------------------|----------------------------------------------|
        | ``data`` param not `None` | Not allowed                           | Child unmapped, child's data set to ``data`` |
        | ``data`` param `None`     | Child mapped, parent's data untouched | Child is dataless                            |

        :param tags: [tags][ofrak.model.tag_model.ResourceTag] to add to the new child
        :param attributes: [attributes][ofrak.model.resource_model.ResourceAttributes] to
        add to the new child
        :param data: The binary data for the new child. If `None` and ``data_range`` is
        `None`, the resource has no data. Defaults to `None`.
        :param data_range: The range of the parent's data which the new child maps. If
        `None` ( default), the child will not map the parent's data.

        :return: The newly created child resource
        """
        if data is not None and data_range is not None:
            raise ValueError(
                "Cannot create a child from both data and data_range. These parameters are "
                "mutually exclusive."
            )
        resource_id = self._id_service.generate_id()
        if data_range is not None:
            # Mapped child: its data model is a window into this resource's data.
            if self._resource.data_id is None:
                raise ValueError(
                    "Cannot create a child with mapped data from a parent that doesn't have data"
                )
            data_model_id = resource_id
            await self._data_service.create_mapped(
                data_model_id,
                self._resource.data_id,
                data_range,
            )
            data_attrs = Data(data_range.start, data_range.length())
            attributes = [data_attrs, *attributes] if attributes else [data_attrs]
        elif data is not None:
            # Unmapped child: gets its own independent data root.
            if self._resource.data_id is None:
                raise ValueError(
                    "Cannot create a child with data from a parent that doesn't have data"
                )
            data_model_id = resource_id
            await self._data_service.create_root(data_model_id, data)
            data_attrs = Data(0, len(data))
            attributes = [data_attrs, *attributes] if attributes else [data_attrs]
        else:
            # Dataless child.
            data_model_id = None
        resource_model = ResourceModel.create(
            resource_id,
            data_model_id,
            self._resource.id,
            tags,
            attributes,
            self._component_context.component_id,
            self._component_context.component_version,
        )
        await self._resource_service.create(resource_model)
        # Record the new resource's tags and creation in the job/component contexts.
        if self._job_context:
            resource_tracker = self._job_context.trackers[resource_model.id]
            resource_tracker.tags_added.update(resource_model.tags)
        self._component_context.mark_resource_modified(resource_id)
        self._component_context.resources_created.add(resource_model.id)
        created_resource = await self._create_resource(resource_model)
        return created_resource

    async def create_child_from_view(
        self,
        view: RV,
        data_range: Optional[Range] = None,
        data: Optional[bytes] = None,
        additional_tags: Iterable[ResourceTag] = (),
        additional_attributes: Iterable[ResourceAttributes] = (),
    ) -> "Resource":
        """
        Create a new resource as a child of this resource. The new resource will have tags
        and attributes as defined by the
        [view][ofrak.model.viewable_tag_model.ViewableResourceTag]; in this way a view can
        act as a template to create a new resource.
        The ``additional_tags`` and ``additional_attributes`` can also be used to add more
        tags and attributes beyond what the view contains.

        This method's ``data`` and ``data_range`` parameters have the same semantics as in
        `create_child`, in short:

        |                           | ``data_range`` param not `None`                        | ``data_range`` param `None`                  |
        |---------------------------|--------------------------------------------------------|----------------------------------------------|
        | ``data`` param not `None` | Child mapped, ``data`` patched into child (and parent) | Child unmapped, child's data set to ``data`` |
        | ``data`` param `None`     | Child mapped, parent's data untouched                  | Child is dataless                            |

        See `create_child` documentation for details.

        :param view: A [resource view][ofrak.resource_view] to pull
        [tags][ofrak.model.tag_model.ResourceTag] and
        [attributes][ofrak.model.resource_model.ResourceAttributes] from to populate the
        new child
        :param data_range: The range of the parent's data which the new child maps. If
        `None` ( default), the child will not map the parent's data.
        :param data: The binary data for the new child. If `None` and ``data_range`` is
        `None`, the resource has no data. Defaults to `None`.
        :param additional_tags: Any [tags][ofrak.model.tag_model.ResourceTag] for the
        child in addition to those from the ``view``
        :param additional_attributes: Any
        [attributes][ofrak.model.resource_model.ResourceAttributes] for the child in
        addition to those from the ``view``

        :return: The newly created child resource
        """
        viewable_tag: ViewableResourceTag = type(view)
        new_resource = await self.create_child(
            tags=(viewable_tag, *additional_tags),
            attributes=(*view.get_attributes_instances().values(), *additional_attributes),
            data_range=data_range,
            data=data,
        )
        return new_resource

    def _view_as(self, viewable_tag: Type[RV]) -> Union[RV, Awaitable[RV]]:
        """
        Try to get a view without calling any analysis, to avoid as many unnecessary
        `asyncio.gather` calls as possible.
        Checks cached views first for view, and if not found, then checks if the
        attributes needed to create the view are already present and up-to-date, and only
        if both of those are not found does it return an awaitable.
        """
        if self._resource_view_context.has_view(self.get_id(), viewable_tag):
            # First early return: View already exists in cache
            return self._resource_view_context.get_view(self.get_id(), viewable_tag)

        if not issubclass(viewable_tag, ResourceViewInterface):
            raise ValueError(
                f"Cannot get view for resource {self.get_id().hex()} of a type "
                f"{viewable_tag.__name__} because it is not a subclass of ResourceView"
            )
        if not self.has_tag(viewable_tag):
            raise ValueError(
                f"Cannot get resource {self.get_id().hex()} as view "
                f"{viewable_tag.__name__} because the resource is not tagged as a "
                f"{viewable_tag.__name__}"
            )

        composed_attrs_types = viewable_tag.composed_attributes_types
        existing_attributes = [self._check_attributes(attrs_t) for attrs_t in composed_attrs_types]
        if all(existing_attributes):
            # Second early return: All attributes needed for view are present and up-to-date
            view = viewable_tag.create(self.get_model())
            view.resource = self  # type: ignore
            self._resource_view_context.add_view(self.get_id(), view)
            return cast(RV, view)

        # Only if analysis is absolutely necessary is an awaitable created and returned
        async def finish_view_creation(
            attrs_to_analyze: Tuple[Type[ResourceAttributes], ...]
        ) -> RV:
            await self._analyze_attributes(attrs_to_analyze)
            view = viewable_tag.create(self.get_model())
            view.resource = self  # type: ignore
            self._resource_view_context.add_view(self.get_id(), view)
            return cast(RV, view)

        # Analyze only the attribute types that were missing or out of date.
        return finish_view_creation(
            tuple(
                attrs_t
                for attrs_t, existing in zip(composed_attrs_types, existing_attributes)
                if not existing
            )
        )

    async def view_as(self, viewable_tag: Type[RV]) -> RV:
        """
        Provides a specific type of view instance for this resource.
The returned instance is an object which has some of the information from this same resource, however in a simpler interface. This resource instance will itself remain available through the view's ``.resource`` property. :param viewable_tag: A ViewableResourceTag, which this resource's model must already contain :raises ValueError: If the model does not contain this tag, or this tag is not a ViewableResourceTag :return: """ view_or_create_view_task: Union[RV, Awaitable[RV]] = self._view_as(viewable_tag) if isawaitable(view_or_create_view_task): return await view_or_create_view_task else: return cast(RV, view_or_create_view_task) def add_view(self, view: ResourceViewInterface): """ Add all the attributes composed in a view to this resource, and tag this resource with the view type. Calling this is the equivalent of making N ``add_attributes`` calls and one ``add_tag`` call (where N is the number of attributes the view is composed of). :param view: An instance of a view """ for attributes in view.get_attributes_instances().values(): # type: ignore self.add_attributes(attributes) self.add_tag(type(view)) def _set_modified(self): self._component_context.mark_resource_modified(self._resource.id) def _add_tag(self, tag: ResourceTag): """ Associate a tag with the resource. If the resource already have the provided tag, it has no effects. All parent classes of the provided tag that are tags themselves are also added. """ if self._resource.has_tag(tag, False): return self._component_context.mark_resource_modified(self._resource.id) new_tags = self._resource.add_tag(tag) if self._job_context: resource_tracker = self._job_context.trackers[self._resource.id] resource_tracker.tags_added.update(new_tags) def add_tag(self, *tags: ResourceTag): """ Associate multiple tags with the resource. If the resource already have one of the provided tag, the tag is not added. All parent classes of the provided tag that are tags themselves are also added. 
""" for tag in tags: self._add_tag(tag) def get_tags(self, inherit: bool = True) -> Iterable[ResourceTag]: """ Get a set of tags associated with the resource. """ return self._resource.get_tags(inherit) def has_tag(self, tag: ResourceTag, inherit: bool = True) -> bool: """ Determine if the resource is associated with the provided tag. """ return self._resource.has_tag(tag, inherit) def remove_tag(self, tag: ResourceTag): if not self._resource.has_tag(tag): return self._set_modified() self._resource.remove_tag(tag) def get_most_specific_tags(self) -> Iterable[ResourceTag]: """ Get all tags associated with the resource from which no other tags on that resource inherit. In other words, get the resource's tags that aren't subclassed by other tags on the resource. For example, for a resource tagged as `Elf`, the result would be just `[Elf]` instead of `[Elf, Program, GenericBinary]` that `Resource.get_tags` returns. This is because `Elf` inherits from `Program`, which inherits from `GenericBinary`. Even though the resource has all of those tags, the most derived class with no other derivatives is the "most specific." """ return self._resource.get_most_specific_tags() def _check_attributes(self, attributes_type: Type[RA]) -> Optional[RA]: """ Try to get the current attributes. TODO: Should we be using the version as well? The client wouldn't know the version of the component in a client-server environment. We could do that efficiently by adding a service method that list all available components (and their version) :param attributes_type: The type of attributes to check this resource for. :return: The requested attributes if they are present and up-to-date, otherwise return None. 
""" attributes = self._resource.get_attributes(attributes_type) if attributes is not None: # Make sure that the attributes have not been invalidated component_id = self._resource.get_component_id_by_attributes(type(attributes)) if component_id is not None: return attributes return None def _add_attributes(self, attributes: ResourceAttributes): existing_attributes = self._resource.get_attributes(type(attributes)) if existing_attributes is not None and existing_attributes == attributes: return self._set_modified() self._resource.add_attributes(attributes) component_context = self._component_context self._resource.add_component_for_attributes( component_context.component_id, component_context.component_version, type(attributes) ) def add_attributes(self, *attributes: ResourceAttributes): """ Add the provided attributes to the resource. If the resource already have the provided attributes classes, they are replaced with the provided one. """ for attrs in attributes: self._add_attributes(attrs) def has_attributes(self, attributes_type: Type[ResourceAttributes]) -> bool: """ Check if this resource has a value for the given attributes type. :param attributes_type: :return: """ return self._resource.has_attributes(attributes_type) def get_attributes(self, attributes_type: Type[RA]) -> RA: """ If this resource has attributes matching the given type, return the value of those attributes. Otherwise returns `None`. :param attributes_type: :return: """ attributes = self._resource.get_attributes(attributes_type) if attributes is None: raise NotFoundError( f"Cannot find attributes {attributes_type} for resource {self.get_id().hex()}" ) self._component_context.access_trackers[self._resource.id].attributes_accessed.add( attributes_type ) return attributes def remove_attributes(self, attributes_type: Type[ResourceAttributes]): """ Remove the value of a given attributes type from this resource, if there is such a value. 
If the resource does not have a value for the given attributes type, do nothing. :param attributes_type: :return: """ if not self._resource.has_attributes(attributes_type): return self._set_modified() self._resource.remove_attributes(attributes_type) def add_component( self, component_id: bytes, version: int, ): """ Mark that a component has run on this resource :param component_id: ID of the component which ran :param version: Version of the component which ran :return: """ self._set_modified() self._resource.add_component(component_id, version) def add_component_for_attributes( self, component_id: bytes, version: int, attributes: Type[ResourceAttributes], ): """ Mark that a component was responsible for adding some attributes to this resource. :param component_id: ID of the component which added the attributes :param version: version of the component which added the attributes :param attributes: The type of attributes which were added :return: """ self._set_modified() self._resource.add_component_for_attributes(component_id, version, attributes) def remove_component( self, component_id: bytes, attributes: Optional[Type[ResourceAttributes]] = None, ): """ Remove any information that this component ran on this resource and/or added a particular type of attributes to this resource :param component_id: ID of the component to remove information about :param attributes: The type of attributes to remove information about :return: """ self._set_modified() self._resource.remove_component(component_id, attributes) def has_component_run(self, component_id: bytes, desired_version: Optional[int] = None) -> bool: """ Check if a particular component has run on this resource :param component_id: ID of the component to check for :param desired_version: If this is not `None`, also check that a specific version of ``component`` ran. Defaults to ``None``. :return: `True` if a component matching ``component_id`` and ``desired_version`` ran on this resource, `False` otherwise. 
If ``desired_version`` is `None`, only ``component_id`` must be matched to return `True`. """ version = self._resource.get_component_version(component_id) if version is None: return False if desired_version is None: return True return version == desired_version def queue_patch( self, patch_range: Range, data: bytes, ): """ Replace the data within the provided range with the provided data. This operation may shrink, expand or leave untouched the resource's data. Patches are queued up to be applied, and will only be applied to the resource's data after the component this was called from exits. :param patch_range: The range of binary data in this resource to replace :param data: The bytes to replace part of this resource's data with :return: """ if not self._component_context: raise InvalidStateError( f"Cannot patch resource {self._resource.id.hex()} without a context" ) if self._resource.data_id is None: raise ValueError("Cannot patch a resource with no data") self._component_context.modification_trackers[self._resource.id].data_patches.append( DataPatch( patch_range, self._resource.data_id, data, ) ) self._resource.is_modified = True async def get_parent_as_view(self, v_type: Type[RV]) -> RV: """ Get the parent of this resource. The parent will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param v_type: The type of [view][ofrak.resource] to get the parent as """ parent_r = await self.get_parent() return await parent_r.view_as(v_type) async def get_parent(self) -> "Resource": """ Get the parent of this resource. """ models = list( await self._resource_service.get_ancestors_by_id(self._resource.id, max_count=1) ) if len(models) != 1: raise NotFoundError(f"There is no parent for resource {self._resource.id.hex()}") return await self._create_resource(models[0]) async def get_ancestors( self, r_filter: ResourceFilter = None, ) -> Iterable["Resource"]: """ Get all the ancestors of this resource. 
May optionally filter the ancestors so only those matching certain parameters are returned. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ models = await self._resource_service.get_ancestors_by_id( self._resource.id, r_filter=r_filter ) return await self._create_resources(models) async def get_only_ancestor_as_view( self, v_type: Type[RV], r_filter: ResourceFilter, ) -> RV: """ Get the only ancestor of this resource which matches the given filter. The ancestor will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If more or fewer than one ancestor matches ``r_filter`` """ ancestor_r = await self.get_only_ancestor(r_filter) return await ancestor_r.view_as(v_type) async def get_only_ancestor(self, r_filter: ResourceFilter) -> "Resource": """ Get the only ancestor of this resource which matches the given filter. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: """ ancestors = list( await self._resource_service.get_ancestors_by_id(self._resource.id, 1, r_filter) ) if len(ancestors) == 0: raise NotFoundError( f"There is no ancestor for resource {self._resource.id.hex()} matching the " f"provided filter" ) return await self._create_resource(ancestors[0]) async def get_descendants_as_view( self, v_type: Type[RV], max_depth: int = -1, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable[RV]: """ Get all the descendants of this resource. 
May optionally filter the descendants so only those matching certain parameters are returned. May optionally sort the descendants by an indexable attribute value key. The descendants will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param v_type: The type of [view][ofrak.resource] to get the descendants as :param max_depth: Maximum depth from this resource to search for descendants; if -1, no maximum depth :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ descendants = await self.get_descendants(max_depth, r_filter, r_sort) views_or_tasks = [r._view_as(v_type) for r in descendants] # analysis tasks to generate views of resources which don't have attrs for the view already view_tasks: List[Awaitable[RV]] = [] # each resources' already-existing views OR the index in `view_tasks` of the analysis task views_or_task_indexes: List[Union[int, RV]] = [] for view_or_create_view_task in views_or_tasks: if isawaitable(view_or_create_view_task): views_or_task_indexes.append(len(view_tasks)) view_tasks.append(view_or_create_view_task) else: views_or_task_indexes.append(cast(RV, view_or_create_view_task)) if view_tasks: completed_views: Sequence[RV] = await asyncio.gather(*view_tasks) return [ completed_views[v_or_i] if type(v_or_i) is int else cast(RV, v_or_i) for v_or_i in views_or_task_indexes ] else: # There are no tasks, so all needed views are already present return cast(List[RV], views_or_task_indexes) async def get_descendants( self, max_depth: int = -1, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable["Resource"]: """ Get all the descendants of this resource. 
May optionally filter the descendants so only those matching certain parameters are returned. May optionally sort the descendants by an indexable attribute value key. :param max_depth: Maximum depth from this resource to search for descendants; if -1, no maximum depth :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ models = await self._resource_service.get_descendants_by_id( self._resource.id, max_depth=max_depth, r_filter=r_filter, r_sort=r_sort ) return await self._create_resources(models) async def get_only_descendant_as_view( self, v_type: Type[RV], max_depth: int = -1, r_filter: ResourceFilter = None, ) -> RV: """ If a filter is provided, get the only descendant of this resource which matches the given filter. If a filter is not provided, gets the only descendant of this resource. The descendant will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. 
:param v_type: The type of [view][ofrak.resource] to get the descendant as :param max_depth: Maximum depth from this resource to search for descendants; if -1, no maximum depth :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one descendant matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple descendant """ descendant_r = await self.get_only_descendant(max_depth, r_filter) return await descendant_r.view_as(v_type) async def get_only_descendant( self, max_depth: int = -1, r_filter: ResourceFilter = None, ) -> "Resource": """ If a filter is provided, get the only descendant of this resource which matches the given filter. If a filter is not provided, gets the only descendant of this resource. :param max_depth: Maximum depth from this resource to search for descendants; if -1, no maximum depth :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one descendant matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple descendant """ models = list( await self._resource_service.get_descendants_by_id( self._resource.id, max_depth=max_depth, max_count=2, r_filter=r_filter, ) ) if len(models) == 0: raise NotFoundError( f"There is no descendant for resource {self._resource.id.hex()} matching " f"the provided filter {r_filter}" ) if len(models) > 1: # TODO: Not the right kind of error raise NotFoundError( f"There are multiple descendants for resource {self._resource.id.hex()} " f"matching the provided filter" ) return await self._create_resource(models[0]) async def get_only_sibling_as_view( self, v_type: Type[RV], r_filter: 
ResourceFilter = None, ) -> RV: """ If a filter is provided, get the only sibling of this resource which matches the given filter. If a filter is not provided, gets the only sibling of this resource. The sibling will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param v_type: The type of [view][ofrak.resource] to get the sibling as :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one sibling matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple siblings """ sibling_r = await self.get_only_sibling(r_filter) return await sibling_r.view_as(v_type) async def get_only_sibling(self, r_filter: ResourceFilter = None) -> "Resource": """ If a filter is provided, get the only sibling of this resource which matches the given filter. If a filter is not provided, gets the only sibling of this resource. 
:param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one sibling matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple siblings """ models = list( await self._resource_service.get_siblings_by_id( self._resource.id, max_count=2, r_filter=r_filter, ) ) if len(models) == 0: raise NotFoundError( f"There is no sibling for resource {self._resource.id.hex()} matching " f"the provided filter" ) if len(models) > 1: raise NotFoundError( f"There are multiple siblings for resource {self._resource.id.hex()} " f"matching the provided filter" ) return await self._create_resource(models[0]) async def get_children( self, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable["Resource"]: """ Get all the children of this resource. May optionally sort the children by an indexable attribute value key. May optionally filter the children so only those matching certain parameters are returned. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ return await self.get_descendants(1, r_filter, r_sort) async def get_children_as_view( self, v_type: Type[RV], r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable[RV]: """ Get all the children of this resource. May optionally filter the children so only those matching certain parameters are returned. May optionally sort the children by an indexable attribute value key. 
The children will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. :param v_type: The type of [view][ofrak.resource] to get the children as :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort :return: :raises NotFoundError: If a filter was provided and no resources match the provided filter """ return await self.get_descendants_as_view(v_type, 1, r_filter, r_sort) async def get_only_child(self, r_filter: ResourceFilter = None) -> "Resource": """ If a filter is provided, get the only child of this resource which matches the given filter. If a filter is not provided, gets the only child of this resource. :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one child matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple children """ return await self.get_only_descendant(1, r_filter) async def get_only_child_as_view(self, v_type: Type[RV], r_filter: ResourceFilter = None) -> RV: """ If a filter is provided, get the only child of this resource which matches the given filter. If a filter is not provided, gets the only child of this resource. The child will be returned as an instance of the given [viewable tag][ofrak.model.viewable_tag_model.ViewableResourceTag]. 
:param v_type: The type of [view][ofrak.resource] to get the child as :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :return: :raises NotFoundError: If a filter is provided and more or fewer than one child matches ``r_filter`` :raises NotFoundError: If a filter is not provided and this resource has multiple children """ return await self.get_only_descendant_as_view(v_type, 1, r_filter) async def delete(self): """ Delete this resource and all of its descendants. :return: """ self._component_context.resources_deleted.add(self._resource.id) for child_r in await self.get_children(): await child_r.delete() self._resource.is_modified = True self._resource.is_deleted = True async def flush_data_to_disk(self, path: str, pack: bool = True): """ Recursively repack the resource and write its data out to a file on disk. If this is a dataless resource, creates an empty file. :param path: Path to the file to write out to. The file is created if it does not exist. """ if pack is True: await self.pack_recursively() data = await self.get_data() if data is not None: with open(path, "wb") as f: f.write(data) else: # Create empty file with open(path, "wb") as f: pass def __repr__(self): properties = [ f"resource_id={self._resource.id.hex()}", f"tag=[{','.join([tag.__name__ for tag in self._resource.tags])}]", ] if self._resource.data_id: properties.append(f"data={self._resource.data_id.hex()}") return f"{type(self).__name__}(" + ", ".join(properties) + f")" async def summarize(self) -> str: """ Create a string summary of this resource, including specific tags, attribute types, and the data offsets of this resource in the parent and root (if applicable). Not that this is not a complete string representation of the resource: not all tags are included, and only the types of attributes are included, not their values. It is a summary which gives a high level overview of the resource. 
""" return await _default_summarize_resource(self) async def summarize_tree( self, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, indent: str = "", summarize_resource_callback: Optional[Callable[["Resource"], Awaitable[str]]] = None, ) -> str: """ Create a string summary of this resource and its (optionally filtered and/or sorted) descendants. The summaries of each resource are the same as the result of [summarize][ofrak.resource.Resource.summarize], organized into a tree structure. If a filter parameter is provided, it is applied recursively: the children of this resource will be filtered, then only those children matching the filter be displayed, and then the same filter will be applied to their children, etc. For example, :param r_filter: Contains parameters which resources must match to be returned, including any tags it must have and/or values of indexable attributes :param r_sort: Specifies which indexable attribute to use as the key to sort and the direction to sort """ SPACER_BLANK = " " SPACER_LINE = "───" if summarize_resource_callback is None: summarize_resource_callback = _default_summarize_resource children = cast( List[Resource], list(await self.get_children(r_filter=r_filter, r_sort=r_sort)) ) if children: if indent == "": tree_string = "┌" else: tree_string = "┬" else: tree_string = "─" tree_string += f"{await summarize_resource_callback(self)}\n" # All children but the last should display as a "fork" in the drop-down tree # After the last child, a vertical line should not be drawn as part of the indent # Both of those needs are handled here child_formatting: List[Tuple[str, str]] = [ ("├", indent + "│" + SPACER_BLANK) for _ in children[:-1] ] child_formatting.append(("└", indent + " " + SPACER_BLANK)) for child, (branch_symbol, child_indent) in zip(children, child_formatting): child_tree_string = await child.summarize_tree( r_filter=r_filter, r_sort=r_sort, indent=child_indent, summarize_resource_callback=summarize_resource_callback, 
) tree_string += f"{indent}{branch_symbol}{SPACER_LINE}{child_tree_string}" return tree_string async def make_children_helper( resource: Resource, entry_type: ResourceTag, entry_size: int, structure_index_type: Optional[Type[ResourceAttributes]], ) -> None: elf_section_size = await resource.get_data_length() create_child_tasks = [] for i, offset in enumerate(range(0, elf_section_size, entry_size)): if structure_index_type is not None: attrs: Tuple[ResourceAttributes, ...] = (structure_index_type(i),) # type: ignore else: attrs = () create_child_tasks.append( resource.create_child( tags=(entry_type,), data_range=Range.from_size(offset, entry_size), attributes=attrs, ) ) await asyncio.gather(*create_child_tasks)
null
15,332
import logging from itertools import tee from typing import List from ofrak import Modifier, Resource, OFRAKContext from ofrak.core import ( Elf, FreeSpace, ElfProgramHeaderModifier, ElfProgramHeaderModifierConfig, Allocatable, ElfProgramHeader, ElfProgramHeaderType, ElfUnpacker, ) from ofrak_type import Range async def get_load_program_headers(elf: Elf) -> List[ElfProgramHeader]: load_program_headers = list() for program_header in await elf.get_program_headers(): if program_header.p_type == ElfProgramHeaderType.LOAD.value: load_program_headers.append(program_header) return load_program_headers
null
15,333
import logging from itertools import tee from typing import List from ofrak import Modifier, Resource, OFRAKContext from ofrak.core import ( Elf, FreeSpace, ElfProgramHeaderModifier, ElfProgramHeaderModifierConfig, Allocatable, ElfProgramHeader, ElfProgramHeaderType, ElfUnpacker, ) from ofrak_type import Range The provided code snippet includes necessary dependencies for implementing the `_crop_range` function. Write a Python function `def _crop_range(range_1: Range, range_2: Range) -> Range` to solve the following problem: Split range_1 around range_2 and return the split range whose start is the same as range_1. Here is the function: def _crop_range(range_1: Range, range_2: Range) -> Range: """ Split range_1 around range_2 and return the split range whose start is the same as range_1. """ splits = range_1.split(range_2) for s in splits: if s.start == range_1.start: return s raise ValueError( f"Error removing intersection between {range_1} and {range_2} from {range_1}. " f"This should be unreachable." )
Split range_1 around range_2 and return the split range whose start is the same as range_1.
15,334
import logging from itertools import tee from typing import List from ofrak import Modifier, Resource, OFRAKContext from ofrak.core import ( Elf, FreeSpace, ElfProgramHeaderModifier, ElfProgramHeaderModifierConfig, Allocatable, ElfProgramHeader, ElfProgramHeaderType, ElfUnpacker, ) from ofrak_type import Range The provided code snippet includes necessary dependencies for implementing the `_pairwise` function. Write a Python function `def _pairwise(iterable)` to solve the following problem: A helper function waiting for itertools.pairwise from Python 3.10: https://docs.python.org/3/library/itertools.html#itertools.pairwise. Usage: `_pairwise('ABCDEFG') --> AB BC CD DE EF FG` Here is the function: def _pairwise(iterable): """ A helper function waiting for itertools.pairwise from Python 3.10: https://docs.python.org/3/library/itertools.html#itertools.pairwise. Usage: `_pairwise('ABCDEFG') --> AB BC CD DE EF FG` """ a, b = tee(iterable) next(b, None) return zip(a, b)
A helper function waiting for itertools.pairwise from Python 3.10: https://docs.python.org/3/library/itertools.html#itertools.pairwise. Usage: `_pairwise('ABCDEFG') --> AB BC CD DE EF FG`
15,335
from dataclasses import dataclass from typing import List, Tuple from ofrak_type import LinkableSymbolType, MemoryPermissions, InstructionSetMode from ofrak.core.label import LabeledAddress from ofrak_patch_maker.toolchain.model import Segment from ofrak_type.memory_permissions import MemoryPermissions class LinkableSymbolStubInfo: """ Container holding the information needed to create a stub for a LinkableSymbol. :var asm_prefixes: The lines to prefix an assembly stub for this symbol, usually describing what type of symbol this is and optionally the mode :var segments: Segments to extract from the stub object file. """ asm_prefixes: List[str] segments: Tuple[Segment, ...] def _make_rx_stub_info(name: str, vaddr: int, mode: InstructionSetMode) -> LinkableSymbolStubInfo: asm_prefixes = [".section .text", f".type {name}, %function"] if mode is InstructionSetMode.THUMB: asm_prefixes.append(".thumb") segments = ( # Executable stub goes in .text segment Segment( segment_name=".text", vm_address=vaddr, offset=0, is_entry=False, length=0, access_perms=MemoryPermissions.RX, ), # Null segments required for unused .data Segment( segment_name=".data", vm_address=0, offset=0, is_entry=False, length=0, access_perms=MemoryPermissions.RW, ), ) return LinkableSymbolStubInfo(asm_prefixes, segments)
null
15,336
from dataclasses import dataclass from typing import List, Tuple from ofrak_type import LinkableSymbolType, MemoryPermissions, InstructionSetMode from ofrak.core.label import LabeledAddress from ofrak_patch_maker.toolchain.model import Segment from ofrak_type.memory_permissions import MemoryPermissions class LinkableSymbolStubInfo: """ Container holding the information needed to create a stub for a LinkableSymbol. :var asm_prefixes: The lines to prefix an assembly stub for this symbol, usually describing what type of symbol this is and optionally the mode :var segments: Segments to extract from the stub object file. """ asm_prefixes: List[str] segments: Tuple[Segment, ...] def _make_r_stub_info(name: str, vaddr: int) -> LinkableSymbolStubInfo: asm_prefixes = [".section .rodata", f".type {name}, %object"] segments = ( # Read-only symbol goes in .rodata segment Segment( segment_name=".rodata", vm_address=vaddr, offset=0, is_entry=False, length=0, access_perms=MemoryPermissions.R, ), # Null segments required for unused .text and .data Segment( segment_name=".text", vm_address=0, offset=0, is_entry=False, length=0, access_perms=MemoryPermissions.RX, ), Segment( segment_name=".data", vm_address=0, offset=0, is_entry=False, length=0, access_perms=MemoryPermissions.RW, ), ) return LinkableSymbolStubInfo(asm_prefixes, segments)
null
15,337
from dataclasses import dataclass
from typing import List, Tuple

from ofrak_type import LinkableSymbolType, MemoryPermissions, InstructionSetMode
from ofrak.core.label import LabeledAddress
from ofrak_patch_maker.toolchain.model import Segment
from ofrak_type.memory_permissions import MemoryPermissions


# Fix: the @dataclass decorator was missing. Without it the class has no
# generated __init__, so LinkableSymbolStubInfo(asm_prefixes, segments)
# below raises TypeError.
@dataclass
class LinkableSymbolStubInfo:
    """
    Container holding the information needed to create a stub for a LinkableSymbol.

    :var asm_prefixes: The lines to prefix an assembly stub for this symbol, usually describing
    what type of symbol this is and optionally the mode
    :var segments: Segments to extract from the stub object file.
    """

    asm_prefixes: List[str]
    segments: Tuple[Segment, ...]


def _make_rw_stub_info(name: str, vaddr: int) -> LinkableSymbolStubInfo:
    """
    Build the stub info for a read-write data symbol.

    :param name: Symbol name used in the generated assembly directives
    :param vaddr: Virtual address the stub's .data segment maps to

    :return: Stub info placing the symbol in .data, plus a null .text segment
        for the unused section
    """
    asm_prefixes = [".section .data", f".type {name}, %object"]
    segments = (
        # Read-write symbol goes in .data segment
        Segment(
            segment_name=".data",
            vm_address=vaddr,
            offset=0,
            is_entry=False,
            length=0,
            access_perms=MemoryPermissions.RW,
        ),
        # Null segment required for unused .text
        Segment(
            segment_name=".text",
            vm_address=0,
            offset=0,
            is_entry=False,
            length=0,
            access_perms=MemoryPermissions.RX,
        ),
    )
    return LinkableSymbolStubInfo(asm_prefixes, segments)
null
15,338
import os.path from typing import Optional from ofrak import OFRAKContext, Resource import argparse from pathlib import Path import time import sys from ofrak.cli.ofrak_cli import OfrakCommandRunsScript from ofrak.core import FilesystemEntry from ofrak.gui.server import open_gui class UnpackCommand(OfrakCommandRunsScript): def __init__(self): self._filename_trackers = dict() self._resource_paths = dict() def create_parser(self, parser: argparse._SubParsersAction): subparser = parser.add_parser( "unpack", help="Unpack all identified structures that can be unpacked with OFRAK", description="Import a file as an OFRAK resource, then identifies and unpacks it. The " "resource's children are written to the output directory as individual " "files. Children which have no data are not written as files. A file `__ofrak_info__` " "is also written to the output directory, containing the known OFRAK tags and " "attributes for each descendant.", ) subparser.add_argument( "-o", "--output_directory", help="Directory to write unpacked resource tree to. 
If no directory is given, a new one" " will be created in the same directory as the file being unpacked.", ) subparser.add_argument( "--recursive", "-r", action="store_true", default=False, help="Unpack recursively: all resources unpacked from the root will be unpack, as " "well as all resources unpacked from those, and so.", ) subparser.add_argument("filename", help="File to unpack") # GUI args subparser.add_argument( "--gui", action="store_true", help="Open the OFRAK GUI after unpacking", default=False, ) subparser.add_argument( "-gH", "--gui-hostname", action="store", help="Set GUI server host address.", default="127.0.0.1", ) subparser.add_argument( "-gp", "--gui-port", action="store", type=int, help="Set GUI server host port.", default=8080, ) subparser.add_argument( "--gui-no-browser", action="store_true", help="Don't open the browser to the OFRAK GUI", ) self.add_ofrak_arguments(subparser) return subparser async def ofrak_func(self, ofrak_context: OFRAKContext, args: argparse.Namespace): print(f"Unpacking file: {args.filename}\n") root_resource = await ofrak_context.create_root_resource_from_file(args.filename) if args.recursive: await root_resource.unpack_recursively() else: await root_resource.unpack() if args.output_directory: extraction_dir = Path(args.output_directory) else: file_path = Path(args.filename) parent_dir = file_path.parent extraction_dir = Path( parent_dir / f'{file_path.name}_extracted_{time.strftime("%Y%m%d%H%M%S")}' ) if extraction_dir.exists(): if any(extraction_dir.iterdir()): print(f"Found files in {extraction_dir}, ABORTING!", file=sys.stderr) return else: extraction_dir.mkdir() print(f"Extracting data to {extraction_dir}") root_resource_path = os.path.join( extraction_dir, await self.get_filesystem_name(root_resource), ) info_dump_path = os.path.join(extraction_dir, "__ofrak_info__") await self.resource_tree_to_files(root_resource, root_resource_path) info_dump = await root_resource.summarize_tree( summarize_resource_callback=lambda 
resource: _custom_summarize_resource(resource, self) ) # Some characters in filename bytestrings are no valid unicode, can't be printed, must be replaced # https://stackoverflow.com/questions/27366479/python-3-os-walk-file-paths-unicodeencodeerror-utf-8-codec-cant-encode-s info_dump = info_dump.encode("utf-8", "replace").decode( "utf-8", ) with open(info_dump_path, "w", encoding="utf-8") as f: f.write(info_dump) print(info_dump) if args.gui: server = await open_gui( args.gui_hostname, args.gui_port, focus_resource=root_resource, open_in_browser=(not args.gui_no_browser), ) await server.run_until_cancelled() async def resource_tree_to_files(self, resource: Resource, path): children_dir = path + ".ofrak_children" for child_resource in await resource.get_children(): filename = await self.get_filesystem_name(child_resource) if not os.path.exists(children_dir): os.mkdir(children_dir) child_path = os.path.join(children_dir, filename) await self.resource_tree_to_files(child_resource, child_path) if resource.get_data_id() is None: return data = await resource.get_data() if len(data) == 0: return with open(path, "wb") as f: f.write(data) self._resource_paths[resource.get_id()] = path async def get_filesystem_name(self, resource: Resource) -> str: if resource.has_tag(FilesystemEntry): file_view = await resource.view_as(FilesystemEntry) filename = file_view.name else: filename = resource.get_caption() parent_id = resource.get_model().parent_id filesystem_name_key = (parent_id, filename) if filesystem_name_key in self._filename_trackers: name_suffixes = self._filename_trackers[filesystem_name_key] else: name_suffixes = {resource.get_id(): ""} self._filename_trackers[filesystem_name_key] = name_suffixes return filename if resource.get_id() in name_suffixes: return filename + name_suffixes[resource.get_id()] else: suffix = f"_{len(name_suffixes)}" name_suffixes[resource.get_id()] = suffix return filename + suffix def get_path(self, resource: Resource) -> Optional[str]: return 
self._resource_paths.get(resource.get_id()) async def _custom_summarize_resource(resource: Resource, unpack_cmd: UnpackCommand) -> str: attributes_info = ", ".join(attrs_type.__name__ for attrs_type in resource._resource.attributes) name = await unpack_cmd.get_filesystem_name(resource) if " " in name: name = f"'{name}'" if resource._resource.data_id: data_info = f", size={await resource.get_data_length()} bytes" else: data_info = ", no data" path = unpack_cmd.get_path(resource) if path is None: path_info = ", (not written)" else: path_info = f", extracted-path={unpack_cmd.get_path(resource)}" return f"{name}: [attributes=({attributes_info}){data_info}{path_info}]"
null
15,339
from argparse import Namespace
from typing import Iterable, Set

from ofrak.cli.ofrak_cli import OfrakCommand, OFRAKEnvironment


def _print_lines_without_duplicates(output_lines: Iterable[str]):
    """
    Print each line, skipping duplicates within a run of equally-indented lines.

    The duplicate memory is cleared whenever the indentation level (position of
    the last tab character) changes, so identical lines under different parents
    are still printed.
    """
    current_indent = 0
    printed: Set[str] = set()
    for entry in output_lines:
        entry_indent = entry.rfind("\t") + 1
        if entry_indent != current_indent:
            # Indentation changed: forget duplicates seen at the previous level
            printed = set()
            current_indent = entry_indent
        if entry in printed:
            continue
        print(entry)
        printed.add(entry)
null
15,340
from collections import defaultdict
from typing import Any, Generic, List, Set, Tuple, cast, Callable
from typing import Dict, Optional

from sortedcontainers import SortedList
from typing_inspect import get_origin

from ofrak import ResourceTag
from ofrak.model.resource_model import ResourceIndexedAttribute
from ofrak.service.resource_service import (
    ResourceNode,
    ResourceAttributeIndex,
    AttributeIndexDict,
    T,
)
from ofrak.service.resource_service import ResourceService
from ofrak.service.serialization.pjson_types import PJSONType
from ofrak.service.serialization.serializers.serializer_i import SerializerInterface


# Fix: the class subscripts Generic[T] but Generic was never imported,
# which raised NameError at import time. Generic is now added to the
# existing typing import above.
class ResourceAttributeIndex(Generic[T]):
    """
    Sorted index of one indexable attribute over a set of resources.

    Maintains a SortedList of (value, resource node) pairs for ordered queries,
    plus a mapping from resource ID to the value currently indexed for that
    resource so entries can be found and removed by resource.
    """

    def __init__(self, attribute: ResourceIndexedAttribute[T]):
        self._attribute: ResourceIndexedAttribute[T] = attribute
        self.index: SortedList = SortedList()
        self.values_by_node_id: Dict[bytes, Any] = dict()

    def add_resource_attribute(
        self,
        value: T,
        resource: ResourceNode,
    ):
        """
        Record ``value`` for ``resource`` in the index.

        Adding the same resource with the same value is a no-op.

        :raises ValueError: if the resource is already indexed with a different value
        """
        if resource.model.id in self.values_by_node_id:
            if self.values_by_node_id[resource.model.id] != value:
                raise ValueError(
                    f"The provided resource {resource.model.id.hex()} is already in the "
                    f"index for {self._attribute.__name__} with a different value!"
                )
            else:
                return
        self.index.add((value, resource))
        self.values_by_node_id[resource.model.id] = value

    def remove_resource_attribute(
        self,
        resource: ResourceNode,
    ):
        """Remove the resource's entry from the index; no-op if it is absent."""
        if resource.model.id not in self.values_by_node_id:
            return
        value = self.values_by_node_id[resource.model.id]
        self.index.remove((value, resource))
        del self.values_by_node_id[resource.model.id]


def _is_generic_resource_attribute_index(type_hint):
    """True if the hint is the parameterized ``ResourceAttributeIndex[...]`` form."""
    return get_origin(type_hint) == ResourceAttributeIndex
null
15,341
import dataclasses
import inspect
from dataclasses import is_dataclass, fields
from enum import Enum
from typing import Any, Dict, Type, cast, Tuple, get_origin

from ofrak.service.serialization.pjson_types import PJSONType
from ofrak.service.serialization.serializers.enum_serializer import is_enum
from ofrak.service.serialization.serializers.serializer_i import SerializerInterface
from ofrak.service.serialization.serializers.type_serializer import is_metaclass


# Fix: is_enum referenced Enum and is_metaclass referenced get_origin, but
# neither name was imported in this module, so calling either raised
# NameError. Enum comes from the stdlib enum module and get_origin from
# typing (stdlib, Python 3.8+); for Type[X], typing.get_origin returns
# ``type``, matching the check below.


def is_enum(type_hint):
    """True if the hint is ``Enum`` itself or any Enum subclass."""
    return type_hint == Enum or (inspect.isclass(type_hint) and issubclass(type_hint, Enum))


def is_metaclass(type_hint):
    """Will recognize Type, Type[X], and metaclasses"""
    return any(
        [
            type_hint == Type,
            get_origin(type_hint) == type,
            inspect.isclass(type_hint) and issubclass(type_hint, type),
        ]
    )


def _is_regular_class_instance(type_hint):
    """True for plain classes that are neither enums nor metaclass-like hints."""
    return inspect.isclass(type_hint) and not is_enum(type_hint) and not is_metaclass(type_hint)
null
15,342
from typing import Any, Type, Dict from typing_inspect import get_origin from ofrak.model.resource_model import ResourceIndexedAttribute, ResourceAttributes from ofrak.service.serialization.pjson_types import PJSONType from ofrak.service.serialization.serializers.serializer_i import SerializerInterface class ResourceIndexedAttribute(Generic[X]): """ Descriptor class for values in resource attributes which can be indexed. When a field `Foo` of a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] type `A` is indexed, it is possible to include an `r_filter` or `r_sort` in a query to the resource service which filters the returned resource by the value of `foo` each of them have. This class should not be explicitly instantiated, instead created using the @index decorator. For example: ```python class A(ResourceAttributes): x: int def Foo(self) -> int: return self.x ``` """ def __init__( self, getter_func: Callable[[Any], X], uses_indexes: Iterable["ResourceIndexedAttribute"] = (), ): """ :param getter_func: Getter function for the property :param uses_indexes: Additional index types that are required to calculate the value of this index :raises TypeError: if the getter function does not have a return type annotation :raises TypeError: if the getter does not return an indexable type """ _validate_indexed_type(getter_func) self.fget: Callable[[Any], X] = getter_func self.attributes_owner: Optional[Type[ResourceAttributes]] = None self.uses_indexes = uses_indexes self.used_by_indexes: List["ResourceIndexedAttribute"] = [] self.index_name: str = getter_func.__name__ for other_index in self.uses_indexes: other_index.used_by_indexes.append(self) def __set_name__(self, owner, name): self.attributes_owner = owner self.__name__ = f"{owner.__name__}.{name}" self.index_name = name def __get__(self, instance: None, owner: type) -> "ResourceIndexedAttribute[X]": """ Applicable when getting the ResourceIndexedAttribute of a class, not an instance. 
Example (continued building off of example from class docstring): A.X # Returns a ResourceIndexedAttribute[int] """ ... def __get__(self, instance: Any, owner: type) -> X: """ Applicable when getting the ResourceIndexedAttribute of an instance Example (continued building off of example from class docstring): a = A(10) a.X # Returns 10 """ ... def __get__( self, instance: Optional[Any], owner: type ) -> Union["ResourceIndexedAttribute[X]", X]: if instance is None: return self else: return self.fget(instance) def __getattr__(self, name) -> Any: ... def __set__(self, instance, value): raise ValueError("Cannot set value of indexed attributes") def get_value( self, index_holder: "ResourceModel", ) -> Optional[X]: if self.attributes_owner is None: raise TypeError( f"Cannot get index value for {self.__name__} of model " f"{index_holder.id.hex()} because {self.__name__}'s owner has not " f"been set. This cannot happen unless `get_index` has somehow been " f"called during class creation, before the owner is set." ) else: attributes = index_holder.get_attributes(self.attributes_owner) if attributes is None: return None elif self.uses_indexes: # Create new copy of attributes to inject index values into attributes_plus_required_indexes = dataclasses.replace(attributes) for nested_index in self.uses_indexes: val = nested_index.get_value(index_holder) if val is None: # Not all of the nested indexes are available, can't calculate index val. return None object.__setattr__( attributes_plus_required_indexes, nested_index.index_name, val ) return self.fget(attributes_plus_required_indexes) else: return self.fget(attributes) def __repr__(self) -> str: return self.__name__ def _is_generic_resource_indexed_attribute(type_hint): return get_origin(type_hint) == ResourceIndexedAttribute
null
15,343
from collections import defaultdict from typing import Any, Dict, cast from itertools import product from ofrak.service.serialization.pjson import PJSONSerializationService from ofrak.service.serialization.pjson_types import PJSONType from ofrak.service.serialization.service_i import SerializationServiceInterface The provided code snippet includes necessary dependencies for implementing the `_short_string_generator` function. Write a Python function `def _short_string_generator()` to solve the following problem: Generator of short JSON-compatible strings of increasing size. Here is the function: def _short_string_generator(): """Generator of short JSON-compatible strings of increasing size.""" disallowed_characters = [0x22, 0x5C, 0x2F, 0x62, 0x66, 0x6E, 0x72, 0x74, 0x75] alphabet = [chr(i) for i in range(0x20, 0x7E) if i not in disallowed_characters] size = 1 while True: for letters in product(*([alphabet] * size)): yield "".join(letters) size += 1
Generator of short JSON-compatible strings of increasing size.
15,344
import asyncio import logging from collections import defaultdict from dataclasses import dataclass from functools import lru_cache from typing import ( Awaitable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union, cast, Any, ) from ofrak.component.unpacker import Unpacker from ofrak.component.analyzer import Analyzer from ofrak.component.identifier import Identifier from ofrak.component.interface import ComponentInterface from ofrak.component.packer import Packer from ofrak.model.component_model import CC, ComponentRunResult from ofrak.model.job_model import ( JobModel, JobRunContext, JobRunContextFactory, ) from ofrak.model.job_request_model import ( JobAnalyzerRequest, JobComponentRequest, JobMultiComponentRequest, ) from ofrak.model.resource_model import EphemeralResourceContextFactory from ofrak.model.tag_model import ResourceTag from ofrak.model.viewable_tag_model import ResourceViewContext from ofrak.service.component_locator_i import ( ComponentLocatorInterface, ComponentFilter, ) from ofrak.model.component_filters import ( ComponentTargetFilter, AnalyzerOutputFilter, ComponentOrMetaFilter, ComponentAndMetaFilter, ComponentPrioritySelectingMetaFilter, ComponentNotMetaFilter, ComponentWhitelistFilter, ComponentTypeFilter, ) from ofrak.service.job_service_i import ( JobServiceInterface, ComponentAutoRunFailure, ) from ofrak.service.resource_service_i import ( ResourceServiceInterface, ResourceFilter, ResourceFilterCondition, ) from ofrak_type.error import NotFoundError LOGGER = logging.getLogger(__name__) class ComponentInterface(Generic[CC], ABC): """ A component is responsible for modifying or create firmware resources. They are categorized as identifiers, unpackers, analyzers, modifiers and packers. 
""" version: Optional[int] = None id: Optional[bytes] = None def get_default_config(cls) -> Optional[CC]: raise NotImplementedError() def get_version(self) -> int: raise NotImplementedError() def targets(self) -> Tuple[ResourceTag, ...]: raise NotImplementedError() def external_dependencies(self) -> Tuple[ComponentExternalTool, ...]: raise NotImplementedError() async def run( self, job_id: bytes, resource_id: bytes, job_context: JobRunContext, resource_context: ResourceContext, resource_view_context: ResourceViewContext, config: CC, ) -> ComponentRunResult: """ :param job_id: :param resource_id: :param job_context: :param resource_context: :param resource_view_context: :param config: :return: The IDs of all resources modified by this component """ raise NotImplementedError() def get_id(cls) -> bytes: raise NotImplementedError() class ComponentRunResult: """ Dataclass created after one or more components complete, holding high-level information about what resources were affected by a component or components """ components_run: Set[bytes] = field(default_factory=set) resources_modified: Set[bytes] = field(default_factory=set) resources_deleted: Set[bytes] = field(default_factory=set) resources_created: Set[bytes] = field(default_factory=set) def update(self, other_results: "ComponentRunResult"): self.components_run.update(other_results.components_run) self.resources_modified.update(other_results.resources_modified) self.resources_created.update(other_results.resources_created) self.resources_deleted.update(other_results.resources_deleted) def _log_component_run_result_info( job_id: bytes, resource_id: bytes, component: ComponentInterface, component_result: ComponentRunResult, max_ids_to_log: int = 12, ): if LOGGER.getEffectiveLevel() > logging.INFO: return def truncate_id_seq(id_seq) -> Iterable[str]: for n, r_id in enumerate(id_seq): if n > max_ids_to_log: yield f" ... 
({len(id_seq) - n} more)" return yield r_id.hex() logging_component_results = [] if component_result.resources_modified: logging_component_results.append( f"Modified resources: {','.join(truncate_id_seq(component_result.resources_modified))}" ) if component_result.resources_created: logging_component_results.append( f"Created resources: {','.join(truncate_id_seq(component_result.resources_created))}" ) if component_result.resources_deleted: logging_component_results.append( f"Deleted resources: {','.join(truncate_id_seq(component_result.resources_deleted))}" ) if logging_component_results: logging_component_results_str = "\n\t" + ("\n\t".join(logging_component_results)) else: logging_component_results_str = "" LOGGER.info( f"JOB {job_id.hex()} - Finished running {component.get_id().decode()} on " f"{resource_id.hex()}:{logging_component_results_str}" )
null
15,345
import asyncio import logging from collections import defaultdict from dataclasses import dataclass from functools import lru_cache from typing import ( Awaitable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union, cast, Any, ) from ofrak.component.unpacker import Unpacker from ofrak.component.analyzer import Analyzer from ofrak.component.identifier import Identifier from ofrak.component.interface import ComponentInterface from ofrak.component.packer import Packer from ofrak.model.component_model import CC, ComponentRunResult from ofrak.model.job_model import ( JobModel, JobRunContext, JobRunContextFactory, ) from ofrak.model.job_request_model import ( JobAnalyzerRequest, JobComponentRequest, JobMultiComponentRequest, ) from ofrak.model.resource_model import EphemeralResourceContextFactory from ofrak.model.tag_model import ResourceTag from ofrak.model.viewable_tag_model import ResourceViewContext from ofrak.service.component_locator_i import ( ComponentLocatorInterface, ComponentFilter, ) from ofrak.model.component_filters import ( ComponentTargetFilter, AnalyzerOutputFilter, ComponentOrMetaFilter, ComponentAndMetaFilter, ComponentPrioritySelectingMetaFilter, ComponentNotMetaFilter, ComponentWhitelistFilter, ComponentTypeFilter, ) from ofrak.service.job_service_i import ( JobServiceInterface, ComponentAutoRunFailure, ) from ofrak.service.resource_service_i import ( ResourceServiceInterface, ResourceFilter, ResourceFilterCondition, ) from ofrak_type.error import NotFoundError IDENTIFIERS_FILTER = ComponentTypeFilter(Identifier) class ResourceTag(type): all_tags: Set["ResourceTag"] = set() def __init__(cls, *args, **kwargs): super().__init__(*args, **kwargs) specificity = 0 for base in cls.base_tags(): specificity = max(specificity, base.tag_specificity()) cls._specificity: int = specificity + 1 ResourceTag.all_tags.add(cls) def tag_specificity(cls) -> int: """ Indicates how specific an abstraction this tag is. 
:return: The number of classes in the inheritance hierarchy between this class and Resource """ return cls._specificity def tag_classes(cls) -> Set["ResourceTag"]: """ :return: Set of parent classes (including itself) that are a subclass of a _ResourceTag but not the _ResourceTag class. """ parents = set() parents.add(cls) for base in cls.base_tags(): parents.update(base.tag_classes()) return parents def base_tags(cls) -> Set["ResourceTag"]: """ :return: All _ResourceTags which this tag inherits from directly (does not traverse all ancestors) """ base_tags = set() for base in cls.__bases__: if base is not cls and isinstance(base, ResourceTag) and base.tag_specificity() > 0: base_tags.add(base) return base_tags def sort_tags_into_tiers( tags: "Iterable[ResourceTag]", ) -> "Tuple[Tuple[ResourceTag, ...], ...]": """ Sort the given tags by specificity, and group all the ties together. :param tags: Tags to sort and group :return: Tuple of groups of tags with the same specificity, sorting all of these by the specificity value each group represents from least to greatest. """ levels: List[List[ResourceTag]] = [[], [], [], [], [], [], [], [], [], []] for t in tags: spec = t.tag_specificity() if spec > len(levels): levels.extend([] for _ in range(spec - len(levels))) levels[spec].append(t) return tuple(tuple(level) for level in reversed(levels) if len(level) > 0) def caption(cls, attributes) -> str: # pragma: no cover return str(cls.__name__) def __repr__(cls): return cls.__name__ class ComponentFilter(ABC): def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: """ Filter out components according to the rules this component enforces. :param components: Components to filter :return: All components which this filter allows """ raise NotImplementedError() class ComponentTargetFilter(ComponentFilter): """ Only allow components which target at least one of the tags in a set. 
The tags must be strictly equal, that is, super/subclasses of the tags are not checked. """ tags: Tuple[ResourceTag, ...] def __init__(self, *tags: ResourceTag): object.__setattr__(self, "tags", tags) def __repr__(self) -> str: return f"ComponentTargetFilter({', '.join(t.__name__ for t in self.tags)})" def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: return {c for c in components if any(t in c.targets for t in self.tags)} class ComponentOrMetaFilter(ComponentFilter): """ Only allow components which match any one of multiple filters. If there are no filters, no components will be filtered out. """ filters: Tuple[ComponentFilter, ...] def __init__(self, *filters: ComponentFilter): object.__setattr__(self, "filters", filters) def __repr__(self) -> str: return f"({' or '.join(f.__repr__() for f in self.filters)})" def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: if 0 == len(self.filters): return components else: return set(itertools.chain(*(f.filter(components) for f in self.filters))) class ComponentAndMetaFilter(ComponentFilter): """ Only allow components which match all of multiple filters. If there are no filters, all components will be filtered out. """ filters: Tuple[ComponentFilter, ...] def __init__(self, *filters: ComponentFilter): object.__setattr__(self, "filters", filters) def __repr__(self) -> str: return f"({' and '.join(f.__repr__() for f in self.filters)})" def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: if 0 == len(self.filters): return set() for component_filter in self.filters: components = component_filter.filter(components) if not components: break return components class ComponentPrioritySelectingMetaFilter(ComponentFilter): """ Selects exactly one filter to apply from a prioritized list of component filters. Only the first filter which allows more than zero components is applied. 
If no filters allow any components through, then this filter passes no components. For example, if the filters are: - filter 1: tag matches A, B, or C - filter 2: tag matches C, D, or E - filter 3: tag matches E, F, or G and the components under consideration are one targeting (E) and one targeting (F), then only the component targeting (E) passes this meta-filter. This is because: - filter 1 would filter out all components under consideration, so it's ignored; - filter 2 would allow one or more components (namely, the component targeting (E)), so this meta-filter then behaves like just filter 2 in this prioritized list. """ filters: Tuple[ComponentFilter, ...] def __init__(self, *filters: ComponentFilter): object.__setattr__(self, "filters", filters) def __repr__(self) -> str: return f"({' then '.join(f.__repr__() for f in self.filters)})" def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: for f in self.filters: components_passing_f = f.filter(components) if components_passing_f: return components_passing_f return set() class ComponentNotMetaFilter(ComponentFilter): """ Invert the result of a child filter, that is, filter out components that would pass it and pass components which the child filter would filter out. """ child_filter: ComponentFilter def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: return components.difference(self.child_filter.filter(components)) def __repr__(self) -> str: return f"not {self.child_filter}" The provided code snippet includes necessary dependencies for implementing the `_build_tag_filter` function. Write a Python function `def _build_tag_filter(tags: Tuple[ResourceTag]) -> ComponentFilter` to solve the following problem: When auto-running components, most of the time only the *most specific* components should be run for a resource. 
For example, an APK resource is also a ZIP resource; we want to always run the APK Unpacker on resources that are tagged as both ZIP and APK, because APK is a more specific tag. However, Identifiers are a special case because they have benign side-effects, so it is desirable to greedily run all Identifiers that could target a resource, not only the most specific Identifiers. This function constructs a filter which allows only components that target at least one of the given tags, but for non-identifiers the filter is even stricter so that only the most specific components are filtered. :param tags: Tags to target, from the resource that is being auto-run on :return: A filter which allows a component to run if (it is an Identifier, AND it targets at least one of the given tags) OR (it is NOT an Identifier, AND it targets one of the most specific given tags that are targeted by components) Here is the function: def _build_tag_filter(tags: Tuple[ResourceTag]) -> ComponentFilter: """ When auto-running components, most of the time only the *most specific* components should be run for a resource. For example, an APK resource is also a ZIP resource; we want to always run the APK Unpacker on resources that are tagged as both ZIP and APK, because APK is a more specific tag. However, Identifiers are a special case because they have benign side-effects, so it is desirable to greedily run all Identifiers that could target a resource, not only the most specific Identifiers. This function constructs a filter which allows only components that target at least one of the given tags, but for non-identifiers the filter is even stricter so that only the most specific components are filtered. 
:param tags: Tags to target, from the resource that is being auto-run on :return: A filter which allows a component to run if (it is an Identifier, AND it targets at least one of the given tags) OR (it is NOT an Identifier, AND it targets one of the most specific given tags that are targeted by components) """ tags_by_specificity = ResourceTag.sort_tags_into_tiers(tags) filters_prioritized_by_specificity = tuple( ComponentTargetFilter(*tag_specificity_level) for tag_specificity_level in tags_by_specificity ) return ComponentOrMetaFilter( ComponentAndMetaFilter( IDENTIFIERS_FILTER, ComponentTargetFilter(*tags), ), ComponentAndMetaFilter( ComponentNotMetaFilter( IDENTIFIERS_FILTER, ), ComponentPrioritySelectingMetaFilter(*filters_prioritized_by_specificity), ), )
When auto-running components, most of the time only the *most specific* components should be run for a resource. For example, an APK resource is also a ZIP resource; we want to always run the APK Unpacker on resources that are tagged as both ZIP and APK, because APK is a more specific tag. However, Identifiers are a special case because they have benign side-effects, so it is desirable to greedily run all Identifiers that could target a resource, not only the most specific Identifiers. This function constructs a filter which allows only components that target at least one of the given tags, but for non-identifiers the filter is even stricter so that only the most specific components are filtered. :param tags: Tags to target, from the resource that is being auto-run on :return: A filter which allows a component to run if (it is an Identifier, AND it targets at least one of the given tags) OR (it is NOT an Identifier, AND it targets one of the most specific given tags that are targeted by components)
15,346
import asyncio import logging from collections import defaultdict from dataclasses import dataclass from functools import lru_cache from typing import ( Awaitable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar, Union, cast, Any, ) from ofrak.component.unpacker import Unpacker from ofrak.component.analyzer import Analyzer from ofrak.component.identifier import Identifier from ofrak.component.interface import ComponentInterface from ofrak.component.packer import Packer from ofrak.model.component_model import CC, ComponentRunResult from ofrak.model.job_model import ( JobModel, JobRunContext, JobRunContextFactory, ) from ofrak.model.job_request_model import ( JobAnalyzerRequest, JobComponentRequest, JobMultiComponentRequest, ) from ofrak.model.resource_model import EphemeralResourceContextFactory from ofrak.model.tag_model import ResourceTag from ofrak.model.viewable_tag_model import ResourceViewContext from ofrak.service.component_locator_i import ( ComponentLocatorInterface, ComponentFilter, ) from ofrak.model.component_filters import ( ComponentTargetFilter, AnalyzerOutputFilter, ComponentOrMetaFilter, ComponentAndMetaFilter, ComponentPrioritySelectingMetaFilter, ComponentNotMetaFilter, ComponentWhitelistFilter, ComponentTypeFilter, ) from ofrak.service.job_service_i import ( JobServiceInterface, ComponentAutoRunFailure, ) from ofrak.service.resource_service_i import ( ResourceServiceInterface, ResourceFilter, ResourceFilterCondition, ) from ofrak_type.error import NotFoundError ANALYZERS_FILTER = ComponentTypeFilter(Analyzer) IDENTIFIERS_FILTER = ComponentTypeFilter(Identifier) UNPACKERS_FILTER = ComponentTypeFilter(Unpacker) PACKERS_FILTER = ComponentTypeFilter(Packer) class JobMultiComponentRequest: job_id: bytes resource_id: bytes components_allowed: Tuple[bytes, ...] = () components_disallowed: Tuple[bytes, ...] = () tags_ignored: Tuple[ResourceTag, ...] 
= () all_unpackers: bool = False all_identifiers: bool = False all_analyzers: bool = False all_packers: bool = False class ComponentFilter(ABC): def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: """ Filter out components according to the rules this component enforces. :param components: Components to filter :return: All components which this filter allows """ raise NotImplementedError() class ComponentWhitelistFilter(ComponentFilter): """ Only allow components which belong to a specific set to be run. """ whitelisted_component_ids: FrozenSet[bytes] def __init__(self, *whitelisted_component_ids): object.__setattr__(self, "whitelisted_component_ids", frozenset(whitelisted_component_ids)) def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: return {c for c in components if c.get_id() in self.whitelisted_component_ids} class ComponentTargetFilter(ComponentFilter): """ Only allow components which target at least one of the tags in a set. The tags must be strictly equal, that is, super/subclasses of the tags are not checked. """ tags: Tuple[ResourceTag, ...] def __init__(self, *tags: ResourceTag): object.__setattr__(self, "tags", tags) def __repr__(self) -> str: return f"ComponentTargetFilter({', '.join(t.__name__ for t in self.tags)})" def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: return {c for c in components if any(t in c.targets for t in self.tags)} class ComponentOrMetaFilter(ComponentFilter): """ Only allow components which match any one of multiple filters. If there are no filters, no components will be filtered out. """ filters: Tuple[ComponentFilter, ...] 
def __init__(self, *filters: ComponentFilter): object.__setattr__(self, "filters", filters) def __repr__(self) -> str: return f"({' or '.join(f.__repr__() for f in self.filters)})" def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: if 0 == len(self.filters): return components else: return set(itertools.chain(*(f.filter(components) for f in self.filters))) class ComponentAndMetaFilter(ComponentFilter): """ Only allow components which match all of multiple filters. If there are no filters, all components will be filtered out. """ filters: Tuple[ComponentFilter, ...] def __init__(self, *filters: ComponentFilter): object.__setattr__(self, "filters", filters) def __repr__(self) -> str: return f"({' and '.join(f.__repr__() for f in self.filters)})" def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: if 0 == len(self.filters): return set() for component_filter in self.filters: components = component_filter.filter(components) if not components: break return components class ComponentNotMetaFilter(ComponentFilter): """ Invert the result of a child filter, that is, filter out components that would pass it and pass components which the child filter would filter out. 
""" child_filter: ComponentFilter def filter(self, components: Set[ComponentInterface]) -> Set[ComponentInterface]: return components.difference(self.child_filter.filter(components)) def __repr__(self) -> str: return f"not {self.child_filter}" def _build_auto_run_filter( request: JobMultiComponentRequest, ) -> ComponentFilter: filters: List[ComponentFilter] = [] if request.components_allowed: filters.append(ComponentWhitelistFilter(*request.components_allowed)) type_filters = [] if request.all_unpackers: type_filters.append(UNPACKERS_FILTER) if request.all_identifiers: type_filters.append(IDENTIFIERS_FILTER) if request.all_analyzers: type_filters.append(ANALYZERS_FILTER) if request.all_packers: type_filters.append(PACKERS_FILTER) filters.append(ComponentOrMetaFilter(*type_filters)) if request.components_disallowed: filters.append( ComponentNotMetaFilter(ComponentWhitelistFilter(*request.components_disallowed)) ) if request.tags_ignored: filters.append(ComponentNotMetaFilter(ComponentTargetFilter(*request.tags_ignored))) return ComponentAndMetaFilter(*filters)
null
15,347
import asyncio import binascii import dataclasses import re from enum import Enum import functools import itertools import logging from ofrak.project.project import OfrakProject import typing_inspect from typing_inspect import get_args import json import orjson import inspect import os import sys import webbrowser from collections import defaultdict from typing import ( Iterable, Optional, Dict, cast, Set, Tuple, Union, Type, Callable, TypeVar, Any, List, ) from aiohttp import web from aiohttp.web_exceptions import HTTPBadRequest from aiohttp.web_request import Request from aiohttp.web_response import Response from aiohttp.web_fileresponse import FileResponse from dataclasses import fields from ofrak.component.interface import ComponentInterface from ofrak.model.component_filters import ( ComponentOrMetaFilter, ComponentTypeFilter, ComponentTargetFilter, ComponentAndMetaFilter, ) from ofrak.ofrak_context import get_current_ofrak_context from ofrak.service.component_locator_i import ComponentFilter from ofrak_patch_maker.toolchain.abstract import Toolchain from ofrak_type.error import NotFoundError from ofrak_type.range import Range from ofrak import ( OFRAKContext, ResourceFilter, ResourceAttributeRangeFilter, ResourceAttributeValueFilter, ResourceSort, ResourceTag, Packer, Unpacker, Modifier, Analyzer, ) from ofrak.core import Addressable, File from ofrak.core import ( GenericBinary, AddCommentModifier, AddCommentModifierConfig, DeleteCommentModifierConfig, DeleteCommentModifier, StringFindReplaceConfig, StringFindReplaceModifier, ) from ofrak.model.component_model import ( ComponentContext, ClientComponentContext, ComponentRunResult, ComponentConfig, ) from ofrak.model.resource_model import ( ResourceContext, ClientResourceContext, ResourceModel, ResourceAttributes, MutableResourceModel, ) from ofrak.model.viewable_tag_model import ResourceViewContext from ofrak.resource import Resource from ofrak.service.error import SerializedError from 
ofrak.service.serialization.pjson import ( SerializationServiceInterface, PJSONSerializationService, ) from ofrak.gui.script_builder import ActionType, ScriptBuilder from ofrak.service.serialization.pjson_types import PJSONType from ofrak.core.entropy import DataSummaryAnalyzer LOGGER = logging.getLogger(__name__) def respond_with_error(error: Exception, error_cls: Type[SerializedError]) -> Response: if isinstance(error, error_cls): text = error.to_json() else: text = json.dumps(error_cls.to_dict(error)) response = Response(text=text, status=500) return response class SerializedError(RuntimeError): def to_json(self): return json.dumps(self.to_dict(self)) def to_dict(cls, error: Exception): return {"type": type(error).__name__, "message": str(error)} def from_json(cls, serialized: str) -> "SerializedError": error_dict = json.loads(serialized) error_type = error_dict["type"] try: error = getattr(sys.modules[__name__], error_type) except AttributeError: try: error = getattr(ofrak_type.error, error_type) except: raise ValueError(error_dict) if issubclass(error, cls): return error.from_dict({"message": error_dict["message"]}) else: return error(error_dict["message"]) def from_dict(cls, error_dict: Dict[str, Any]) -> "SerializedError": return cls(error_dict["message"]) The provided code snippet includes necessary dependencies for implementing the `exceptions_to_http` function. Write a Python function `def exceptions_to_http(error_class: Type[SerializedError])` to solve the following problem: Decorator for a server function that attempts to do some work, and forwards the exception, if any, to the client over HTTP. Usage: @exceptions_to_http(MyErrorClass) async def handle_some_request(self, request...): ... Here is the function: def exceptions_to_http(error_class: Type[SerializedError]): """ Decorator for a server function that attempts to do some work, and forwards the exception, if any, to the client over HTTP. 
Usage: @exceptions_to_http(MyErrorClass) async def handle_some_request(self, request...): ... """ def exceptions_to_http_decorator(func: Callable): @functools.wraps(func) async def wrapper(*args, **kwargs): try: return await func(*args, **kwargs) except Exception as error: LOGGER.exception("Exception raised in aiohttp endpoint") return respond_with_error(error, error_class) return wrapper return exceptions_to_http_decorator
Decorator for a server function that attempts to do some work, and forwards the exception, if any, to the client over HTTP. Usage: @exceptions_to_http(MyErrorClass) async def handle_some_request(self, request...): ...
15,348
import asyncio import binascii import dataclasses import re from enum import Enum import functools import itertools import logging from ofrak.project.project import OfrakProject import typing_inspect from typing_inspect import get_args import json import orjson import inspect import os import sys import webbrowser from collections import defaultdict from typing import ( Iterable, Optional, Dict, cast, Set, Tuple, Union, Type, Callable, TypeVar, Any, List, ) from aiohttp import web from aiohttp.web_exceptions import HTTPBadRequest from aiohttp.web_request import Request from aiohttp.web_response import Response from aiohttp.web_fileresponse import FileResponse from dataclasses import fields from ofrak.component.interface import ComponentInterface from ofrak.model.component_filters import ( ComponentOrMetaFilter, ComponentTypeFilter, ComponentTargetFilter, ComponentAndMetaFilter, ) from ofrak.ofrak_context import get_current_ofrak_context from ofrak.service.component_locator_i import ComponentFilter from ofrak_patch_maker.toolchain.abstract import Toolchain from ofrak_type.error import NotFoundError from ofrak_type.range import Range from ofrak import ( OFRAKContext, ResourceFilter, ResourceAttributeRangeFilter, ResourceAttributeValueFilter, ResourceSort, ResourceTag, Packer, Unpacker, Modifier, Analyzer, ) from ofrak.core import Addressable, File from ofrak.core import ( GenericBinary, AddCommentModifier, AddCommentModifierConfig, DeleteCommentModifierConfig, DeleteCommentModifier, StringFindReplaceConfig, StringFindReplaceModifier, ) from ofrak.model.component_model import ( ComponentContext, ClientComponentContext, ComponentRunResult, ComponentConfig, ) from ofrak.model.resource_model import ( ResourceContext, ClientResourceContext, ResourceModel, ResourceAttributes, MutableResourceModel, ) from ofrak.model.viewable_tag_model import ResourceViewContext from ofrak.resource import Resource from ofrak.service.error import SerializedError from 
ofrak.service.serialization.pjson import ( SerializationServiceInterface, PJSONSerializationService, ) from ofrak.gui.script_builder import ActionType, ScriptBuilder from ofrak.service.serialization.pjson_types import PJSONType from ofrak.core.entropy import DataSummaryAnalyzer def pluck_id(request: Request, get_parameter_name: str) -> bytes: return bytes.fromhex(request.match_info[get_parameter_name])
null
15,349
import asyncio import binascii import dataclasses import re from enum import Enum import functools import itertools import logging from ofrak.project.project import OfrakProject import typing_inspect from typing_inspect import get_args import json import orjson import inspect import os import sys import webbrowser from collections import defaultdict from typing import ( Iterable, Optional, Dict, cast, Set, Tuple, Union, Type, Callable, TypeVar, Any, List, ) from aiohttp import web from aiohttp.web_exceptions import HTTPBadRequest from aiohttp.web_request import Request from aiohttp.web_response import Response from aiohttp.web_fileresponse import FileResponse from dataclasses import fields from ofrak.component.interface import ComponentInterface from ofrak.model.component_filters import ( ComponentOrMetaFilter, ComponentTypeFilter, ComponentTargetFilter, ComponentAndMetaFilter, ) from ofrak.ofrak_context import get_current_ofrak_context from ofrak.service.component_locator_i import ComponentFilter from ofrak_patch_maker.toolchain.abstract import Toolchain from ofrak_type.error import NotFoundError from ofrak_type.range import Range from ofrak import ( OFRAKContext, ResourceFilter, ResourceAttributeRangeFilter, ResourceAttributeValueFilter, ResourceSort, ResourceTag, Packer, Unpacker, Modifier, Analyzer, ) from ofrak.core import Addressable, File from ofrak.core import ( GenericBinary, AddCommentModifier, AddCommentModifierConfig, DeleteCommentModifierConfig, DeleteCommentModifier, StringFindReplaceConfig, StringFindReplaceModifier, ) from ofrak.model.component_model import ( ComponentContext, ClientComponentContext, ComponentRunResult, ComponentConfig, ) from ofrak.model.resource_model import ( ResourceContext, ClientResourceContext, ResourceModel, ResourceAttributes, MutableResourceModel, ) from ofrak.model.viewable_tag_model import ResourceViewContext from ofrak.resource import Resource from ofrak.service.error import SerializedError from 
ofrak.service.serialization.pjson import ( SerializationServiceInterface, PJSONSerializationService, ) from ofrak.gui.script_builder import ActionType, ScriptBuilder from ofrak.service.serialization.pjson_types import PJSONType from ofrak.core.entropy import DataSummaryAnalyzer PJSONType = Union[str, int, float, bool, None, Dict[str, Any], List[Any], Tuple[Any, ...]] The provided code snippet includes necessary dependencies for implementing the `get_query_string_as_pjson` function. Write a Python function `def get_query_string_as_pjson(request: Request) -> Dict[str, PJSONType]` to solve the following problem: URL-encoded GET parameters are all strings. For example, None is encoded as 'None', or 1 as '1', which isn't valid PJSON. We fix this by applying `json.loads` on each parameter. Here is the function: def get_query_string_as_pjson(request: Request) -> Dict[str, PJSONType]: """ URL-encoded GET parameters are all strings. For example, None is encoded as 'None', or 1 as '1', which isn't valid PJSON. We fix this by applying `json.loads` on each parameter. """ return {key: json.loads(value) for key, value in request.query.items()}
URL-encoded GET parameters are all strings. For example, None is encoded as 'None', or 1 as '1', which isn't valid PJSON. We fix this by applying `json.loads` on each parameter.
15,350
import asyncio import binascii import dataclasses import re from enum import Enum import functools import itertools import logging from ofrak.project.project import OfrakProject import typing_inspect from typing_inspect import get_args import json import orjson import inspect import os import sys import webbrowser from collections import defaultdict from typing import ( Iterable, Optional, Dict, cast, Set, Tuple, Union, Type, Callable, TypeVar, Any, List, ) from aiohttp import web from aiohttp.web_exceptions import HTTPBadRequest from aiohttp.web_request import Request from aiohttp.web_response import Response from aiohttp.web_fileresponse import FileResponse from dataclasses import fields from ofrak.component.interface import ComponentInterface from ofrak.model.component_filters import ( ComponentOrMetaFilter, ComponentTypeFilter, ComponentTargetFilter, ComponentAndMetaFilter, ) from ofrak.ofrak_context import get_current_ofrak_context from ofrak.service.component_locator_i import ComponentFilter from ofrak_patch_maker.toolchain.abstract import Toolchain from ofrak_type.error import NotFoundError from ofrak_type.range import Range from ofrak import ( OFRAKContext, ResourceFilter, ResourceAttributeRangeFilter, ResourceAttributeValueFilter, ResourceSort, ResourceTag, Packer, Unpacker, Modifier, Analyzer, ) from ofrak.core import Addressable, File from ofrak.core import ( GenericBinary, AddCommentModifier, AddCommentModifierConfig, DeleteCommentModifierConfig, DeleteCommentModifier, StringFindReplaceConfig, StringFindReplaceModifier, ) from ofrak.model.component_model import ( ComponentContext, ClientComponentContext, ComponentRunResult, ComponentConfig, ) from ofrak.model.resource_model import ( ResourceContext, ClientResourceContext, ResourceModel, ResourceAttributes, MutableResourceModel, ) from ofrak.model.viewable_tag_model import ResourceViewContext from ofrak.resource import Resource from ofrak.service.error import SerializedError from 
ofrak.service.serialization.pjson import ( SerializationServiceInterface, PJSONSerializationService, ) from ofrak.gui.script_builder import ActionType, ScriptBuilder from ofrak.service.serialization.pjson_types import PJSONType from ofrak.core.entropy import DataSummaryAnalyzer class AiohttpOFRAKServer: def __init__( self, serializer: SerializationServiceInterface, ofrak_context: OFRAKContext, host: str, port: int, enable_cors: bool = False, ): async def start(self): async def stop(self): async def run_until_cancelled(self): async def init_chunked_root_resource(self, request: Request) -> Response: async def root_resource_chunk(self, request: Request) -> Response: async def create_chunked_root_resource(self, request: Request) -> Response: async def create_root_resource(self, request: Request) -> Response: async def get_root_resources(self, request: Request) -> Response: async def get_resource(self, request: Request) -> Response: async def get_data(self, request: Request) -> Response: async def get_data_length(self, request: Request) -> Response: async def get_child_data_ranges(self, request: Request) -> Response: async def get_range(child): async def batch_get_range(self, request: Request) -> Response: async def get_resource_range(resource_id): async def unpack(self, request: Request) -> Response: async def unpack_recursively(self, request: Request) -> Response: async def pack(self, request: Request) -> Response: async def pack_recursively(self, request: Request) -> Response: async def identify(self, request: Request) -> Response: async def identify_recursively(self, request: Request) -> Response: async def data_summary(self, request: Request) -> Response: async def analyze(self, request: Request) -> Response: async def get_parent(self, request: Request) -> Response: async def get_ancestors(self, request: Request) -> Response: async def get_descendants(self, request: Request) -> Response: async def batch_get_children(self, request: Request) -> Response: async def 
get_resource_children(resource_id): def get_child_sort_key(child): async def get_root_resource_from_child(self, request: Request) -> Response: async def queue_patch(self, request: Request) -> Response: async def create_mapped_child(self, request: Request) -> Response: async def find_and_replace(self, request: Request) -> Response: async def add_comment(self, request: Request) -> Response: async def delete_comment(self, request: Request) -> Response: async def search_for_vaddr(self, request: Request) -> Response: async def search_for_string(self, request: Request): async def search_for_bytes(self, request: Request): async def add_tag(self, request: Request) -> Response: async def get_all_tags(self, request: Request) -> Response: async def add_flush_to_disk_to_script(self, request: Request) -> Response: async def get_script(self, request: Request) -> Response: async def get_components(self, request: Request) -> Response: async def get_config_for_component(self, request: Request) -> Response: async def run_component(self, request: Request) -> Response: async def get_static_files(self, request: Request) -> FileResponse: async def get_tags_and_num_components(self, request: Request): async def search_data(self, request: Request) -> Response: async def create_new_project(self, request: Request) -> Response: async def clone_project_from_git(self, request: Request) -> Response: def recurse_path_collisions(path: str, count: int) -> str: async def get_project_by_id(self, request: Request) -> Response: async def get_all_projects(self, request: Request) -> Response: async def reset_project(self, request: Request) -> Response: async def add_binary_to_project(self, request: Request) -> Response: async def add_script_to_project(self, request: Request) -> Response: async def open_project(self, request: Request) -> Response: async def get_projects_path(self, request: Request) -> Response: async def set_projects_path(self, request: Request) -> Response: async def 
save_project_data(self, request: Request) -> Response: async def delete_binary_from_project(self, request: Request) -> Response: async def delete_script_from_project(self, request: Request) -> Response: async def get_project_script(self, request: Request) -> Response: async def get_project_by_resource_id(self, request: Request) -> Response: def _slurp_projects_from_dir(self) -> Set: def _get_project_by_id(self, id) -> OfrakProject: def _construct_field_response(self, obj): def _construct_arg_response(self, obj): def _modify_by_case(self, obj): def _construct_enum_response(self, obj): def _has_elipsis(self, obj): def _convert_to_class_name_str(self, obj: Any): async def _get_resource_by_id(self, resource_id: bytes, job_id: bytes) -> Resource: def _get_specific_components( self, resource: Resource, show_all_components: bool, target_filter: Optional[str], incl_analyzers: bool, incl_modifiers: bool, incl_packers: bool, incl_unpackers: bool, ) -> List[str]: def _get_config_for_component( self, component: Type[ComponentInterface] ) -> Type[ComponentConfig]: async def _get_resource_model_by_id( self, resource_id: bytes, job_id: bytes ) -> Optional[Union[ResourceModel, MutableResourceModel]]: async def _serialize_component_result(self, result: ComponentRunResult) -> PJSONType: async def get_and_serialize(resource_id) -> PJSONType: async def _get_resource_for_request(self, request: Request) -> Resource: def _serialize_resource_model(self, resource_model: ResourceModel) -> PJSONType: def _serialize_resource(self, resource: Resource) -> PJSONType: def _serialize_multi_resource(self, resources: Iterable[Resource]) -> PJSONType: async def start_server( ofrak_context: OFRAKContext, host: str, port: int, enable_cors: bool = False, ) -> AiohttpOFRAKServer: def get_current_ofrak_context() -> OFRAKContext: class Resource: def __init__( self, job_id: bytes, resource: MutableResourceModel, resource_context: ResourceContext, resource_view_context: ResourceViewContext, job_context: 
Optional[JobRunContext], component_context: ComponentContext, resource_factory: "ResourceFactory", id_service: IDServiceInterface, data_service: DataServiceInterface, resource_service: ResourceServiceInterface, job_service: JobServiceInterface, ): def get_id(self) -> bytes: def get_job_id(self) -> bytes: def get_data_id(self) -> Optional[bytes]: def get_resource_context(self) -> ResourceContext: def get_resource_view_context(self) -> ResourceViewContext: def get_component_context(self) -> ComponentContext: def get_job_context(self) -> Optional[JobRunContext]: def get_caption(self) -> str: def is_modified(self) -> bool: def get_model(self) -> MutableResourceModel: async def get_data(self, range: Optional[Range] = None) -> bytes: async def get_data_length(self) -> int: async def get_data_range_within_parent(self) -> Range: async def get_data_range_within_root(self) -> Range: async def search_data( self, query: Pattern[bytes], start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[Tuple[int, bytes], ...]: async def search_data( self, query: bytes, start: Optional[int] = None, end: Optional[int] = None, max_matches: Optional[int] = None, ) -> Tuple[int, ...]: async def search_data(self, query, start=None, end=None, max_matches=None): async def save(self): def _save(self) -> Tuple[List[bytes], List[DataPatch], List[MutableResourceModel]]: async def _fetch(self, resource: MutableResourceModel): async def _fetch_resources(self, resource_ids: Iterable[bytes]): async def _update_views(self, modified: Set[bytes], deleted: Set[bytes]): async def run( self, component_type: Type[ComponentInterface[CC]], config: CC = None, ) -> ComponentRunResult: async def auto_run( self, components: Iterable[Type[ComponentInterface]] = tuple(), blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(), all_unpackers: bool = False, all_identifiers: bool = False, all_analyzers: bool = False, all_packers: bool = False, ) -> 
ComponentRunResult: async def unpack(self) -> ComponentRunResult: async def analyze(self, resource_attributes: Type[RA]) -> RA: async def identify(self) -> ComponentRunResult: async def pack(self) -> ComponentRunResult: async def auto_run_recursively( self, components: Iterable[Type[ComponentInterface]] = tuple(), blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(), blacklisted_tags: Iterable[ResourceTag] = tuple(), all_unpackers: bool = False, all_identifiers: bool = False, all_analyzers: bool = False, ) -> ComponentRunResult: async def unpack_recursively( self, blacklisted_components: Iterable[Type[ComponentInterface]] = tuple(), do_not_unpack: Iterable[ResourceTag] = tuple(), ) -> ComponentRunResult: async def analyze_recursively(self) -> ComponentRunResult: async def pack_recursively(self) -> ComponentRunResult: async def write_to(self, destination: BinaryIO, pack: bool = True): async def _analyze_attributes(self, attribute_types: Tuple[Type[ResourceAttributes], ...]): async def _create_resource(self, resource_model: ResourceModel) -> "Resource": async def _create_resources( self, resource_models: Iterable[ResourceModel] ) -> Iterable["Resource"]: async def create_child( self, tags: Iterable[ResourceTag] = None, attributes: Iterable[ResourceAttributes] = None, data: Optional[bytes] = None, data_range: Optional[Range] = None, ) -> "Resource": async def create_child_from_view( self, view: RV, data_range: Optional[Range] = None, data: Optional[bytes] = None, additional_tags: Iterable[ResourceTag] = (), additional_attributes: Iterable[ResourceAttributes] = (), ) -> "Resource": def _view_as(self, viewable_tag: Type[RV]) -> Union[RV, Awaitable[RV]]: async def finish_view_creation( attrs_to_analyze: Tuple[Type[ResourceAttributes], ...] 
) -> RV: async def view_as(self, viewable_tag: Type[RV]) -> RV: def add_view(self, view: ResourceViewInterface): def _set_modified(self): def _add_tag(self, tag: ResourceTag): def add_tag(self, *tags: ResourceTag): def get_tags(self, inherit: bool = True) -> Iterable[ResourceTag]: def has_tag(self, tag: ResourceTag, inherit: bool = True) -> bool: def remove_tag(self, tag: ResourceTag): def get_most_specific_tags(self) -> Iterable[ResourceTag]: def _check_attributes(self, attributes_type: Type[RA]) -> Optional[RA]: def _add_attributes(self, attributes: ResourceAttributes): def add_attributes(self, *attributes: ResourceAttributes): def has_attributes(self, attributes_type: Type[ResourceAttributes]) -> bool: def get_attributes(self, attributes_type: Type[RA]) -> RA: def remove_attributes(self, attributes_type: Type[ResourceAttributes]): def add_component( self, component_id: bytes, version: int, ): def add_component_for_attributes( self, component_id: bytes, version: int, attributes: Type[ResourceAttributes], ): def remove_component( self, component_id: bytes, attributes: Optional[Type[ResourceAttributes]] = None, ): def has_component_run(self, component_id: bytes, desired_version: Optional[int] = None) -> bool: def queue_patch( self, patch_range: Range, data: bytes, ): async def get_parent_as_view(self, v_type: Type[RV]) -> RV: async def get_parent(self) -> "Resource": async def get_ancestors( self, r_filter: ResourceFilter = None, ) -> Iterable["Resource"]: async def get_only_ancestor_as_view( self, v_type: Type[RV], r_filter: ResourceFilter, ) -> RV: async def get_only_ancestor(self, r_filter: ResourceFilter) -> "Resource": async def get_descendants_as_view( self, v_type: Type[RV], max_depth: int = -1, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable[RV]: async def get_descendants( self, max_depth: int = -1, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable["Resource"]: async def get_only_descendant_as_view( 
self, v_type: Type[RV], max_depth: int = -1, r_filter: ResourceFilter = None, ) -> RV: async def get_only_descendant( self, max_depth: int = -1, r_filter: ResourceFilter = None, ) -> "Resource": async def get_only_sibling_as_view( self, v_type: Type[RV], r_filter: ResourceFilter = None, ) -> RV: async def get_only_sibling(self, r_filter: ResourceFilter = None) -> "Resource": async def get_children( self, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable["Resource"]: async def get_children_as_view( self, v_type: Type[RV], r_filter: ResourceFilter = None, r_sort: ResourceSort = None, ) -> Iterable[RV]: async def get_only_child(self, r_filter: ResourceFilter = None) -> "Resource": async def get_only_child_as_view(self, v_type: Type[RV], r_filter: ResourceFilter = None) -> RV: async def delete(self): async def flush_data_to_disk(self, path: str, pack: bool = True): def __repr__(self): async def summarize(self) -> str: async def summarize_tree( self, r_filter: ResourceFilter = None, r_sort: ResourceSort = None, indent: str = "", summarize_resource_callback: Optional[Callable[["Resource"], Awaitable[str]]] = None, ) -> str: async def open_gui( host: str, port: int, focus_resource: Optional[Resource] = None, ofrak_context: Optional[OFRAKContext] = None, open_in_browser: bool = True, enable_cors: bool = False, ) -> AiohttpOFRAKServer: # pragma: no cover if ofrak_context is None: ofrak_context = get_current_ofrak_context() server = await start_server(ofrak_context, host, port, enable_cors) if focus_resource is None: url = f"http://{server._host}:{server._port}/" else: url = f"http://{server._host}:{server._port}/#{focus_resource.get_id().hex()}" print(f"GUI is being served on {url}") if open_in_browser: webbrowser.open(url) return server
null
15,351
import asyncio import binascii import dataclasses import re from enum import Enum import functools import itertools import logging from ofrak.project.project import OfrakProject import typing_inspect from typing_inspect import get_args import json import orjson import inspect import os import sys import webbrowser from collections import defaultdict from typing import ( Iterable, Optional, Dict, cast, Set, Tuple, Union, Type, Callable, TypeVar, Any, List, ) from aiohttp import web from aiohttp.web_exceptions import HTTPBadRequest from aiohttp.web_request import Request from aiohttp.web_response import Response from aiohttp.web_fileresponse import FileResponse from dataclasses import fields from ofrak.component.interface import ComponentInterface from ofrak.model.component_filters import ( ComponentOrMetaFilter, ComponentTypeFilter, ComponentTargetFilter, ComponentAndMetaFilter, ) from ofrak.ofrak_context import get_current_ofrak_context from ofrak.service.component_locator_i import ComponentFilter from ofrak_patch_maker.toolchain.abstract import Toolchain from ofrak_type.error import NotFoundError from ofrak_type.range import Range from ofrak import ( OFRAKContext, ResourceFilter, ResourceAttributeRangeFilter, ResourceAttributeValueFilter, ResourceSort, ResourceTag, Packer, Unpacker, Modifier, Analyzer, ) from ofrak.core import Addressable, File from ofrak.core import ( GenericBinary, AddCommentModifier, AddCommentModifierConfig, DeleteCommentModifierConfig, DeleteCommentModifier, StringFindReplaceConfig, StringFindReplaceModifier, ) from ofrak.model.component_model import ( ComponentContext, ClientComponentContext, ComponentRunResult, ComponentConfig, ) from ofrak.model.resource_model import ( ResourceContext, ClientResourceContext, ResourceModel, ResourceAttributes, MutableResourceModel, ) from ofrak.model.viewable_tag_model import ResourceViewContext from ofrak.resource import Resource from ofrak.service.error import SerializedError from 
ofrak.service.serialization.pjson import ( SerializationServiceInterface, PJSONSerializationService, ) from ofrak.gui.script_builder import ActionType, ScriptBuilder from ofrak.service.serialization.pjson_types import PJSONType from ofrak.core.entropy import DataSummaryAnalyzer def json_response( data: Any = None, *, text: Optional[str] = None, body: Optional[bytes] = None, status: int = 200, reason: Optional[str] = None, headers=None, content_type: str = "application/json", dumps=orjson.dumps, ) -> Response: if data is not None: if text or body: raise ValueError("only one of data, text, or body should be specified") else: body = dumps(data) return Response( text=text, body=body, status=status, reason=reason, headers=headers, content_type=content_type, )
null
15,352
import asyncio import binascii import dataclasses import re from enum import Enum import functools import itertools import logging from ofrak.project.project import OfrakProject import typing_inspect from typing_inspect import get_args import json import orjson import inspect import os import sys import webbrowser from collections import defaultdict from typing import ( Iterable, Optional, Dict, cast, Set, Tuple, Union, Type, Callable, TypeVar, Any, List, ) from aiohttp import web from aiohttp.web_exceptions import HTTPBadRequest from aiohttp.web_request import Request from aiohttp.web_response import Response from aiohttp.web_fileresponse import FileResponse from dataclasses import fields from ofrak.component.interface import ComponentInterface from ofrak.model.component_filters import ( ComponentOrMetaFilter, ComponentTypeFilter, ComponentTargetFilter, ComponentAndMetaFilter, ) from ofrak.ofrak_context import get_current_ofrak_context from ofrak.service.component_locator_i import ComponentFilter from ofrak_patch_maker.toolchain.abstract import Toolchain from ofrak_type.error import NotFoundError from ofrak_type.range import Range from ofrak import ( OFRAKContext, ResourceFilter, ResourceAttributeRangeFilter, ResourceAttributeValueFilter, ResourceSort, ResourceTag, Packer, Unpacker, Modifier, Analyzer, ) from ofrak.core import Addressable, File from ofrak.core import ( GenericBinary, AddCommentModifier, AddCommentModifierConfig, DeleteCommentModifierConfig, DeleteCommentModifier, StringFindReplaceConfig, StringFindReplaceModifier, ) from ofrak.model.component_model import ( ComponentContext, ClientComponentContext, ComponentRunResult, ComponentConfig, ) from ofrak.model.resource_model import ( ResourceContext, ClientResourceContext, ResourceModel, ResourceAttributes, MutableResourceModel, ) from ofrak.model.viewable_tag_model import ResourceViewContext from ofrak.resource import Resource from ofrak.service.error import SerializedError from 
ofrak.service.serialization.pjson import ( SerializationServiceInterface, PJSONSerializationService, ) from ofrak.gui.script_builder import ActionType, ScriptBuilder from ofrak.service.serialization.pjson_types import PJSONType from ofrak.core.entropy import DataSummaryAnalyzer def _format_default(default): return default.decode() if isinstance(default, bytes) else default
null
15,353
import dataclasses import functools import logging from dataclasses import dataclass from typing import Optional, Type, TypeVar, Dict, Iterable, Any, cast, Set from ofrak.resource import Resource from ofrak.model.resource_model import ResourceAttributes, ResourceModel from ofrak.model.tag_model import ResourceTag from ofrak.model.viewable_tag_model import ResourceViewInterface def _fields(*args, **kwargs): return dataclasses.fields(*args, **kwargs)
null
15,354
import dataclasses from abc import ABC, abstractmethod from collections import defaultdict from typing import ( TypeVar, Set, Type, Dict, Optional, Iterable, MutableMapping, Union, Tuple, List, Callable, Generic, Any, cast, overload, ) from weakref import WeakValueDictionary from ofrak.model.tag_model import ResourceTag from ofrak_type.range import Range X = TypeVar("X", str, int, float, bytes) class ResourceIndexedAttribute(Generic[X]): """ Descriptor class for values in resource attributes which can be indexed. When a field `Foo` of a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] type `A` is indexed, it is possible to include an `r_filter` or `r_sort` in a query to the resource service which filters the returned resource by the value of `foo` each of them have. This class should not be explicitly instantiated, instead created using the @index decorator. For example: ```python class A(ResourceAttributes): x: int def Foo(self) -> int: return self.x ``` """ def __init__( self, getter_func: Callable[[Any], X], uses_indexes: Iterable["ResourceIndexedAttribute"] = (), ): """ :param getter_func: Getter function for the property :param uses_indexes: Additional index types that are required to calculate the value of this index :raises TypeError: if the getter function does not have a return type annotation :raises TypeError: if the getter does not return an indexable type """ _validate_indexed_type(getter_func) self.fget: Callable[[Any], X] = getter_func self.attributes_owner: Optional[Type[ResourceAttributes]] = None self.uses_indexes = uses_indexes self.used_by_indexes: List["ResourceIndexedAttribute"] = [] self.index_name: str = getter_func.__name__ for other_index in self.uses_indexes: other_index.used_by_indexes.append(self) def __set_name__(self, owner, name): self.attributes_owner = owner self.__name__ = f"{owner.__name__}.{name}" self.index_name = name def __get__(self, instance: None, owner: type) -> "ResourceIndexedAttribute[X]": """ 
Applicable when getting the ResourceIndexedAttribute of a class, not an instance. Example (continued building off of example from class docstring): A.X # Returns a ResourceIndexedAttribute[int] """ ... def __get__(self, instance: Any, owner: type) -> X: """ Applicable when getting the ResourceIndexedAttribute of an instance Example (continued building off of example from class docstring): a = A(10) a.X # Returns 10 """ ... def __get__( self, instance: Optional[Any], owner: type ) -> Union["ResourceIndexedAttribute[X]", X]: if instance is None: return self else: return self.fget(instance) def __getattr__(self, name) -> Any: ... def __set__(self, instance, value): raise ValueError("Cannot set value of indexed attributes") def get_value( self, index_holder: "ResourceModel", ) -> Optional[X]: if self.attributes_owner is None: raise TypeError( f"Cannot get index value for {self.__name__} of model " f"{index_holder.id.hex()} because {self.__name__}'s owner has not " f"been set. This cannot happen unless `get_index` has somehow been " f"called during class creation, before the owner is set." ) else: attributes = index_holder.get_attributes(self.attributes_owner) if attributes is None: return None elif self.uses_indexes: # Create new copy of attributes to inject index values into attributes_plus_required_indexes = dataclasses.replace(attributes) for nested_index in self.uses_indexes: val = nested_index.get_value(index_holder) if val is None: # Not all of the nested indexes are available, can't calculate index val. return None object.__setattr__( attributes_plus_required_indexes, nested_index.index_name, val ) return self.fget(attributes_plus_required_indexes) else: return self.fget(attributes) def __repr__(self) -> str: return self.__name__ The provided code snippet includes necessary dependencies for implementing the `index` function. 
Write a Python function `def index( *, uses_indexes: Iterable[ResourceIndexedAttribute] = ..., ) -> Callable[[Callable[[Any], X]], ResourceIndexedAttribute[X]]` to solve the following problem: When called as: @index(uses_indexes=(...)) def MyIndex(self): ... Here is the function: def index( *, uses_indexes: Iterable[ResourceIndexedAttribute] = ..., ) -> Callable[[Callable[[Any], X]], ResourceIndexedAttribute[X]]: """ When called as: @index(uses_indexes=(...)) def MyIndex(self): ... """ ...
When called as: @index(uses_indexes=(...)) def MyIndex(self): ...
15,355
import dataclasses from abc import ABC, abstractmethod from collections import defaultdict from typing import ( TypeVar, Set, Type, Dict, Optional, Iterable, MutableMapping, Union, Tuple, List, Callable, Generic, Any, cast, overload, ) from weakref import WeakValueDictionary from ofrak.model.tag_model import ResourceTag from ofrak_type.range import Range X = TypeVar("X", str, int, float, bytes) class ResourceIndexedAttribute(Generic[X]): """ Descriptor class for values in resource attributes which can be indexed. When a field `Foo` of a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] type `A` is indexed, it is possible to include an `r_filter` or `r_sort` in a query to the resource service which filters the returned resource by the value of `foo` each of them have. This class should not be explicitly instantiated, instead created using the @index decorator. For example: ```python class A(ResourceAttributes): x: int def Foo(self) -> int: return self.x ``` """ def __init__( self, getter_func: Callable[[Any], X], uses_indexes: Iterable["ResourceIndexedAttribute"] = (), ): """ :param getter_func: Getter function for the property :param uses_indexes: Additional index types that are required to calculate the value of this index :raises TypeError: if the getter function does not have a return type annotation :raises TypeError: if the getter does not return an indexable type """ _validate_indexed_type(getter_func) self.fget: Callable[[Any], X] = getter_func self.attributes_owner: Optional[Type[ResourceAttributes]] = None self.uses_indexes = uses_indexes self.used_by_indexes: List["ResourceIndexedAttribute"] = [] self.index_name: str = getter_func.__name__ for other_index in self.uses_indexes: other_index.used_by_indexes.append(self) def __set_name__(self, owner, name): self.attributes_owner = owner self.__name__ = f"{owner.__name__}.{name}" self.index_name = name def __get__(self, instance: None, owner: type) -> "ResourceIndexedAttribute[X]": """ 
Applicable when getting the ResourceIndexedAttribute of a class, not an instance. Example (continued building off of example from class docstring): A.X # Returns a ResourceIndexedAttribute[int] """ ... def __get__(self, instance: Any, owner: type) -> X: """ Applicable when getting the ResourceIndexedAttribute of an instance Example (continued building off of example from class docstring): a = A(10) a.X # Returns 10 """ ... def __get__( self, instance: Optional[Any], owner: type ) -> Union["ResourceIndexedAttribute[X]", X]: if instance is None: return self else: return self.fget(instance) def __getattr__(self, name) -> Any: ... def __set__(self, instance, value): raise ValueError("Cannot set value of indexed attributes") def get_value( self, index_holder: "ResourceModel", ) -> Optional[X]: if self.attributes_owner is None: raise TypeError( f"Cannot get index value for {self.__name__} of model " f"{index_holder.id.hex()} because {self.__name__}'s owner has not " f"been set. This cannot happen unless `get_index` has somehow been " f"called during class creation, before the owner is set." ) else: attributes = index_holder.get_attributes(self.attributes_owner) if attributes is None: return None elif self.uses_indexes: # Create new copy of attributes to inject index values into attributes_plus_required_indexes = dataclasses.replace(attributes) for nested_index in self.uses_indexes: val = nested_index.get_value(index_holder) if val is None: # Not all of the nested indexes are available, can't calculate index val. return None object.__setattr__( attributes_plus_required_indexes, nested_index.index_name, val ) return self.fget(attributes_plus_required_indexes) else: return self.fget(attributes) def __repr__(self) -> str: return self.__name__ The provided code snippet includes necessary dependencies for implementing the `index` function. 
Write a Python function `def index( index_value_getter: Callable[[Any], X], ) -> ResourceIndexedAttribute[X]` to solve the following problem: When called as: @index def MyIndex(self): ... Here is the function: def index( index_value_getter: Callable[[Any], X], ) -> ResourceIndexedAttribute[X]: """ When called as: @index def MyIndex(self): ... """ ...
When called as: @index def MyIndex(self): ...
15,356
import dataclasses from abc import ABC, abstractmethod from collections import defaultdict from typing import ( TypeVar, Set, Type, Dict, Optional, Iterable, MutableMapping, Union, Tuple, List, Callable, Generic, Any, cast, overload, ) from weakref import WeakValueDictionary from ofrak.model.tag_model import ResourceTag from ofrak_type.range import Range X = TypeVar("X", str, int, float, bytes) class ResourceIndexedAttribute(Generic[X]): """ Descriptor class for values in resource attributes which can be indexed. When a field `Foo` of a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] type `A` is indexed, it is possible to include an `r_filter` or `r_sort` in a query to the resource service which filters the returned resource by the value of `foo` each of them have. This class should not be explicitly instantiated, instead created using the @index decorator. For example: ```python class A(ResourceAttributes): x: int def Foo(self) -> int: return self.x ``` """ def __init__( self, getter_func: Callable[[Any], X], uses_indexes: Iterable["ResourceIndexedAttribute"] = (), ): """ :param getter_func: Getter function for the property :param uses_indexes: Additional index types that are required to calculate the value of this index :raises TypeError: if the getter function does not have a return type annotation :raises TypeError: if the getter does not return an indexable type """ _validate_indexed_type(getter_func) self.fget: Callable[[Any], X] = getter_func self.attributes_owner: Optional[Type[ResourceAttributes]] = None self.uses_indexes = uses_indexes self.used_by_indexes: List["ResourceIndexedAttribute"] = [] self.index_name: str = getter_func.__name__ for other_index in self.uses_indexes: other_index.used_by_indexes.append(self) def __set_name__(self, owner, name): self.attributes_owner = owner self.__name__ = f"{owner.__name__}.{name}" self.index_name = name def __get__(self, instance: None, owner: type) -> "ResourceIndexedAttribute[X]": """ 
Applicable when getting the ResourceIndexedAttribute of a class, not an instance. Example (continued building off of example from class docstring): A.X # Returns a ResourceIndexedAttribute[int] """ ... def __get__(self, instance: Any, owner: type) -> X: """ Applicable when getting the ResourceIndexedAttribute of an instance Example (continued building off of example from class docstring): a = A(10) a.X # Returns 10 """ ... def __get__( self, instance: Optional[Any], owner: type ) -> Union["ResourceIndexedAttribute[X]", X]: if instance is None: return self else: return self.fget(instance) def __getattr__(self, name) -> Any: ... def __set__(self, instance, value): raise ValueError("Cannot set value of indexed attributes") def get_value( self, index_holder: "ResourceModel", ) -> Optional[X]: if self.attributes_owner is None: raise TypeError( f"Cannot get index value for {self.__name__} of model " f"{index_holder.id.hex()} because {self.__name__}'s owner has not " f"been set. This cannot happen unless `get_index` has somehow been " f"called during class creation, before the owner is set." ) else: attributes = index_holder.get_attributes(self.attributes_owner) if attributes is None: return None elif self.uses_indexes: # Create new copy of attributes to inject index values into attributes_plus_required_indexes = dataclasses.replace(attributes) for nested_index in self.uses_indexes: val = nested_index.get_value(index_holder) if val is None: # Not all of the nested indexes are available, can't calculate index val. return None object.__setattr__( attributes_plus_required_indexes, nested_index.index_name, val ) return self.fget(attributes_plus_required_indexes) else: return self.fget(attributes) def __repr__(self) -> str: return self.__name__ The provided code snippet includes necessary dependencies for implementing the `index` function. 
Write a Python function `def index( index_value_getter: Callable[[Any], X] = None, *, uses_indexes: Iterable[ResourceIndexedAttribute] = (), ) -> Union[ Callable[[Callable[[Any], X]], ResourceIndexedAttribute[X]], ResourceIndexedAttribute[X] ]` to solve the following problem: Create a new indexable attribute for a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes]. :param index_value_getter: Method of [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] which returns the value of the index for that instance. :param uses_indexes: Additional index types that are required to calculate the value of this index. :return: [ResourceIndexedAttribute][ofrak.model.resource_model.ResourceIndexedAttribute] instance Here is the function: def index( index_value_getter: Callable[[Any], X] = None, *, uses_indexes: Iterable[ResourceIndexedAttribute] = (), ) -> Union[ Callable[[Callable[[Any], X]], ResourceIndexedAttribute[X]], ResourceIndexedAttribute[X] ]: """ Create a new indexable attribute for a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes]. :param index_value_getter: Method of [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] which returns the value of the index for that instance. :param uses_indexes: Additional index types that are required to calculate the value of this index. :return: [ResourceIndexedAttribute][ofrak.model.resource_model.ResourceIndexedAttribute] instance """ # See if we're being called as @index or @index(). if index_value_getter is None: # We're called with parens. def wrap(_index_value_getter) -> ResourceIndexedAttribute[X]: return ResourceIndexedAttribute[X](_index_value_getter, uses_indexes) return wrap # type: ignore # We're called as @index without parens. return ResourceIndexedAttribute[X](index_value_getter)
Create a new indexable attribute for a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes]. :param index_value_getter: Method of [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] which returns the value of the index for that instance. :param uses_indexes: Additional index types that are required to calculate the value of this index. :return: [ResourceIndexedAttribute][ofrak.model.resource_model.ResourceIndexedAttribute] instance
15,357
import dataclasses from abc import ABC, abstractmethod from collections import defaultdict from typing import ( TypeVar, Set, Type, Dict, Optional, Iterable, MutableMapping, Union, Tuple, List, Callable, Generic, Any, cast, overload, ) from weakref import WeakValueDictionary from ofrak.model.tag_model import ResourceTag from ofrak_type.range import Range X = TypeVar("X", str, int, float, bytes) _INDEXABLE_TYPES: Dict[str, Type] = { indexable_type.__name__: indexable_type for indexable_type in getattr(X, "__constraints__") # type: ignore } The provided code snippet includes necessary dependencies for implementing the `_validate_indexed_type` function. Write a Python function `def _validate_indexed_type(getter_func: Callable[[Any], X])` to solve the following problem: Verify the getter function returns a valid indexable type - a primitive type which can be compared. :param getter_func: :raises TypeError: if the getter function does not have a return type annotation :raises TypeError: if the getter does not return an indexable type :return: Here is the function: def _validate_indexed_type(getter_func: Callable[[Any], X]): """ Verify the getter function returns a valid indexable type - a primitive type which can be compared. :param getter_func: :raises TypeError: if the getter function does not have a return type annotation :raises TypeError: if the getter does not return an indexable type :return: """ if not hasattr(getter_func, "__annotations__"): raise TypeError( f"Index {getter_func.__name__} must have type annotations, including return type" ) annotations = getattr(getter_func, "__annotations__") if "return" not in annotations: raise TypeError( f"Index {getter_func.__name__} must have type annotations, including return type" ) index_type = annotations["return"] if type(index_type) is str: # handles case where type annotations is "stringified" and not the actual type, e.g. # def foo(self) -> "int": ... 
index_type_name = index_type else: index_type_name = index_type.__name__ if index_type_name not in _INDEXABLE_TYPES: raise TypeError( f"Type of index {getter_func.__name__} is {index_type}, which is not " f"one of {_INDEXABLE_TYPES.values()}; cannot index by this value!" )
Verify the getter function returns a valid indexable type - a primitive type which can be compared. :param getter_func: :raises TypeError: if the getter function does not have a return type annotation :raises TypeError: if the getter does not return an indexable type :return:
15,358
import dataclasses from _warnings import warn from collections import defaultdict from dataclasses import dataclass from typing import Tuple, Type, Dict, Any, List, Set, TypeVar, Iterable, MutableMapping, Generic import ofrak.model._auto_attributes from ofrak.model.resource_model import ( ResourceAttributes, ResourceIndexedAttribute, ResourceModel, ResourceContext, ) from ofrak.model.tag_model import ResourceTag _VIEW_ATTRIBUTES_TYPE = "__view_attributes_type__" class ViewableResourceTag(ResourceTag): def __new__(mcs, name: str, bases: Tuple[Type, ...], namespace: Dict[str, Any]): """ Create a new attributes type for the `ViewableResourceTag` which is about to be created. This type will inherit from `ResourceAttributes` and have fields corresponding to all the fields of unique to the new `ViewableResourceTag`. This type, as well as all attributes types inherited from the bases, are added to the namespace of the new class under the dunders `_VIEW_ATTRIBUTES_TYPE` and `_COMPOSED_ATTRIBUTES_TYPE`. This method also checks for possible attempted polymorphism, raising warnings if any method overrides are detected. The `__new__` method can inspect and edit the names, bases, and namespace (attributes and methods) of the class. It is called after the class definition has been parsed, and before the new class object has been created. 
:param name: Name of the new class :param bases: Base classes of the new class :param namespace: Namespace of attributes for the new class, in the form of a dictionary mapping attribute names to objects :return: """ _check_for_polymorphism(name, bases, namespace) attributes_type = mcs._create_attributes_type(name, bases, namespace) composed_attributes_types = [attributes_type] for base_cls in bases: composed_attributes_types.extend(_get_attributes_types_recursively(base_cls)) composed_attributes_types = _filter_attributes_types(composed_attributes_types) namespace[_VIEW_ATTRIBUTES_TYPE] = attributes_type namespace[_COMPOSED_ATTRIBUTES_TYPE] = composed_attributes_types return super().__new__(mcs, name, bases, namespace) def __init__(cls, name: str, bases: Tuple[Type, ...], namespace: Dict[str, Any]): """ Fix up any `@index` defined in this `ViewableResourceTag`. Each index descriptor needs an owner which is a `ResourceAttributes` subclass, and an automatically populated owner will be set to the newly created `cls`, which is an instance of `ViewableResourceTag` and not a `ResourceAttributes` subclass. This is a little hacky, but it works just fine. Can inspect and edit the names, bases, and namespace (attributes and methods) of the class. Called after the new class object has been created. :param name: Name of the new class :param bases: Base classes of the new class :param namespace: Namespace of attributes for the new class, in the form of a dictionary mapping attribute names to objects :raises TypeError: If the viewable tag has no attributes but does have an @index :return: """ # Change owner of the indexes to be the attributes type for name, index_descriptor in _get_indexes(namespace).items(): if getattr(cls, _VIEW_ATTRIBUTES_TYPE) is None: raise TypeError( f"Cannot have an index in a ResourceView which has no attributes " f"- an index should only access one set of attributes, " f"so this index is likely illegal anyway." 
) index_descriptor.__set_name__(getattr(cls, _VIEW_ATTRIBUTES_TYPE), name) super().__init__(cls, name, bases) # type: ignore def attributes_type(cls) -> Type[ResourceAttributes]: """ Get the auto-generated `ResourceAttributes` subclass for this `ViewableResourceTag`. The returned class is a `dataclass` which encapsulates the fields defined in one specific `ViewableResourceTag`. For example if `B` inherits from `A` and `A` defines several fields, `B.attributes_type` has only fields defined in `B`, and none of the fields defined in `A`. :return: The auto-generated `ResourceAttributes` subclass for this `ViewableResourceTag` class, in no particular order. """ warn( "T.attributes_type is deprecated! Use AttributesType[T] instead.", category=DeprecationWarning, ) return getattr(cls, _VIEW_ATTRIBUTES_TYPE) def composed_attributes_types(cls) -> Iterable[Type[ResourceAttributes]]: """ Get all of the `ResourceAttributes` subclasses which this class is composed of. This means walking back through the class hierarchy and getting the `AttributesType[base]` for every base class of this class. :return: The `attributes_type` of every `ViewableResourceTag` this class inherits from, in no particular order. """ return getattr(cls, _COMPOSED_ATTRIBUTES_TYPE) def _create_attributes_type( mcs, name: str, bases: Tuple[Type, ...], namespace: Dict[str, Any] ) -> Type[ResourceAttributes]: """ Get a type inheriting from `ResourceAttributes` which is unique to this tag (`cls`). 
:return: """ # First make this a dataclass to easily get its fields # We can't depend on the class already being a dataclass because class decorators run # after metaclass __new__ and __init__ methods, so it is not yet a dataclass tmp_cls = super().__new__(mcs, name, bases, namespace) tmp_dataclass: object = dataclass(tmp_cls) # type: ignore base_fields: Set[dataclasses.Field] = set() for base in bases: if type(base) is ViewableResourceTag: base_fields.update(dataclasses.fields(base)) fields = [ (_field.name, _field.type, _field) for _field in dataclasses.fields(tmp_dataclass) if _field not in base_fields and not _field.name.startswith("_") ] if len(fields) == 0: return _NO_RESOURCE_ATTRIBUTES_TYPE indexed_attributes_namespace = _get_indexes(namespace) # Creates a new class inheriting from ResourceAttributes, with the same fields as this # ViewableResourceTag, as well as the same indexed attribute descriptors attributes_type = dataclasses.make_dataclass( f"{AttributesType.__name__}[{name}]", fields, bases=(ResourceAttributes,), namespace=indexed_attributes_namespace, **ResourceAttributes.DATACLASS_PARAMS, ) # By default, this new attributes_type is part of the "types" module. # Instead, we make it part of the module ofrak.model._auto_attributes. attributes_type.__module__ = "ofrak.model._auto_attributes" setattr( ofrak.model._auto_attributes, attributes_type.__name__, attributes_type, ) return attributes_type class ResourceAttributes: DATACLASS_PARAMS = {"frozen": True, "eq": True} """ Wraps immutable attributes attached to a resource. While not enforced programmatically, only analyzers should add/replace attributes attached to a resource. Additionally, a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] instance also defines which component attached the attributes to a specific resource. 
""" def __str__(self): fields_str = ", ".join( f"{field.name}={str(getattr(self, field.name))}" for field in dataclasses.fields(self) ) return f"{self.__class__.__name__}({fields_str})" def get_indexable_attributes(cls) -> List[ResourceIndexedAttribute]: indexable_attributes = [] for name, attr in cls.__dict__.items(): if type(attr) is ResourceIndexedAttribute: indexable_attributes.append(attr) return indexable_attributes def replace_updated(resource_attributes: RA, updated_attributes: Any) -> RA: """ Replace the fields of `resource_attributes` with the updated values found in `updated_attributes`, returning a new object. The fields having non-`None` values in `updated_attributes` are considered to be updated and will be replaced in `resource_attributes` if they exist there. Both arguments must be `dataclass` instances. `updated_attributes` is typically a descendant of [ComponentConfig][ofrak.model.component_model.ComponentConfig]. !!! todo "To do" This currently assumes that values can't be updated to `None`, but that could happen. :raises TypeError: if any of `resource_attributes` or `updated_attributes` isn't a dataclass instance. """ for obj in (resource_attributes, updated_attributes): if not (dataclasses.is_dataclass(obj) and not isinstance(obj, type)): raise TypeError(f"{obj.__name__} must be a dataclass instance") updated_fields = { field: val for field, val in dataclasses.asdict(updated_attributes).items() if val is not None } updated_attributes = dataclasses.replace( resource_attributes, **updated_fields, ) return updated_attributes def _get_attributes_types_recursively(cls: type) -> List[Type[ResourceAttributes]]: if isinstance(cls, ViewableResourceTag): attrs_types = [getattr(cls, _VIEW_ATTRIBUTES_TYPE)] for base_cls in cls.__bases__: attrs_types.extend(_get_attributes_types_recursively(base_cls)) return attrs_types else: return []
null
15,359
import dataclasses from _warnings import warn from collections import defaultdict from dataclasses import dataclass from typing import Tuple, Type, Dict, Any, List, Set, TypeVar, Iterable, MutableMapping, Generic import ofrak.model._auto_attributes from ofrak.model.resource_model import ( ResourceAttributes, ResourceIndexedAttribute, ResourceModel, ResourceContext, ) from ofrak.model.tag_model import ResourceTag _NO_RESOURCE_ATTRIBUTES_TYPE = _NoResourceAttributesType class ResourceAttributes: DATACLASS_PARAMS = {"frozen": True, "eq": True} """ Wraps immutable attributes attached to a resource. While not enforced programmatically, only analyzers should add/replace attributes attached to a resource. Additionally, a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] instance also defines which component attached the attributes to a specific resource. """ def __str__(self): fields_str = ", ".join( f"{field.name}={str(getattr(self, field.name))}" for field in dataclasses.fields(self) ) return f"{self.__class__.__name__}({fields_str})" def get_indexable_attributes(cls) -> List[ResourceIndexedAttribute]: indexable_attributes = [] for name, attr in cls.__dict__.items(): if type(attr) is ResourceIndexedAttribute: indexable_attributes.append(attr) return indexable_attributes def replace_updated(resource_attributes: RA, updated_attributes: Any) -> RA: """ Replace the fields of `resource_attributes` with the updated values found in `updated_attributes`, returning a new object. The fields having non-`None` values in `updated_attributes` are considered to be updated and will be replaced in `resource_attributes` if they exist there. Both arguments must be `dataclass` instances. `updated_attributes` is typically a descendant of [ComponentConfig][ofrak.model.component_model.ComponentConfig]. !!! todo "To do" This currently assumes that values can't be updated to `None`, but that could happen. 
:raises TypeError: if any of `resource_attributes` or `updated_attributes` isn't a dataclass instance. """ for obj in (resource_attributes, updated_attributes): if not (dataclasses.is_dataclass(obj) and not isinstance(obj, type)): raise TypeError(f"{obj.__name__} must be a dataclass instance") updated_fields = { field: val for field, val in dataclasses.asdict(updated_attributes).items() if val is not None } updated_attributes = dataclasses.replace( resource_attributes, **updated_fields, ) return updated_attributes def _filter_attributes_types( attr_types: List[Type[ResourceAttributes]], ) -> List[Type[ResourceAttributes]]: filtered_types = set() for attr_type in attr_types: if attr_type is not _NO_RESOURCE_ATTRIBUTES_TYPE: filtered_types.add(attr_type) return list(filtered_types)
null
15,360
import dataclasses from _warnings import warn from collections import defaultdict from dataclasses import dataclass from typing import Tuple, Type, Dict, Any, List, Set, TypeVar, Iterable, MutableMapping, Generic import ofrak.model._auto_attributes from ofrak.model.resource_model import ( ResourceAttributes, ResourceIndexedAttribute, ResourceModel, ResourceContext, ) from ofrak.model.tag_model import ResourceTag class ViewableResourceTag(ResourceTag): def __new__(mcs, name: str, bases: Tuple[Type, ...], namespace: Dict[str, Any]): """ Create a new attributes type for the `ViewableResourceTag` which is about to be created. This type will inherit from `ResourceAttributes` and have fields corresponding to all the fields of unique to the new `ViewableResourceTag`. This type, as well as all attributes types inherited from the bases, are added to the namespace of the new class under the dunders `_VIEW_ATTRIBUTES_TYPE` and `_COMPOSED_ATTRIBUTES_TYPE`. This method also checks for possible attempted polymorphism, raising warnings if any method overrides are detected. The `__new__` method can inspect and edit the names, bases, and namespace (attributes and methods) of the class. It is called after the class definition has been parsed, and before the new class object has been created. 
:param name: Name of the new class :param bases: Base classes of the new class :param namespace: Namespace of attributes for the new class, in the form of a dictionary mapping attribute names to objects :return: """ _check_for_polymorphism(name, bases, namespace) attributes_type = mcs._create_attributes_type(name, bases, namespace) composed_attributes_types = [attributes_type] for base_cls in bases: composed_attributes_types.extend(_get_attributes_types_recursively(base_cls)) composed_attributes_types = _filter_attributes_types(composed_attributes_types) namespace[_VIEW_ATTRIBUTES_TYPE] = attributes_type namespace[_COMPOSED_ATTRIBUTES_TYPE] = composed_attributes_types return super().__new__(mcs, name, bases, namespace) def __init__(cls, name: str, bases: Tuple[Type, ...], namespace: Dict[str, Any]): """ Fix up any `@index` defined in this `ViewableResourceTag`. Each index descriptor needs an owner which is a `ResourceAttributes` subclass, and an automatically populated owner will be set to the newly created `cls`, which is an instance of `ViewableResourceTag` and not a `ResourceAttributes` subclass. This is a little hacky, but it works just fine. Can inspect and edit the names, bases, and namespace (attributes and methods) of the class. Called after the new class object has been created. :param name: Name of the new class :param bases: Base classes of the new class :param namespace: Namespace of attributes for the new class, in the form of a dictionary mapping attribute names to objects :raises TypeError: If the viewable tag has no attributes but does have an @index :return: """ # Change owner of the indexes to be the attributes type for name, index_descriptor in _get_indexes(namespace).items(): if getattr(cls, _VIEW_ATTRIBUTES_TYPE) is None: raise TypeError( f"Cannot have an index in a ResourceView which has no attributes " f"- an index should only access one set of attributes, " f"so this index is likely illegal anyway." 
) index_descriptor.__set_name__(getattr(cls, _VIEW_ATTRIBUTES_TYPE), name) super().__init__(cls, name, bases) # type: ignore def attributes_type(cls) -> Type[ResourceAttributes]: """ Get the auto-generated `ResourceAttributes` subclass for this `ViewableResourceTag`. The returned class is a `dataclass` which encapsulates the fields defined in one specific `ViewableResourceTag`. For example if `B` inherits from `A` and `A` defines several fields, `B.attributes_type` has only fields defined in `B`, and none of the fields defined in `A`. :return: The auto-generated `ResourceAttributes` subclass for this `ViewableResourceTag` class, in no particular order. """ warn( "T.attributes_type is deprecated! Use AttributesType[T] instead.", category=DeprecationWarning, ) return getattr(cls, _VIEW_ATTRIBUTES_TYPE) def composed_attributes_types(cls) -> Iterable[Type[ResourceAttributes]]: """ Get all of the `ResourceAttributes` subclasses which this class is composed of. This means walking back through the class hierarchy and getting the `AttributesType[base]` for every base class of this class. :return: The `attributes_type` of every `ViewableResourceTag` this class inherits from, in no particular order. """ return getattr(cls, _COMPOSED_ATTRIBUTES_TYPE) def _create_attributes_type( mcs, name: str, bases: Tuple[Type, ...], namespace: Dict[str, Any] ) -> Type[ResourceAttributes]: """ Get a type inheriting from `ResourceAttributes` which is unique to this tag (`cls`). 
:return: """ # First make this a dataclass to easily get its fields # We can't depend on the class already being a dataclass because class decorators run # after metaclass __new__ and __init__ methods, so it is not yet a dataclass tmp_cls = super().__new__(mcs, name, bases, namespace) tmp_dataclass: object = dataclass(tmp_cls) # type: ignore base_fields: Set[dataclasses.Field] = set() for base in bases: if type(base) is ViewableResourceTag: base_fields.update(dataclasses.fields(base)) fields = [ (_field.name, _field.type, _field) for _field in dataclasses.fields(tmp_dataclass) if _field not in base_fields and not _field.name.startswith("_") ] if len(fields) == 0: return _NO_RESOURCE_ATTRIBUTES_TYPE indexed_attributes_namespace = _get_indexes(namespace) # Creates a new class inheriting from ResourceAttributes, with the same fields as this # ViewableResourceTag, as well as the same indexed attribute descriptors attributes_type = dataclasses.make_dataclass( f"{AttributesType.__name__}[{name}]", fields, bases=(ResourceAttributes,), namespace=indexed_attributes_namespace, **ResourceAttributes.DATACLASS_PARAMS, ) # By default, this new attributes_type is part of the "types" module. # Instead, we make it part of the module ofrak.model._auto_attributes. attributes_type.__module__ = "ofrak.model._auto_attributes" setattr( ofrak.model._auto_attributes, attributes_type.__name__, attributes_type, ) return attributes_type The provided code snippet includes necessary dependencies for implementing the `_check_for_polymorphism` function. Write a Python function `def _check_for_polymorphism(name: str, bases: Iterable[Type], namespace: Dict[str, Any])` to solve the following problem: Check for any methods in a new class which override a parent's method. The behavior of `view_as` means that overriding methods might not work as users think it does. Calling something like `a = resource.view_as(A)` will always and only return instances of `A`. 
These resources may have tags `B` and/or `C` which inherit from `A` but the returned view is not an instance of `B` or `C`. Calling a method `a.foo()` on that resource will therefore **ALWAYS** dispatch to `A.foo`, never to `B.foo` or `C.foo`. This would break a design in which a user expects to only program against a common viewable tag's interface and let individual instances dispatch to some unique behavior. For example, if one gets all descendants with a common tag as a view of that tag, then calls a virtual method of those views, expecting each view to possibly do some unique behavior. Such a class hierarchy could be made to work, but we raise a warning to ensure that developers are conscious of this restriction and program accordingly. :param name: Name of the new class :param bases: Base classes of the new class :param namespace: Namespace of attributes for the new class, in the form of a dictionary mapping attribute names to objects Here is the function: def _check_for_polymorphism(name: str, bases: Iterable[Type], namespace: Dict[str, Any]): """ Check for any methods in a new class which override a parent's method. The behavior of `view_as` means that overriding methods might not work as users think it does. Calling something like `a = resource.view_as(A)` will always and only return instances of `A`. These resources may have tags `B` and/or `C` which inherit from `A` but the returned view is not an instance of `B` or `C`. Calling a method `a.foo()` on that resource will therefore **ALWAYS** dispatch to `A.foo`, never to `B.foo` or `C.foo`. This would break a design in which a user expects to only program against a common viewable tag's interface and let individual instances dispatch to some unique behavior. For example, if one gets all descendants with a common tag as a view of that tag, then calls a virtual method of those views, expecting each view to possibly do some unique behavior. 
Such a class hierarchy could be made to work, but we raise a warning to ensure that developers are conscious of this restriction and program accordingly. :param name: Name of the new class :param bases: Base classes of the new class :param namespace: Namespace of attributes for the new class, in the form of a dictionary mapping attribute names to objects """ for base_cls in bases: # TODO: Figure out cleaner way to make an exception for ResourceView if name == "ResourceView": continue if isinstance(base_cls, ViewableResourceTag): parent_cls = base_cls parent_class_namespace = dir(parent_cls) common_attrs = set(namespace.keys()).intersection(set(parent_class_namespace)) common_attrs = common_attrs.difference(dir(ViewableResourceTag)) common_methods = { attr_name: namespace[attr_name] for attr_name in common_attrs if callable(namespace[attr_name]) } overwritten_methods = { attr_name: attr for attr_name, attr in common_methods.items() if namespace[attr_name] != getattr(parent_cls, attr_name) } for method_name, method in overwritten_methods.items(): warn( f"{name}.{method_name} overrides the parent's method " f"{method_name}; OFRAK Resources do not support runtime polymorphism, " f"and this function may depend on runtime polymorphism." )
Check for any methods in a new class which override a parent's method. The behavior of `view_as` means that overriding methods might not work as users think it does. Calling something like `a = resource.view_as(A)` will always and only return instances of `A`. These resources may have tags `B` and/or `C` which inherit from `A` but the returned view is not an instance of `B` or `C`. Calling a method `a.foo()` on that resource will therefore **ALWAYS** dispatch to `A.foo`, never to `B.foo` or `C.foo`. This would break a design in which a user expects to only program against a common viewable tag's interface and let individual instances dispatch to some unique behavior. For example, if one gets all descendants with a common tag as a view of that tag, then calls a virtual method of those views, expecting each view to possibly do some unique behavior. Such a class hierarchy could be made to work, but we raise a warning to ensure that developers are conscious of this restriction and program accordingly. :param name: Name of the new class :param bases: Base classes of the new class :param namespace: Namespace of attributes for the new class, in the form of a dictionary mapping attribute names to objects
15,361
import dataclasses from _warnings import warn from collections import defaultdict from dataclasses import dataclass from typing import Tuple, Type, Dict, Any, List, Set, TypeVar, Iterable, MutableMapping, Generic import ofrak.model._auto_attributes from ofrak.model.resource_model import ( ResourceAttributes, ResourceIndexedAttribute, ResourceModel, ResourceContext, ) from ofrak.model.tag_model import ResourceTag class ResourceIndexedAttribute(Generic[X]): """ Descriptor class for values in resource attributes which can be indexed. When a field `Foo` of a [ResourceAttributes][ofrak.model.resource_model.ResourceAttributes] type `A` is indexed, it is possible to include an `r_filter` or `r_sort` in a query to the resource service which filters the returned resource by the value of `foo` each of them have. This class should not be explicitly instantiated, instead created using the @index decorator. For example: ```python class A(ResourceAttributes): x: int def Foo(self) -> int: return self.x ``` """ def __init__( self, getter_func: Callable[[Any], X], uses_indexes: Iterable["ResourceIndexedAttribute"] = (), ): """ :param getter_func: Getter function for the property :param uses_indexes: Additional index types that are required to calculate the value of this index :raises TypeError: if the getter function does not have a return type annotation :raises TypeError: if the getter does not return an indexable type """ _validate_indexed_type(getter_func) self.fget: Callable[[Any], X] = getter_func self.attributes_owner: Optional[Type[ResourceAttributes]] = None self.uses_indexes = uses_indexes self.used_by_indexes: List["ResourceIndexedAttribute"] = [] self.index_name: str = getter_func.__name__ for other_index in self.uses_indexes: other_index.used_by_indexes.append(self) def __set_name__(self, owner, name): self.attributes_owner = owner self.__name__ = f"{owner.__name__}.{name}" self.index_name = name def __get__(self, instance: None, owner: type) -> 
"ResourceIndexedAttribute[X]": """ Applicable when getting the ResourceIndexedAttribute of a class, not an instance. Example (continued building off of example from class docstring): A.X # Returns a ResourceIndexedAttribute[int] """ ... def __get__(self, instance: Any, owner: type) -> X: """ Applicable when getting the ResourceIndexedAttribute of an instance Example (continued building off of example from class docstring): a = A(10) a.X # Returns 10 """ ... def __get__( self, instance: Optional[Any], owner: type ) -> Union["ResourceIndexedAttribute[X]", X]: if instance is None: return self else: return self.fget(instance) def __getattr__(self, name) -> Any: ... def __set__(self, instance, value): raise ValueError("Cannot set value of indexed attributes") def get_value( self, index_holder: "ResourceModel", ) -> Optional[X]: if self.attributes_owner is None: raise TypeError( f"Cannot get index value for {self.__name__} of model " f"{index_holder.id.hex()} because {self.__name__}'s owner has not " f"been set. This cannot happen unless `get_index` has somehow been " f"called during class creation, before the owner is set." ) else: attributes = index_holder.get_attributes(self.attributes_owner) if attributes is None: return None elif self.uses_indexes: # Create new copy of attributes to inject index values into attributes_plus_required_indexes = dataclasses.replace(attributes) for nested_index in self.uses_indexes: val = nested_index.get_value(index_holder) if val is None: # Not all of the nested indexes are available, can't calculate index val. return None object.__setattr__( attributes_plus_required_indexes, nested_index.index_name, val ) return self.fget(attributes_plus_required_indexes) else: return self.fget(attributes) def __repr__(self) -> str: return self.__name__ The provided code snippet includes necessary dependencies for implementing the `_get_indexes` function. 
Write a Python function `def _get_indexes(namespace: Dict[str, Any]) -> Dict[str, ResourceIndexedAttribute]` to solve the following problem: Extract the index descriptors from a namespaces. Here is the function: def _get_indexes(namespace: Dict[str, Any]) -> Dict[str, ResourceIndexedAttribute]: """ Extract the index descriptors from a namespaces. """ return { name: item for name, item in namespace.items() if isinstance(item, ResourceIndexedAttribute) }
Extract the index descriptors from a namespaces.
15,362
import itertools from typing import FrozenSet, Set, Tuple, Type from dataclasses import dataclass from ofrak.model.resource_model import ResourceAttributes from ofrak.model.tag_model import ResourceTag from ofrak.component.interface import ComponentInterface from ofrak.component.analyzer import Analyzer from ofrak.service.component_locator_i import ComponentFilter from functools import lru_cache def _isinstance(*args, **kwargs): return isinstance(*args, **kwargs)
null
15,364
import configparser import os from multiprocessing import Pool, cpu_count from typing import Optional, Dict, Mapping, Tuple import math from ofrak_patch_maker.toolchain.model import BinFileType, Segment from ofrak_type.error import NotFoundError from ofrak_type.memory_permissions import MemoryPermissions def get_file_format(path): try: import magic except ImportError: # ImportError is likely raise because libmagic cannot be found on the system. See error message. raise result = magic.from_file(path).split(" ")[0].lower() try: return BinFileType(result) except: ValueError("Invalid BinFileType!!!")
null
15,365
import configparser import os from multiprocessing import Pool, cpu_count from typing import Optional, Dict, Mapping, Tuple import math from ofrak_patch_maker.toolchain.model import BinFileType, Segment from ofrak_type.error import NotFoundError from ofrak_type.memory_permissions import MemoryPermissions The provided code snippet includes necessary dependencies for implementing the `get_repository_config` function. Write a Python function `def get_repository_config(section: str, key: Optional[str] = None)` to solve the following problem: Get config values from toolchain.conf. :param section: section name in config file :param key: key in `config[section]` :raises SystemExit: If `config[section]` or `config[section][key]` not found. :return Union[str, List[Tuple[str, str]]]: the result of ``config.get(section, key)`` or ``config.items(section)`` Here is the function: def get_repository_config(section: str, key: Optional[str] = None): """ Get config values from toolchain.conf. :param section: section name in config file :param key: key in `config[section]` :raises SystemExit: If `config[section]` or `config[section][key]` not found. 
:return Union[str, List[Tuple[str, str]]]: the result of ``config.get(section, key)`` or ``config.items(section)`` """ config = configparser.RawConfigParser() config_name = "toolchain.conf" local_config = os.path.join(os.path.dirname(__file__), os.path.pardir) config_paths = [local_config] error_by_config_file: Dict[str, Exception] = dict() for p in config_paths: conf = os.path.join(p, config_name) if not os.path.exists(conf): continue try: config.read(conf) if key: ret = config.get(section, key) else: ret = config.items(section) # type: ignore return ret except (configparser.NoSectionError, configparser.NoOptionError) as e: error_by_config_file[conf] = e continue if 0 == len(error_by_config_file): raise NotFoundError(f"Configuration file {config_name} not found") elif 1 == len(error_by_config_file): _config, _e = next(iter(error_by_config_file.items())) raise NotFoundError(f"Section or option not found in {_config}", _e) else: raise NotFoundError( f"Section {section:s} or option {key:s} not found in any of the configs searched: " f"{error_by_config_file}" )
Get config values from toolchain.conf. :param section: section name in config file :param key: key in `config[section]` :raises SystemExit: If `config[section]` or `config[section][key]` not found. :return Union[str, List[Tuple[str, str]]]: the result of ``config.get(section, key)`` or ``config.items(section)``
15,366
import configparser import os from multiprocessing import Pool, cpu_count from typing import Optional, Dict, Mapping, Tuple import math from ofrak_patch_maker.toolchain.model import BinFileType, Segment from ofrak_type.error import NotFoundError from ofrak_type.memory_permissions import MemoryPermissions def _gen_file( # pragma: no cover name: str, address: int, stub_str: str, out_dir: str ) -> Mapping[str, Tuple[Segment, Segment]]: path = os.path.join(out_dir, name + ".as") with open(path, "w") as f: f.write(f"{stub_str}\n.global {name}\n{name}:\n") segment = Segment( segment_name=".text", vm_address=address, offset=0, is_entry=False, length=0, access_perms=MemoryPermissions.RX, ) return {path: (segment, NULL_DATA)} The provided code snippet includes necessary dependencies for implementing the `generate_arm_stubs` function. Write a Python function `def generate_arm_stubs( func_names: Mapping[str, int], out_dir: str, thumb: bool = False ) -> Mapping[str, Tuple[Segment, Segment]]` to solve the following problem: Utility function to generate assembly stubs. This is necessary when function calls need to switch between ARM and thumb mode (when code generated by the PatchMaker is ARM and needs to jump to thumb code, or the opposite). With those stubs, the linker has explicit information about the destination mode, so it jumps correctly (exchanging mode or not). It is not [PatchMaker][ofrak_patch_maker.patch_maker.PatchMaker]'s responsibility to programmatically generate source in this way. Furthermore, this functionality is much more complex than `base_symbols={}` addition implies, as actual object files are generated and linked against. 
:param func_names: names to effective address :param out_dir: object file output directory :param thumb: Whether or not to generate thumb stubs :return Dict[str, Tuple[Segment, Segment]: maps object file to dummy `[text_segment, data segment]` Here is the function: def generate_arm_stubs( func_names: Mapping[str, int], out_dir: str, thumb: bool = False ) -> Mapping[str, Tuple[Segment, Segment]]: """ Utility function to generate assembly stubs. This is necessary when function calls need to switch between ARM and thumb mode (when code generated by the PatchMaker is ARM and needs to jump to thumb code, or the opposite). With those stubs, the linker has explicit information about the destination mode, so it jumps correctly (exchanging mode or not). It is not [PatchMaker][ofrak_patch_maker.patch_maker.PatchMaker]'s responsibility to programmatically generate source in this way. Furthermore, this functionality is much more complex than `base_symbols={}` addition implies, as actual object files are generated and linked against. :param func_names: names to effective address :param out_dir: object file output directory :param thumb: Whether or not to generate thumb stubs :return Dict[str, Tuple[Segment, Segment]: maps object file to dummy `[text_segment, data segment]` """ print(f"Generating ARM stubs...") names = list(func_names.keys()) addresses = list(func_names.values()) out_dirs = [out_dir] * len(names) if thumb: stub_strs = [".thumb_func"] * len(names) else: stub_strs = [f".type {name}, %function" for name in names] args = zip(names, addresses, stub_strs, out_dirs) workers = math.ceil(0.6 * cpu_count()) with Pool(processes=workers) as pool: result = pool.starmap(_gen_file, args, chunksize=math.ceil(len(names) / workers)) segment_map: Dict[str, Tuple[Segment, Segment]] = {} for r in result: segment_map.update(r) print(list(r.keys())) return segment_map
Utility function to generate assembly stubs. This is necessary when function calls need to switch between ARM and thumb mode (when code generated by the PatchMaker is ARM and needs to jump to thumb code, or the opposite). With those stubs, the linker has explicit information about the destination mode, so it jumps correctly (exchanging mode or not). It is not [PatchMaker][ofrak_patch_maker.patch_maker.PatchMaker]'s responsibility to programmatically generate source in this way. Furthermore, this functionality is much more complex than `base_symbols={}` addition implies, as actual object files are generated and linked against. :param func_names: names to effective address :param out_dir: object file output directory :param thumb: Whether or not to generate thumb stubs :return Dict[str, Tuple[Segment, Segment]: maps object file to dummy `[text_segment, data segment]`
15,367
import argparse from parsy import forward_declaration, generate, regex, seq, string def create_parser(): bb_parser = forward_declaration() @generate def tag(): start_tag = ( string("[") >> ( seq(regex(r"\w+") << string("="), regex(r"[^]]+")).map(tuple) | regex(r"\w+").map(lambda w: (w, None)) ) << string("]") ) start = yield start_tag inner = yield bb_parser tag_name, _ = start end_tag = string("[/") >> string(tag_name) << string("]") yield end_tag return start, inner bb_parser.become((tag | regex(r"[^[]+")).many()) return bb_parser
null
15,368
import argparse from parsy import forward_declaration, generate, regex, seq, string def pad_line(l, pad_char=" "): length = sum(map(len, l.split("%c"))) return l + (80 - length) * pad_char def build_log_string(parsed_original): def build_log_lists(parsed): str_list, format_list = [], [] for x in parsed: if isinstance(x, tuple): (tagname, value), inner = x str_list.append("%c") format_list.append( f'"font-family: monospace; background: black; ' f'{tagname}: {value};"' ) new_str_list, new_format_list = build_log_lists(inner) str_list.extend(new_str_list) format_list.extend(new_format_list) elif isinstance(x, str): str_list.append(x) else: raise ValueError(f"Unexpected type {type(x)} of {x}") return str_list, format_list str_list, format_list = build_log_lists(parsed_original) ascii_art_string = "\n".join(map(pad_line, "".join(str_list).splitlines())) return f"""console.log(`{ascii_art_string}`, {", ".join(format_list)}, ); """
null
15,369
import asyncio
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Callable, Dict, Awaitable, Iterable, Tuple, ClassVar

# A single request and the result computed for it.
Request = TypeVar("Request")
Result = TypeVar("Result")
# Requests are identified by a string key.
_RequestKeyT = str
# A handler receives a whole batch of requests and asynchronously returns a
# (request, result) pair for each one.
_BatchHandlerFunctionT = Callable[
    [Tuple[Request, ...]], Awaitable[Iterable[Tuple[Request, Result]]]
]
# Default maximum number of handler invocations per second.
_DEFAULT_RATE_LIMIT = 10

def _DEFAULT_REQUEST_KEY(req) -> _RequestKeyT:
    # Default request identity: the request's hash, rendered as a string.
    return str(hash(req))

class BatchManagerInterface(Generic[Request, Result], ABC):
    """
    Class which manages automatically batching async requests to some resource (like a remote
    server) to limit the number of individual requests.
    """

    async def get_result(self, request: Request) -> Result:
        """
        Get the result for a request. The request may be batched with one or more other pending
        requests before being passed to the `handler_function`.

        :param request: request to be passed in the argument of `handler_function`

        :return: result for the given request

        :raises NotAllRequestsHandledError: if this or any other requests were not handled by the
        `handler_function` passed to the constructor.
        """
        raise NotImplementedError()

class _BatchManagerImplementation(BatchManagerInterface[Request, Result]):
    def __init__(
        self,
        handler_function: _BatchHandlerFunctionT,
        rate_limit: int,
        request_key_f: Callable[[Request], _RequestKeyT],
    ):
        # Basic state setup
        self._request_key_f = request_key_f
        self._handler_function = handler_function
        self._rate_limit = rate_limit
        # Background task and batch setup
        # NOTE(review): asyncio.get_event_loop() outside a running loop is
        # deprecated in modern Python — confirm this is constructed from
        # within a running event loop.
        loop = asyncio.get_event_loop()
        self._handler_loop_task = loop.create_task(self._periodic_batch_handler())
        self._current_batch = self._new_batch()

    async def get_result(self, request: Request) -> Result:
        # Add the request to the batch currently accumulating; capture the
        # batch locally in case the background loop swaps it out meanwhile.
        current_batch = self._current_batch
        current_batch.add_request(request)
        # Gives self._handler_loop_task a chance to raise its errors
        # NOTE(review): asyncio.wait() with bare coroutines is deprecated
        # since 3.8 and rejected in 3.11 — may need asyncio.ensure_future
        # wrapping depending on the target Python version.
        done, _ = await asyncio.wait(
            (current_batch.result(request), self._handler_loop_task),
            return_when=asyncio.FIRST_COMPLETED,
        )
        # Whichever finished first: either the result future resolves, or
        # the handler loop task completed (i.e. raised) and .result()
        # re-raises its exception here.
        return next(iter(done)).result()

    async def _periodic_batch_handler(self):
        # Background loop: every 1/rate_limit seconds, swap out the current
        # batch (if non-empty) and hand its requests to the handler.
        while True:
            await asyncio.sleep(1.0 / float(self._rate_limit))
            old_batch = self._current_batch
            if old_batch.has_requests():
                self._current_batch = self._new_batch()
                handled_results = await self._handler_function(old_batch.get_requests())
                # Resolve each pending result future from the handler output.
                old_batch.resolve_batch_requests(handled_results)

    def _new_batch(self):
        # _Batch is declared elsewhere in this module.
        return _Batch[Request, Result](self._request_key_f)

The provided code snippet includes necessary dependencies for implementing the `make_batch_manager` function. Write a Python function `def make_batch_manager( handler_function: _BatchHandlerFunctionT, rate_limit: int = _DEFAULT_RATE_LIMIT, request_key_f: Callable[[Request], _RequestKeyT] = _DEFAULT_REQUEST_KEY, ) -> BatchManagerInterface[Request, Result]` to solve the following problem: Construct an object which will automatically batch every call to `get_result` into periodic calls to `handler_function`. This function is the preferred way to make a one-off batch manager with minimal lines of code. If you find yourself calling this function with the same arguments multiple times, consider instead defining a subclass of `AbstractBatchManager`. This is functionally equivalent to calling `make_batch_manager` with the same arguments, but the code is cleaner. The returned BatchManagerInterface is implemented for asyncio, but is NOT GENERALLY THREAD-SAFE! That is, it must not be shared between manually accessed threads. :param handler_function: function to handle multiple requests at once and return the results as pairs of (request, result) :param rate_limit: maximum number of times `handler_function` will be called per second :param request_key_f: function this manager should use to uniquely identify requests :return: an instance of a `BatchManagerInterface` using `handler_function` to get results Here is the function:
def make_batch_manager(
    handler_function: _BatchHandlerFunctionT,
    rate_limit: int = _DEFAULT_RATE_LIMIT,
    request_key_f: Callable[[Request], _RequestKeyT] = _DEFAULT_REQUEST_KEY,
) -> BatchManagerInterface[Request, Result]:
    """
    Construct an object which will automatically batch every call to `get_result` into
    periodic calls to `handler_function`. This function is the preferred way to make a one-off
    batch manager with minimal lines of code.

    If you find yourself calling this function with the same arguments multiple times, consider
    instead defining a subclass of `AbstractBatchManager`. This is functionally equivalent to
    calling `make_batch_manager` with the same arguments, but the code is cleaner.

    The returned BatchManagerInterface is implemented for asyncio, but is NOT GENERALLY
    THREAD-SAFE! That is, it must not be shared between manually accessed threads.

    :param handler_function: function to handle multiple requests at once and return the results
    as pairs of (request, result)
    :param rate_limit: maximum number of times `handler_function` will be called per second
    :param request_key_f: function this manager should use to uniquely identify requests

    :return: an instance of a `BatchManagerInterface` using `handler_function` to get results
    """
    return _BatchManagerImplementation(handler_function, rate_limit, request_key_f)
Construct an object which will automatically batch every call to `get_result` into periodic calls to `handler_function`. This function is the preferred way to make a one-off batch manager with minimal lines of code. If you find yourself calling this function with the same arguments multiple times, consider instead defining a subclass of `AbstractBatchManager`. This is functionally equivalent to calling `make_batch_manager` with the same arguments, but the code is cleaner. The returned BatchManagerInterface is implemented for asyncio, but is NOT GENERALLY THREAD-SAFE! That is, it must not be shared between manually accessed threads. :param handler_function: function to handle multiple requests at once and return the results as pairs of (request, result) :param rate_limit: maximum number of times `handler_function` will be called per second :param request_key_f: function this manager should use to uniquely identify requests :return: an instance of a `BatchManagerInterface` using `handler_function` to get results
15,370
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings

import torch
from pytorch_lightning import seed_everything

import platform


def check_and_warn_input_range(tensor, min_value, max_value, name):
    """Emit a warning (without raising) when *tensor* holds values outside
    the inclusive [min_value, max_value] range."""
    actual_min = tensor.min()
    actual_max = tensor.max()
    in_range = min_value <= actual_min and actual_max <= max_value
    if not in_range:
        warnings.warn(f"{name} must be in {min_value}..{max_value} range, but it ranges {actual_min}..{actual_max}")
null
15,371
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings

import torch
from pytorch_lightning import seed_everything

import platform


def sum_dict_with_prefix(target, cur_dict, prefix, default=0):
    """Accumulate every value of *cur_dict* into *target* under `prefix + key`,
    starting missing keys at *default*."""
    for key, value in cur_dict.items():
        prefixed_key = prefix + key
        target[prefixed_key] = target.get(prefixed_key, default) + value


def average_dicts(dict_list):
    """Element-wise average over a sequence of dicts sharing (some) keys.

    The denominator starts at a small epsilon, which keeps the division
    defined when *dict_list* is empty (and slightly biases averages low).
    """
    result = {}
    norm = 1e-3
    for dct in dict_list:
        sum_dict_with_prefix(result, dct, '')
        norm += 1
    for key in list(result):
        result[key] /= norm
    return result
null
15,372
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings

import torch
from pytorch_lightning import seed_everything

import platform


def add_prefix_to_keys(dct, prefix):
    """Return a new dict with every key of *dct* prefixed by *prefix*."""
    renamed = {}
    for key, value in dct.items():
        renamed[prefix + key] = value
    return renamed
null
15,373
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings

import torch
from pytorch_lightning import seed_everything

import platform


def set_requires_grad(module, value):
    """Enable or disable gradient tracking on every parameter of *module*."""
    for parameter in module.parameters():
        parameter.requires_grad = value
null
15,374
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings

import torch
from pytorch_lightning import seed_everything

import platform


def flatten_dict(dct):
    """Flatten arbitrarily nested dicts into one level, joining nested keys
    (and tuple keys) with underscores."""
    flat = {}
    for key, value in dct.items():
        # Tuple keys collapse into a single underscore-joined string key.
        if isinstance(key, tuple):
            key = '_'.join(key)
        if not isinstance(value, dict):
            flat[key] = value
        else:
            for inner_key, inner_value in flatten_dict(value).items():
                flat[f'{key}_{inner_key}'] = inner_value
    return flat
null
15,375
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings

import torch
from pytorch_lightning import seed_everything

import platform


class LinearRamp:
    """Schedule that interpolates linearly from start_value to end_value
    over the iteration window [start_iter, end_iter], clamping outside it."""

    def __init__(self, start_value=0, end_value=1, start_iter=-1, end_iter=0):
        self.start_value = start_value
        self.end_value = end_value
        self.start_iter = start_iter
        self.end_iter = end_iter

    def __call__(self, i):
        # Clamp before the window, then after it (order matters for
        # degenerate configurations where start_iter > end_iter).
        if i < self.start_iter:
            return self.start_value
        if i >= self.end_iter:
            return self.end_value
        frac = (i - self.start_iter) / (self.end_iter - self.start_iter)
        return self.start_value * (1 - frac) + self.end_value * frac


class LadderRamp:
    """Piecewise-constant schedule: values[k] applies from start_iters[k-1]
    (inclusive) up to start_iters[k] (exclusive); values[0] applies before
    the first boundary."""

    def __init__(self, start_iters, values):
        self.start_iters = start_iters
        self.values = values
        # One more value than boundaries: N boundaries delimit N+1 steps.
        assert len(values) == len(start_iters) + 1, (len(values), len(start_iters))

    def __call__(self, i):
        return self.values[bisect.bisect_right(self.start_iters, i)]


def get_ramp(kind='ladder', **kwargs):
    """Factory for ramp schedules; *kind* is 'linear' or 'ladder'."""
    ramp_classes = {'linear': LinearRamp, 'ladder': LadderRamp}
    if kind not in ramp_classes:
        raise ValueError(f'Unexpected ramp kind: {kind}')
    return ramp_classes[kind](**kwargs)
null
15,376
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings

import torch
from pytorch_lightning import seed_everything

LOGGER = logging.getLogger(__name__)

import platform


def print_traceback_handler(sig, frame):
    """Signal handler: log the received signal and the current stack trace.

    :param sig: signal number that was delivered
    :param frame: current stack frame (unused; the full stack is formatted instead)
    """
    LOGGER.warning(f'Received signal {sig}')
    bt = ''.join(traceback.format_stack())
    LOGGER.warning(f'Requested stack trace:\n{bt}')


def register_debug_signal_handlers(sig=None, handler=print_traceback_handler):
    """Install *handler* for *sig* so a stack trace can be requested at runtime.

    :param sig: signal to hook; defaults to SIGUSR1 where the platform has it.
        The previous default of `sig=signal.SIGUSR1` was evaluated at import
        time and raised AttributeError on Windows (no SIGUSR1), so the default
        is now resolved lazily.
    :param handler: callable with the standard `(sig, frame)` signature
    """
    if sig is None:
        sig = getattr(signal, 'SIGUSR1', None)
    if sig is None:
        # Platform (e.g. Windows) has no SIGUSR1: degrade gracefully instead
        # of crashing, since this is a debugging convenience only.
        LOGGER.warning('SIGUSR1 is not available on this platform; not registering debug signal handler')
        return
    LOGGER.warning(f'Setting signal {sig} handler {handler}')
    signal.signal(sig, handler)
null
15,377
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings

import torch
from pytorch_lightning import seed_everything

import platform


def handle_deterministic_config(config):
    """Seed all RNGs from config['seed'] when a seed is configured.

    :param config: mapping-like config object (coerced via dict())
    :return: True when a seed was applied, False when none is configured
    """
    seed = dict(config).get('seed')
    deterministic = seed is not None
    if deterministic:
        seed_everything(seed)
    return deterministic
null
15,378
import bisect
import functools
import logging
import numbers
import os
import signal
import sys
import traceback
import warnings

import torch
from pytorch_lightning import seed_everything

import platform


def get_shape(t):
    """Recursively describe the shape of *t*: tensors become shape tuples,
    dicts/lists/tuples are mapped element-wise, plain numbers become their
    type. Raises ValueError for anything else."""
    if torch.is_tensor(t):
        return tuple(t.shape)
    if isinstance(t, dict):
        return {name: get_shape(value) for name, value in t.items()}
    if isinstance(t, (list, tuple)):
        return [get_shape(item) for item in t]
    if isinstance(t, numbers.Number):
        return type(t)
    raise ValueError('unexpected type {}'.format(type(t)))