diff --git a/source_code/SegMamba/.DS_Store b/source_code/SegMamba/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..012e2d62b0baf86b81f29caf4c5324e021192325 Binary files /dev/null and b/source_code/SegMamba/.DS_Store differ diff --git a/source_code/SegMamba/.gitignore b/source_code/SegMamba/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..68bc17f9ff2104a9d7b6777058bb4c343ca72609 --- /dev/null +++ b/source_code/SegMamba/.gitignore @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
+# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ diff --git a/source_code/SegMamba/0_inference.py b/source_code/SegMamba/0_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f2b4530214000a644360a860527648dce98546 --- /dev/null +++ b/source_code/SegMamba/0_inference.py @@ -0,0 +1,20 @@ + + +import torch +from model_segmamba.segmamba import SegMamba + +t1 = torch.rand(1, 4, 128, 128, 128).cuda() + + +model = SegMamba(in_chans=4, + out_chans=4, + depths=[2,2,2,2], + feat_size=[48, 96, 192, 384]).cuda() + +out = model(t1) + +print(out.shape) + + + + diff --git a/source_code/SegMamba/1_rename_mri_data.py b/source_code/SegMamba/1_rename_mri_data.py new file mode 100644 index 0000000000000000000000000000000000000000..060e44d3b4f0e62c464967e3e1aa38d93f52650d --- /dev/null +++ b/source_code/SegMamba/1_rename_mri_data.py @@ -0,0 +1,47 @@ + + + +import os + +def main(): + import argparse + + parser = argparse.ArgumentParser( + description="Rename BraTS2023 case files to short names (t1c/t1n/t2f/t2w/seg.nii.gz)." + ) + parser.add_argument( + "--data_dir", + type=str, + default="./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/", + help="BraTS2023 directory that contains case folders.", + ) + args = parser.parse_args() + + data_dir = args.data_dir + if not os.path.isdir(data_dir): + raise FileNotFoundError(f"data_dir not found: {data_dir}") + + all_cases = sorted(os.listdir(data_dir)) + for case_name in all_cases: + case_dir = os.path.join(data_dir, case_name) + if not os.path.isdir(case_dir): + continue + + for data_name in os.listdir(case_dir): + if "-" not in data_name: + continue + + new_name = data_name.split("-")[-1] + new_path = os.path.join(case_dir, new_name) + old_path = os.path.join(case_dir, data_name) + + if os.path.exists(new_path): + # already renamed (or conflict). Skip to be safe. 
+ continue + + os.rename(old_path, new_path) + print(f"{new_path} 命名成功") + + +if __name__ == "__main__": + main() diff --git a/source_code/SegMamba/2_preprocessing_mri.py b/source_code/SegMamba/2_preprocessing_mri.py new file mode 100644 index 0000000000000000000000000000000000000000..59a7ffcbc383e896ec67b60cadaa2bc9646e3eda --- /dev/null +++ b/source_code/SegMamba/2_preprocessing_mri.py @@ -0,0 +1,85 @@ + +from light_training.preprocessing.preprocessors.preprocessor_mri import MultiModalityPreprocessor +import argparse + +data_filename = ["t2w.nii.gz", + "t2f.nii.gz", + "t1n.nii.gz", + "t1c.nii.gz"] +seg_filename = "seg.nii.gz" + +def _parse_spacing(s: str): + parts = [p.strip() for p in s.split(",") if p.strip()] + if len(parts) != 3: + raise ValueError(f"output_spacing should be like '1,1,1', got: {s}") + return [float(parts[0]), float(parts[1]), float(parts[2])] + + +def main(): + parser = argparse.ArgumentParser(description="BraTS2023 preprocessing (resample/normalization/cropping).") + parser.add_argument( + "--base_dir", + type=str, + default="./data/raw_data/BraTS2023/", + help="Base directory that contains the BraTS2023 image_dir folder.", + ) + parser.add_argument( + "--image_dir", + type=str, + default="ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData", + help="Folder name under base_dir.", + ) + parser.add_argument( + "--output_dir", + type=str, + default="./data/fullres/train/", + help="Output directory for preprocessed npz/npy/pkl files.", + ) + parser.add_argument( + "--output_spacing", + type=str, + default="1,1,1", + help="Target spacing, e.g. 
'1,1,1'.", + ) + parser.add_argument( + "--num_processes", + type=int, + default=8, + help="Number of worker processes for preprocessing.", + ) + parser.add_argument( + "--only_plan", + action="store_true", + help="Only run planning (statistics) and exit.", + ) + parser.add_argument( + "--skip_plan", + action="store_true", + help="Skip planning step.", + ) + args = parser.parse_args() + + preprocessor = MultiModalityPreprocessor( + base_dir=args.base_dir, + image_dir=args.image_dir, + data_filenames=data_filename, + seg_filename=seg_filename, + ) + + if not args.skip_plan: + preprocessor.run_plan() + if args.only_plan: + return + + out_spacing = _parse_spacing(args.output_spacing) + preprocessor.run( + output_spacing=out_spacing, + output_dir=args.output_dir, + all_labels=[1, 2, 3], + num_processes=args.num_processes, + ) + + +if __name__ == "__main__": + main() + diff --git a/source_code/SegMamba/3_train.py b/source_code/SegMamba/3_train.py new file mode 100644 index 0000000000000000000000000000000000000000..f9bffdc45b201703ef70e015daa68ac42b9b8658 --- /dev/null +++ b/source_code/SegMamba/3_train.py @@ -0,0 +1,225 @@ +import numpy as np +from light_training.dataloading.dataset import get_train_val_test_loader_from_train +import torch +import torch.nn as nn +from monai.inferers import SlidingWindowInferer +from light_training.evaluation.metric import dice +from light_training.trainer import Trainer +from monai.utils import set_determinism +from light_training.utils.files_helper import save_new_model_and_delete_last +from monai.losses.dice import DiceLoss +set_determinism(123) +import os +import argparse + +def func(m, epochs): + return np.exp(-10*(1- m / epochs)**2) + +class BraTSTrainer(Trainer): + def __init__( + self, + env_type, + max_epochs, + batch_size, + device="cpu", + val_every=1, + num_gpus=1, + logdir="./logs/", + roi_size=(128, 128, 128), + augmentation=True, + train_process=18, + master_ip='localhost', + master_port=17750, + 
training_script="train.py", + ): + super().__init__( + env_type, + max_epochs, + batch_size, + device, + val_every, + num_gpus, + logdir, + master_ip, + master_port, + training_script, + train_process=train_process, + ) + self.window_infer = SlidingWindowInferer(roi_size=list(roi_size), sw_batch_size=1, overlap=0.5) + self.augmentation = augmentation + from model_segmamba.segmamba import SegMamba + + self.model = SegMamba(in_chans=4, + out_chans=4, + depths=[2,2,2,2], + feat_size=[48, 96, 192, 384]) + + self.patch_size = list(roi_size) + self.best_mean_dice = 0.0 + self.ce = nn.CrossEntropyLoss() + self.mse = nn.MSELoss() + self.train_process = train_process + self.model_save_path = os.path.join(logdir, "model") + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5, + momentum=0.99, nesterov=True) + + self.scheduler_type = "poly" + self.cross = nn.CrossEntropyLoss() + + def training_step(self, batch): + image, label = self.get_input(batch) + + pred = self.model(image) + + loss = self.cross(pred, label) + + self.log("training_loss", loss, step=self.global_step) + + return loss + + def convert_labels(self, labels): + ## TC, WT and ET + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + return torch.cat(result, dim=1).float() + + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + + label = label[:, 0].long() + return image, label + + def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]): + if pred.sum() > 0 and gt.sum() > 0: + d = dice(pred, gt) + return np.array([d, 50]) + + elif gt.sum() == 0 and pred.sum() == 0: + return np.array([1.0, 50]) + + else: + return np.array([0.0, 50]) + + def validation_step(self, batch): + image, label = self.get_input(batch) + + output = self.model(image) + + output = output.argmax(dim=1) + + output = output[:, None] + output = self.convert_labels(output) + + label = label[:, None] + label = 
self.convert_labels(label) + + output = output.cpu().numpy() + target = label.cpu().numpy() + + dices = [] + + c = 3 + for i in range(0, c): + pred_c = output[:, i] + target_c = target[:, i] + + cal_dice, _ = self.cal_metric(target_c, pred_c) + dices.append(cal_dice) + + return dices + + def validation_end(self, val_outputs): + dices = val_outputs + + tc, wt, et = dices[0].mean(), dices[1].mean(), dices[2].mean() + + print(f"dices is {tc, wt, et}") + + mean_dice = (tc + wt + et) / 3 + + self.log("tc", tc, step=self.epoch) + self.log("wt", wt, step=self.epoch) + self.log("et", et, step=self.epoch) + + self.log("mean_dice", mean_dice, step=self.epoch) + + if mean_dice > self.best_mean_dice: + self.best_mean_dice = mean_dice + save_new_model_and_delete_last(self.model, + os.path.join(self.model_save_path, + f"best_model_{mean_dice:.4f}.pt"), + delete_symbol="best_model") + + save_new_model_and_delete_last(self.model, + os.path.join(self.model_save_path, + f"final_model_{mean_dice:.4f}.pt"), + delete_symbol="final_model") + + + if (self.epoch + 1) % 100 == 0: + torch.save(self.model.state_dict(), os.path.join(self.model_save_path, f"tmp_model_ep{self.epoch}_{mean_dice:.4f}.pt")) + + print(f"mean_dice is {mean_dice}") + +def _parse_csv_ints(s: str, n: int): + parts = [p.strip() for p in s.split(",") if p.strip()] + if len(parts) != n: + raise ValueError(f"expect {n} integers like '128,128,128', got: {s}") + return [int(x) for x in parts] + + +def _parse_augmentation(s: str): + s = str(s).strip().lower() + if s in {"true", "1", "yes", "y"}: + return True + if s in {"false", "0", "no", "n"}: + return False + # allow special modes used by Trainer.get_multi_processor_loader + # e.g. 
nomirror / onlymirror / onlyspatial + return s + + +def main(): + parser = argparse.ArgumentParser(description="SegMamba BraTS2023 training.") + parser.add_argument("--data_dir", type=str, default="./data/fullres/train", help="Preprocessed data directory (contains *.npz).") + parser.add_argument("--logdir", type=str, default="./logs/segmamba", help="Log/checkpoint directory.") + parser.add_argument("--env", type=str, default="pytorch", choices=["pytorch", "DDP", "ddp"], help="Training environment.") + parser.add_argument("--max_epoch", type=int, default=1000) + parser.add_argument("--batch_size", type=int, default=2) + parser.add_argument("--val_every", type=int, default=2) + parser.add_argument("--num_gpus", type=int, default=1) + parser.add_argument("--device", type=str, default="cuda:0", help="Device for single GPU; DDP will use LOCAL_RANK.") + parser.add_argument("--roi_size", type=str, default="128,128,128", help="Patch/ROI size, e.g. '128,128,128'.") + parser.add_argument("--augmentation", type=str, default="true", help="true/false/nomirror/onlymirror/onlyspatial") + parser.add_argument("--train_process", type=int, default=18, help="Number of augmentation worker processes (per rank).") + parser.add_argument("--master_port", type=int, default=17759) + # torchrun launcher will append this; ignore it here (Trainer will read it too) + parser.add_argument("--not_call_launch", action="store_true", help=argparse.SUPPRESS) + args, _ = parser.parse_known_args() + + roi_size = _parse_csv_ints(args.roi_size, 3) + augmentation = _parse_augmentation(args.augmentation) + + trainer = BraTSTrainer( + env_type=args.env, + max_epochs=args.max_epoch, + batch_size=args.batch_size, + device=args.device, + logdir=args.logdir, + val_every=args.val_every, + num_gpus=args.num_gpus, + master_port=args.master_port, + training_script=__file__, + roi_size=roi_size, + augmentation=augmentation, + train_process=args.train_process, + ) + + train_ds, val_ds, test_ds = 
get_train_val_test_loader_from_train(args.data_dir) + trainer.train(train_dataset=train_ds, val_dataset=val_ds) + + +if __name__ == "__main__": + main() diff --git a/source_code/SegMamba/4_predict.py b/source_code/SegMamba/4_predict.py new file mode 100644 index 0000000000000000000000000000000000000000..d8940186e061ed421ab1021093fa822fc79c0a68 --- /dev/null +++ b/source_code/SegMamba/4_predict.py @@ -0,0 +1,261 @@ +import argparse +import glob +import os +import re +import sys + +import numpy as np +import torch +import SimpleITK as sitk + +# Prefer pip-installed MONAI over the local monai/ folder. +os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1") +_repo_root = os.path.abspath(os.path.dirname(__file__)) +if "" in sys.path: + sys.path.remove("") +if _repo_root in sys.path: + sys.path.remove(_repo_root) +import monai # noqa: E402 +sys.path.insert(0, _repo_root) + +from monai.inferers import SlidingWindowInferer +from monai.utils import set_determinism + +from light_training.dataloading.dataset import MedicalDataset, get_train_val_test_loader_from_train +from light_training.evaluation.metric import dice +from light_training.prediction import Predictor +from light_training.trainer import Trainer + +set_determinism(123) + + +def _parse_csv_ints(s: str, n: int): + parts = [p.strip() for p in str(s).split(",") if p.strip()] + if len(parts) != n: + raise ValueError(f"expect {n} integers like '128,128,128', got: {s}") + return [int(x) for x in parts] + + +def _parse_csv_floats(s: str, n: int): + parts = [p.strip() for p in str(s).split(",") if p.strip()] + if len(parts) != n: + raise ValueError(f"expect {n} floats like '1,1,1', got: {s}") + return [float(x) for x in parts] + + +def _find_ckpt_from_logdir(logdir: str, prefer: str = "best") -> str: + model_dir = os.path.join(logdir, "model") + if not os.path.isdir(model_dir): + raise FileNotFoundError(f"model dir not found: {model_dir}") + + best = sorted(glob.glob(os.path.join(model_dir, "best_model_*.pt"))) + final = 
sorted(glob.glob(os.path.join(model_dir, "final_model_*.pt"))) + tmp = sorted(glob.glob(os.path.join(model_dir, "tmp_model_ep*.pt"))) + any_pt = sorted(glob.glob(os.path.join(model_dir, "*.pt"))) + + def pick_by_score(paths): + # filenames like best_model_0.9038.pt / final_model_0.9038.pt + scored = [] + for p in paths: + m = re.search(r"_(\d+\\.?\\d*)\\.pt$", os.path.basename(p)) + if m is None: + continue + try: + scored.append((float(m.group(1)), p)) + except ValueError: + continue + if scored: + scored.sort(key=lambda x: x[0], reverse=True) + return scored[0][1] + return None + + if prefer == "best": + picked = pick_by_score(best) or (best[-1] if best else None) + if picked: + return picked + if prefer in {"best", "final"}: + picked = pick_by_score(final) or (final[-1] if final else None) + if picked: + return picked + if prefer in {"best", "final", "latest"}: + if tmp: + tmp.sort(key=lambda p: os.path.getmtime(p), reverse=True) + return tmp[0] + if any_pt: + any_pt.sort(key=lambda p: os.path.getmtime(p), reverse=True) + return any_pt[0] + + raise FileNotFoundError(f"no checkpoint found under: {model_dir}") + + +class BraTSTrainer(Trainer): + def __init__( + self, + ckpt_path: str, + save_path: str, + patch_size, + sw_batch_size: int = 2, + overlap: float = 0.5, + mirror_axes=(0, 1, 2), + raw_spacing=(1.0, 1.0, 1.0), + device="cuda:0", + print_dice: bool = False, + ): + super().__init__( + env_type="pytorch", + max_epochs=1, + batch_size=1, + device=device, + val_every=1, + num_gpus=1, + logdir="", + master_port=17751, + training_script=__file__, + ) + + self.patch_size = patch_size + self.augmentation = False + self.print_dice = print_dice + self.save_path = save_path + self.raw_spacing = raw_spacing + + from model_segmamba.segmamba import SegMamba + + self.model = SegMamba( + in_chans=4, + out_chans=4, + depths=[2, 2, 2, 2], + feat_size=[48, 96, 192, 384], + ) + self.load_state_dict(ckpt_path, strict=True) + self.model.eval() + + window_infer = 
SlidingWindowInferer( + roi_size=patch_size, + sw_batch_size=sw_batch_size, + overlap=overlap, + progress=True, + mode="gaussian", + ) + + self.predictor = Predictor( + window_infer=window_infer, + mirror_axes=list(mirror_axes) if mirror_axes is not None else None, + ) + + os.makedirs(self.save_path, exist_ok=True) + + def convert_labels(self, labels): + ## TC, WT and ET + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + return torch.cat(result, dim=1).float() + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + properties = batch["properties"] + label = self.convert_labels(label) + + return image, label, properties + + def validation_step(self, batch): + image, label, properties = self.get_input(batch) + # The preprocessed datasets used in many setups (including /data/yty/brats23_processed) + # do NOT contain cropping/resample metadata (shape_before_cropping, bbox_used_for_cropping, ...), + # so we directly save predictions in the same (D,H,W) space as the inputs. + # + # We save as a TRUE 4D NIfTI (t,z,y,x) with t=3 (TC/WT/ET) so that + # `sitk.GetArrayFromImage` returns shape (3, D, H, W), matching `5_compute_metrics.py`. 
+ + logits = self.predictor.maybe_mirror_and_predict(image, self.model, device=self.device) # (1,4,D,H,W) on CPU + pred_lbl = logits.argmax(dim=1) # (1,D,H,W) + pred_3c = self.convert_labels(pred_lbl[:, None])[0].cpu().numpy().astype(np.uint8) # (3,D,H,W) + + if self.print_dice: + gt_3c = label[0].cpu().numpy() + dices = [dice(pred_3c[i], gt_3c[i]) for i in range(3)] + print(dices) + + case_name = properties.get("name", "") + if isinstance(case_name, (list, tuple)) and len(case_name) > 0: + case_name = case_name[0] + + out_path = os.path.join(self.save_path, f"{case_name}.nii.gz") + pred_itk = sitk.GetImageFromArray(pred_3c, isVector=False) + pred_itk.SetSpacing((float(self.raw_spacing[0]), float(self.raw_spacing[1]), float(self.raw_spacing[2]), 1.0)) + sitk.WriteImage(pred_itk, out_path) + print(f"saved: {out_path}") + + return 0 + + def convert_labels_dim0(self, labels): + ## TC, WT and ET + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + return torch.cat(result, dim=0).float() + + +def main(): + parser = argparse.ArgumentParser(description="SegMamba inference/prediction for BraTS2023.") + parser.add_argument("--data_dir", type=str, default="./data/fullres/train", help="Preprocessed data directory (contains *.npz).") + parser.add_argument("--split", type=str, default="test", choices=["train", "val", "test", "all"]) + parser.add_argument("--train_rate", type=float, default=0.7) + parser.add_argument("--val_rate", type=float, default=0.1) + parser.add_argument("--test_rate", type=float, default=0.2) + parser.add_argument("--seed", type=int, default=42) + + parser.add_argument("--ckpt", type=str, default="", help="Checkpoint path (*.pt). 
If empty, will search under --logdir/model.") + parser.add_argument("--logdir", type=str, default="./logs/segmamba", help="Training logdir to locate checkpoints when --ckpt is empty.") + parser.add_argument("--ckpt_prefer", type=str, default="best", choices=["best", "final", "latest"]) + + parser.add_argument("--save_dir", type=str, default="./prediction_results/segmamba", help="Directory to save prediction nii.gz.") + parser.add_argument("--device", type=str, default="cuda:0") + parser.add_argument("--patch_size", type=str, default="128,128,128") + parser.add_argument("--sw_batch_size", type=int, default=2) + parser.add_argument("--overlap", type=float, default=0.5) + parser.add_argument("--raw_spacing", type=str, default="1,1,1", help="Spacing used when saving NIfTI, e.g. '1,1,1'.") + parser.add_argument("--no_mirror", action="store_true", help="Disable mirror TTA.") + parser.add_argument("--print_dice", action="store_true", help="Print dice against preprocessed seg (if available).") + args = parser.parse_args() + + patch_size = _parse_csv_ints(args.patch_size, 3) + raw_spacing = _parse_csv_floats(args.raw_spacing, 3) + + ckpt_path = args.ckpt.strip() + if ckpt_path == "": + ckpt_path = _find_ckpt_from_logdir(args.logdir, prefer=args.ckpt_prefer) + if not os.path.isfile(ckpt_path): + raise FileNotFoundError(f"checkpoint not found: {ckpt_path}") + print(f"Using checkpoint: {ckpt_path}") + + trainer = BraTSTrainer( + ckpt_path=ckpt_path, + save_path=args.save_dir, + patch_size=patch_size, + sw_batch_size=args.sw_batch_size, + overlap=args.overlap, + mirror_axes=None if args.no_mirror else (0, 1, 2), + raw_spacing=raw_spacing, + device=args.device, + print_dice=args.print_dice, + ) + + if args.split == "all": + all_paths = sorted(glob.glob(os.path.join(args.data_dir, "*.npz"))) + ds = MedicalDataset(all_paths, test=False) + else: + train_ds, val_ds, test_ds = get_train_val_test_loader_from_train( + args.data_dir, + train_rate=args.train_rate, + 
val_rate=args.val_rate, + test_rate=args.test_rate, + seed=args.seed, + ) + ds = {"train": train_ds, "val": val_ds, "test": test_ds}[args.split] + + trainer.validation_single_gpu(ds) + + +if __name__ == "__main__": + main() diff --git a/source_code/SegMamba/5_compute_metrics.py b/source_code/SegMamba/5_compute_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..159712e1979dd50ac508f2868e77687027459a9f --- /dev/null +++ b/source_code/SegMamba/5_compute_metrics.py @@ -0,0 +1,175 @@ +import argparse +import glob +import json +import os +import sys + +import numpy as np +import SimpleITK as sitk +import torch +from medpy import metric + +# Prefer pip-installed MONAI over the local monai/ folder. +os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1") +_repo_root = os.path.abspath(os.path.dirname(__file__)) +if "" in sys.path: + sys.path.remove("") +if _repo_root in sys.path: + sys.path.remove(_repo_root) +import monai # noqa: E402 +sys.path.insert(0, _repo_root) + +from monai.utils import set_determinism +from tqdm import tqdm + +from light_training.dataloading.dataset import MedicalDataset, get_train_val_test_loader_from_train + +set_determinism(123) + +def cal_metric(gt, pred, voxel_spacing): + if pred.sum() > 0 and gt.sum() > 0: + dice = metric.binary.dc(pred, gt) + hd95 = metric.binary.hd95(pred, gt, voxelspacing=voxel_spacing) + return np.array([dice, hd95]) + else: + return np.array([0.0, 50]) + +def each_cases_metric(gt, pred, voxel_spacing): + classes_num = 3 + class_wise_metric = np.zeros((classes_num, 2)) + for cls in range(0, classes_num): + class_wise_metric[cls, ...] 
= cal_metric(pred[cls], gt[cls], voxel_spacing) + print(class_wise_metric) + return class_wise_metric + +def convert_labels(labels): + ## TC, WT and ET + labels = labels.unsqueeze(dim=0) + + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + return torch.cat(result, dim=0).float() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Compute Dice/HD95 for BraTS2023 (TC/WT/ET) from saved predictions.") + parser.add_argument("--pred_name", required=True, type=str, help="Prediction folder name under results_root.") + parser.add_argument("--results_root", type=str, default="prediction_results") + parser.add_argument("--data_dir", type=str, default="./data/fullres/train", help="Preprocessed data directory (contains *.npz).") + parser.add_argument( + "--gt_source", + type=str, + default="processed", + choices=["processed", "raw"], + help="GT source. 'processed' uses *_seg.npy from preprocessed dataset (recommended for /data/yty/brats23_processed). " + "'raw' uses seg.nii.gz from --raw_data_dir.", + ) + parser.add_argument( + "--raw_data_dir", + type=str, + default="./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/", + help="Raw BraTS2023 training data directory that contains case folders with seg.nii.gz.", + ) + parser.add_argument("--split", type=str, default="test", choices=["train", "val", "test", "all"]) + parser.add_argument("--train_rate", type=float, default=0.7) + parser.add_argument("--val_rate", type=float, default=0.1) + parser.add_argument("--test_rate", type=float, default=0.2) + parser.add_argument("--seed", type=int, default=42) + parser.add_argument("--voxel_spacing", type=str, default="1,1,1", help="Voxel spacing for HD95, e.g. 
'1,1,1'.") + args = parser.parse_args() + + voxel_spacing = [float(x) for x in args.voxel_spacing.split(",")] + + if args.split == "all": + all_paths = sorted(glob.glob(os.path.join(args.data_dir, "*.npz"))) + ds = MedicalDataset(all_paths, test=False) + else: + train_ds, val_ds, test_ds = get_train_val_test_loader_from_train( + args.data_dir, + train_rate=args.train_rate, + val_rate=args.val_rate, + test_rate=args.test_rate, + seed=args.seed, + ) + ds = {"train": train_ds, "val": val_ds, "test": test_ds}[args.split] + + print(f"Evaluating {len(ds)} cases from split={args.split}") + + all_results = np.zeros((len(ds), 3, 2), dtype=np.float32) + + for ind, batch in enumerate(tqdm(ds, total=len(ds))): + properties = batch["properties"] + case_name = properties["name"] + pred_path = os.path.join(args.results_root, args.pred_name, f"{case_name}.nii.gz") + if not os.path.isfile(pred_path): + raise FileNotFoundError(f"Prediction not found: {pred_path}") + + if args.gt_source == "raw": + gt_path = os.path.join(args.raw_data_dir, case_name, "seg.nii.gz") + if not os.path.isfile(gt_path): + raise FileNotFoundError(f"GT not found: {gt_path}") + gt_itk = sitk.ReadImage(gt_path) + gt_array = sitk.GetArrayFromImage(gt_itk).astype(np.int32) + gt_array = torch.from_numpy(gt_array) + gt_array = convert_labels(gt_array).numpy() + else: + # preprocessed GT (same space as saved predictions from 4_predict.py) + if "seg" not in batch: + raise KeyError("gt_source=processed requires 'seg' in dataset samples, but it's missing.") + seg = batch["seg"] # expected shape: (1, D, H, W) + if isinstance(seg, np.ndarray): + seg_t = torch.from_numpy(seg) + else: + # np.memmap is also an ndarray subclass, keep it generic + seg_t = torch.from_numpy(np.asarray(seg)) + if seg_t.ndim == 4 and seg_t.shape[0] == 1: + seg_t = seg_t[0] + gt_array = convert_labels(seg_t).numpy() + + pred_itk = sitk.ReadImage(pred_path) + pred_array = sitk.GetArrayFromImage(pred_itk) + + m = each_cases_metric(gt_array, 
pred_array, voxel_spacing) + all_results[ind, ...] = m + + out_dir = os.path.join(args.results_root, "result_metrics") + os.makedirs(out_dir, exist_ok=True) + out_path = os.path.join(out_dir, f"{args.pred_name}.npy") + np.save(out_path, all_results) + + result = np.load(out_path) + mean_per_class = result.mean(axis=0) + std_per_class = result.std(axis=0) + mean_dice = float(mean_per_class[:, 0].mean()) + mean_hd95 = float(mean_per_class[:, 1].mean()) + + summary = { + "pred_name": args.pred_name, + "results_root": args.results_root, + "data_dir": args.data_dir, + "split": args.split, + "gt_source": args.gt_source, + "raw_data_dir": args.raw_data_dir if args.gt_source == "raw" else None, + "voxel_spacing": voxel_spacing, + "num_cases": int(result.shape[0]), + "mean_per_class": mean_per_class.tolist(), # [TC, WT, ET] x [dice, hd95] + "std_per_class": std_per_class.tolist(), + "mean_dice": mean_dice, + "mean_hd95": mean_hd95, + } + summary_path = os.path.join(out_dir, f"{args.pred_name}_summary.json") + with open(summary_path, "w") as f: + json.dump(summary, f, indent=2) + + print("saved:", out_path) + print("summary:", summary_path) + print(result.shape) + print("mean(TC/WT/ET) [dice, hd95]:") + print(mean_per_class) + print("std(TC/WT/ET) [dice, hd95]:") + print(std_per_class) + print("mean dice:", mean_dice) + print("mean hd95:", mean_hd95) + + + diff --git a/source_code/SegMamba/6_visualize_predictions.py b/source_code/SegMamba/6_visualize_predictions.py new file mode 100644 index 0000000000000000000000000000000000000000..a32c65b4906ce40df3890e657b7f4099f73dba65 --- /dev/null +++ b/source_code/SegMamba/6_visualize_predictions.py @@ -0,0 +1,219 @@ +import argparse +import os +import sys + +import numpy as np +import SimpleITK as sitk + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +from matplotlib.patches import Patch + + +MODALITY_MAP = { + "t2w": 0, + "t2f": 1, + "t1n": 2, + "t1c": 3, +} + + +def _parse_cases(s: str): + if not s: + 
return [] + return [p.strip() for p in s.split(",") if p.strip()] + + +def _normalize_slice(img2d: np.ndarray) -> np.ndarray: + p1, p99 = np.percentile(img2d, (1, 99)) + if p99 <= p1: + return np.zeros_like(img2d, dtype=np.float32) + img = (img2d - p1) / (p99 - p1) + return np.clip(img, 0.0, 1.0).astype(np.float32) + + +def _pred_to_three_channels(pred: np.ndarray) -> np.ndarray: + # Accept either 4D (3, D, H, W) or 3D label map (D, H, W) with labels 0-3. + if pred.ndim == 4: + return pred + if pred.ndim != 3: + raise ValueError(f"unexpected pred shape: {pred.shape}") + # label -> TC/WT/ET + labels = pred + tc = (labels == 1) | (labels == 3) + wt = (labels == 1) | (labels == 2) | (labels == 3) + et = labels == 3 + return np.stack([tc, wt, et], axis=0).astype(np.uint8) + + +def _pick_slices(mask_3c: np.ndarray, num_slices: int) -> list[int]: + # mask_3c: (3, D, H, W) + mask_sum = mask_3c.sum(axis=0) # (D, H, W) + per_slice = mask_sum.reshape(mask_sum.shape[0], -1).sum(axis=1) + if per_slice.max() == 0: + # fallback: evenly spaced slices + return sorted(set(np.linspace(0, mask_sum.shape[0] - 1, num_slices, dtype=int).tolist())) + idx = np.argsort(per_slice)[::-1] + chosen = [] + for i in idx: + if len(chosen) >= num_slices: + break + chosen.append(int(i)) + return sorted(chosen) + + +def _overlay_mask(gray: np.ndarray, masks: list[np.ndarray]) -> np.ndarray: + # gray: (H, W), masks: [tc, wt, et] + rgb = np.stack([gray, gray, gray], axis=-1) + colors = [ + (1.0, 0.0, 0.0), # TC - red + (0.0, 1.0, 0.0), # WT - green + (1.0, 1.0, 0.0), # ET - yellow + ] + alphas = [0.5, 0.25, 0.5] + for mask, color, alpha in zip(masks, colors, alphas): + m = mask.astype(bool) + if m.any(): + rgb[m] = rgb[m] * (1.0 - alpha) + np.array(color) * alpha + return rgb + + +def _load_processed_image(processed_dir: str, case_name: str, modality: int) -> np.ndarray: + img_path = os.path.join(processed_dir, f"{case_name}.npy") + if not os.path.isfile(img_path): + raise 
FileNotFoundError(f"processed image not found: {img_path}") + arr = np.load(img_path, mmap_mode="r") + if arr.ndim != 4: + raise ValueError(f"unexpected image shape: {arr.shape}") + return np.asarray(arr[modality], dtype=np.float32) # (D, H, W) + + +def _load_prediction(pred_dir: str, case_name: str) -> np.ndarray: + pred_path = os.path.join(pred_dir, f"{case_name}.nii.gz") + if not os.path.isfile(pred_path): + raise FileNotFoundError(f"prediction not found: {pred_path}") + pred_itk = sitk.ReadImage(pred_path) + pred_arr = sitk.GetArrayFromImage(pred_itk) + return _pred_to_three_channels(np.asarray(pred_arr)) + +def _load_gt(processed_dir: str, case_name: str) -> np.ndarray: + seg_path = os.path.join(processed_dir, f"{case_name}_seg.npy") + if not os.path.isfile(seg_path): + raise FileNotFoundError(f"gt seg not found: {seg_path}") + seg = np.load(seg_path, mmap_mode="r") + seg = np.asarray(seg) + if seg.ndim == 4 and seg.shape[0] == 1: + seg = seg[0] + return _pred_to_three_channels(seg) + + +def visualize_case(case_name: str, pred_dir: str, processed_dir: str, modality: int, num_slices: int, out_dir: str, show_gt: bool = True): + img = _load_processed_image(processed_dir, case_name, modality) # (D, H, W) + pred = _load_prediction(pred_dir, case_name) # (3, D, H, W) + gt = None + if show_gt: + try: + gt = _load_gt(processed_dir, case_name) + except FileNotFoundError: + gt = None + + if pred.shape[1:] != img.shape: + raise ValueError(f"shape mismatch for {case_name}: img={img.shape}, pred={pred.shape}") + if gt is not None and gt.shape[1:] != img.shape: + raise ValueError(f"shape mismatch for {case_name}: img={img.shape}, gt={gt.shape}") + + slice_ids = _pick_slices(pred, num_slices) + + ncols = 3 if gt is not None else 2 + fig, axes = plt.subplots(nrows=len(slice_ids), ncols=ncols, figsize=(4 * ncols, 3 * len(slice_ids))) + if len(slice_ids) == 1: + axes = np.array([axes]) + if ncols == 2 and axes.ndim == 1: + axes = axes[None, :] + + for row, z in 
enumerate(slice_ids): + img2d = img[z] + gray = _normalize_slice(img2d) + tc = pred[0, z] + wt = pred[1, z] + et = pred[2, z] + + axes[row, 0].imshow(gray, cmap="gray") + axes[row, 0].set_title(f"{case_name} z={z} (raw)") + axes[row, 0].axis("off") + + overlay = _overlay_mask(gray, [tc, wt, et]) + axes[row, 1].imshow(overlay) + axes[row, 1].set_title(f"{case_name} z={z} (pred)") + axes[row, 1].axis("off") + + if gt is not None: + gt_tc = gt[0, z] + gt_wt = gt[1, z] + gt_et = gt[2, z] + gt_overlay = _overlay_mask(gray, [gt_tc, gt_wt, gt_et]) + axes[row, 2].imshow(gt_overlay) + axes[row, 2].set_title(f"{case_name} z={z} (gt)") + axes[row, 2].axis("off") + + legend = [ + Patch(color=(1.0, 0.0, 0.0), label="TC"), + Patch(color=(0.0, 1.0, 0.0), label="WT"), + Patch(color=(1.0, 1.0, 0.0), label="ET"), + ] + fig.legend(handles=legend, loc="lower center", ncol=3) + fig.tight_layout(rect=[0, 0.05, 1, 1]) + + os.makedirs(out_dir, exist_ok=True) + out_path = os.path.join(out_dir, f"{case_name}_overlay.png") + fig.savefig(out_path, dpi=150) + plt.close(fig) + + return out_path + + +def main(): + parser = argparse.ArgumentParser(description="Visualize SegMamba predictions (overlay on processed images).") + parser.add_argument("--pred_dir", type=str, required=True, help="Prediction folder containing case_name.nii.gz.") + parser.add_argument("--processed_dir", type=str, required=True, help="Processed data dir containing case_name.npy.") + parser.add_argument("--out_dir", type=str, default="./prediction_results/visualizations") + parser.add_argument("--modality", type=str, default="t2f", help="t2w|t2f|t1n|t1c or an int index.") + parser.add_argument("--num_cases", type=int, default=5) + parser.add_argument("--num_slices", type=int, default=3) + parser.add_argument("--cases", type=str, default="", help="Comma-separated case names to visualize.") + parser.add_argument("--no_gt", action="store_true", help="Disable GT overlay (prediction only).") + args = parser.parse_args() + + if 
args.modality.isdigit(): + modality = int(args.modality) + else: + modality = MODALITY_MAP.get(args.modality.lower(), 1) + if modality < 0 or modality > 3: + raise ValueError("modality index must be 0..3") + + cases = _parse_cases(args.cases) + if not cases: + pred_files = sorted([f for f in os.listdir(args.pred_dir) if f.endswith(".nii.gz")]) + cases = [os.path.splitext(os.path.splitext(f)[0])[0] for f in pred_files][: args.num_cases] + + if not cases: + print("No cases found.") + sys.exit(0) + + print(f"Visualizing {len(cases)} cases, modality={modality}") + for case_name in cases: + out_path = visualize_case( + case_name=case_name, + pred_dir=args.pred_dir, + processed_dir=args.processed_dir, + modality=modality, + num_slices=args.num_slices, + out_dir=args.out_dir, + show_gt=not args.no_gt, + ) + print(f"saved: {out_path}") + + +if __name__ == "__main__": + main() diff --git a/source_code/SegMamba/README.md b/source_code/SegMamba/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7d6f555a49e21b9d8ab942bb58545787229d64aa --- /dev/null +++ b/source_code/SegMamba/README.md @@ -0,0 +1,132 @@ +# SegMamba + +**Recent news: If you are interested in the research about vision language models, please refers to the latest work: https://github.com/MrGiovanni/RadGPT (ICCV2025)** + +**Now we have open-sourced the pre-processing, training, inference, and metrics computation codes.** + +SegMamba: Long-range Sequential Modeling Mamba For 3D Medical Image Segmentation + +[https://arxiv.org/abs/2401.13560](https://arxiv.org/abs/2401.13560) + +![](images/method_figure.jpg) + +![](images/modules.jpg) + +Our advantage in speed and memory. +![](images/segmamba_ablation.jpg) + +## Contact +If you have any questions about our project, please feel free to contact us by email at zxing565@connect.hkust-gz.edu.cn or via WeChat at 18340097191. Furthermore, the data underlying this article will be shared on reasonable request to gaof57@mail.sysu.edu.cn. 
+ +## Environment install +Clone this repository and navigate to the root directory of the project. + +```bash +git clone https://github.com/ge-xing/SegMamba.git + +cd SegMamba +``` +### Install causal-conv1d + +```bash +cd causal-conv1d + +python setup.py install +``` + +### Install mamba + +```bash +cd mamba + +python setup.py install +``` + +### Install monai + +```bash +pip install monai +``` + +## Simple test + +```bash +python 0_inference.py +``` + +## Preprocessing, training, testing, inference, and metrics computation + +### Data downloading + +Data is from [https://arxiv.org/abs/2305.17033](https://arxiv.org/abs/2305.17033) + +Download from Baidu Disk [https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22提取码ty22](https://pan.baidu.com/s/1C0FUHdDtWNaYWLtDDP9TnA?pwd=ty22) + +Download from OneDrive [https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B](https://hkustgz-my.sharepoint.com/:f:/g/personal/zxing565_connect_hkust-gz_edu_cn/EqqaINbHRxREuIj0XGicY2EBv8hjwEFKgFOhF_Ub0mvENw?e=yTpE9B) + +### Preprocessing +In my setting, the data directory of BraTS2023 is : "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/" + +First, we need to run the rename process. + +```bash +python 1_rename_mri_data.py +``` + +Then, we need to run the pre-processing code to do resample, normalization, and crop processes. + +```bash +python 2_preprocessing_mri.py +``` + +After pre-processing, the data structure will be in this format: + +![](images/data_structure.jpg) +### Training + +When the pre-processing process is done, we can train our model. + +We mainly use the pre-processde data from last step: **data_dir = "./data/fullres/train"** + + +```bash +python 3_train.py +``` + +The training logs and checkpoints are saved in: +**logdir = f"./logs/segmamba"** + + + + +### Inference + +When we have trained our models, we can inference all the data in testing set. 
+ +```bash +python 4_predict.py +``` + +When this process is done, the prediction cases will be put in this path: +**save_path = "./prediction_results/segmamba"** + +### Metrics computation +We can obtain the Dice score and HD95 on each segmentation target (WT, TC, ET for BraTS2023 dataset) using this code: + +```bash +python 5_compute_metrics.py --pred_name="segmamba" +``` + + + +## Acknowledgement +Many thanks for these repos for their great contribution! + +[https://github.com/MIC-DKFZ/nnUNet](https://github.com/MIC-DKFZ/nnUNet) + +[https://github.com/Project-MONAI/MONAI](https://github.com/Project-MONAI/MONAI) + +[https://github.com/hustvl/Vim](https://github.com/hustvl/Vim) + +[https://github.com/bowang-lab/U-Mamba](https://github.com/bowang-lab/U-Mamba) + diff --git a/source_code/SegMamba/causal-conv1d/.DS_Store b/source_code/SegMamba/causal-conv1d/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..c6665734a44cf14bd7d2d71492646e87eae6cfa1 Binary files /dev/null and b/source_code/SegMamba/causal-conv1d/.DS_Store differ diff --git a/source_code/SegMamba/causal-conv1d/AUTHORS b/source_code/SegMamba/causal-conv1d/AUTHORS new file mode 100644 index 0000000000000000000000000000000000000000..88193855314bb723ced1860384e417954f559700 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/AUTHORS @@ -0,0 +1 @@ +Tri Dao, tri@tridao.me diff --git a/source_code/SegMamba/causal-conv1d/LICENSE b/source_code/SegMamba/causal-conv1d/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5860e4b33f3d9d85fc636137c559331d51783a5b --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/source_code/SegMamba/causal-conv1d/README.md b/source_code/SegMamba/causal-conv1d/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4e905425a650d77c5c4854e4c4a261778c4d2690 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/README.md @@ -0,0 +1 @@ +# Causal depthwise conv1d in CUDA with a PyTorch interface diff --git a/source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py b/source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cc4d610a1e557cabd723fb6e33438f03c5c4bf66 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/__init__.py @@ -0,0 +1,3 @@ +__version__ = "1.0.0" + +from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update diff --git a/source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py b/source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..f66143c39e767572ca12112811a384239b8beb63 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/build/lib.linux-x86_64-cpython-312/causal_conv1d/causal_conv1d_interface.py @@ -0,0 +1,104 @@ +# Copyright (c) 2023, Tri Dao. 
+ +import torch +import torch.nn.functional as F + + +import causal_conv1d_cuda + + +class CausalConv1dFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x, weight, bias=None, activation=None): + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + if x.stride(2) != 1 and x.stride(1) != 1: + x = x.contiguous() + bias = bias.contiguous() if bias is not None else None + ctx.save_for_backward(x, weight, bias) + ctx.activation = activation in ["silu", "swish"] + out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation) + return out + + @staticmethod + def backward(ctx, dout): + x, weight, bias = ctx.saved_tensors + if dout.stride(2) != 1 and dout.stride(1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). + # Here we just pass in None and dx will be allocated in the C++ code. 
+ dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd( + x, weight, bias, dout, None, ctx.activation + ) + return dx, dweight, dbias if bias is not None else None, None + + +def causal_conv1d_fn(x, weight, bias=None, activation=None): + """ + x: (batch, dim, seqlen) + weight: (dim, width) + bias: (dim,) + activation: either None or "silu" or "swish" + + out: (batch, dim, seqlen) + """ + return CausalConv1dFn.apply(x, weight, bias, activation) + + +def causal_conv1d_ref(x, weight, bias=None, activation=None): + """ + x: (batch, dim, seqlen) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim, seqlen) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + dtype_in = x.dtype + x = x.to(weight.dtype) + seqlen = x.shape[-1] + dim, width = weight.shape + out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim) + out = out[..., :seqlen] + return (out if activation is None else F.silu(out)).to(dtype=dtype_in) + + +def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None): + """ + x: (batch, dim) + conv_state: (batch, dim, width) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + activation = activation in ["silu", "swish"] + return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation) + + +def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None): + """ + x: (batch, dim) + conv_state: (batch, dim, width) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + dtype_in = x.dtype + batch, dim = x.shape + width = weight.shape[1] + assert conv_state.shape == (batch, dim, width) + assert weight.shape == (dim, width) + 
conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = x + out = torch.sum(conv_state * weight, dim=-1) # (B D) + if bias is not None: + out += bias + return (out if activation is None else F.silu(out)).to(dtype=dtype_in) diff --git a/source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/.ninja_log b/source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/.ninja_log new file mode 100644 index 0000000000000000000000000000000000000000..8a3ad9f21815e6044a9ef78bf640bcac1b0c719b --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/.ninja_log @@ -0,0 +1,5 @@ +# ninja log v5 +1 3925 1769349295059886191 /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o 3402387f7700f2cb +0 8815 1769349299946937322 /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o f8d3256741ca6581 +0 19513 1769349310651049313 /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o 9aac860c790009d8 +0 21969 1769349313087074800 /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o 2a543079906b9d85 diff --git a/source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/build.ninja b/source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/build.ninja new file mode 100644 index 0000000000000000000000000000000000000000..bea3fd5830c22be247a66664a8bd13840548293c --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/build.ninja @@ -0,0 +1,40 @@ +ninja_required_version = 1.3 +cxx = c++ +nvcc = /usr/local/cuda/bin/nvcc + +cflags = -pthread -B /root/miniforge/compiler_compat -fno-strict-overflow -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /root/miniforge/include -fPIC -O2 -isystem /root/miniforge/include -fPIC -I/root/githubs/SegMamba/causal-conv1d 
-I/root/miniforge/lib/python3.12/site-packages/torch/include -I/root/miniforge/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/root/miniforge/include/python3.12 -c +post_cflags = -O3 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=causal_conv1d_cuda -std=c++17 +cuda_cflags = -I/root/githubs/SegMamba/causal-conv1d -I/root/miniforge/lib/python3.12/site-packages/torch/include -I/root/miniforge/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/root/miniforge/include/python3.12 -c +cuda_post_cflags = -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options ''"'"'-fPIC'"'"'' -O3 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_BFLOAT16_OPERATORS__ -U__CUDA_NO_BFLOAT16_CONVERSIONS__ -U__CUDA_NO_BFLOAT162_OPERATORS__ -U__CUDA_NO_BFLOAT162_CONVERSIONS__ --expt-relaxed-constexpr --expt-extended-lambda --use_fast_math --ptxas-options=-v -lineinfo -gencode arch=compute_70,code=sm_70 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_90,code=sm_90 --threads 4 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=causal_conv1d_cuda -std=c++17 +cuda_dlink_post_cflags = +sycl_dlink_post_cflags = +ldflags = + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags + depfile = $out.d + deps = gcc + +rule cuda_compile + depfile = $out.d + deps = gcc + command = $nvcc --generate-dependencies-with-compile --dependency-output $out.d $cuda_cflags -c $in -o $out $cuda_post_cflags + + + + + + + +build /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d.o: compile /root/githubs/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp +build /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_bwd.o: cuda_compile 
/root/githubs/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu +build /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_fwd.o: cuda_compile /root/githubs/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu +build /root/githubs/SegMamba/causal-conv1d/build/temp.linux-x86_64-cpython-312/csrc/causal_conv1d_update.o: cuda_compile /root/githubs/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu + + + + + + + + diff --git a/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..e933a3d307c4158492494dae393112800cfd6b36 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/PKG-INFO @@ -0,0 +1,29 @@ +Metadata-Version: 2.4 +Name: causal_conv1d +Version: 1.0.0 +Summary: Causal depthwise conv1d in CUDA, with a PyTorch interface +Home-page: https://github.com/Dao-AILab/causal-conv1d +Author: Tri Dao +Author-email: tri@tridao.me +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: Unix +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS +Requires-Dist: torch +Requires-Dist: packaging +Requires-Dist: ninja +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: license-file +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary + +# Causal depthwise conv1d in CUDA with a PyTorch interface diff --git a/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..085f8c1e16070f5d8c191ccba0365e90512ae180 --- /dev/null +++ 
b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/SOURCES.txt @@ -0,0 +1,16 @@ +AUTHORS +LICENSE +README.md +setup.py +causal_conv1d/__init__.py +causal_conv1d/causal_conv1d_interface.py +causal_conv1d.egg-info/PKG-INFO +causal_conv1d.egg-info/SOURCES.txt +causal_conv1d.egg-info/dependency_links.txt +causal_conv1d.egg-info/requires.txt +causal_conv1d.egg-info/top_level.txt +csrc/causal_conv1d.cpp +csrc/causal_conv1d_bwd.cu +csrc/causal_conv1d_fwd.cu +csrc/causal_conv1d_update.cu +tests/test_causal_conv1d.py \ No newline at end of file diff --git a/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..4abdfa4e1eeb60695a9dc850226f9ca2cf8d3c94 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/requires.txt @@ -0,0 +1,3 @@ +torch +packaging +ninja diff --git a/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..4e7fccded0b0646bbe9f67acaba866f9861f3333 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/causal_conv1d.egg-info/top_level.txt @@ -0,0 +1,2 @@ +causal_conv1d +causal_conv1d_cuda diff --git a/source_code/SegMamba/causal-conv1d/causal_conv1d/__init__.py b/source_code/SegMamba/causal-conv1d/causal_conv1d/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..cc4d610a1e557cabd723fb6e33438f03c5c4bf66 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/causal_conv1d/__init__.py @@ -0,0 +1,3 @@ +__version__ = "1.0.0" + +from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_update diff --git a/source_code/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py b/source_code/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..f66143c39e767572ca12112811a384239b8beb63 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/causal_conv1d/causal_conv1d_interface.py @@ -0,0 +1,104 @@ +# Copyright (c) 2023, Tri Dao. + +import torch +import torch.nn.functional as F + + +import causal_conv1d_cuda + + +class CausalConv1dFn(torch.autograd.Function): + @staticmethod + def forward(ctx, x, weight, bias=None, activation=None): + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + if x.stride(2) != 1 and x.stride(1) != 1: + x = x.contiguous() + bias = bias.contiguous() if bias is not None else None + ctx.save_for_backward(x, weight, bias) + ctx.activation = activation in ["silu", "swish"] + out = causal_conv1d_cuda.causal_conv1d_fwd(x, weight, bias, ctx.activation) + return out + + @staticmethod + def backward(ctx, dout): + x, weight, bias = ctx.saved_tensors + if dout.stride(2) != 1 and dout.stride(1) != 1: + dout = dout.contiguous() + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). + # Here we just pass in None and dx will be allocated in the C++ code. 
+ dx, dweight, dbias = causal_conv1d_cuda.causal_conv1d_bwd( + x, weight, bias, dout, None, ctx.activation + ) + return dx, dweight, dbias if bias is not None else None, None + + +def causal_conv1d_fn(x, weight, bias=None, activation=None): + """ + x: (batch, dim, seqlen) + weight: (dim, width) + bias: (dim,) + activation: either None or "silu" or "swish" + + out: (batch, dim, seqlen) + """ + return CausalConv1dFn.apply(x, weight, bias, activation) + + +def causal_conv1d_ref(x, weight, bias=None, activation=None): + """ + x: (batch, dim, seqlen) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim, seqlen) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + dtype_in = x.dtype + x = x.to(weight.dtype) + seqlen = x.shape[-1] + dim, width = weight.shape + out = F.conv1d(x, weight.unsqueeze(1), bias, padding=width - 1, groups=dim) + out = out[..., :seqlen] + return (out if activation is None else F.silu(out)).to(dtype=dtype_in) + + +def causal_conv1d_update(x, conv_state, weight, bias=None, activation=None): + """ + x: (batch, dim) + conv_state: (batch, dim, width) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + activation = activation in ["silu", "swish"] + return causal_conv1d_cuda.causal_conv1d_update(x, conv_state, weight, bias, activation) + + +def causal_conv1d_update_ref(x, conv_state, weight, bias=None, activation=None): + """ + x: (batch, dim) + conv_state: (batch, dim, width) + weight: (dim, width) + bias: (dim,) + + out: (batch, dim) + """ + if activation not in [None, "silu", "swish"]: + raise NotImplementedError("activation must be None, silu, or swish") + dtype_in = x.dtype + batch, dim = x.shape + width = weight.shape[1] + assert conv_state.shape == (batch, dim, width) + assert weight.shape == (dim, width) + 
conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = x + out = torch.sum(conv_state * weight, dim=-1) # (B D) + if bias is not None: + out += bias + return (out if activation is None else F.silu(out)).to(dtype=dtype_in) diff --git a/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1c80516ac8599d4d80910a1d4d85c4c435cf1e4f --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.cpp @@ -0,0 +1,333 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include +#include + +#include "causal_conv1d.h" + +#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")") + +#define DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(ITYPE, NAME, ...) \ + if (ITYPE == at::ScalarType::Half) { \ + using input_t = at::Half; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::BFloat16) { \ + using input_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::Float) { \ + using input_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for input type '", toString(ITYPE), "'"); \ + } + +#define DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(WTYPE, NAME, ...) 
\ + if (WTYPE == at::ScalarType::Half) { \ + using weight_t = at::Half; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::BFloat16) { \ + using weight_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::Float) { \ + using weight_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \ + } + +template +void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template +void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); + +template +void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template +void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); + +template +void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); + +void set_conv_params_fwd(ConvParamsBase ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t width, + // device pointers + const at::Tensor x, + const at::Tensor weight, + const at::Tensor out, + void* bias_ptr, + bool silu_activation) { + + // Reset the parameters + memset(¶ms, 0, sizeof(params)); + + params.batch = batch; + params.dim = dim; + params.seqlen = seqlen; + params.width = width; + + params.silu_activation = silu_activation; + + // Set the pointers and strides. + params.x_ptr = x.data_ptr(); + params.weight_ptr = weight.data_ptr(); + params.bias_ptr = bias_ptr; + params.out_ptr = out.data_ptr(); + // All stride are in elements, not bytes. 
+ params.x_batch_stride = x.stride(0); + params.x_c_stride = x.stride(1); + params.x_l_stride = x.stride(-1); + params.weight_c_stride = weight.stride(0); + params.weight_width_stride = weight.stride(1); + params.out_batch_stride = out.stride(0); + params.out_c_stride = out.stride(1); + params.out_l_stride = out.stride(-1); +} + + +void set_conv_params_bwd(ConvParamsBwd ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t width, + // device pointers + const at::Tensor x, + const at::Tensor weight, + void* bias_ptr, + const at::Tensor dout, + const at::Tensor dx, + const at::Tensor dweight, + void* dbias_ptr, + bool silu_activation) { + // Pass in "dout" instead of "out", we're not gonna use "out" at all. + set_conv_params_fwd(params, batch, dim, seqlen, width, + x, weight, dout, bias_ptr, silu_activation); + + // Set the pointers and strides. + params.dout_ptr = dout.data_ptr(); + params.dx_ptr = dx.data_ptr(); + params.dweight_ptr = dweight.data_ptr(); + params.dbias_ptr = dbias_ptr; + // All stride are in elements, not bytes. 
+ params.dout_batch_stride = dout.stride(0); + params.dout_c_stride = dout.stride(1); + params.dout_l_stride = dout.stride(2); + params.dweight_c_stride = dweight.stride(0); + params.dweight_width_stride = dweight.stride(1); + params.dx_batch_stride = dx.stride(0); + params.dx_c_stride = dx.stride(1); + params.dx_l_stride = dx.stride(2); +} + +at::Tensor +causal_conv1d_fwd(const at::Tensor &x, const at::Tensor &weight, + const c10::optional &bias_, + bool silu_activation) { + auto input_type = x.scalar_type(); + auto weight_type = weight.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16); + + TORCH_CHECK(x.is_cuda()); + TORCH_CHECK(weight.is_cuda()); + + const auto sizes = x.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int width = weight.size(-1); + + CHECK_SHAPE(x, batch_size, dim, seqlen); + CHECK_SHAPE(weight, dim, width); + + TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1); + const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1; + + if (is_channel_last) { + TORCH_CHECK(dim % 8 == 0, "causal_conv1d only supports channel dimension divisible by 8 for now"); + } + TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4"); + + + if (bias_.has_value()) { + auto bias = bias_.value(); + TORCH_CHECK(bias.scalar_type() == weight_type); + TORCH_CHECK(bias.is_cuda()); + TORCH_CHECK(bias.stride(-1) == 1); + CHECK_SHAPE(bias, dim); + } + + at::Tensor out = torch::empty_like(x); + + ConvParamsBase params; + set_conv_params_fwd(params, batch_size, dim, seqlen, width, x, weight, out, + bias_.has_value() ? 
bias_.value().data_ptr() : nullptr, + silu_activation); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)x.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_fwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_fwd", [&] { + if (!is_channel_last) { + causal_conv1d_fwd_cuda(params, stream); + } else { + causal_conv1d_channellast_fwd_cuda(params, stream); + } + }); + }); + return out; +} + +std::vector +causal_conv1d_bwd(const at::Tensor &x, const at::Tensor &weight, + const c10::optional &bias_, + at::Tensor &dout, + c10::optional &dx_, + bool silu_activation) { + auto input_type = x.scalar_type(); + auto weight_type = weight.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16); + + TORCH_CHECK(x.is_cuda()); + TORCH_CHECK(weight.is_cuda()); + TORCH_CHECK(dout.is_cuda()); + + const auto sizes = x.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int width = weight.size(-1); + + TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4"); + + CHECK_SHAPE(x, batch_size, dim, seqlen); + CHECK_SHAPE(weight, dim, width); + CHECK_SHAPE(dout, batch_size, dim, seqlen); + + TORCH_CHECK(x.stride(2) == 1 || x.stride(1) == 1); + const bool is_channel_last = x.stride(1) == 1 && x.stride(2) > 1; + if (!is_channel_last && dout.stride(2) != 1) { dout = dout.contiguous(); } + if (is_channel_last && dout.stride(1) != 1) { dout = dout.transpose(-1, -2).contiguous().transpose(-1, -2); } + + if (bias_.has_value()) { + auto bias = 
bias_.value(); + TORCH_CHECK(bias.scalar_type() == weight_type); + TORCH_CHECK(bias.is_cuda()); + TORCH_CHECK(bias.stride(-1) == 1); + CHECK_SHAPE(bias, dim); + } + + at::Tensor dx; + if (dx_.has_value()) { + dx = dx_.value(); + TORCH_CHECK(dx.scalar_type() == input_type); + TORCH_CHECK(dx.is_cuda()); + CHECK_SHAPE(dx, batch_size, dim, seqlen); + if (!is_channel_last) { TORCH_CHECK(dx.stride(2) == 1); } + if (is_channel_last) { TORCH_CHECK(dx.stride(1) == 1); } + } else { + dx = torch::empty_like(x); + } + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)x.get_device()}; + + at::Tensor dweight = torch::zeros_like(weight, weight.options().dtype(at::kFloat)); + at::Tensor dbias; + if (bias_.has_value()) { dbias = torch::zeros_like(bias_.value(), bias_.value().options().dtype(at::kFloat)); } + + ConvParamsBwd params; + set_conv_params_bwd(params, batch_size, dim, seqlen, width, + x, weight, bias_.has_value() ? bias_.value().data_ptr() : nullptr, + dout, dx, dweight, bias_.has_value() ? dbias.data_ptr() : nullptr, + silu_activation); + + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_bwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_bwd", [&] { + if (!is_channel_last) { + causal_conv1d_bwd_cuda(params, stream); + } else { + causal_conv1d_channellast_bwd_cuda(params, stream); + } + }); + }); + return {dx, dweight.to(weight.dtype()), bias_.has_value() ? 
dbias.to(bias_.value().dtype()) : dbias}; +} + +at::Tensor +causal_conv1d_update(const at::Tensor &x, + const at::Tensor &conv_state, + const at::Tensor &weight, + const c10::optional &bias_, + bool silu_activation) { + auto input_type = x.scalar_type(); + auto weight_type = weight.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::Half || weight_type == at::ScalarType::BFloat16); + TORCH_CHECK(conv_state.scalar_type() == input_type); + + TORCH_CHECK(x.is_cuda()); + TORCH_CHECK(conv_state.is_cuda()); + TORCH_CHECK(weight.is_cuda()); + + const auto sizes = x.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int width = weight.size(-1); + + CHECK_SHAPE(x, batch_size, dim); + CHECK_SHAPE(conv_state, batch_size, dim, width); + CHECK_SHAPE(weight, dim, width); + + TORCH_CHECK(width >= 2 && width <= 4, "causal_conv1d only supports width between 2 and 4"); + + if (bias_.has_value()) { + auto bias = bias_.value(); + TORCH_CHECK(bias.scalar_type() == weight_type); + TORCH_CHECK(bias.is_cuda()); + TORCH_CHECK(bias.stride(-1) == 1); + CHECK_SHAPE(bias, dim); + } + + at::Tensor out = torch::empty_like(x); + + ConvParamsBase params; + set_conv_params_fwd(params, batch_size, dim, /*seqlen=*/1, width, x, weight, out, + bias_.has_value() ? bias_.value().data_ptr() : nullptr, + silu_activation); + params.conv_state_ptr = conv_state.data_ptr(); + // All stride are in elements, not bytes. 
+ params.conv_state_batch_stride = conv_state.stride(0); + params.conv_state_c_stride = conv_state.stride(1); + params.conv_state_l_stride = conv_state.stride(2); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)x.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_update", [&] { + DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(weight.scalar_type(), "causal_conv1d_update", [&] { + causal_conv1d_update_cuda(params, stream); + }); + }); + return out; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("causal_conv1d_fwd", &causal_conv1d_fwd, "Causal conv1d forward"); + m.def("causal_conv1d_bwd", &causal_conv1d_bwd, "Causal conv1d backward"); + m.def("causal_conv1d_update", &causal_conv1d_update, "Causal conv1d update"); +} diff --git a/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.h b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.h new file mode 100644 index 0000000000000000000000000000000000000000..844ed92cfc91a881e58fccfca001a13ebcc434cc --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d.h @@ -0,0 +1,53 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
+ ******************************************************************************/ + +#pragma once + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +struct ConvParamsBase { + using index_t = uint32_t; + + int batch, dim, seqlen, width; + bool silu_activation; + + index_t x_batch_stride; + index_t x_c_stride; + index_t x_l_stride; + index_t weight_c_stride; + index_t weight_width_stride; + index_t out_batch_stride; + index_t out_c_stride; + index_t out_l_stride; + + index_t conv_state_batch_stride; + index_t conv_state_c_stride; + index_t conv_state_l_stride; + + // Common data pointers. + void *__restrict__ x_ptr; + void *__restrict__ weight_ptr; + void *__restrict__ bias_ptr; + void *__restrict__ out_ptr; + + void *__restrict__ conv_state_ptr; +}; + +struct ConvParamsBwd: public ConvParamsBase { + index_t dx_batch_stride; + index_t dx_c_stride; + index_t dx_l_stride; + index_t dweight_c_stride; + index_t dweight_width_stride; + index_t dout_batch_stride; + index_t dout_c_stride; + index_t dout_l_stride; + + // Common data pointers. + void *__restrict__ dx_ptr; + void *__restrict__ dweight_ptr; + void *__restrict__ dbias_ptr; + void *__restrict__ dout_ptr; +}; + diff --git a/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu new file mode 100644 index 0000000000000000000000000000000000000000..66609750a30a86a284451871ca163d79a0529047 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_bwd.cu @@ -0,0 +1,525 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
+ ******************************************************************************/ + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK + +#include +#include +#include + +#include "causal_conv1d.h" +#include "causal_conv1d_common.h" +#include "static_switch.h" + +template +struct Causal_conv1d_bwd_kernel_traits { + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static constexpr int kWidth = kWidth_; + static constexpr bool kSiluAct = kSiluAct_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : 8; + static_assert(kWidth <= kNElts); + // It's possible that we need to do 2 rounds of exchange if input_t is 16 bits + // (since then we'd have 8 values of float, and each round we can exchange 4 floats). + static constexpr int kNExchangeRounds = sizeof(float) / sizeof(input_t); + static constexpr bool kIsVecLoad = kIsVecLoad_; + using vec_t = typename BytesToType::Type; + using BlockLoadT = cub::BlockLoad; + using BlockLoadVecT = cub::BlockLoad; + using BlockStoreT = cub::BlockStore; + using BlockStoreVecT = cub::BlockStore; + using BlockReduceFloatT = cub::BlockReduce; + static constexpr int kSmemIOSize = kIsVecLoad + ? 0 + : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)}); + static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts * (!kSiluAct ? 1 : kNExchangeRounds + 1); + static constexpr int kSmemSize = std::max({kSmemExchangeSize, + int(sizeof(typename BlockReduceFloatT::TempStorage))}) + (kIsVecLoad ? 
0 : kSmemIOSize); +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_bwd_kernel(ConvParamsBwd params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr bool kSiluAct = Ktraits::kSiluAct; + constexpr int kNElts = Ktraits::kNElts; + constexpr int kNExchangeRounds = Ktraits::kNExchangeRounds; + constexpr bool kIsVecLoad = Ktraits::kIsVecLoad; + using input_t = typename Ktraits::input_t; + using vec_t = typename Ktraits::vec_t; + using weight_t = typename Ktraits::weight_t; + + // Shared memory. + extern __shared__ char smem_[]; + auto& smem_load = reinterpret_cast(smem_); + auto& smem_load_vec = reinterpret_cast(smem_); + auto& smem_store = reinterpret_cast(smem_); + auto& smem_store_vec = reinterpret_cast(smem_); + vec_t *smem_exchange = reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + vec_t *smem_exchange_x = reinterpret_cast(smem_ + Ktraits::kSmemIOSize) + kNThreads * kNExchangeRounds; + auto& smem_reduce_float = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + + const int tidx = threadIdx.x; + const int batch_id = blockIdx.x; + const int dim_id = blockIdx.y; + input_t *x = reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + dim_id * params.x_c_stride; + weight_t *weight = reinterpret_cast(params.weight_ptr) + dim_id * params.weight_c_stride; + input_t *dout = reinterpret_cast(params.dout_ptr) + batch_id * params.dout_batch_stride + + dim_id * params.dout_c_stride; + input_t *dx = reinterpret_cast(params.dx_ptr) + batch_id * params.dx_batch_stride + + dim_id * params.dx_c_stride; + float *dweight = reinterpret_cast(params.dweight_ptr) + dim_id * params.dweight_c_stride; + float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast(params.bias_ptr)[dim_id]); + + // Thread kNThreads - 1 will load the first elements of the next chunk so we initialize those to 0. 
+ if (tidx == 0) { + if constexpr (!kSiluAct) { + input_t zeros[kNElts] = {0}; + smem_exchange[0] = reinterpret_cast(zeros)[0]; + } else { + float zeros[kNElts] = {0}; + #pragma unroll + for (int r = 0; r < kNExchangeRounds; ++r) { + smem_exchange[r * kNThreads] = reinterpret_cast(zeros)[r]; + } + } + } + + float weight_vals[kWidth]; + #pragma unroll + for (int i = 0; i < kWidth; ++i) { weight_vals[i] = weight[i * params.weight_width_stride]; } + + float dweight_vals[kWidth] = {0}; + float dbias_val = 0; + + constexpr int kChunkSize = kNThreads * kNElts; + const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize; + x += (n_chunks - 1) * kChunkSize; + dout += (n_chunks - 1) * kChunkSize; + dx += (n_chunks - 1) * kChunkSize; + for (int chunk = n_chunks - 1; chunk >= 0; --chunk) { + input_t x_vals_load[2 * kNElts] = {0}; + input_t dout_vals_load[2 * kNElts] = {0}; + if constexpr(kIsVecLoad) { + Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast(x), *reinterpret_cast(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts); + Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast(dout), *reinterpret_cast(&dout_vals_load[0]), (params.seqlen - chunk * kChunkSize) / kNElts); + } else { + __syncthreads(); + Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize); + __syncthreads(); + Ktraits::BlockLoadT(smem_load).Load(dout, *reinterpret_cast(&dout_vals_load[0]), params.seqlen - chunk * kChunkSize); + } + float dout_vals[2 * kNElts], x_vals[2 * kNElts]; + if constexpr (!kSiluAct) { + __syncthreads(); + // Thread 0 don't write yet, so that thread kNThreads - 1 can read + // the first elements of the next chunk. + if (tidx > 0) { smem_exchange[tidx] = reinterpret_cast(dout_vals_load)[0]; } + __syncthreads(); + reinterpret_cast(dout_vals_load)[1] = smem_exchange[tidx < kNThreads - 1 ? 
tidx + 1 : 0]; + __syncthreads(); + // Now thread 0 can write the first elements of the current chunk. + if (tidx == 0) { smem_exchange[tidx] = reinterpret_cast(dout_vals_load)[0]; } + #pragma unroll + for (int i = 0; i < 2 * kNElts; ++i) { + dout_vals[i] = float(dout_vals_load[i]); + x_vals[i] = float(x_vals_load[i]); + } + } else { + if (tidx == 0 && chunk > 0) { + if constexpr(kIsVecLoad) { + reinterpret_cast(x_vals_load)[0] = reinterpret_cast(x)[-1]; + } else { + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + if (chunk * kChunkSize + i < params.seqlen) { x_vals_load[i] = x[-kNElts + i]; } + } + } + } + __syncthreads(); + smem_exchange_x[tidx] = reinterpret_cast(x_vals_load)[1]; + __syncthreads(); + if (tidx > 0) { reinterpret_cast(x_vals_load)[0] = smem_exchange_x[tidx - 1]; } + #pragma unroll + for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); } + // Recompute the output + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + float out_val = bias_val; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + out_val += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)]; + } + float out_sigmoid_val = 1.0f / (1.0f + expf(-out_val)); + dout_vals[i] = float(dout_vals_load[i]) * out_sigmoid_val + * (1.0f + out_val * (1.0f - out_sigmoid_val)); + } + // Exchange the dout_vals. It's possible that we need to do 2 rounds of exchange + // if input_t is 16 bits (since then we'd have 8 values of float) + __syncthreads(); + // Thread 0 don't write yet, so that thread kNThreads - 1 can read + // the first elements of the next chunk. + if (tidx > 0) { + #pragma unroll + for (int r = 0; r < kNExchangeRounds; ++r) { + smem_exchange[r * kNThreads + tidx] = reinterpret_cast(dout_vals)[r]; + } + } + __syncthreads(); + #pragma unroll + for (int r = 0; r < kNExchangeRounds; ++r) { + reinterpret_cast(dout_vals)[kNExchangeRounds + r] + = smem_exchange[r * kNThreads + (tidx < kNThreads - 1 ? 
tidx + 1 : 0)]; + } + __syncthreads(); + // Now thread 0 can write the first elements of the current chunk. + if (tidx == 0) { + #pragma unroll + for (int r = 0; r < kNExchangeRounds; ++r) { + smem_exchange[r * kNThreads + tidx] = reinterpret_cast(dout_vals)[r]; + } + } + } + dout -= kChunkSize; + x -= kChunkSize; + + #pragma unroll + for (int i = 0; i < kNElts; ++i) { dbias_val += dout_vals[i]; } + + float dx_vals[kNElts] = {0}; + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + dx_vals[i] += weight_vals[w] * dout_vals[i + kWidth - w - 1]; + } + } + + input_t dx_vals_store[kNElts]; + #pragma unroll + for (int i = 0; i < kNElts; ++i) { dx_vals_store[i] = dx_vals[i]; } + if constexpr(kIsVecLoad) { + Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast(dx), reinterpret_cast(dx_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts); + } else { + Ktraits::BlockStoreT(smem_store).Store(dx, dx_vals_store, params.seqlen - chunk * kChunkSize); + } + dx -= kChunkSize; + + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + dweight_vals[w] += x_vals[kNElts + i] * dout_vals[i + kWidth - w - 1]; + } + } + } + + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + __syncthreads(); + dweight_vals[w] = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dweight_vals[w]); + if (tidx == 0) { + atomicAdd(&reinterpret_cast(dweight)[w * params.dweight_width_stride], dweight_vals[w]); + } + } + if (params.bias_ptr != nullptr) { + __syncthreads(); + dbias_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dbias_val); + if (tidx == 0) { + atomicAdd(&reinterpret_cast(params.dbias_ptr)[dim_id], dbias_val); + } + } +} + +template +void causal_conv1d_bwd_launch(ConvParamsBwd ¶ms, cudaStream_t stream) { + static constexpr int kNElts = sizeof(input_t) == 4 ? 
4 : 8; + BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] { + BOOL_SWITCH(params.silu_activation, kSiluAct, [&] { + using Ktraits = Causal_conv1d_bwd_kernel_traits; + constexpr int kSmemSize = Ktraits::kSmemSize; + dim3 grid(params.batch, params.dim); + auto kernel = &causal_conv1d_bwd_kernel; + if (kSmemSize >= 48 * 1024) { + C10_CUDA_CHECK(cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + } + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); +} + +template +void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_bwd_launch<128, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_bwd_launch<128, 3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_bwd_launch<128, 4, input_t, weight_t>(params, stream); + } +} + +template +struct Causal_conv1d_channellast_bwd_kernel_traits { + // The cache line is 128 bytes, and we try to read 16 bytes per thread. + // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension. + // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128 + // threads). Each each load is 16 x 32|64 elements in the L x C dimensions. + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr bool kSiluAct = kSiluAct_; + static constexpr int kNThreads = kNThreads_; + static_assert(kNThreads % 32 == 0); + static constexpr int kNWarps = kNThreads / 32; + static constexpr int kWidth = kWidth_; + static constexpr int kChunkSizeL = kChunkSizeL_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 
4 : 8; + static constexpr int kNEltsPerRow = 128 / kNBytes; + static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts; // Always 8 for now + static_assert(kNThreadsPerRow * kNBytes * kNElts == 128); + static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow; // Always 4 for now + static_assert(kNColsPerWarp * kNThreadsPerRow == 32); + static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps; + static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad; + static_assert(kNLoads * kNColsPerLoad == kChunkSizeL); + static constexpr bool kIsVecLoad = kIsVecLoad_; + using vec_t = typename BytesToType::Type; + // using BlockLoadT = cub::BlockLoad; + // using BlockStoreT = cub::BlockStore; + // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage), + // sizeof(typename BlockStoreT::TempStorage)}); + // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes; +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_channellast_bwd_kernel(ConvParamsBwd params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr bool kSiluAct = Ktraits::kSiluAct; + constexpr int kNElts = Ktraits::kNElts; + constexpr int kNWarp = Ktraits::kNWarps; + constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow; + constexpr int kLPerLoad = Ktraits::kNColsPerLoad; + constexpr int kChunkSizeL = Ktraits::kChunkSizeL; + constexpr int kChunkSizeC = Ktraits::kNEltsPerRow; + using input_t = typename Ktraits::input_t; + using vec_t = typename Ktraits::vec_t; + using weight_t = typename Ktraits::weight_t; + + // Shared memory. 
+ __shared__ input_t dout_smem[kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts]; + __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL + kWidth - 1][kChunkSizeC + kNElts]; + + const int tid = threadIdx.x; + const int l_idx = tid / kNThreadsPerC; + const int c_idx = tid % kNThreadsPerC; + const int batch_id = blockIdx.x; + const int chunk_l_id = blockIdx.y; + const int chunk_c_id = blockIdx.z; + input_t *x = reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + weight_t *weight = reinterpret_cast(params.weight_ptr) + + chunk_c_id * kChunkSizeC * params.weight_c_stride; + input_t *dout = reinterpret_cast(params.dout_ptr) + batch_id * params.dout_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.dout_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + input_t *dx = reinterpret_cast(params.dx_ptr) + batch_id * params.dx_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.dx_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + float *dweight = reinterpret_cast(params.dweight_ptr) + + chunk_c_id * kChunkSizeC * params.dweight_c_stride; + + #pragma unroll + for (int l = 0; l < Ktraits::kNLoads; ++l) { + input_t dout_vals_load[kNElts] = {0}; + input_t x_vals_load[kNElts] = {0}; + if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(dout_vals_load)[0] = *reinterpret_cast(dout + l * kLPerLoad * params.dout_l_stride); + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x + l * kLPerLoad * params.x_l_stride); + } + reinterpret_cast(dout_smem[l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast(dout_vals_load)[0]; + reinterpret_cast(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + // Load the elements from the previous chunk or next chunk that are needed for convolution. 
+ if (l_idx < kWidth - 1) { + input_t dout_vals_load[kNElts] = {0}; + input_t x_vals_load[kNElts] = {0}; + if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(dout_vals_load)[0] = *reinterpret_cast(dout + kChunkSizeL * params.dout_l_stride); + } + if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0 + && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x - (kWidth - 1) * params.x_l_stride); + } + reinterpret_cast(dout_smem[kChunkSizeL + l_idx])[c_idx] = reinterpret_cast(dout_vals_load)[0]; + reinterpret_cast(x_smem[l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + // Need to load (kWdith - 1) extra x's on the right to recompute the (kChunkSizeL + kWidth - 1) outputs + if constexpr (kSiluAct) { + if (l_idx < kWidth - 1) { + input_t x_vals_load[kNElts] = {0}; + if ((chunk_l_id + 1) * kChunkSizeL + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x + kChunkSizeL * params.x_l_stride); + } + reinterpret_cast(x_smem[kWidth - 1 + kChunkSizeL + l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + } + + __syncthreads(); + + constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL); + static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC); + constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread; + static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL); + // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity + static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0); + static_assert((kLPerThread & (kLPerThread - 1)) == 0); + static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0); + static_assert(kNThreadsPerRow <= 32); + + const int row_idx = tid / kNThreadsPerRow; + 
const int col_idx = tid % kNThreadsPerRow; + + float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 0.f : float(reinterpret_cast(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]); + float weight_vals[kWidth] = {0}; + if (chunk_c_id * kChunkSizeC + row_idx < params.dim) { + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride]; + } + } + float dout_vals[kLPerThread + kWidth - 1]; + float x_vals[kWidth - 1 + kLPerThread + kWidth - 1]; + #pragma unroll + for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) { + dout_vals[i] = float(dout_smem[col_idx * kLPerThread + i][row_idx]); + x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]); + } + + if constexpr (kSiluAct) { // Recompute the output + #pragma unroll + for (int i = kWidth - 1 + kLPerThread; i < kWidth - 1 + kLPerThread + kWidth - 1; ++i) { + x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]); + } + #pragma unroll + for (int i = 0; i < kLPerThread + kWidth - 1; ++i) { + float out_val = bias_val; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { out_val += weight_vals[w] * x_vals[i + w]; } + float out_val_sigmoid = 1.f / (1.f + expf(-out_val)); + dout_vals[i] *= out_val_sigmoid * (1 + out_val * (1 - out_val_sigmoid)); + } + } + + float dweight_vals[kWidth] = {0}; + SumOp sum_op; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { dweight_vals[w] += x_vals[i + w] * dout_vals[i]; } + dweight_vals[w] = Allreduce::run(dweight_vals[w], sum_op); + if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) { + atomicAdd(&reinterpret_cast(dweight)[row_idx * params.dweight_c_stride + w * params.dweight_width_stride], dweight_vals[w]); + } + } + + if (params.bias_ptr != nullptr) { + float dbias_val = 0.f; + for (int i = 0; i < kLPerThread; ++i) { dbias_val += dout_vals[i]; } + dbias_val = 
Allreduce::run(dbias_val, sum_op); + if (col_idx == 0 && chunk_c_id * kChunkSizeC + row_idx < params.dim) { + atomicAdd(&reinterpret_cast(params.dbias_ptr)[chunk_c_id * kChunkSizeC + row_idx], dbias_val); + } + } + + float dx_vals[kLPerThread] = {0}; + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { + #pragma unroll + for (int w = 0; w < kWidth; ++w) { dx_vals[i] += weight_vals[kWidth - 1 - w] * dout_vals[i + w]; } + } + // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads. + __syncwarp(); + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = dx_vals[i]; } + __syncthreads(); + + #pragma unroll + for (int l = 0; l < Ktraits::kNLoads; ++l) { + input_t dx_vals_store[kNElts]; + reinterpret_cast(dx_vals_store)[0] = reinterpret_cast(x_smem[l * kLPerLoad + l_idx])[c_idx]; + if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + *reinterpret_cast(dx + l * kLPerLoad * params.dx_l_stride) = reinterpret_cast(dx_vals_store)[0]; + } + } + +} + +template +void causal_conv1d_channellast_bwd_launch(ConvParamsBwd ¶ms, cudaStream_t stream) { + BOOL_SWITCH(params.silu_activation, kSiluAct, [&] { + using Ktraits = Causal_conv1d_channellast_bwd_kernel_traits; + // constexpr int kSmemSize = Ktraits::kSmemSize; + constexpr int kChunkSizeL = Ktraits::kChunkSizeL; + constexpr int kChunkSizeC = Ktraits::kNEltsPerRow; + const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL; + const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC; + dim3 grid(params.batch, n_chunks_L, n_chunks_C); + dim3 block(Ktraits::kNThreads); + auto kernel = &causal_conv1d_channellast_bwd_kernel; + // if (kSmemSize >= 48 * 1024) { + // C10_CUDA_CHECK(cudaFuncSetAttribute( + // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + // } + // kernel<<>>(params); + kernel<<>>(params); + 
C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); +} + +template +void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_channellast_bwd_launch<128, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_channellast_bwd_launch<128, 3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_channellast_bwd_launch<128, 4, input_t, weight_t>(params, stream); + } +} + +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); + +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_bwd_cuda(ConvParamsBwd ¶ms, 
cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h new file mode 100644 index 0000000000000000000000000000000000000000..8dd6a333b52163986c085f71475709706ce8f9c3 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_common.h @@ -0,0 +1,64 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#pragma once + +#include +#include + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template struct BytesToType {}; + +template<> struct BytesToType<16> { + using Type = uint4; + static_assert(sizeof(Type) == 16); +}; + +template<> struct BytesToType<8> { + using Type = uint64_t; + static_assert(sizeof(Type) == 8); +}; + +template<> struct BytesToType<4> { + using Type = uint32_t; + static_assert(sizeof(Type) == 4); +}; + +template<> struct BytesToType<2> { + using Type = uint16_t; + static_assert(sizeof(Type) == 2); +}; + +template<> struct BytesToType<1> { + using Type = uint8_t; + static_assert(sizeof(Type) == 1); +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct SumOp { +__device__ inline T operator()(T const & x, T const & y) { return x + y; } +}; + +template +struct Allreduce { + static_assert(THREADS == 32 || THREADS == 16 || THREADS == 8 || THREADS == 4); + template + static __device__ inline T run(T x, Operator &op) { + constexpr int OFFSET = THREADS / 2; + x = op(x, __shfl_xor_sync(uint32_t(-1), x, OFFSET)); + return Allreduce::run(x, op); + } +}; + +template<> +struct Allreduce<2> { +template +static __device__ inline T run(T x, Operator &op) { + x = op(x, __shfl_xor_sync(uint32_t(-1), x, 1)); + return x; +} +}; diff --git 
a/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu new file mode 100644 index 0000000000000000000000000000000000000000..74a1459f88a87ef427075a25e5081899e382efc0 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_fwd.cu @@ -0,0 +1,350 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK + +#include +#include + +#include "causal_conv1d.h" +#include "causal_conv1d_common.h" +#include "static_switch.h" + +template +struct Causal_conv1d_fwd_kernel_traits { + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static constexpr int kWidth = kWidth_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : 8; + static_assert(kWidth <= kNElts); + static constexpr bool kIsVecLoad = kIsVecLoad_; + using vec_t = typename BytesToType::Type; + using BlockLoadT = cub::BlockLoad; + using BlockLoadVecT = cub::BlockLoad; + using BlockStoreT = cub::BlockStore; + using BlockStoreVecT = cub::BlockStore; + static constexpr int kSmemIOSize = kIsVecLoad + ? 
0 + : std::max({sizeof(typename BlockLoadT::TempStorage), sizeof(typename BlockStoreT::TempStorage)}); + static constexpr int kSmemExchangeSize = kNThreads * kNBytes * kNElts; + static constexpr int kSmemSize = kSmemIOSize + kSmemExchangeSize; +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_fwd_kernel(ConvParamsBase params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr int kNElts = Ktraits::kNElts; + constexpr bool kIsVecLoad = Ktraits::kIsVecLoad; + using input_t = typename Ktraits::input_t; + using vec_t = typename Ktraits::vec_t; + using weight_t = typename Ktraits::weight_t; + + // Shared memory. + extern __shared__ char smem_[]; + auto& smem_load = reinterpret_cast(smem_); + auto& smem_load_vec = reinterpret_cast(smem_); + auto& smem_store = reinterpret_cast(smem_); + auto& smem_store_vec = reinterpret_cast(smem_); + vec_t *smem_exchange = reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + + const int tidx = threadIdx.x; + const int batch_id = blockIdx.x; + const int channel_id = blockIdx.y; + input_t *x = reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + channel_id * params.x_c_stride; + weight_t *weight = reinterpret_cast(params.weight_ptr) + channel_id * params.weight_c_stride; + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + channel_id * params.out_c_stride; + float bias_val = params.bias_ptr == nullptr ? 0.f : float(reinterpret_cast(params.bias_ptr)[channel_id]); + + // Thread 0 will load the last elements of the previous chunk, so we initialize those to 0. 
+ if (tidx == 0) { + input_t zeros[kNElts] = {0}; + smem_exchange[kNThreads - 1] = reinterpret_cast(zeros)[0]; + } + + float weight_vals[kWidth]; + #pragma unroll + for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); } + + constexpr int kChunkSize = kNThreads * kNElts; + const int n_chunks = (params.seqlen + kChunkSize - 1) / kChunkSize; + for (int chunk = 0; chunk < n_chunks; ++chunk) { + input_t x_vals_load[2 * kNElts] = {0}; + if constexpr(kIsVecLoad) { + Ktraits::BlockLoadVecT(smem_load_vec).Load(reinterpret_cast(x), *reinterpret_cast(&x_vals_load[kNElts]), (params.seqlen - chunk * kChunkSize) / kNElts); + } else { + __syncthreads(); + Ktraits::BlockLoadT(smem_load).Load(x, *reinterpret_cast(&x_vals_load[kNElts]), params.seqlen - chunk * kChunkSize); + } + x += kChunkSize; + __syncthreads(); + // Thread kNThreads - 1 don't write yet, so that thread 0 can read + // the last elements of the previous chunk. + if (tidx < kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast(x_vals_load)[1]; } + __syncthreads(); + reinterpret_cast(x_vals_load)[0] = smem_exchange[tidx > 0 ? tidx - 1 : kNThreads - 1]; + __syncthreads(); + // Now thread kNThreads - 1 can write the last elements of the current chunk. 
+ if (tidx == kNThreads - 1) { smem_exchange[tidx] = reinterpret_cast(x_vals_load)[1]; } + + float x_vals[2 * kNElts]; + #pragma unroll + for (int i = 0; i < 2 * kNElts; ++i) { x_vals[i] = float(x_vals_load[i]); } + + float out_vals[kNElts]; + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + out_vals[i] = bias_val; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + out_vals[i] += weight_vals[w] * x_vals[kNElts + i - (kWidth - w - 1)]; + } + } + + if (params.silu_activation) { + #pragma unroll + for (int i = 0; i < kNElts; ++i) { + out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i])); + } + } + + input_t out_vals_store[kNElts]; + #pragma unroll + for (int i = 0; i < kNElts; ++i) { out_vals_store[i] = out_vals[i]; } + if constexpr(kIsVecLoad) { + Ktraits::BlockStoreVecT(smem_store_vec).Store(reinterpret_cast(out), reinterpret_cast(out_vals_store), (params.seqlen - chunk * kChunkSize) / kNElts); + } else { + Ktraits::BlockStoreT(smem_store).Store(out, out_vals_store, params.seqlen - chunk * kChunkSize); + } + out += kChunkSize; + } +} + +template +void causal_conv1d_fwd_launch(ConvParamsBase ¶ms, cudaStream_t stream) { + static constexpr int kNElts = sizeof(input_t) == 4 ? 
4 : 8; + BOOL_SWITCH(params.seqlen % kNElts == 0, kIsVecLoad, [&] { + using Ktraits = Causal_conv1d_fwd_kernel_traits; + constexpr int kSmemSize = Ktraits::kSmemSize; + dim3 grid(params.batch, params.dim); + auto kernel = &causal_conv1d_fwd_kernel; + if (kSmemSize >= 48 * 1024) { + C10_CUDA_CHECK(cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + } + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); +} + +template +void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_fwd_launch<128, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_fwd_launch<128, 3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_fwd_launch<128, 4, input_t, weight_t>(params, stream); + } +} + +template +struct Causal_conv1d_channellast_fwd_kernel_traits { + // The cache line is 128 bytes, and we try to read 16 bytes per thread. + // So we have 8 threads per "row", so 32 or 64 elements in the channel dimension. + // That leaves 4 columns per warp, and so 16 columns per block (assuming each block has 128 + // threads). Each each load is 16 x 32|64 elements in the L x C dimensions. + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static_assert(kNThreads % 32 == 0); + static constexpr int kNWarps = kNThreads / 32; + static constexpr int kWidth = kWidth_; + static constexpr int kChunkSizeL = kChunkSizeL_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 
4 : 8; + static constexpr int kNEltsPerRow = 128 / kNBytes; + static constexpr int kNThreadsPerRow = kNEltsPerRow / kNElts; // Always 8 for now + static_assert(kNThreadsPerRow * kNBytes * kNElts == 128); + static constexpr int kNColsPerWarp = 32 / kNThreadsPerRow; // Always 4 for now + static_assert(kNColsPerWarp * kNThreadsPerRow == 32); + static constexpr int kNColsPerLoad = kNColsPerWarp * kNWarps; + static constexpr int kNLoads = kChunkSizeL / kNColsPerLoad; + static_assert(kNLoads * kNColsPerLoad == kChunkSizeL); + static constexpr bool kIsVecLoad = kIsVecLoad_; + using vec_t = typename BytesToType::Type; + // using BlockLoadT = cub::BlockLoad; + // using BlockStoreT = cub::BlockStore; + // static constexpr int kSmemSize = std::max({sizeof(typename BlockLoadT::TempStorage), + // sizeof(typename BlockStoreT::TempStorage)}); + // static constexpr int kSmemSize = kChunkSizeL * kNEltsPerRow * kNBytes; +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_channellast_fwd_kernel(ConvParamsBase params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr int kNElts = Ktraits::kNElts; + constexpr int kNWarp = Ktraits::kNWarps; + constexpr int kNThreadsPerC = Ktraits::kNThreadsPerRow; + constexpr int kLPerLoad = Ktraits::kNColsPerLoad; + constexpr int kChunkSizeL = Ktraits::kChunkSizeL; + constexpr int kChunkSizeC = Ktraits::kNEltsPerRow; + using input_t = typename Ktraits::input_t; + using vec_t = typename Ktraits::vec_t; + using weight_t = typename Ktraits::weight_t; + + // Shared memory. 
+ __shared__ input_t x_smem[kWidth - 1 + kChunkSizeL][kChunkSizeC + kNElts]; + + const int tid = threadIdx.x; + const int l_idx = tid / kNThreadsPerC; + const int c_idx = tid % kNThreadsPerC; + const int batch_id = blockIdx.x; + const int chunk_l_id = blockIdx.y; + const int chunk_c_id = blockIdx.z; + input_t *x = reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.x_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + weight_t *weight = reinterpret_cast(params.weight_ptr) + + chunk_c_id * kChunkSizeC * params.weight_c_stride; + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + (chunk_l_id * kChunkSizeL + l_idx) * params.out_l_stride + chunk_c_id * kChunkSizeC + c_idx * kNElts; + + #pragma unroll + for (int l = 0; l < Ktraits::kNLoads; ++l) { + input_t x_vals_load[kNElts] = {0}; + if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x + l * kLPerLoad * params.x_l_stride); + } + reinterpret_cast(x_smem[kWidth - 1 + l * kLPerLoad + l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + // Load the elements from the previous chunk that are needed for convolution. 
+ if (l_idx < kWidth - 1) { + input_t x_vals_load[kNElts] = {0}; + if (chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) >= 0 + && chunk_l_id * kChunkSizeL + l_idx - (kWidth - 1) < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + reinterpret_cast(x_vals_load)[0] = *reinterpret_cast(x - (kWidth - 1) * params.x_l_stride); + } + reinterpret_cast(x_smem[l_idx])[c_idx] = reinterpret_cast(x_vals_load)[0]; + } + + __syncthreads(); + + constexpr int kLPerThread = std::min(kChunkSizeL * kChunkSizeC / kNThreads, kChunkSizeL); + static_assert(kLPerThread * kNThreads == kChunkSizeL * kChunkSizeC); + constexpr int kNThreadsPerRow = kChunkSizeL / kLPerThread; + static_assert(kNThreadsPerRow * kLPerThread == kChunkSizeL); + // kChunkSizeL, kLPerThread, kNThreadsPerRow should be powers of 2 for simplicity + static_assert((kChunkSizeL & (kChunkSizeL - 1)) == 0); + static_assert((kLPerThread & (kLPerThread - 1)) == 0); + static_assert((kNThreadsPerRow & (kNThreadsPerRow - 1)) == 0); + static_assert(kNThreadsPerRow <= 32); + + const int row_idx = tid / kNThreadsPerRow; + const int col_idx = tid % kNThreadsPerRow; + + float bias_val = params.bias_ptr == nullptr || chunk_c_id * kChunkSizeC + row_idx >= params.dim ? 
0.f : float(reinterpret_cast(params.bias_ptr)[chunk_c_id * kChunkSizeC + row_idx]); + float weight_vals[kWidth] = {0}; + if (chunk_c_id + kChunkSizeC + row_idx < params.dim) { + #pragma unroll + for (int w = 0; w < kWidth; ++w) { + weight_vals[w] = weight[row_idx * params.weight_c_stride + w * params.weight_width_stride]; + } + } + float x_vals[kWidth - 1 + kLPerThread]; + #pragma unroll + for (int i = 0; i < kWidth - 1 + kLPerThread; ++i) { + x_vals[i] = float(x_smem[col_idx * kLPerThread + i][row_idx]); + } + + float out_vals[kLPerThread]; + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { + out_vals[i] = bias_val; + #pragma unroll + for (int w = 0; w < kWidth; ++w) { out_vals[i] += weight_vals[w] * x_vals[i + w]; } + if (params.silu_activation) {out_vals[i] = out_vals[i] / (1 + expf(-out_vals[i])); } + } + + // Since kNThreadsPerRow is a power of 2 and <= 32, we only need syncwarp and not syncthreads. + __syncwarp(); + #pragma unroll + for (int i = 0; i < kLPerThread; ++i) { x_smem[col_idx * kLPerThread + i][row_idx] = out_vals[i]; } + __syncthreads(); + + #pragma unroll + for (int l = 0; l < Ktraits::kNLoads; ++l) { + input_t out_vals_store[kNElts]; + reinterpret_cast(out_vals_store)[0] = reinterpret_cast(x_smem[l * kLPerLoad + l_idx])[c_idx]; + if (chunk_l_id * kChunkSizeL + l * kLPerLoad + l_idx < params.seqlen + && chunk_c_id * kChunkSizeC + c_idx * kNElts < params.dim) { + *reinterpret_cast(out + l * kLPerLoad * params.out_l_stride) = reinterpret_cast(out_vals_store)[0]; + } + } + +} + +template +void causal_conv1d_channellast_fwd_launch(ConvParamsBase ¶ms, cudaStream_t stream) { + using Ktraits = Causal_conv1d_channellast_fwd_kernel_traits; + // constexpr int kSmemSize = Ktraits::kSmemSize; + constexpr int kChunkSizeL = Ktraits::kChunkSizeL; + constexpr int kChunkSizeC = Ktraits::kNEltsPerRow; + const int n_chunks_L = (params.seqlen + kChunkSizeL - 1) / kChunkSizeL; + const int n_chunks_C = (params.dim + kChunkSizeC - 1) / kChunkSizeC; + // 
printf("n_chunks_L: %d, n_chunks_C: %d\n", n_chunks_L, n_chunks_C); + dim3 grid(params.batch, n_chunks_L, n_chunks_C); + dim3 block(Ktraits::kNThreads); + auto kernel = &causal_conv1d_channellast_fwd_kernel; + // if (kSmemSize >= 48 * 1024) { + // C10_CUDA_CHECK(cudaFuncSetAttribute( + // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + // } + // kernel<<>>(params); + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_channellast_fwd_launch<128, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_channellast_fwd_launch<128, 3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_channellast_fwd_launch<128, 4, input_t, weight_t>(params, stream); + } +} + +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); + +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template 
void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_channellast_fwd_cuda(ConvParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu new file mode 100644 index 0000000000000000000000000000000000000000..713e0ac883853491f9bdb0015b578657c228c1e7 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/csrc/causal_conv1d_update.cu @@ -0,0 +1,96 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK + +#include +#include + +#include "causal_conv1d.h" +#include "causal_conv1d_common.h" +#include "static_switch.h" + +template +struct Causal_conv1d_update_kernel_traits { + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static constexpr int kWidth = kWidth_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads) +void causal_conv1d_update_kernel(ConvParamsBase params) { + constexpr int kWidth = Ktraits::kWidth; + constexpr int kNThreads = Ktraits::kNThreads; + using input_t = typename Ktraits::input_t; + using weight_t = typename Ktraits::weight_t; + + const int tidx = threadIdx.x; + const int batch_id = blockIdx.x; + const int channel_id = blockIdx.y * kNThreads + tidx; + input_t *x = 
reinterpret_cast(params.x_ptr) + batch_id * params.x_batch_stride + + channel_id * params.x_c_stride; + input_t *conv_state = reinterpret_cast(params.conv_state_ptr) + batch_id * params.conv_state_batch_stride + + channel_id * params.conv_state_c_stride; + weight_t *weight = reinterpret_cast(params.weight_ptr) + channel_id * params.weight_c_stride; + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + channel_id * params.out_c_stride; + float bias_val = params.bias_ptr == nullptr || channel_id >= params.dim ? 0.f : float(reinterpret_cast(params.bias_ptr)[channel_id]); + + float weight_vals[kWidth] = {0}; + if (channel_id < params.dim) { + #pragma unroll + for (int i = 0; i < kWidth; ++i) { weight_vals[i] = float(weight[i * params.weight_width_stride]); } + } + + float x_vals[kWidth] = {0}; + if (channel_id < params.dim) { + #pragma unroll + for (int i = 0; i < kWidth - 1; ++i) { x_vals[i] = float(conv_state[(i + 1) * params.conv_state_l_stride]); } + x_vals[kWidth - 1] = float(x[0]); + #pragma unroll + for (int i = 0; i < kWidth; ++i) { conv_state[i * params.conv_state_l_stride] = input_t(x_vals[i]); } + } + + float out_val = bias_val; + #pragma unroll + for (int i = 0; i < kWidth; ++i) { out_val += weight_vals[i] * x_vals[i]; } + if (params.silu_activation) { out_val = out_val / (1 + expf(-out_val)); } + if (channel_id < params.dim) { out[0] = input_t(out_val); } +} + +template +void causal_conv1d_update_launch(ConvParamsBase ¶ms, cudaStream_t stream) { + using Ktraits = Causal_conv1d_update_kernel_traits; + dim3 grid(params.batch, (params.dim + kNThreads - 1) / kNThreads); + auto kernel = &causal_conv1d_update_kernel; + kernel<<>>(params); + C10_CUDA_KERNEL_LAUNCH_CHECK(); +} + +template +void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream) { + if (params.width == 2) { + causal_conv1d_update_launch<64, 2, input_t, weight_t>(params, stream); + } else if (params.width == 3) { + causal_conv1d_update_launch<64, 
3, input_t, weight_t>(params, stream); + } else if (params.width == 4) { + causal_conv1d_update_launch<64, 4, input_t, weight_t>(params, stream); + } +} + +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); +template void causal_conv1d_update_cuda(ConvParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/causal-conv1d/csrc/static_switch.h b/source_code/SegMamba/causal-conv1d/csrc/static_switch.h new file mode 100644 index 0000000000000000000000000000000000000000..0f4ad3eb62235443d15c454b6691c2ec63645219 --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/csrc/static_switch.h @@ -0,0 +1,25 @@ +// Inspired by https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h +// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h + +#pragma once + +/// @param COND - a boolean expression to switch by +/// @param CONST_NAME - a name given for the constexpr bool variable. +/// @param ... - code to execute for true and false +/// +/// Usage: +/// ``` +/// BOOL_SWITCH(flag, BoolConst, [&] { +/// some_function(...); +/// }); +/// ``` +#define BOOL_SWITCH(COND, CONST_NAME, ...) 
\ + [&] { \ + if (COND) { \ + static constexpr bool CONST_NAME = true; \ + return __VA_ARGS__(); \ + } else { \ + static constexpr bool CONST_NAME = false; \ + return __VA_ARGS__(); \ + } \ + }() diff --git a/source_code/SegMamba/causal-conv1d/setup.py b/source_code/SegMamba/causal-conv1d/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..12e36bf988215a4c536278026e6f4401e66534da --- /dev/null +++ b/source_code/SegMamba/causal-conv1d/setup.py @@ -0,0 +1,264 @@ +# Copyright (c) 2023, Tri Dao. +import sys +import warnings +import os +import re +import ast +from pathlib import Path +from packaging.version import parse, Version +import platform + +from setuptools import setup, find_packages +import subprocess + +import urllib.request +import urllib.error +from wheel.bdist_wheel import bdist_wheel as _bdist_wheel + +import torch +from torch.utils.cpp_extension import ( + BuildExtension, + CppExtension, + CUDAExtension, + CUDA_HOME, +) + + +with open("README.md", "r", encoding="utf-8") as fh: + long_description = fh.read() + + +# ninja build does not work unless include_dirs are abs path +this_dir = os.path.dirname(os.path.abspath(__file__)) + +PACKAGE_NAME = "causal_conv1d" + +BASE_WHEEL_URL = "https://github.com/Dao-AILab/causal-conv1d/releases/download/{tag_name}/{wheel_name}" + +# FORCE_BUILD: Force a fresh build locally, instead of attempting to find prebuilt wheels +# SKIP_CUDA_BUILD: Intended to allow CI to use a simple `python setup.py sdist` run to copy over raw files, without any cuda compilation +FORCE_BUILD = os.getenv("CAUSAL_CONV1D_FORCE_BUILD", "FALSE") == "TRUE" +SKIP_CUDA_BUILD = os.getenv("CAUSAL_CONV1D_SKIP_CUDA_BUILD", "FALSE") == "TRUE" +# For CI, we want the option to build with C++11 ABI since the nvcr images use C++11 ABI +FORCE_CXX11_ABI = os.getenv("CAUSAL_CONV1D_FORCE_CXX11_ABI", "FALSE") == "TRUE" + + +def get_platform(): + """ + Returns the platform name as used in wheel filenames. 
+ """ + if sys.platform.startswith("linux"): + return "linux_x86_64" + elif sys.platform == "darwin": + mac_version = ".".join(platform.mac_ver()[0].split(".")[:2]) + return f"macosx_{mac_version}_x86_64" + elif sys.platform == "win32": + return "win_amd64" + else: + raise ValueError("Unsupported platform: {}".format(sys.platform)) + + +def get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output( + [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True + ) + output = raw_output.split() + release_idx = output.index("release") + 1 + bare_metal_version = parse(output[release_idx].split(",")[0]) + + return raw_output, bare_metal_version + + +def check_if_cuda_home_none(global_option: str) -> None: + if CUDA_HOME is not None: + return + # warn instead of error because user could be downloading prebuilt wheels, so nvcc won't be necessary + # in that case. + warnings.warn( + f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? " + "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, " + "only images whose names contain 'devel' will provide nvcc." + ) + + +def append_nvcc_threads(nvcc_extra_args): + return nvcc_extra_args + ["--threads", "4"] + + +cmdclass = {} +ext_modules = [] + +if not SKIP_CUDA_BUILD: + print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__)) + TORCH_MAJOR = int(torch.__version__.split(".")[0]) + TORCH_MINOR = int(torch.__version__.split(".")[1]) + + check_if_cuda_home_none("causal_conv1d") + # Check, if CUDA11 is installed for compute capability 8.0 + cc_flag = [] + if CUDA_HOME is not None: + _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME) + if bare_metal_version < Version("11.6"): + raise RuntimeError( + "causal_conv1d is only supported on CUDA 11.6 and above. " + "Note: make sure nvcc has a supported version by running nvcc -V." 
+ ) + + cc_flag.append("-gencode") + cc_flag.append("arch=compute_70,code=sm_70") + cc_flag.append("-gencode") + cc_flag.append("arch=compute_80,code=sm_80") + if bare_metal_version >= Version("11.8"): + cc_flag.append("-gencode") + cc_flag.append("arch=compute_90,code=sm_90") + + # HACK: The compiler flag -D_GLIBCXX_USE_CXX11_ABI is set to be the same as + # torch._C._GLIBCXX_USE_CXX11_ABI + # https://github.com/pytorch/pytorch/blob/8472c24e3b5b60150096486616d98b7bea01500b/torch/utils/cpp_extension.py#L920 + if FORCE_CXX11_ABI: + torch._C._GLIBCXX_USE_CXX11_ABI = True + + ext_modules.append( + CUDAExtension( + name="causal_conv1d_cuda", + sources=[ + "csrc/causal_conv1d.cpp", + "csrc/causal_conv1d_fwd.cu", + "csrc/causal_conv1d_bwd.cu", + "csrc/causal_conv1d_update.cu", + ], + extra_compile_args={ + "cxx": ["-O3"], + "nvcc": append_nvcc_threads( + [ + "-O3", + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "-U__CUDA_NO_BFLOAT16_OPERATORS__", + "-U__CUDA_NO_BFLOAT16_CONVERSIONS__", + "-U__CUDA_NO_BFLOAT162_OPERATORS__", + "-U__CUDA_NO_BFLOAT162_CONVERSIONS__", + "--expt-relaxed-constexpr", + "--expt-extended-lambda", + "--use_fast_math", + "--ptxas-options=-v", + "-lineinfo", + ] + + cc_flag + ), + }, + include_dirs=[this_dir], + ) + ) + + +def get_package_version(): + with open(Path(this_dir) / "causal_conv1d" / "__init__.py", "r") as f: + version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE) + public_version = ast.literal_eval(version_match.group(1)) + local_version = os.environ.get("CAUSAL_CONV1D_LOCAL_VERSION") + if local_version: + return f"{public_version}+{local_version}" + else: + return str(public_version) + + +def get_wheel_url(): + # Determine the version numbers that will be used to determine the correct wheel + # We're using the CUDA version used to build torch, not the one currently installed + # _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME) + torch_cuda_version = parse(torch.version.cuda) 
+ torch_version_raw = parse(torch.__version__) + # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.2 + # to save CI time. Minor versions should be compatible. + torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.2") + python_version = f"cp{sys.version_info.major}{sys.version_info.minor}" + platform_name = get_platform() + causal_conv1d_version = get_package_version() + # cuda_version = f"{cuda_version_raw.major}{cuda_version_raw.minor}" + cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}" + torch_version = f"{torch_version_raw.major}.{torch_version_raw.minor}" + cxx11_abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper() + + # Determine wheel URL based on CUDA version, torch version, python version and OS + wheel_filename = f"{PACKAGE_NAME}-{causal_conv1d_version}+cu{cuda_version}torch{torch_version}cxx11abi{cxx11_abi}-{python_version}-{python_version}-{platform_name}.whl" + wheel_url = BASE_WHEEL_URL.format( + tag_name=f"v{causal_conv1d_version}", wheel_name=wheel_filename + ) + return wheel_url, wheel_filename + + +class CachedWheelsCommand(_bdist_wheel): + """ + The CachedWheelsCommand plugs into the default bdist wheel, which is ran by pip when it cannot + find an existing wheel (which is currently the case for all installs). We use + the environment parameters to detect whether there is already a pre-built version of a compatible + wheel available and short-circuits the standard full build pipeline. 
+ """ + + def run(self): + if FORCE_BUILD: + return super().run() + + wheel_url, wheel_filename = get_wheel_url() + print("Guessing wheel URL: ", wheel_url) + try: + urllib.request.urlretrieve(wheel_url, wheel_filename) + + # Make the archive + # Lifted from the root wheel processing command + # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85 + if not os.path.exists(self.dist_dir): + os.makedirs(self.dist_dir) + + impl_tag, abi_tag, plat_tag = self.get_tag() + archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}" + + wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl") + print("Raw wheel path", wheel_path) + os.rename(wheel_filename, wheel_path) + except urllib.error.HTTPError: + print("Precompiled wheel not found. Building from source...") + # If the wheel could not be downloaded, build from source + super().run() + + +setup( + name=PACKAGE_NAME, + version=get_package_version(), + packages=find_packages( + exclude=( + "build", + "csrc", + "include", + "tests", + "dist", + "docs", + "benchmarks", + "causal_conv1d.egg-info", + ) + ), + author="Tri Dao", + author_email="tri@tridao.me", + description="Causal depthwise conv1d in CUDA, with a PyTorch interface", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/Dao-AILab/causal-conv1d", + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: BSD License", + "Operating System :: Unix", + ], + ext_modules=ext_modules, + cmdclass={"bdist_wheel": CachedWheelsCommand, "build_ext": BuildExtension} + if ext_modules + else { + "bdist_wheel": CachedWheelsCommand, + }, + python_requires=">=3.7", + install_requires=[ + "torch", + "packaging", + "ninja", + ], +) diff --git a/source_code/SegMamba/causal-conv1d/tests/test_causal_conv1d.py b/source_code/SegMamba/causal-conv1d/tests/test_causal_conv1d.py new file mode 100644 index 
# Copyright (C) 2023, Tri Dao.

import math

import torch
import pytest

from einops import rearrange

from causal_conv1d.causal_conv1d_interface import causal_conv1d_fn, causal_conv1d_ref
from causal_conv1d.causal_conv1d_interface import causal_conv1d_update, causal_conv1d_update_ref


@pytest.mark.parametrize("channel_last", [False, True])
@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("silu_activation", [False, True])
@pytest.mark.parametrize("has_bias", [False, True])
@pytest.mark.parametrize("width", [2, 3, 4])
@pytest.mark.parametrize(
    "seqlen", [8, 16, 32, 64, 128, 151, 256, 372, 512, 784, 1024, 1134, 2048, 4096]
)
def test_causal_conv1d(seqlen, width, has_bias, silu_activation, itype, channel_last):
    """Compare the CUDA causal_conv1d_fn forward and backward against the reference implementation."""
    device = "cuda"
    rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3)
    if itype == torch.bfloat16:
        rtol, atol = 1e-2, 5e-2
    rtolw, atolw = (1e-3, 1e-3)
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    dim = 4096 + 32  # Try dim not divisible by 64
    # Slice out of a larger allocation so x is non-contiguous, exercising stride handling.
    if not channel_last:
        x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_()
    else:
        x = rearrange(
            torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s"
        ).requires_grad_()
    weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
    bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) if has_bias else None
    x_ref = x.detach().clone().requires_grad_()
    weight_ref = weight.detach().clone().requires_grad_()
    bias_ref = bias.detach().clone().requires_grad_() if bias is not None else None
    activation = None if not silu_activation else "silu"
    out = causal_conv1d_fn(x, weight, bias, activation=activation)
    out_ref = causal_conv1d_ref(x_ref, weight_ref, bias_ref, activation=activation)

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)

    g = torch.randn_like(out)
    out_ref.backward(g)
    out.backward(g)

    print(f"dx max diff: {(x.grad - x_ref.grad).abs().max().item()}")
    print(f"dweight max diff: {(weight.grad - weight_ref.grad).abs().max().item()}")
    if has_bias:
        print(f"dbias max diff: {(bias.grad - bias_ref.grad).abs().max().item()}")

    assert torch.allclose(x.grad, x_ref.grad.to(dtype=itype), rtol=rtol, atol=atol)
    assert torch.allclose(weight.grad, weight_ref.grad, rtol=rtolw, atol=atolw)
    if has_bias:
        assert torch.allclose(bias.grad, bias_ref.grad, rtol=rtolw, atol=atolw)


@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16])
@pytest.mark.parametrize("silu_activation", [False, True])
@pytest.mark.parametrize("has_bias", [False, True])
@pytest.mark.parametrize("width", [2, 3, 4])
@pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096])
def test_causal_conv1d_update(dim, width, has_bias, silu_activation, itype):
    """Compare the single-step causal_conv1d_update (and its conv_state mutation) against the reference."""
    device = "cuda"
    rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (3e-3, 5e-3)
    if itype == torch.bfloat16:
        rtol, atol = 1e-2, 5e-2
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    x = torch.randn(batch_size, dim, device=device, dtype=itype)
    conv_state = torch.randn(batch_size, dim, width, device=device, dtype=itype)
    weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
    bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) if has_bias else None
    conv_state_ref = conv_state.detach().clone()
    activation = None if not silu_activation else "silu"
    out = causal_conv1d_update(x, conv_state, weight, bias, activation=activation)
    out_ref = causal_conv1d_update_ref(x, conv_state_ref, weight, bias, activation=activation)

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    # The update mutates conv_state in place; both implementations must agree exactly.
    assert torch.equal(conv_state, conv_state_ref)
    assert torch.allclose(out, out_ref, rtol=rtol, atol=atol)


@pytest.mark.parametrize('channel_last', [True])
@pytest.mark.parametrize('itype', [torch.bfloat16])
@pytest.mark.parametrize('silu_activation', [True])
@pytest.mark.parametrize('has_bias', [True])
@pytest.mark.parametrize('width', [4])
@pytest.mark.parametrize("seqlen", [2048])
def test_causal_conv1d_race_condition(seqlen, width, has_bias, silu_activation, itype, channel_last):
    """Run forward+backward many times on identical inputs to catch nondeterminism / kernel race conditions."""
    device = "cuda"
    # set seed
    torch.random.manual_seed(0)
    batch_size = 2
    dim = 4096 + 32  # Try dim not divisible by 64
    if not channel_last:
        x = torch.randn(batch_size, 4096 + dim + 64, seqlen, device=device, dtype=itype)[:, 4096:4096 + dim, :].requires_grad_()
    else:
        x = rearrange(
            torch.randn(batch_size, seqlen, 4096 + dim + 64, device=device, dtype=itype)[:, :, 4096:4096 + dim], "b s d -> b d s"
        ).requires_grad_()
    weight = torch.randn(dim, width, device=device, dtype=torch.float32, requires_grad=True)
    bias = torch.randn(dim, device=device, dtype=torch.float32, requires_grad=True) if has_bias else None
    activation = None if not silu_activation else "silu"
    out0 = causal_conv1d_fn(x, weight, bias, activation=activation)
    g = torch.randn_like(out0)
    # BUG FIX: the original always passed (x, weight, bias) to autograd.grad,
    # which raises when bias is None (has_bias=False); build the tuple conditionally.
    grad_inputs = (x, weight, bias) if has_bias else (x, weight)
    grads0 = torch.autograd.grad(out0, grad_inputs, g)
    dx0, dw0 = grads0[0], grads0[1]
    db0 = grads0[2] if has_bias else None
    dw_atol = 1e-4
    db_atol = 1e-4

    for _ in range(10000):
        out = causal_conv1d_fn(x, weight, bias, activation=activation)
        grads = torch.autograd.grad(out, grad_inputs, g)
        dx, dw = grads[0], grads[1]
        dw_equal = torch.allclose(dw, dw0, atol=dw_atol)
        if has_bias:
            db = grads[2]
            db_equal = torch.allclose(db, db0, atol=db_atol)
        assert torch.equal(out, out0)
        assert torch.equal(dx, dx0)
        assert dw_equal
        if has_bias:
            # BUG FIX: the original asserted dw_equal a second time here,
            # so the bias gradient was never actually checked.
            assert db_equal
from batchgenerators.dataloading.nondet_multi_threaded_augmenter import NonDetMultiThreadedAugmenter


class LimitedLenWrapper(NonDetMultiThreadedAugmenter):
    """NonDetMultiThreadedAugmenter whose __len__ reports a caller-supplied length.

    The wrapped augmenter streams batches indefinitely; training loops that
    iterate "one epoch" need a finite length, which is provided here.
    """

    def __init__(self, my_imaginary_length, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.len = my_imaginary_length

    def __len__(self):
        return self.len


import inspect
import multiprocessing
import os
import shutil
import sys
import warnings
from copy import deepcopy
from datetime import datetime
from time import time, sleep
from typing import Union, Tuple, List
import numpy as np
import torch
from batchgenerators.dataloading.single_threaded_augmenter import SingleThreadedAugmenter
from batchgenerators.transforms.abstract_transforms import AbstractTransform, Compose
from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, \
    ContrastAugmentationTransform, GammaTransform
from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform
from batchgenerators.transforms.resample_transforms import SimulateLowResolutionTransform
from batchgenerators.transforms.spatial_transforms import SpatialTransform, MirrorTransform
from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor


def _spatial_transform(patch_size):
    """Rotation (+/-30 degrees per axis) and scaling transform shared by all training pipelines."""
    angle = (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi)
    return SpatialTransform(
        patch_size, patch_center_dist_from_border=None,
        do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),
        do_rotation=True, angle_x=angle, angle_y=angle, angle_z=angle,
        p_rot_per_axis=1,  # todo experiment with this
        do_scale=True, scale=(0.7, 1.4),
        border_mode_data="constant", border_cval_data=0, order_data=3,
        border_mode_seg="constant", border_cval_seg=-1, order_seg=1,
        random_crop=False,  # random cropping is part of our dataloaders
        p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,
        independent_scale_for_each_axis=False  # todo experiment with this
    )


def _intensity_transforms(ignore_axes=None):
    """Shared intensity augmentations: noise, blur, brightness, contrast, low-res simulation, gamma."""
    return [
        GaussianNoiseTransform(p_per_sample=0.1),
        GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,
                              p_per_channel=0.5),
        BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15),
        ContrastAugmentationTransform(p_per_sample=0.15),
        SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,
                                       p_per_channel=0.5,
                                       order_downsample=0, order_upsample=3, p_per_sample=0.25,
                                       ignore_axes=ignore_axes),
        GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1),
        GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3),
    ]


def _compose_train_transforms(patch_size, mirror_axes, *, spatial, intensity, mirror):
    """Assemble a training pipeline from the selected stages.

    Always finishes with label cleanup (-1 -> 0) and numpy-to-tensor conversion,
    matching every original get_train_transforms* variant.
    """
    tr_transforms = []
    if spatial:
        tr_transforms.append(_spatial_transform(patch_size))
    if intensity:
        tr_transforms.extend(_intensity_transforms(ignore_axes=None))
    if mirror and mirror_axes is not None and len(mirror_axes) > 0:
        tr_transforms.append(MirrorTransform(mirror_axes))
    tr_transforms.append(RemoveLabelTransform(-1, 0))
    tr_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
    return Compose(tr_transforms)


def get_train_transforms(patch_size, mirror_axes=None):
    """Full augmentation pipeline: spatial + intensity + mirroring."""
    return _compose_train_transforms(patch_size, mirror_axes, spatial=True, intensity=True, mirror=True)


def get_train_transforms_nomirror(patch_size, mirror_axes=None):
    """Spatial + intensity augmentation without mirroring (mirror_axes is accepted but ignored)."""
    return _compose_train_transforms(patch_size, mirror_axes, spatial=True, intensity=True, mirror=False)


def get_train_transforms_onlymirror(patch_size, mirror_axes=None):
    """Intensity + mirroring without the spatial (rotation/scaling) stage.

    NOTE(review): despite the name, the intensity transforms are kept — only the
    SpatialTransform is dropped. This matches the original behavior exactly.
    """
    return _compose_train_transforms(patch_size, mirror_axes, spatial=False, intensity=True, mirror=True)


def get_train_transforms_onlyspatial(patch_size, mirror_axes=None):
    """Spatial + mirroring without the intensity transforms."""
    return _compose_train_transforms(patch_size, mirror_axes, spatial=True, intensity=False, mirror=True)


def get_train_transforms_noaug(patch_size, mirror_axes=None):
    """No augmentation: only label cleanup and tensor conversion."""
    return _compose_train_transforms(patch_size, mirror_axes, spatial=False, intensity=False, mirror=False)


def get_validation_transforms() -> AbstractTransform:
    """Validation pipeline: label cleanup (-1 -> 0) and numpy-to-tensor conversion only."""
    val_transforms = []
    val_transforms.append(RemoveLabelTransform(-1, 0))
    val_transforms.append(NumpyToTensor(['data', 'seg'], 'float'))
    return Compose(val_transforms)
import numpy as np
from typing import Union, Tuple
import time

class DataLoaderMultiProcess:
    """Samples random patches from an indexable 3D dataset to build training batches.

    Each call to generate_train_batch / __next__ draws batch_size cases (with
    replacement), crops a patch_size region from each (optionally forced to
    contain foreground), pads it to the full patch size and returns a dict with
    'data', 'seg', 'properties' and 'keys' (plus global crops when the dataset
    provides them).
    """

    def __init__(self, dataset,
                 patch_size,
                 batch_size=2,
                 oversample_foreground_percent=0.33,
                 probabilistic_oversampling=False,
                 print_time=False):
        # dataset: indexable; each item is a dict with "data" (c, x, y, z),
        # "seg" (c, x, y, z) and "properties" (must contain 'class_locations').
        self.dataset = dataset
        self.patch_size = patch_size
        self.batch_size = batch_size
        self.keys = list(range(len(dataset)))
        self.thread_id = 0
        self.oversample_foreground_percent = oversample_foreground_percent
        # extra padding applied around each case before sampling (none by default)
        self.need_to_pad = np.array([0, 0, 0]).astype(int)

        # pick the oversampling rule once, at construction time
        self.get_do_oversample = (self._oversample_last_XX_percent
                                  if not probabilistic_oversampling
                                  else self._probabilistic_oversampling)
        self.data_shape = None  # lazily derived from the first case
        self.seg_shape = None
        self.print_time = print_time

    def determine_shapes(self):
        """Derive the batch-level data/seg array shapes from the first case of the dataset."""
        item = self.dataset[0]
        data, seg = item["data"], item["seg"]
        num_color_channels = data.shape[0]
        num_output_channels = seg.shape[0]
        patch_size = self.patch_size
        data_shape = (self.batch_size, num_color_channels, patch_size[0], patch_size[1], patch_size[2])
        seg_shape = (self.batch_size, num_output_channels, patch_size[0], patch_size[1], patch_size[2])
        return data_shape, seg_shape

    def generate_train_batch(self):
        """Assemble one training batch of randomly cropped, zero-padded patches."""
        selected_keys = np.random.choice(self.keys, self.batch_size, True, None)
        if self.data_shape is None:
            self.data_shape, self.seg_shape = self.determine_shapes()

        data_all = np.zeros(self.data_shape, dtype=np.float32)
        seg_all = np.zeros(self.seg_shape, dtype=np.float32)
        data_all_global = np.zeros(self.data_shape, dtype=np.float32)
        seg_all_global = np.zeros(self.seg_shape, dtype=np.float32)
        has_global = False

        case_properties = []

        for j, key in enumerate(selected_keys):
            force_fg = self.get_do_oversample(j)
            s = time.time()
            item = self.dataset[key]
            e = time.time()
            if self.print_time:
                print(f"read single data time is {e - s}")
            data, seg, properties = item["data"], item["seg"], item["properties"]

            # BUG FIX: the original kept data_global/seg_global from previous loop
            # iterations (stale state); fetch them per item instead.
            data_global = item.get("data_global")
            seg_global = item.get("seg_global")

            case_properties.append(properties)
            shape = data.shape[1:]
            dim = len(shape)

            s = time.time()
            bbox_lbs, bbox_ubs = self.get_bbox(shape, force_fg, properties['class_locations'])
            e = time.time()
            if self.print_time:
                print(f"get bbox time is {e - s}")

            # First crop to the part of the bbox that lies inside the data; the
            # smaller crop is then cheaper to pad up to the full patch size.
            valid_bbox_lbs = [max(0, bbox_lbs[i]) for i in range(dim)]
            valid_bbox_ubs = [min(shape[i], bbox_ubs[i]) for i in range(dim)]

            this_slice = tuple([slice(0, data.shape[0])]
                               + [slice(lo, hi) for lo, hi in zip(valid_bbox_lbs, valid_bbox_ubs)])
            data = data[this_slice]

            this_slice = tuple([slice(0, seg.shape[0])]
                               + [slice(lo, hi) for lo, hi in zip(valid_bbox_lbs, valid_bbox_ubs)])
            seg = seg[this_slice]

            s = time.time()
            padding = [(-min(0, bbox_lbs[i]), max(bbox_ubs[i] - shape[i], 0)) for i in range(dim)]
            data_all[j] = np.pad(data, ((0, 0), *padding), 'constant', constant_values=0)
            # NOTE(review): nnU-Net pads seg with -1 (mapped to 0 later by
            # RemoveLabelTransform); padding with 0 directly gives the same final
            # labels here — confirm if label -1 ever needs to survive augmentation.
            seg_all[j] = np.pad(seg, ((0, 0), *padding), 'constant', constant_values=0)

            if data_global is not None:
                data_all_global[j] = data_global
                has_global = True
            if seg_global is not None:
                seg_all_global[j] = seg_global

            e = time.time()
            if self.print_time:
                print(f"box is {bbox_lbs, bbox_ubs}, padding is {padding}")
                print(f"setting data value time is {e - s}")

        if not has_global:
            return {'data': data_all,
                    'seg': seg_all, 'properties': case_properties,
                    'keys': selected_keys}

        return {'data': data_all, "data_global": data_all_global,
                "seg_global": seg_all_global,
                'seg': seg_all, 'properties': case_properties,
                'keys': selected_keys}

    def __next__(self):
        return self.generate_train_batch()

    def set_thread_id(self, thread_id):
        # used by multi-threaded augmenters to tag worker instances
        self.thread_id = thread_id

    def _oversample_last_XX_percent(self, sample_idx: int) -> bool:
        """Force foreground for the last oversample_foreground_percent samples of the minibatch."""
        return not sample_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))

    def _probabilistic_oversampling(self, sample_idx: int) -> bool:
        """Force foreground independently per sample with probability oversample_foreground_percent."""
        return np.random.uniform() < self.oversample_foreground_percent

    def get_bbox(self, data_shape: np.ndarray, force_fg: bool, class_locations: Union[dict, None],
                 overwrite_class: Union[int, Tuple[int, ...]] = None, verbose: bool = False):
        """Choose lower/upper bounds of a patch-sized bbox inside (a padded view of) the case.

        When force_fg is True, the bbox is centered on a randomly chosen voxel of a
        randomly chosen foreground class from class_locations (or overwrite_class);
        otherwise it is sampled uniformly. Bounds may be negative / exceed the data
        shape — the caller crops and pads accordingly.
        """
        need_to_pad = self.need_to_pad.copy()
        dim = len(data_shape)

        # if the case (plus padding) is smaller than the patch, pad enough to fit
        for d in range(dim):
            if need_to_pad[d] + data_shape[d] < self.patch_size[d]:
                need_to_pad[d] = self.patch_size[d] - data_shape[d]

        # valid range for the lower bound: -need_to_pad//2 .. shape - patch + ceil(need_to_pad/2)
        lbs = [- need_to_pad[i] // 2 for i in range(dim)]
        ubs = [data_shape[i] + need_to_pad[i] // 2 + need_to_pad[i] % 2 - self.patch_size[i] for i in range(dim)]

        if not force_fg:
            bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)]
        else:
            assert class_locations is not None, 'if force_fg is set class_locations cannot be None'
            if overwrite_class is not None:
                assert overwrite_class in class_locations.keys(), 'desired class ("overwrite_class") does not ' \
                                                                  'have class_locations (missing key)'
            # preprocessing already recorded per-class voxel locations; keys may be tuples
            eligible_classes_or_regions = [i for i in class_locations.keys() if len(class_locations[i]) > 0]

            if len(eligible_classes_or_regions) == 0:
                # image contains no foreground voxels at all
                selected_class = None
                if verbose:
                    print('case does not contain any foreground classes')
            else:
                selected_class = eligible_classes_or_regions[np.random.choice(len(eligible_classes_or_regions))] if \
                    (overwrite_class is None or (overwrite_class not in eligible_classes_or_regions)) else overwrite_class

            voxels_of_that_class = class_locations[selected_class] if selected_class is not None else None

            if voxels_of_that_class is not None and len(voxels_of_that_class) > 0:
                selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))]
                # selected_voxel is the patch centre; index i + 1 because dim 0 is the channel axis
                bbox_lbs = [max(lbs[i], selected_voxel[i + 1] - self.patch_size[i] // 2) for i in range(dim)]
            else:
                # no foreground available -> fall back to random cropping
                bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)]

        bbox_ubs = [bbox_lbs[i] + self.patch_size[i] for i in range(dim)]

        return bbox_lbs, bbox_ubs
from sklearn.model_selection import KFold  # K-fold cross-validation
import pickle
import os
import json
import math
import numpy as np
import torch
import SimpleITK as sitk
from tqdm import tqdm
from torch.utils.data import Dataset
import glob
from light_training.dataloading.utils import unpack_dataset
import random

class MedicalDataset(Dataset):
    """Dataset over preprocessed .npz cases with side-car .pkl property files.

    Each entry of datalist is a path to case.npz; arrays are read from the
    unpacked case.npy / case_seg.npy files (memory-mapped). When test is True
    no segmentation is loaded.
    """

    def __init__(self, datalist, test=False) -> None:
        super().__init__()

        self.datalist = datalist
        self.test = test

        # cache all per-case property dicts up front (they are small)
        self.data_cached = []
        for p in tqdm(self.datalist, total=len(self.datalist)):
            self.data_cached.append(self.load_pkl(p))

        # unpack each directory containing cases exactly once (.npz -> .npy)
        print(f"unpacking data ....")
        folder = []
        for p in self.datalist:
            f = os.path.dirname(p)
            if f not in folder:
                folder.append(f)
        for f in folder:
            unpack_dataset(f,
                           unpack_segmentation=True,
                           overwrite_existing=False,
                           num_processes=8)

        print(f"data length is {len(self.datalist)}")

    def load_pkl(self, data_path):
        """Load the properties pickle stored next to data_path (.npz -> .pkl)."""
        properties_path = f"{data_path[:-4]}.pkl"
        # BUG FIX: the original opened the file without ever closing it
        with open(properties_path, "rb") as df:
            return pickle.load(df)

    def post(self, batch_data):
        """Hook for subclasses to post-process a batch; identity by default."""
        return batch_data

    def read_data(self, data_path):
        """Return (image, seg) memory-mapped arrays for a case; seg is None in test mode."""
        image_path = data_path.replace(".npz", ".npy")
        seg_path = data_path.replace(".npz", "_seg.npy")
        image_data = np.load(image_path, "r+")

        seg_data = None
        if not self.test:
            seg_data = np.load(seg_path, "r+")
        return image_data, seg_data

    def __getitem__(self, i):
        image, seg = self.read_data(self.datalist[i])
        properties = self.data_cached[i]

        if seg is None:
            return {"data": image,
                    "properties": properties}
        return {"data": image,
                "seg": seg,
                "properties": properties}

    def __len__(self):
        return len(self.datalist)

def get_train_test_loader_from_test_list(data_dir, test_list):
    """Split the .npz cases in data_dir into [train_ds, test_ds] using test_list (names ending in .nii.gz)."""
    all_paths = glob.glob(f"{data_dir}/*.npz")

    test_names = [t.replace(".nii.gz", "") for t in test_list]

    test_datalist = []
    train_datalist = []
    for p in all_paths:
        # case name = basename up to the first dot (os.path.basename is
        # equivalent to the original split("/") on posix, and portable)
        name = os.path.basename(p).split(".")[0]
        if name in test_names:
            test_datalist.append(p)
        else:
            train_datalist.append(p)

    print(f"training data is {len(train_datalist)}")
    print(f"test data is {len(test_datalist)}", test_datalist)

    train_ds = MedicalDataset(train_datalist)
    test_ds = MedicalDataset(test_datalist)

    loader = [train_ds, test_ds]

    return loader

def get_kfold_data(data_paths, n_splits, shuffle=False):
    """Partition data_paths into n_splits folds; returns a list of {"train_data", "val_data"} dicts."""
    X = np.arange(len(data_paths))
    kfold = KFold(n_splits=n_splits, shuffle=shuffle)
    return_res = []
    for train_idx, val_idx in kfold.split(X):
        fold_train = [data_paths[i] for i in train_idx]
        fold_val = [data_paths[j] for j in val_idx]
        return_res.append({"train_data": fold_train, "val_data": fold_val})

    return return_res

def get_kfold_loader(data_dir, fold=0, test_dir=None):
    """Return [train_ds, val_ds, test_ds] for the given 5-fold split; test_ds is None without test_dir."""
    all_paths = glob.glob(f"{data_dir}/*.npz")
    fold_data = get_kfold_data(all_paths, 5)[fold]

    train_datalist = fold_data["train_data"]
    val_datalist = fold_data["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_paths = glob.glob(f"{test_dir}/*.npz")
        test_ds = MedicalDataset(test_paths, test=True)
    else:
        test_ds = None

    loader = [train_ds, val_ds, test_ds]

    return loader

def get_all_training_loader(data_dir, fold=0, test_dir=None):
    """Train on ALL labeled data; fold only selects which slice doubles as validation data."""
    all_paths = glob.glob(f"{data_dir}/*.npz")
    fold_data = get_kfold_data(all_paths, 5)[fold]

    train_datalist = all_paths
    val_datalist = fold_data["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_paths = glob.glob(f"{test_dir}/*.npz")
        test_ds = MedicalDataset(test_paths, test=True)
    else:
        test_ds = None

    loader = [train_ds, val_ds, test_ds]

    return loader

def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None):
    """Build [train_ds, val_ds, test_ds] from three separate directories; test_ds is None without test_dir."""
    train_datalist = glob.glob(f"{train_dir}/*.npz")
    val_datalist = glob.glob(f"{val_dir}/*.npz")

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")

    if test_dir is not None:
        test_datalist = glob.glob(f"{test_dir}/*.npz")
        print(f"test data is {len(test_datalist)}")
        test_ds = MedicalDataset(test_datalist, test=True)
    else:
        test_ds = None

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    loader = [train_ds, val_ds, test_ds]

    return loader

def get_train_val_test_loader_from_split_json(data_dir, split_json_file):
    """Build [train_ds, val_ds, test_ds] from a JSON split file with "train"/"validation"/"test" name lists."""
    # json is already imported at module level; the original re-imported it here
    with open(split_json_file, "r") as f:
        datalist = json.load(f)

    train_datalist = datalist["train"]
    val_datalist = datalist["validation"]
    test_datalist = datalist["test"]

    def add_pre(datalist):
        # prefix every relative case name with data_dir, in place
        for i in range(len(datalist)):
            datalist[i] = os.path.join(data_dir, datalist[i])

    add_pre(train_datalist)
    add_pre(val_datalist)
    add_pre(test_datalist)
    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)
    test_ds = MedicalDataset(test_datalist)

    loader = [train_ds, val_ds, test_ds]

    return loader


def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42):
    """Randomly split the cases in data_dir into train/val/test by the given rates (seeded shuffle)."""
    all_paths = glob.glob(f"{data_dir}/*.npz")

    train_number = int(len(all_paths) * train_rate)
    val_number = int(len(all_paths) * val_rate)
    test_number = int(len(all_paths) * test_rate)
    random.seed(seed)
    random.shuffle(all_paths)

    # NOTE(review): if the rates do not sum to 1 the val and test slices can
    # overlap or leave cases unused — confirm callers always pass rates summing to 1
    train_datalist = all_paths[:train_number]
    val_datalist = all_paths[train_number: train_number + val_number]
    test_datalist = all_paths[-test_number:]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    print(f"test data is {len(test_datalist)}", sorted(test_datalist))

    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)
    test_ds = MedicalDataset(test_datalist)

    loader = [train_ds, val_ds, test_ds]

    return loader

def get_train_loader_from_train(data_dir):
    """Return one MedicalDataset over every .npz case in data_dir (all data used for training)."""
    all_paths = glob.glob(f"{data_dir}/*.npz")

    train_ds = MedicalDataset(all_paths)

    return train_ds

def get_test_loader_from_test(data_dir):
    """Return one MedicalDataset over every .npz case in data_dir."""
    all_paths = glob.glob(f"{data_dir}/*.npz")

    test_ds = MedicalDataset(all_paths)

    return test_ds

def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None):
    """Like get_all_training_loader but data_dir is a list of directories whose cases are pooled."""
    all_paths = []
    for p in data_dir:
        for pp in glob.glob(f"{p}/*.npz"):
            all_paths.append(pp)

    fold_data = get_kfold_data(all_paths, 5)[fold]

    train_datalist = all_paths
    val_datalist = fold_data["val_data"]

    print(f"training data is {len(train_datalist)}")
    print(f"validation data is {len(val_datalist)}")
    train_ds = MedicalDataset(train_datalist)
    val_ds = MedicalDataset(val_datalist)

    if test_dir is not None:
        test_paths = glob.glob(f"{test_dir}/*.npz")
        test_ds = MedicalDataset(test_paths, test=True)
    else:
        test_ds = None

    # NOTE(review): the source was truncated here; tail reconstructed from the
    # identical pattern in get_all_training_loader — confirm against the original.
    loader = [train_ds, val_ds, test_ds]

    return loader
test_ds] + + return loader \ No newline at end of file diff --git a/source_code/SegMamba/light_training/dataloading/dataset_sdm_edge.py b/source_code/SegMamba/light_training/dataloading/dataset_sdm_edge.py new file mode 100644 index 0000000000000000000000000000000000000000..496d906b6b50b5fc2dde0b265ce4684b9ebc2394 --- /dev/null +++ b/source_code/SegMamba/light_training/dataloading/dataset_sdm_edge.py @@ -0,0 +1,331 @@ + +# Copyright 2020 - 2022 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from sklearn.model_selection import KFold ## K折交叉验证 +import pickle +import os +import json +import math +import numpy as np +import torch +from monai import transforms +import SimpleITK as sitk +from tqdm import tqdm +from torch.utils.data import Dataset +import glob +from light_training.dataloading.utils import unpack_dataset +import random +import torch +import numpy as np +from scipy.ndimage import distance_transform_edt as distance +from skimage import segmentation as skimage_seg +from skimage.morphology import dilation, disk +import scipy.ndimage as ndimage + +def get_edge_points(img): + """ + get edge points of a binary segmentation result + """ + dim = len(img.shape) + if (dim == 2): + strt = ndimage.generate_binary_structure(2, 1) + else: + strt = ndimage.generate_binary_structure(3, 1) + ero = ndimage.binary_erosion(img, strt) + edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8) + return edge + +def edge_3d(image_3d): + # image_3d = torch.from_numpy(image_3d) + return_edge = np.zeros_like(image_3d) + + for i in range(image_3d.shape[0]): + for j in range(image_3d.shape[1]): + return_edge[i, j] = get_edge_points(image_3d[i, j]) + + return return_edge + +def compute_sdf(img_gt, out_shape): + """ + compute the signed distance map of binary mask + input: segmentation, shape = (batch_size,c, x, y, z) + output: the Signed Distance Map (SDM) + sdf(x) = 0; x in segmentation boundary + -inf|x-y|; x in segmentation + +inf|x-y|; x out of segmentation + normalize sdf to [-1,1] + + """ + + img_gt = img_gt.astype(np.uint8) + normalized_sdf = np.zeros(out_shape) + + for b in range(out_shape[0]): # batch size + for c in range(out_shape[1]): + posmask = img_gt[b, c].astype(np.bool_) + if posmask.any(): + negmask = ~posmask + posdis = distance(posmask) + negdis = distance(negmask) + boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8) + sdf = (negdis-np.min(negdis))/(np.max(negdis)-np.min(negdis)) - 
(posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis)) + sdf[boundary==1] = 0 + normalized_sdf[b][c] = sdf + assert np.min(sdf) == -1.0, print(np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis)) + assert np.max(sdf) == 1.0, print(np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis)) + + return normalized_sdf + +def convert_labels(labels): + ## TC, WT and ET + labels = labels[None, None] + result = [(labels == 1) | (labels == 3), (labels == 1) | (labels == 3) | (labels == 2), labels == 3] + + return torch.cat(result, dim=1).float() + +class MedicalDataset(Dataset): + def __init__(self, datalist, test=False) -> None: + super().__init__() + + self.datalist = datalist + self.test = test + + self.data_cached = [] + for p in tqdm(self.datalist, total=len(self.datalist)): + info = self.load_pkl(p) + + self.data_cached.append(info) + + ## unpacking + print(f"unpacking data ....") + # for + folder = [] + for p in self.datalist: + f = os.path.dirname(p) + if f not in folder: + folder.append(f) + for f in folder: + unpack_dataset(f, + unpack_segmentation=True, + overwrite_existing=False, + num_processes=8) + + + print(f"data length is {len(self.datalist)}") + + def load_pkl(self, data_path): + pass + properties_path = f"{data_path[:-4]}.pkl" + df = open(properties_path, "rb") + info = pickle.load(df) + + return info + + def read_data(self, data_path): + + image_path = data_path.replace(".npz", ".npy") + seg_path = data_path.replace(".npz", "_seg.npy") + image_data = np.load(image_path, "r") + + seg_data = None + if not self.test: + seg_data = np.load(seg_path, "r") + return image_data, seg_data + + # def post(self, batch_data): + # seg = convert_labels(batch_data["seg"]).numpy() + # seg_shape = seg.shape + # seg_edge = edge_3d(seg) + # seg_sdm = 1 - compute_sdf(seg, out_shape=seg_shape) + # seg_sdm = seg_sdm + seg_edge + + # seg_edge = torch.from_numpy(seg_edge) + # seg_sdm = torch.from_numpy(seg_sdm) + + # batch_data["seg_edge"] = seg_edge + # 
batch_data["seg_sdm"] = seg_sdm + + # print(f"post!!!!!!!!!") + # return batch_data + + def __getitem__(self, i): + + image, seg = self.read_data(self.datalist[i]) + + properties = self.data_cached[i] + case_name = properties["name"] + + if seg is not None: + sdm = np.load(os.path.join("./data/fullres/train_sdm/", f"{case_name}_seg_sdm.npy"), "r") + + # print(seg.shape, sdm.shape) + sdm = sdm[0] + seg = np.concatenate([seg, sdm], axis=0) + + # print(f"sdm sum is {sdm.sum()}") + if seg is None: + return { + "data": image, + "properties": properties + } + else : + return { + "data": image, + "seg": seg, + "properties": properties + } + + def __len__(self): + return len(self.datalist) + +def get_kfold_data(data_paths, n_splits, shuffle=False): + X = np.arange(len(data_paths)) + kfold = KFold(n_splits=n_splits, shuffle=shuffle) ## kfold为KFolf类的一个对象 + return_res = [] + for a, b in kfold.split(X): + fold_train = [] + fold_val = [] + for i in a: + fold_train.append(data_paths[i]) + for j in b: + fold_val.append(data_paths[j]) + return_res.append({"train_data": fold_train, "val_data": fold_val}) + + return return_res + +def get_kfold_loader(data_dir, fold=0, test_dir=None): + + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = fold_data["train_data"] + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_all_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 
5)[fold] + + train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None): + train_datalist = glob.glob(f"{train_dir}/*.npz") + val_datalist = glob.glob(f"{val_dir}/*.npz") + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + + if test_dir is not None: + test_datalist = glob.glob(f"{test_dir}/*.npz") + print(f"test data is {len(test_datalist)}") + test_ds = MedicalDataset(test_datalist, test=True) + else : + test_ds = None + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + # fold_data = get_kfold_data(all_paths, 5)[fold] + + train_number = int(len(all_paths) * train_rate) + val_number = int(len(all_paths) * val_rate) + test_number = int(len(all_paths) * test_rate) + + random.shuffle(all_paths) + + train_datalist = all_paths[:train_number] + val_datalist = all_paths[train_number: train_number + val_number] + test_datalist = all_paths[-test_number:] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}") + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + test_ds = MedicalDataset(test_datalist) + + 
loader = [train_ds, val_ds, test_ds] + + return loader + +def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = [] + for p in data_dir: + paths = glob.glob(f"{p}/*.npz") + for pp in paths: + all_paths.append(pp) + + # print(all_paths) + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader \ No newline at end of file diff --git a/source_code/SegMamba/light_training/dataloading/get_train_val_test_datalist.py b/source_code/SegMamba/light_training/dataloading/get_train_val_test_datalist.py new file mode 100644 index 0000000000000000000000000000000000000000..22edcd46c83c6347fc8dbcc59c4cd5bb0789515a --- /dev/null +++ b/source_code/SegMamba/light_training/dataloading/get_train_val_test_datalist.py @@ -0,0 +1,36 @@ + +import glob +import random +import json + +def get_train_val_test_list_from_fulldata(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42): + all_paths = glob.glob(f"{data_dir}/*.npz") + + ## eliminate the pre + all_paths_save = [] + for p in all_paths: + all_paths_save.append(p.split("/")[-1]) + all_paths = all_paths_save + train_number = int(len(all_paths) * train_rate) + val_number = int(len(all_paths) * val_rate) + test_number = int(len(all_paths) * test_rate) + random.seed(seed) + random.shuffle(all_paths) + train_datalist = all_paths[:train_number] + val_datalist = all_paths[train_number: train_number + val_number] + test_datalist = all_paths[-test_number:] + + print(f"training 
data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}", sorted(test_datalist)) + + datalist = { + "train": train_datalist, + "validation": val_datalist, + "test": test_datalist + } + + datalist = json.dumps(datalist) + + with open("./data_split.json", "w") as f: + f.write(datalist) diff --git a/source_code/SegMamba/light_training/dataloading/utils.py b/source_code/SegMamba/light_training/dataloading/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3cc60e7b992fb915c01bb7a7a5f6857bb1fb4dc8 --- /dev/null +++ b/source_code/SegMamba/light_training/dataloading/utils.py @@ -0,0 +1,25 @@ +import numpy as np +import os +from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles +import multiprocessing + +def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None: + # try: + a = np.load(npz_file) # inexpensive, no compression is done here. 
This just reads metadata + if overwrite_existing or not isfile(npz_file[:-3] + "npy"): + np.save(npz_file[:-3] + "npy", a['data']) + + if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")): + np.save(npz_file[:-4] + "_seg.npy", a['seg']) + +def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False, + num_processes: int = 8): + """ + all npz files in this folder belong to the dataset, unpack them all + """ + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + npz_files = subfiles(folder, True, None, ".npz", True) + p.starmap(_convert_to_npy, zip(npz_files, + [unpack_segmentation] * len(npz_files), + [overwrite_existing] * len(npz_files)) + ) diff --git a/source_code/SegMamba/light_training/dataloading_global/__init__.py b/source_code/SegMamba/light_training/dataloading_global/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/light_training/dataloading_global/dataset.py b/source_code/SegMamba/light_training/dataloading_global/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..2fbf3646921db9d0a390fd250099ab76e2296d1b --- /dev/null +++ b/source_code/SegMamba/light_training/dataloading_global/dataset.py @@ -0,0 +1,329 @@ + +# Copyright 2020 - 2022 MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from sklearn.model_selection import KFold ## K折交叉验证 +import pickle +import os +import json +import math +import numpy as np +import torch +from monai import transforms +import SimpleITK as sitk +from tqdm import tqdm +from torch.utils.data import Dataset +import glob +from light_training.dataloading_global.utils import unpack_dataset +import random + +class MedicalDataset(Dataset): + def __init__(self, datalist, test=False) -> None: + super().__init__() + + self.datalist = datalist + self.test = test + + self.data_cached = [] + for p in tqdm(self.datalist, total=len(self.datalist)): + info = self.load_pkl(p) + + self.data_cached.append(info) + + ## unpacking + print(f"unpacking data ....") + # for + folder = [] + for p in self.datalist: + f = os.path.dirname(p) + if f not in folder: + folder.append(f) + for f in folder: + unpack_dataset(f, + unpack_segmentation=True, + overwrite_existing=False, + num_processes=8) + + + print(f"data length is {len(self.datalist)}") + + def load_pkl(self, data_path): + pass + properties_path = f"{data_path[:-4]}.pkl" + df = open(properties_path, "rb") + info = pickle.load(df) + + return info + + def post(self, batch_data): + return batch_data + + def read_data(self, data_path): + + image_path = data_path.replace(".npz", ".npy") + seg_path = data_path.replace(".npz", "_seg.npy") + image_global_path = data_path.replace(".npz", "_global.npy") + seg_global_path = data_path.replace(".npz", "_global_seg.npy") + + image_data = np.load(image_path, "r+") + image_data_global = np.load(image_global_path, "r+") + + seg_data = None + if not self.test: + seg_data = np.load(seg_path, "r+") + seg_global_data = np.load(seg_global_path, "r+") + + return image_data, image_data_global, seg_data, seg_global_data + + + def __getitem__(self, i): + + image, image_data_global, seg, seg_global = self.read_data(self.datalist[i]) + + # print(image_data_global.shape) + properties = self.data_cached[i] + + if seg is None: + return { + "data": image, + 
"data_global": image_data_global, + "properties": properties + } + else : + return { + "data": image, + "data_global": image_data_global, + "seg": seg, + "seg_global": seg_global, + "properties": properties + } + + def __len__(self): + return len(self.datalist) + +def get_train_test_loader_from_test_list(data_dir, test_list): + all_paths = glob.glob(f"{data_dir}/*.npz") + + test_datalist = [] + train_datalist = [] + + test_list_1 = [] + for t in test_list: + test_list_1.append(t.replace(".nii.gz", "")) + + test_list = test_list_1 + for p in all_paths: + p2 = p.split("/")[-1].split(".")[0] + if p2 in test_list: + test_datalist.append(p) + else : + train_datalist.append(p) + + print(f"training data is {len(train_datalist)}") + print(f"test data is {len(test_datalist)}", test_datalist) + + train_ds = MedicalDataset(train_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, test_ds] + + return loader + +def get_kfold_data(data_paths, n_splits, shuffle=False): + X = np.arange(len(data_paths)) + kfold = KFold(n_splits=n_splits, shuffle=shuffle) ## kfold为KFolf类的一个对象 + return_res = [] + for a, b in kfold.split(X): + fold_train = [] + fold_val = [] + for i in a: + fold_train.append(data_paths[i]) + for j in b: + fold_val.append(data_paths[j]) + return_res.append({"train_data": fold_train, "val_data": fold_val}) + + return return_res + +def get_kfold_loader(data_dir, fold=0, test_dir=None): + + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = fold_data["train_data"] + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return 
loader + +def get_all_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + fold_data = get_kfold_data(all_paths, 5)[fold] + + train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_seperate(train_dir, val_dir, test_dir=None): + train_datalist = glob.glob(f"{train_dir}/*.npz") + val_datalist = glob.glob(f"{val_dir}/*.npz") + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + + if test_dir is not None: + test_datalist = glob.glob(f"{test_dir}/*.npz") + print(f"test data is {len(test_datalist)}") + test_ds = MedicalDataset(test_datalist, test=True) + else : + test_ds = None + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_from_train(data_dir, train_rate=0.7, val_rate=0.1, test_rate=0.2, seed=42): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + # fold_data = get_kfold_data(all_paths, 5)[fold] + + train_number = int(len(all_paths) * train_rate) + val_number = int(len(all_paths) * val_rate) + test_number = int(len(all_paths) * test_rate) + random.seed(seed) + # random_state = random.random + random.shuffle(all_paths) + train_datalist = all_paths[:train_number] + val_datalist = all_paths[train_number: train_number + val_number] + test_datalist = 
all_paths[-test_number:] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}", sorted(test_datalist)) + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_val_test_loader_from_split_json(data_dir, split_json_file): + import json + + with open(split_json_file, "r") as f: + + datalist = json.loads(f.read()) + + train_datalist = datalist["train"] + val_datalist = datalist["validation"] + test_datalist = datalist["test"] + + def add_pre(datalist): + for i in range(len(datalist)): + datalist[i] = os.path.join(data_dir, datalist[i]) + + add_pre(train_datalist) + add_pre(val_datalist) + add_pre(test_datalist) + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + print(f"test data is {len(test_datalist)}", sorted(test_datalist)) + + train_ds = MedicalDataset(train_datalist) + val_ds = MedicalDataset(val_datalist) + test_ds = MedicalDataset(test_datalist) + + loader = [train_ds, val_ds, test_ds] + + return loader + +def get_train_loader_from_train(data_dir): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = glob.glob(f"{data_dir}/*.npz") + # fold_data = get_kfold_data(all_paths, 5)[fold] + + train_ds = MedicalDataset(all_paths) + + return train_ds + +def get_test_loader_from_test(data_dir): + all_paths = glob.glob(f"{data_dir}/*.npz") + + test_ds = MedicalDataset(all_paths) + + return test_ds + +def get_multi_dir_training_loader(data_dir, fold=0, test_dir=None): + ## train all labeled data + ## fold denote the validation data in training data + all_paths = [] + for p in data_dir: + paths = glob.glob(f"{p}/*.npz") + for pp in paths: + all_paths.append(pp) + + # print(all_paths) + fold_data = get_kfold_data(all_paths, 5)[fold] + + 
train_datalist = all_paths + val_datalist = fold_data["val_data"] + + print(f"training data is {len(train_datalist)}") + print(f"validation data is {len(val_datalist)}") + train_ds = MedicalDataset(train_datalist) + + val_ds = MedicalDataset(val_datalist) + + if test_dir is not None: + test_paths = glob.glob(f"{test_dir}/*.npz") + test_ds = MedicalDataset(test_paths, test=True) + else: + test_ds = None + + loader = [train_ds, val_ds, test_ds] + + return loader \ No newline at end of file diff --git a/source_code/SegMamba/light_training/dataloading_global/utils.py b/source_code/SegMamba/light_training/dataloading_global/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cf25e09c6e722316c21b4fefddaf146de1cd4358 --- /dev/null +++ b/source_code/SegMamba/light_training/dataloading_global/utils.py @@ -0,0 +1,27 @@ +import numpy as np +import os +from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles +import multiprocessing + +def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None: + # try: + a = np.load(npz_file) # inexpensive, no compression is done here. 
This just reads metadata + if overwrite_existing or not isfile(npz_file[:-3] + "npy"): + np.save(npz_file[:-3] + "npy", a['data']) + np.save(npz_file[:-4] + "_global.npy", a['data_global']) + np.save(npz_file[:-4] + "_global_seg.npy", a['seg_global']) + + if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")): + np.save(npz_file[:-4] + "_seg.npy", a['seg']) + +def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False, + num_processes: int = 8): + """ + all npz files in this folder belong to the dataset, unpack them all + """ + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + npz_files = subfiles(folder, True, None, ".npz", True) + p.starmap(_convert_to_npy, zip(npz_files, + [unpack_segmentation] * len(npz_files), + [overwrite_existing] * len(npz_files)) + ) diff --git a/source_code/SegMamba/light_training/evaluation/metric.py b/source_code/SegMamba/light_training/evaluation/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..eed0b2c9debaad190b3807ff14b113920789059c --- /dev/null +++ b/source_code/SegMamba/light_training/evaluation/metric.py @@ -0,0 +1,406 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import numpy as np +from medpy import metric + + +def assert_shape(test, reference): + + assert test.shape == reference.shape, "Shape mismatch: {} and {}".format( + test.shape, reference.shape) + + +class ConfusionMatrix: + + def __init__(self, test=None, reference=None): + + self.tp = None + self.fp = None + self.tn = None + self.fn = None + self.size = None + self.reference_empty = None + self.reference_full = None + self.test_empty = None + self.test_full = None + self.set_reference(reference) + self.set_test(test) + + def set_test(self, test): + + self.test = test + self.reset() + + def set_reference(self, reference): + + self.reference = reference + self.reset() + + def reset(self): + + self.tp = None + self.fp = None + self.tn = None + self.fn = None + self.size = None + self.test_empty = None + self.test_full = None + self.reference_empty = None + self.reference_full = None + + def compute(self): + + if self.test is None or self.reference is None: + raise ValueError("'test' and 'reference' must both be set to compute confusion matrix.") + + assert_shape(self.test, self.reference) + + self.tp = int(((self.test != 0) * (self.reference != 0)).sum()) + self.fp = int(((self.test != 0) * (self.reference == 0)).sum()) + self.tn = int(((self.test == 0) * (self.reference == 0)).sum()) + self.fn = int(((self.test == 0) * (self.reference != 0)).sum()) + self.size = int(np.prod(self.reference.shape, dtype=np.int64)) + self.test_empty = not np.any(self.test) + self.test_full = np.all(self.test) + self.reference_empty = not np.any(self.reference) + self.reference_full = np.all(self.reference) + + def get_matrix(self): + + for entry in (self.tp, self.fp, self.tn, self.fn): + if entry is None: + self.compute() + break + + return self.tp, self.fp, self.tn, self.fn + + def get_size(self): + + if self.size is None: + self.compute() + return self.size + + def get_existence(self): + + for case in (self.test_empty, self.test_full, self.reference_empty, self.reference_full): + 
if case is None: + self.compute() + break + + return self.test_empty, self.test_full, self.reference_empty, self.reference_full + + +def dice(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """2TP / (2TP + FP + FN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty and reference_empty: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. + + return float(2. * tp / (2 * tp + fp + fn)) + + +def jaccard(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TP / (TP + FP + FN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty and reference_empty: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. + + return float(tp / (tp + fp + fn)) + + +def precision(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TP / (TP + FP)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. 
+ + return float(tp / (tp + fp)) + + +def sensitivity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TP / (TP + FN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if reference_empty: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. + + return float(tp / (tp + fn)) + + +def recall(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TP / (TP + FN)""" + + return sensitivity(test, reference, confusion_matrix, nan_for_nonexisting, **kwargs) + + +def specificity(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TN / (TN + FP)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. 
+ + return float(tn / (tn + fp)) + + +def accuracy(test=None, reference=None, confusion_matrix=None, **kwargs): + """(TP + TN) / (TP + FP + FN + TN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return float((tp + tn) / (tp + fp + tn + fn)) + + +def fscore(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, beta=1., **kwargs): + """(1 + b^2) * TP / ((1 + b^2) * TP + b^2 * FN + FP)""" + + precision_ = precision(test, reference, confusion_matrix, nan_for_nonexisting) + recall_ = recall(test, reference, confusion_matrix, nan_for_nonexisting) + + return (1 + beta*beta) * precision_ * recall_ /\ + ((beta*beta * precision_) + recall_) + + +def false_positive_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """FP / (FP + TN)""" + + return 1 - specificity(test, reference, confusion_matrix, nan_for_nonexisting) + + +def false_omission_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """FN / (TN + FN)""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0. 
+ + return float(fn / (fn + tn)) + + +def false_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """FN / (TP + FN)""" + + return 1 - sensitivity(test, reference, confusion_matrix, nan_for_nonexisting) + + +def true_negative_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TN / (TN + FP)""" + + return specificity(test, reference, confusion_matrix, nan_for_nonexisting) + + +def false_discovery_rate(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """FP / (TP + FP)""" + + return 1 - precision(test, reference, confusion_matrix, nan_for_nonexisting) + + +def negative_predictive_value(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, **kwargs): + """TN / (TN + FN)""" + + return 1 - false_omission_rate(test, reference, confusion_matrix, nan_for_nonexisting) + + +def total_positives_test(test=None, reference=None, confusion_matrix=None, **kwargs): + """TP + FP""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return tp + fp + + +def total_negatives_test(test=None, reference=None, confusion_matrix=None, **kwargs): + """TN + FN""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return tn + fn + + +def total_positives_reference(test=None, reference=None, confusion_matrix=None, **kwargs): + """TP + FN""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return tp + fn + + +def total_negatives_reference(test=None, reference=None, confusion_matrix=None, **kwargs): + """TN + FP""" + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + tp, fp, tn, fn = confusion_matrix.get_matrix() + + return tn + fp + + 
+def hausdorff_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs): + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty or test_full or reference_empty or reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0 + + test, reference = confusion_matrix.test, confusion_matrix.reference + + return metric.hd(test, reference, voxel_spacing, connectivity) + + +def hausdorff_distance_95(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs): + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty or test_full or reference_empty or reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0 + + test, reference = confusion_matrix.test, confusion_matrix.reference + + return metric.hd95(test, reference, voxel_spacing, connectivity) + + +def avg_surface_distance(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs): + + if confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty or test_full or reference_empty or reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0 + + test, reference = confusion_matrix.test, confusion_matrix.reference + + return metric.asd(test, reference, voxel_spacing, connectivity) + + +def avg_surface_distance_symmetric(test=None, reference=None, confusion_matrix=None, nan_for_nonexisting=True, voxel_spacing=None, connectivity=1, **kwargs): + + if 
confusion_matrix is None: + confusion_matrix = ConfusionMatrix(test, reference) + + test_empty, test_full, reference_empty, reference_full = confusion_matrix.get_existence() + + if test_empty or test_full or reference_empty or reference_full: + if nan_for_nonexisting: + return float("NaN") + else: + return 0 + + test, reference = confusion_matrix.test, confusion_matrix.reference + + return metric.assd(test, reference, voxel_spacing, connectivity) + + +ALL_METRICS = { + "False Positive Rate": false_positive_rate, + "Dice": dice, + "Jaccard": jaccard, + "Hausdorff Distance": hausdorff_distance, + "Hausdorff Distance 95": hausdorff_distance_95, + "Precision": precision, + "Recall": recall, + "Avg. Symmetric Surface Distance": avg_surface_distance_symmetric, + "Avg. Surface Distance": avg_surface_distance, + "Accuracy": accuracy, + "False Omission Rate": false_omission_rate, + "Negative Predictive Value": negative_predictive_value, + "False Negative Rate": false_negative_rate, + "True Negative Rate": true_negative_rate, + "False Discovery Rate": false_discovery_rate, + "Total Positives Test": total_positives_test, + "Total Negatives Test": total_negatives_test, + "Total Positives Reference": total_positives_reference, + "total Negatives Reference": total_negatives_reference +} \ No newline at end of file diff --git a/source_code/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py b/source_code/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py new file mode 100644 index 0000000000000000000000000000000000000000..223733edcf5f4c52c832b52df8c1a9d29513182d --- /dev/null +++ b/source_code/SegMamba/light_training/examples/1_rename_mri_data_BraTS2023.py @@ -0,0 +1,27 @@ + + + +import os + +# data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/" +data_dir = "./data/raw_data/BraTS2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-ValidationData/" + +all_cases = os.listdir(data_dir) + +for case_name in all_cases: + case_dir = 
os.path.join(data_dir, case_name) + + for data_name in os.listdir(case_dir): + + if "-" not in data_name: + continue + new_name = data_name.split("-")[-1] + + new_path = os.path.join(case_dir, new_name) + + old_path = os.path.join(case_dir, data_name) + + os.rename(old_path, new_path) + + print(f"{new_path} 命名成功") + diff --git a/source_code/SegMamba/light_training/examples/2_preprocessing_AIIB23.py b/source_code/SegMamba/light_training/examples/2_preprocessing_AIIB23.py new file mode 100644 index 0000000000000000000000000000000000000000..77b52c2050af19bdbfe8a2998d9cf35d918cef95 --- /dev/null +++ b/source_code/SegMamba/light_training/examples/2_preprocessing_AIIB23.py @@ -0,0 +1,130 @@ + +from light_training.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor +import numpy as np +import pickle +import json + + +def process_train(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/AIIB23_Train_T1" + image_dir = "img" + label_dir = "gt" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=label_dir, + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + output_dir = "./data/fullres/train/" + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + num_processes=16, + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + +def process_val(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=None, + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with 
open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_test/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel, + num_processes=16) + +def process_val_semi(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val_semi_postprocess" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir="gt", + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_semi_postprocess/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + + +def plan(): + base_dir = "./data/raw_data/AIIB23_Train_T1" + image_dir = "img" + label_dir = "gt" + + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=label_dir, + ) + + preprocessor.run_plan() + +if __name__ == "__main__": + + # plan() + + process_train() + # import time + # s = time.time() + # process_val() + # e = time.time() + + # print(f"preprocessing time is {e - s}") + + # process_val_semi() + + +# + # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir) + + # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz") + + # image = data["data"] + # label = data["seg"] + # print(image.shape) + # print(label.shape) + + # 
import matplotlib.pyplot as plt + + # for i in range(20): + # plt.imshow(image[0, i], cmap="gray") + # plt.show() + + # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb") + + # info = pickle.load(df) + # print(info) \ No newline at end of file diff --git a/source_code/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py b/source_code/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py new file mode 100644 index 0000000000000000000000000000000000000000..ac97dbdeea89eab4c281db066b23452599b638cd --- /dev/null +++ b/source_code/SegMamba/light_training/examples/2_preprocessing_BraTS2023.py @@ -0,0 +1,94 @@ + +from light_training.preprocessing.preprocessors.preprocessor_mri import MultiModalityPreprocessor +import numpy as np +import pickle +import json + +data_filename = ["t2w.nii.gz", + "t2f.nii.gz", + "t1n.nii.gz", + "t1c.nii.gz"] +seg_filename = "seg.nii.gz" + +def process_train(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/BraTS2023/" + image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData" + preprocessor = MultiModalityPreprocessor(base_dir=base_dir, + image_dir=image_dir, + data_filenames=data_filename, + seg_filename=seg_filename + ) + + out_spacing = [1.0, 1.0, 1.0] + output_dir = "./data/fullres/train/" + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3], + ) + +def process_val(): + base_dir = "./data/raw_data/BraTS2023/" + image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-ValidationData" + preprocessor = MultiModalityPreprocessor(base_dir=base_dir, + image_dir=image_dir, + data_filenames=data_filename, + seg_filename="" + ) + + out_spacing = [1.0, 1.0, 1.0] + output_dir = "./data/fullres/val/" + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3], + ) + +def process_test(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # 
median_shape is [602.5 516.5 516.5] + base_dir = "/home/xingzhaohu/sharefs/datasets/WORD-V0.1.0/" + image_dir = "imagesTs" + label_dir = "labelsTs" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=label_dir, + ) + + out_spacing = [3.0, 0.9765625, 0.9765625] + + output_dir = "./data/fullres/test/" + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = json.loads(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + + +def plan(): + base_dir = "./data/raw_data/BraTS2023/" + image_dir = "ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData" + preprocessor = MultiModalityPreprocessor(base_dir=base_dir, + image_dir=image_dir, + data_filenames=data_filename, + seg_filename=seg_filename + ) + + preprocessor.run_plan() + + +if __name__ == "__main__": +# + # plan() + + process_train() + # process_val() + # process_test() + diff --git a/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/2_preprocessing_AbdomenAtlas1_0Mini.py b/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/2_preprocessing_AbdomenAtlas1_0Mini.py new file mode 100644 index 0000000000000000000000000000000000000000..00d5a587a8ee9b197990ee1f6538fce386062963 --- /dev/null +++ b/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/2_preprocessing_AbdomenAtlas1_0Mini.py @@ -0,0 +1,122 @@ + +from light_training.preprocessing.preprocessors.default_preprocessor_AbdomenAtlas1_0Mini import DefaultPreprocessor +import numpy as np +import pickle +import json + + +def process_train(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = 
"/home/xingzhaohu/data/AbdomenAtlas1.0Mini" + + preprocessor = DefaultPreprocessor(base_dir=base_dir) + + out_spacing = [2.0, 0.8134765, 0.83007812] + output_dir = "./data/fullres/train/" + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9], + num_processes=16, + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + +def process_val(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=None, + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_test/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel, + num_processes=16) + +def process_test(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "/home/xingzhaohu/data/AbdomenAtlas1.0Mini_test" + preprocessor = DefaultPreprocessor(base_dir=base_dir) + + out_spacing = [2.0, 0.8134765, 0.83007812] + output_dir = "./data/fullres/test/" + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + 
preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2, 3, 4, 5, 6, 7, 8, 9], + num_processes=16, + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + + +def plan(): + base_dir = "/home/xingzhaohu/data/AbdomenAtlas1.0Mini" + + preprocessor = DefaultPreprocessor(base_dir=base_dir, + + ) + + preprocessor.run_plan() + +if __name__ == "__main__": + + # plan() + + # process_train() + + process_test() + # import time + # s = time.time() + # process_val() + # e = time.time() + + # print(f"preprocessing time is {e - s}") + + # process_val_semi() + + +# + # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir) + + # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz") + + # image = data["data"] + # label = data["seg"] + # print(image.shape) + # print(label.shape) + + # import matplotlib.pyplot as plt + + # for i in range(20): + # plt.imshow(image[0, i], cmap="gray") + # plt.show() + + # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb") + + # info = pickle.load(df) + # print(info) \ No newline at end of file diff --git a/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet.py b/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet.py new file mode 100644 index 0000000000000000000000000000000000000000..eebd98d634e7dbd2efc4e8fc31c9745ee84b46da --- /dev/null +++ b/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet.py @@ -0,0 +1,215 @@ +import numpy as np +from light_training.dataloading.dataset import get_train_val_test_loader_from_train +# from dataset.brats_data_utils_resample128 import get_loader_brats +import torch +import torch.nn as nn +# from ddim_seg.basic_unet import BasicUNet +from monai.networks.nets.unetr import UNETR +from monai.networks.nets.swin_unetr import SwinUNETR +from monai.inferers import 
SlidingWindowInferer +from light_training.evaluation.metric import dice +from light_training.trainer import Trainer +from monai.utils import set_determinism +from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR +from light_training.utils.files_helper import save_new_model_and_delete_last +from models.uent2d import UNet2D +from models.uent3d import UNet3D +from monai.networks.nets.segresnet import SegResNet +# from ddim_seg.unet3d import DiffusionUNet +# from ddim_seg.ddim import DDIM +# from ddim_seg.nnunet3d_raw import Generic_UNet +# from ddim_seg.basic_unet_denose import BasicUNetDe +# from ddim_seg.basic_unet import BasicUNetEncoder +from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS +import argparse +from monai.losses.dice import DiceLoss +# from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal + +# from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType +# from guided_diffusion.respace import SpacedDiffusion, space_timesteps +# from guided_diffusion.resample import UniformSampler +set_determinism(123) +import os +from scipy import ndimage + + +os.environ["CUDA_VISIBLE_DEVICES"] = "6,7" +data_dir = "./data/fullres/train" + +logdir = f"./logs_gpu4/diffunet_ep2000" + +model_save_path = os.path.join(logdir, "model") +# augmentation = "nomirror" +augmentation = True + +env = "pytorch" +max_epoch = 2000 +batch_size = 2 +val_every = 2 +num_gpus = 1 +device = "cuda:0" +roi_size = [128, 128, 128] + +def get_edge_points(img): + """ + get edge points of a binary segmentation result + """ + dim = len(img.shape) + if (dim == 2): + strt = ndimage.generate_binary_structure(2, 1) + else: + strt = ndimage.generate_binary_structure(3, 1) + ero = ndimage.binary_erosion(img, strt) + edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8) + return edge + +def edge_3d(image_3d): + # image_3d = torch.from_numpy(image_3d) + b, c, d, h, w = image_3d.shape 
+ + image_3d = image_3d[:, 0] > 0 + + return_edge = [] + + for i in range(image_3d.shape[0]): + return_edge.append(get_edge_points(image_3d[i])[None,]) + + return_edge = np.concatenate(return_edge, axis=0) + + return return_edge + +class BraTSTrainer(Trainer): + def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"): + super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script) + self.window_infer = SlidingWindowInferer(roi_size=roi_size, + sw_batch_size=1, + overlap=0.5) + self.augmentation = augmentation + + from models.nnunet_denoise_ddp_infer.get_unet3d_denoise_uncer_edge import DiffUNet + self.model = DiffUNet(1, 10, 3, 1, bta=True) + + self.patch_size = roi_size + self.best_mean_dice = 0.0 + self.ce = nn.CrossEntropyLoss() + self.mse = nn.MSELoss() + self.train_process = 20 + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5, + momentum=0.99, nesterov=True) + + self.scheduler_type = "poly" + self.bce = nn.BCEWithLogitsLoss() + self.dice_loss = DiceLoss(sigmoid=True) + self.cross = nn.CrossEntropyLoss() + + def training_step(self, batch): + image, label = self.get_input(batch) + + pred, pred_edge = self.model(image, label) + + loss_edge = self.cross(pred_edge, label) + loss_seg = self.cross(pred, label) + + self.log("loss_seg", loss_seg, step=self.global_step) + self.log("loss_edge", loss_edge, step=self.global_step) + + loss = loss_edge + loss_seg + return loss + + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + # label = self.convert_labels(label) + + # label = label.float() + label = label[:, 0].long() + return image, label + + def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]): + if pred.sum() > 0 and gt.sum() > 0: + d = dice(pred, gt) + # hd95 = metric.binary.hd95(pred, gt) + return 
np.array([d, 50]) + + elif gt.sum() == 0 and pred.sum() == 0: + return np.array([1.0, 50]) + + else: + return np.array([0.0, 50]) + + def validation_step(self, batch): + image, label = self.get_input(batch) + + output = self.model(image, ddim=True) + + # output = output > 0 + output = output.argmax(dim=1) + + output = output.cpu().numpy() + target = label.cpu().numpy() + + dices = [] + + c = 10 + for i in range(1, c): + pred_c = output == i + target_c = target == i + + cal_dice, _ = self.cal_metric(target_c, pred_c) + dices.append(cal_dice) + + return dices + + def validation_end(self, val_outputs): + dices = val_outputs + + dices_mean = [] + c = 9 + for i in range(0, c): + dices_mean.append(dices[i].mean()) + + mean_dice = sum(dices_mean) / len(dices_mean) + + self.log("0", dices_mean[0], step=self.epoch) + self.log("1", dices_mean[1], step=self.epoch) + self.log("2", dices_mean[2], step=self.epoch) + self.log("3", dices_mean[3], step=self.epoch) + self.log("4", dices_mean[4], step=self.epoch) + self.log("5", dices_mean[5], step=self.epoch) + self.log("6", dices_mean[6], step=self.epoch) + self.log("7", dices_mean[7], step=self.epoch) + self.log("8", dices_mean[8], step=self.epoch) + + self.log("mean_dice", mean_dice, step=self.epoch) + + if mean_dice > self.best_mean_dice: + self.best_mean_dice = mean_dice + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"best_model_{mean_dice:.4f}.pt"), + delete_symbol="best_model") + + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"final_model_{mean_dice:.4f}.pt"), + delete_symbol="final_model") + + + print(f"mean_dice is {mean_dice}") + +if __name__ == "__main__": + + trainer = BraTSTrainer(env_type=env, + max_epochs=max_epoch, + batch_size=batch_size, + device=device, + logdir=logdir, + val_every=val_every, + num_gpus=num_gpus, + master_port=17759, + training_script=__file__) + + train_ds, val_ds, test_ds = get_train_val_test_loader_from_train(data_dir) + + 
trainer.train(train_dataset=train_ds, val_dataset=val_ds) diff --git a/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet_train_all.py b/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet_train_all.py new file mode 100644 index 0000000000000000000000000000000000000000..de03789c5e55196b542fdfe678e2e79e6466caa5 --- /dev/null +++ b/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/3_train_diffunet_train_all.py @@ -0,0 +1,215 @@ +import numpy as np +from light_training.dataloading.dataset import get_train_loader_from_train +# from dataset.brats_data_utils_resample128 import get_loader_brats +import torch +import torch.nn as nn +# from ddim_seg.basic_unet import BasicUNet +from monai.networks.nets.unetr import UNETR +from monai.networks.nets.swin_unetr import SwinUNETR +from monai.inferers import SlidingWindowInferer +from light_training.evaluation.metric import dice +from light_training.trainer import Trainer +from monai.utils import set_determinism +from light_training.utils.lr_scheduler import LinearWarmupCosineAnnealingLR +from light_training.utils.files_helper import save_new_model_and_delete_last +from models.uent2d import UNet2D +from models.uent3d import UNet3D +from monai.networks.nets.segresnet import SegResNet +# from ddim_seg.unet3d import DiffusionUNet +# from ddim_seg.ddim import DDIM +# from ddim_seg.nnunet3d_raw import Generic_UNet +# from ddim_seg.basic_unet_denose import BasicUNetDe +# from ddim_seg.basic_unet import BasicUNetEncoder +from models.transbts.TransBTS_downsample8x_skipconnection import TransBTS +import argparse +from monai.losses.dice import DiceLoss +# from light_training.model.bit_diffusion import decimal_to_bits, bits_to_decimal + +# from guided_diffusion.gaussian_diffusion import get_named_beta_schedule, ModelMeanType, ModelVarType,LossType +# from guided_diffusion.respace import SpacedDiffusion, space_timesteps +# from guided_diffusion.resample import 
UniformSampler +set_determinism(123) +import os +from scipy import ndimage + + +os.environ["CUDA_VISIBLE_DEVICES"] = "4,5,6,7" +data_dir = "./data/fullres/train" + +logdir = f"./logs_gpu4/diffunet_ep2000_train_all_data" + +model_save_path = os.path.join(logdir, "model") +# augmentation = "nomirror" +augmentation = True + +env = "pytorch" +max_epoch = 2000 +batch_size = 2 +val_every = 2 +num_gpus = 1 +device = "cuda:0" +roi_size = [128, 128, 128] + +def get_edge_points(img): + """ + get edge points of a binary segmentation result + """ + dim = len(img.shape) + if (dim == 2): + strt = ndimage.generate_binary_structure(2, 1) + else: + strt = ndimage.generate_binary_structure(3, 1) + ero = ndimage.binary_erosion(img, strt) + edge = np.asarray(img, np.uint8) - np.asarray(ero, np.uint8) + return edge + +def edge_3d(image_3d): + # image_3d = torch.from_numpy(image_3d) + b, c, d, h, w = image_3d.shape + + image_3d = image_3d[:, 0] > 0 + + return_edge = [] + + for i in range(image_3d.shape[0]): + return_edge.append(get_edge_points(image_3d[i])[None,]) + + return_edge = np.concatenate(return_edge, axis=0) + + return return_edge + +class BraTSTrainer(Trainer): + def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"): + super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script) + self.window_infer = SlidingWindowInferer(roi_size=roi_size, + sw_batch_size=1, + overlap=0.5) + self.augmentation = augmentation + + from models.nnunet_denoise_ddp_infer.get_unet3d_denoise_uncer_edge import DiffUNet + self.model = DiffUNet(1, 10, 3, 1, bta=True) + + self.patch_size = roi_size + self.best_mean_dice = 0.0 + self.ce = nn.CrossEntropyLoss() + self.mse = nn.MSELoss() + self.train_process = 24 + self.optimizer = torch.optim.SGD(self.model.parameters(), lr=1e-2, weight_decay=3e-5, + momentum=0.99, 
nesterov=True) + + self.scheduler_type = "poly" + self.bce = nn.BCEWithLogitsLoss() + self.dice_loss = DiceLoss(sigmoid=True) + self.cross = nn.CrossEntropyLoss() + + def training_step(self, batch): + image, label = self.get_input(batch) + + pred, pred_edge = self.model(image, label) + + loss_edge = self.cross(pred_edge, label) + loss_seg = self.cross(pred, label) + + self.log("loss_seg", loss_seg, step=self.global_step) + self.log("loss_edge", loss_edge, step=self.global_step) + + loss = loss_edge + loss_seg + return loss + + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + # label = self.convert_labels(label) + + # label = label.float() + label = label[:, 0].long() + return image, label + + def cal_metric(self, gt, pred, voxel_spacing=[1.0, 1.0, 1.0]): + if pred.sum() > 0 and gt.sum() > 0: + d = dice(pred, gt) + # hd95 = metric.binary.hd95(pred, gt) + return np.array([d, 50]) + + elif gt.sum() == 0 and pred.sum() == 0: + return np.array([1.0, 50]) + + else: + return np.array([0.0, 50]) + + def validation_step(self, batch): + image, label = self.get_input(batch) + + output = self.model(image, ddim=True) + + # output = output > 0 + output = output.argmax(dim=1) + + output = output.cpu().numpy() + target = label.cpu().numpy() + + dices = [] + + c = 10 + for i in range(1, c): + pred_c = output == i + target_c = target == i + + cal_dice, _ = self.cal_metric(target_c, pred_c) + dices.append(cal_dice) + + return dices + + def validation_end(self, val_outputs): + dices = val_outputs + + dices_mean = [] + c = 9 + for i in range(0, c): + dices_mean.append(dices[i].mean()) + + mean_dice = sum(dices_mean) / len(dices_mean) + + self.log("0", dices_mean[0], step=self.epoch) + self.log("1", dices_mean[1], step=self.epoch) + self.log("2", dices_mean[2], step=self.epoch) + self.log("3", dices_mean[3], step=self.epoch) + self.log("4", dices_mean[4], step=self.epoch) + self.log("5", dices_mean[5], step=self.epoch) + self.log("6", dices_mean[6], 
step=self.epoch) + self.log("7", dices_mean[7], step=self.epoch) + self.log("8", dices_mean[8], step=self.epoch) + + self.log("mean_dice", mean_dice, step=self.epoch) + + if mean_dice > self.best_mean_dice: + self.best_mean_dice = mean_dice + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"best_model_{mean_dice:.4f}.pt"), + delete_symbol="best_model") + + save_new_model_and_delete_last(self.model, + os.path.join(model_save_path, + f"final_model_{mean_dice:.4f}.pt"), + delete_symbol="final_model") + + + print(f"mean_dice is {mean_dice}") + +if __name__ == "__main__": + + trainer = BraTSTrainer(env_type=env, + max_epochs=max_epoch, + batch_size=batch_size, + device=device, + logdir=logdir, + val_every=val_every, + num_gpus=num_gpus, + master_port=17759, + training_script=__file__) + + train_ds = get_train_loader_from_train(data_dir) + + trainer.train(train_dataset=train_ds, val_dataset=train_ds) diff --git a/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/4_predict_diffunet.py b/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/4_predict_diffunet.py new file mode 100644 index 0000000000000000000000000000000000000000..470b04e65af3d1032ee7f2fcb83dca8d8d441d37 --- /dev/null +++ b/source_code/SegMamba/light_training/examples/AbdomenAtlas1.0Mini/4_predict_diffunet.py @@ -0,0 +1,141 @@ +import numpy as np +from light_training.dataloading.dataset import get_test_loader_from_test +import torch +import torch.nn as nn +from monai.networks.nets.basic_unet import BasicUNet +from monai.networks.nets.swin_unetr import SwinUNETR +from monai.inferers import SlidingWindowInferer +from light_training.evaluation.metric import dice +from light_training.trainer import Trainer +from monai.utils import set_determinism +from light_training.utils.files_helper import save_new_model_and_delete_last +from models.uent3d import UNet3D +from monai.networks.nets.segresnet import SegResNet +from 
models.transbts.TransBTS_downsample8x_skipconnection import TransBTS +from einops import rearrange +from models.modelgenesis.unet3d import UNet3DModelGen +from models.transvw.models.ynet3d import UNet3DTransVW +from monai.networks.nets.basic_unet import BasicUNet +from monai.networks.nets.attentionunet import AttentionUnet +from light_training.loss.compound_losses import DC_and_CE_loss +from light_training.loss.dice import MemoryEfficientSoftDiceLoss +from light_training.evaluation.metric import dice +set_determinism(123) +from light_training.loss.compound_losses import DC_and_CE_loss +import os +from medpy import metric +from light_training.prediction import Predictor + + +data_dir = "./data/fullres/test" +env = "pytorch" +max_epoch = 1000 +batch_size = 2 +val_every = 2 +num_gpus = 1 +device = "cuda:2" +patch_size = [128, 128, 128] + +class BraTSTrainer(Trainer): + def __init__(self, env_type, max_epochs, batch_size, device="cpu", val_every=1, num_gpus=1, logdir="./logs/", master_ip='localhost', master_port=17750, training_script="train.py"): + super().__init__(env_type, max_epochs, batch_size, device, val_every, num_gpus, logdir, master_ip, master_port, training_script) + + self.patch_size = patch_size + + def get_input(self, batch): + image = batch["data"] + label = batch["seg"] + properties = batch["properties"] + # label = self.convert_labels(label) + del batch + return image, label, properties + + def define_model_diffunet(self): + from models.nnunet_denoise_ddp_infer.get_unet3d_denoise_uncer_edge import DiffUNet + model = DiffUNet(1, 10, 3, 1, bta=True) + + model_path = "/home/xingzhaohu/zongweizhou/logs_gpu4/diffunet/model/final_model_0.8384.pt" + new_sd = self.filte_state_dict(torch.load(model_path, map_location="cpu")) + model.load_state_dict(new_sd, strict=False) + model.eval() + window_infer = SlidingWindowInferer(roi_size=patch_size, + sw_batch_size=2, + overlap=0.3, + progress=True, + mode="gaussian") + + predictor = 
Predictor(window_infer=window_infer, + mirror_axes=[0,1,2]) + save_path = "./prediction_results/diffunet_ep1000_test" + + os.makedirs(save_path, exist_ok=True) + + return model, predictor, save_path + + def validation_step(self, batch): + image, label, properties = self.get_input(batch) + print(properties['spacing']) + + ddim = True + model, predictor, save_path = self.define_model_diffunet() + + if ddim: + model_output = predictor.maybe_mirror_and_predict(image, model, device=device, ddim=True) + else : + model_output = predictor.maybe_mirror_and_predict(image, model, device=device) + + model_output = predictor.predict_raw_probability(model_output, + properties=properties).cpu() + + + model_output = model_output.argmax(dim=0) + + model_output = predictor.predict_noncrop_probability(model_output, properties) + print(f"save shape is {model_output.shape}") + + + seg_list = ["aorta", "gall_bladder", "kidney_left", + "kidney_right", "liver", "pancreas", + "postcava", "spleen", "stomach"] + + save_path = os.path.join(save_path, properties['name'][0], "predictions") + # print(f"save_path is {save_path}") + os.makedirs(save_path, exist_ok=True) + for i in range(1, len(seg_list) + 1): + model_output_c = model_output == i + predictor.save_to_nii(model_output_c, + raw_spacing=properties['spacing'], + case_name=seg_list[i-1], + save_dir=save_path) + + return 0 + + + def filte_state_dict(self, sd): + if "module" in sd : + sd = sd["module"] + new_sd = {} + for k, v in sd.items(): + k = str(k) + new_k = k[7:] if k.startswith("module") else k + new_sd[new_k] = v + del sd + return new_sd + +if __name__ == "__main__": + + trainer = BraTSTrainer(env_type=env, + max_epochs=max_epoch, + batch_size=batch_size, + device=device, + logdir="", + val_every=val_every, + num_gpus=num_gpus, + master_port=17751, + training_script=__file__) + + test_ds = get_test_loader_from_test(data_dir=data_dir) + + trainer.validation_single_gpu(test_ds) + + diff --git 
a/source_code/SegMamba/light_training/examples/liver2017/2_preprocessing_liver2017.py b/source_code/SegMamba/light_training/examples/liver2017/2_preprocessing_liver2017.py new file mode 100644 index 0000000000000000000000000000000000000000..dcb78601a53ae6141d80fff7ba791e4cab922d5d --- /dev/null +++ b/source_code/SegMamba/light_training/examples/liver2017/2_preprocessing_liver2017.py @@ -0,0 +1,123 @@ + +from light_training.preprocessing.preprocessors.default_preprocessor_liver_2017 import DefaultPreprocessor +import numpy as np +import pickle +import json + + +def process_train(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "/home/xingzhaohu/data/Liver_2017" + + preprocessor = DefaultPreprocessor(base_dir=base_dir) + + out_spacing = [1.0, 0.76757812, 0.76757812] + output_dir = "./data/fullres/train/" + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, 2], + num_processes=16, + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + +def process_val(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir=None, + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_test/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + 
foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel, + num_processes=16) + +def process_val_semi(): + # fullres spacing is [0.5 0.70410156 0.70410156] + # median_shape is [602.5 516.5 516.5] + base_dir = "./data/raw_data/Val_semi_postprocess" + image_dir = "img" + preprocessor = DefaultPreprocessor(base_dir=base_dir, + image_dir=image_dir, + label_dir="gt", + ) + + out_spacing = [0.5, 0.70410156, 0.70410156] + + with open("./data_analysis_result.txt", "r") as f: + content = f.read().strip("\n") + print(content) + content = eval(content) + foreground_intensity_properties_per_channel = content["intensity_statistics_per_channel"] + + output_dir = "./data/fullres/val_semi_postprocess/" + preprocessor.run(output_spacing=out_spacing, + output_dir=output_dir, + all_labels=[1, ], + foreground_intensity_properties_per_channel=foreground_intensity_properties_per_channel) + + +def plan(): + base_dir = "/home/xingzhaohu/data/Liver_2017" + + preprocessor = DefaultPreprocessor(base_dir=base_dir, + + ) + + preprocessor.run_plan() + +if __name__ == "__main__": + + # plan() + + process_train() + # import time + # s = time.time() + # process_val() + # e = time.time() + + # print(f"preprocessing time is {e - s}") + + # process_val_semi() + + +# + # preprocessor.run(output_spacing=[3, 0.9765625, 0.9765625], output_dir=output_dir) + + # data = np.load("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.npz") + + # image = data["data"] + # label = data["seg"] + # print(image.shape) + # print(label.shape) + + # import matplotlib.pyplot as plt + + # for i in range(20): + # plt.imshow(image[0, i], cmap="gray") + # plt.show() + + # df = open("/home/xingzhaohu/sharefs/datasets/AIIB23_nnunet/train/AIIB23_96.pkl", "rb") + + # info = pickle.load(df) + # print(info) \ No newline at end of file diff --git a/source_code/SegMamba/light_training/examples/read_pickle.py b/source_code/SegMamba/light_training/examples/read_pickle.py new file mode 100644 
index 0000000000000000000000000000000000000000..3833df033e6aff5fcc2b505140f7f6d15ecd584e --- /dev/null +++ b/source_code/SegMamba/light_training/examples/read_pickle.py @@ -0,0 +1,8 @@ +import pickle + +f = "/home/xingzhaohu/jiuding_code/SegRap2023/data/fullres/train/segrap_0000.pkl" + +with open(f, "rb") as ff: + s = pickle.load(ff) + + print(s) \ No newline at end of file diff --git a/source_code/SegMamba/light_training/launch.py b/source_code/SegMamba/light_training/launch.py new file mode 100644 index 0000000000000000000000000000000000000000..35bdc0017c419750c68fc99f9319f093a85eb6f1 --- /dev/null +++ b/source_code/SegMamba/light_training/launch.py @@ -0,0 +1,117 @@ +# Copyright 2020 The Microsoft DeepSpeed Team +""" +sailing runner is the main front-end to launching multi-worker +training jobs with DeepSpeed. By default this uses pdsh to parallel +ssh into multiple worker nodes and launch all the necessary processes +per rank for training. +""" + +import os +import sys +import json +import subprocess +import collections +import socket +import signal +import logging + +import torch.distributed as dist + + +def fetch_hostfile(hostfile_path): + if not os.path.isfile(hostfile_path): + print("Unable to find hostfile, will proceed with training " + "with local resources only.") + return None + # e.g., worker-0 slots=16 + with open(hostfile_path, 'r') as fd: + resource_pool = collections.OrderedDict() + for line in fd.readlines(): + line = line.strip() + if line == '': + # skip empty lines + continue + try: + hostname, slots = line.split() + _, slot_count = slots.split("=") + slot_count = int(slot_count) + except ValueError as err: + raise err + if hostname in resource_pool: + raise ValueError(f"host {hostname} is already defined") + resource_pool[hostname] = slot_count + + return resource_pool + + +def cmd_load_hyperparam(config_path=None, format="json", encoding="utf-8"): + """ + shell load arguments form argparse and config file + """ + # 
config_path='config/config_block_large_chinese.json' + format = config_path.rsplit('.')[-1] + with open(config_path, 'r', encoding=encoding) as f: + if format == "json": + config_dict = json.load(f) + else: + raise NameError("current format%s for hyperparam file is invalid" % + format) + config_cmd = [] + for key in config_dict: + if len(str(config_dict[key])) == 0: + config_cmd.append('--' + key) + else: + config_cmd.append('--' + key) + config_cmd.append(str(config_dict[key])) + return config_cmd + + +def launch_dist( + env_type="DDP", + num_nodes=1, + gpus_per_node=1, + master_addr='localhost', + master_port=17500, + training_script='train.py', + ): + + if num_nodes != 1: + print("多机多卡待测试。暂不支持。") + os._exit(0) + if env_type == "DDP": + cmd_launch = [] + cmd_launch.extend([ + # 'export NUM_NODES=' + str(num_nodes) + ';', + # 'export GPUS_PER_NODE=' + str(gpus_per_node) + ';', + # sys.executable, + # "python", + # '-m', + "torchrun" + # 'torch.distributed.launch' + ]) + torch_distributed_args = [ + '--nproc_per_node', + str(gpus_per_node), + '--nnodes', + str(num_nodes), + '--node_rank', + str(0), + '--master_addr', + master_addr, + '--master_port', + str(master_port), + ] + cmd_launch.extend(torch_distributed_args) + cmd_launch.append(training_script) + cmd_launch.append('--not_call_launch') + run_cmd = ' '.join(cmd_launch) + p = subprocess.Popen(run_cmd, shell=True, preexec_fn=os.setsid) + def signal_handler(signal, frame): + os.killpg(os.getpgid(p.pid), 9) + signal.signal(signal.SIGINT, signal_handler) + p.wait() + print ('finish') + + else : + print("不支持的env_type") + os._exit(0) diff --git a/source_code/SegMamba/light_training/loss/__init__.py b/source_code/SegMamba/light_training/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/light_training/loss/compound_losses.py b/source_code/SegMamba/light_training/loss/compound_losses.py new file mode 100644 
index 0000000000000000000000000000000000000000..c7b1912a83e70cd8083fc6f2aafc915fae20e9e6 --- /dev/null +++ b/source_code/SegMamba/light_training/loss/compound_losses.py @@ -0,0 +1,151 @@ +import torch +from .dice import SoftDiceLoss, MemoryEfficientSoftDiceLoss +from .robust_ce_loss import RobustCrossEntropyLoss, TopKLoss +from .helpers import softmax_helper_dim1 +from torch import nn + + +class DC_and_CE_loss(nn.Module): + def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None, + dice_class=SoftDiceLoss): + """ + Weights for CE and Dice do not need to sum to one. You can set whatever you want. + :param soft_dice_kwargs: + :param ce_kwargs: + :param aggregate: + :param square_dice: + :param weight_ce: + :param weight_dice: + """ + super(DC_and_CE_loss, self).__init__() + if ignore_label is not None: + ce_kwargs['ignore_index'] = ignore_label + + self.weight_dice = weight_dice + self.weight_ce = weight_ce + self.ignore_label = ignore_label + + self.ce = RobustCrossEntropyLoss(**ce_kwargs) + self.dc = dice_class(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs) + + def forward(self, net_output: torch.Tensor, target: torch.Tensor): + """ + target must be b, c, x, y(, z) with c=1 + :param net_output: + :param target: + :return: + """ + if self.ignore_label is not None: + assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \ + '(DC_and_CE_loss)' + mask = (target != self.ignore_label).bool() + # remove ignore label from target, replace with one of the known labels. 
It doesn't matter because we + # ignore gradients in those areas anyway + target_dice = torch.clone(target) + target_dice[target == self.ignore_label] = 0 + num_fg = mask.sum() + else: + target_dice = target + mask = None + + dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \ + if self.weight_dice != 0 else 0 + ce_loss = self.ce(net_output, target[:, 0].long()) \ + if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0 + + result = self.weight_ce * ce_loss + self.weight_dice * dc_loss + return result + + +class DC_and_BCE_loss(nn.Module): + def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False, + dice_class=MemoryEfficientSoftDiceLoss): + """ + DO NOT APPLY NONLINEARITY IN YOUR NETWORK! + + target mut be one hot encoded + IMPORTANT: We assume use_ignore_label is located in target[:, -1]!!! + + :param soft_dice_kwargs: + :param bce_kwargs: + :param aggregate: + """ + super(DC_and_BCE_loss, self).__init__() + if use_ignore_label: + bce_kwargs['reduction'] = 'none' + + self.weight_dice = weight_dice + self.weight_ce = weight_ce + self.use_ignore_label = use_ignore_label + + self.ce = nn.BCEWithLogitsLoss(**bce_kwargs) + self.dc = dice_class(apply_nonlin=torch.sigmoid, **soft_dice_kwargs) + + def forward(self, net_output: torch.Tensor, target: torch.Tensor): + if self.use_ignore_label: + # target is one hot encoded here. 
invert it so that it is True wherever we can compute the loss + mask = (1 - target[:, -1:]).bool() + # remove ignore channel now that we have the mask + target_regions = torch.clone(target[:, :-1]) + else: + target_regions = target + mask = None + + dc_loss = self.dc(net_output, target_regions, loss_mask=mask) + if mask is not None: + ce_loss = (self.ce(net_output, target_regions) * mask).sum() / torch.clip(mask.sum(), min=1e-8) + else: + ce_loss = self.ce(net_output, target_regions) + result = self.weight_ce * ce_loss + self.weight_dice * dc_loss + return result + + +class DC_and_topk_loss(nn.Module): + def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None): + """ + Weights for CE and Dice do not need to sum to one. You can set whatever you want. + :param soft_dice_kwargs: + :param ce_kwargs: + :param aggregate: + :param square_dice: + :param weight_ce: + :param weight_dice: + """ + super().__init__() + if ignore_label is not None: + ce_kwargs['ignore_index'] = ignore_label + + self.weight_dice = weight_dice + self.weight_ce = weight_ce + self.ignore_label = ignore_label + + self.ce = TopKLoss(**ce_kwargs) + self.dc = SoftDiceLoss(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs) + + def forward(self, net_output: torch.Tensor, target: torch.Tensor): + """ + target must be b, c, x, y(, z) with c=1 + :param net_output: + :param target: + :return: + """ + if self.ignore_label is not None: + assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \ + '(DC_and_CE_loss)' + mask = (target != self.ignore_label).bool() + # remove ignore label from target, replace with one of the known labels. 
It doesn't matter because we + # ignore gradients in those areas anyway + target_dice = torch.clone(target) + target_dice[target == self.ignore_label] = 0 + num_fg = mask.sum() + else: + target_dice = target + mask = None + + dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \ + if self.weight_dice != 0 else 0 + ce_loss = self.ce(net_output, target) \ + if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0 + + result = self.weight_ce * ce_loss + self.weight_dice * dc_loss + return result diff --git a/source_code/SegMamba/light_training/loss/ddp_allgather.py b/source_code/SegMamba/light_training/loss/ddp_allgather.py new file mode 100644 index 0000000000000000000000000000000000000000..c42b3ef654f361904d5fe1868621b3f6f5cd29a6 --- /dev/null +++ b/source_code/SegMamba/light_training/loss/ddp_allgather.py @@ -0,0 +1,49 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Any, Optional, Tuple + +import torch +from torch import distributed + + +def print_if_rank0(*args): + if distributed.get_rank() == 0: + print(*args) + + +class AllGatherGrad(torch.autograd.Function): + # stolen from pytorch lightning + @staticmethod + def forward( + ctx: Any, + tensor: torch.Tensor, + group: Optional["torch.distributed.ProcessGroup"] = None, + ) -> torch.Tensor: + ctx.group = group + + gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())] + + torch.distributed.all_gather(gathered_tensor, tensor, group=group) + gathered_tensor = torch.stack(gathered_tensor, dim=0) + + return gathered_tensor + + @staticmethod + def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]: + grad_output = torch.cat(grad_output) + + torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group) + + return grad_output[torch.distributed.get_rank()], None + diff --git a/source_code/SegMamba/light_training/loss/deepsupervision.py b/source_code/SegMamba/light_training/loss/deepsupervision.py new file mode 100644 index 0000000000000000000000000000000000000000..e8a3cf639c7b961317859aadf55c93c9744de657 --- /dev/null +++ b/source_code/SegMamba/light_training/loss/deepsupervision.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn +import numpy as np + +class DeepSupervisionWrapper(nn.Module): + def __init__(self, loss, weight_factors=None): + """ + Wraps a loss function so that it can be applied to multiple outputs. Forward accepts an arbitrary number of + inputs. Each input is expected to be a tuple/list. Each tuple/list must have the same length. The loss is then + applied to each entry like this: + l = w0 * loss(input0[0], input1[0], ...) + w1 * loss(input0[1], input1[1], ...) + ... + If weights are None, all w will be 1. 
+ """ + super(DeepSupervisionWrapper, self).__init__() + self.weight_factors = weight_factors + self.loss = loss + + def forward(self, *args): + for i in args: + assert isinstance(i, (tuple, list)), "all args must be either tuple or list, got %s" % type(i) + # we could check for equal lengths here as well but we really shouldn't overdo it with checks because + # this code is executed a lot of times! + + if self.weight_factors is None: + weights = [1] * len(args[0]) + else: + weights = self.weight_factors + + # we initialize the loss like this instead of 0 to ensure it sits on the correct device, not sure if that's + # really necessary + l = weights[0] * self.loss(*[j[0] for j in args]) + for i, inputs in enumerate(zip(*args)): + if i == 0: + continue + l += weights[i] * self.loss(*inputs) + return l + + + +class AutoDeepSupervision(nn.Module): + def __init__(self, loss, label_scale) -> None: + super().__init__() + + weights = np.array([1 / (2 ** i) for i in range(len(label_scale))]) + weights[-1] = 0 + # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 + weights = weights / weights.sum() + print(f"loss weights is {weights}") + + self.warpper = DeepSupervisionWrapper(loss, weights) + self.label_scale = label_scale + + def forward(self, preds, label): + pred_len = len(preds) + assert pred_len == len(self.label_scale) + labels = [] + for scale in self.label_scale: + labels.append(torch.nn.functional.interpolate(label, scale_factor=scale, mode="nearest")) + # label_1_2 = torch.nn.functional.interpolate(label, scale_factor=self.label_scale[1], mode="nearest") + # label_1_4 = torch.nn.functional.interpolate(label, scale_factor=self.label_scale[2], mode="nearest") + # label_1_8 = torch.nn.functional.interpolate(label, scale_factor=self.label_scale[3], mode="nearest") + # label_1_16 = torch.nn.functional.interpolate(label, scale_factor=self.label_scale[4], mode="nearest") + # labels = [label, label_1_2, label_1_4, label_1_8, label_1_16] + + return self.warpper(preds, labels) \ No newline at end of file diff --git a/source_code/SegMamba/light_training/loss/dice.py b/source_code/SegMamba/light_training/loss/dice.py new file mode 100644 index 0000000000000000000000000000000000000000..7ae7d0dd75c9d8582279ea5dd28a9c13f9f533a8 --- /dev/null +++ b/source_code/SegMamba/light_training/loss/dice.py @@ -0,0 +1,192 @@ +from typing import Callable + +import torch +from .ddp_allgather import AllGatherGrad +from .tensor_utilities import sum_tensor +from torch import nn + + +class SoftDiceLoss(nn.Module): + def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1., + ddp: bool = True, clip_tp: float = None): + """ + """ + super(SoftDiceLoss, self).__init__() + + self.do_bg = do_bg + self.batch_dice = batch_dice + self.apply_nonlin = apply_nonlin + self.smooth = smooth + self.clip_tp = clip_tp + self.ddp = ddp + + def forward(self, x, y, loss_mask=None): + shp_x = x.shape + + if self.batch_dice: + axes = [0] + list(range(2, len(shp_x))) + 
else: + axes = list(range(2, len(shp_x))) + + if self.apply_nonlin is not None: + x = self.apply_nonlin(x) + + tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False) + + if self.ddp and self.batch_dice: + tp = AllGatherGrad.apply(tp).sum(0) + fp = AllGatherGrad.apply(fp).sum(0) + fn = AllGatherGrad.apply(fn).sum(0) + + if self.clip_tp is not None: + tp = torch.clip(tp, min=self.clip_tp , max=None) + + nominator = 2 * tp + denominator = 2 * tp + fp + fn + + dc = (nominator + self.smooth) / (torch.clip(denominator + self.smooth, 1e-8)) + + if not self.do_bg: + if self.batch_dice: + dc = dc[1:] + else: + dc = dc[:, 1:] + dc = dc.mean() + + return -dc + +class MemoryEfficientSoftDiceLoss(nn.Module): + def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1., + ddp: bool = True): + """ + saves 1.6 GB on Dataset017 3d_lowres + """ + super(MemoryEfficientSoftDiceLoss, self).__init__() + + self.do_bg = do_bg + self.batch_dice = batch_dice + self.apply_nonlin = apply_nonlin + self.smooth = smooth + self.ddp = ddp + + def forward(self, x, y, loss_mask=None): + shp_x, shp_y = x.shape, y.shape + + if self.apply_nonlin is not None: + x = self.apply_nonlin(x) + + if not self.do_bg: + x = x[:, 1:] + + # make everything shape (b, c) + axes = list(range(2, len(shp_x))) + + with torch.no_grad(): + if len(shp_x) != len(shp_y): + y = y.view((shp_y[0], 1, *shp_y[1:])) + + if all([i == j for i, j in zip(shp_x, shp_y)]): + # if this is the case then gt is probably already a one hot encoding + y_onehot = y + else: + gt = y.long() + y_onehot = torch.zeros(shp_x, device=x.device, dtype=torch.bool) + y_onehot.scatter_(1, gt, 1) + + if not self.do_bg: + y_onehot = y_onehot[:, 1:] + sum_gt = y_onehot.sum(axes) if loss_mask is None else (y_onehot * loss_mask).sum(axes) + + intersect = (x * y_onehot).sum(axes) if loss_mask is None else (x * y_onehot * loss_mask).sum(axes) + sum_pred = x.sum(axes) if loss_mask is None else (x * 
loss_mask).sum(axes) + + if self.ddp and self.batch_dice: + intersect = AllGatherGrad.apply(intersect).sum(0) + sum_pred = AllGatherGrad.apply(sum_pred).sum(0) + sum_gt = AllGatherGrad.apply(sum_gt).sum(0) + + if self.batch_dice: + intersect = intersect.sum(0) + sum_pred = sum_pred.sum(0) + sum_gt = sum_gt.sum(0) + + dc = (2 * intersect + self.smooth) / (torch.clip(sum_gt + sum_pred + self.smooth, 1e-8)) + + dc = dc.mean() + return -dc + +def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False): + """ + net_output must be (b, c, x, y(, z))) + gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) + if mask is provided it must have shape (b, 1, x, y(, z))) + :param net_output: + :param gt: + :param axes: can be (, ) = no summation + :param mask: mask must be 1 for valid pixels and 0 for invalid pixels + :param square: if True then fp, tp and fn will be squared before summation + :return: + """ + if axes is None: + axes = tuple(range(2, len(net_output.size()))) + + shp_x = net_output.shape + shp_y = gt.shape + + with torch.no_grad(): + if len(shp_x) != len(shp_y): + gt = gt.view((shp_y[0], 1, *shp_y[1:])) + + if all([i == j for i, j in zip(net_output.shape, gt.shape)]): + # if this is the case then gt is probably already a one hot encoding + y_onehot = gt + else: + gt = gt.long() + y_onehot = torch.zeros(shp_x, device=net_output.device) + y_onehot.scatter_(1, gt, 1) + + tp = net_output * y_onehot + fp = net_output * (1 - y_onehot) + fn = (1 - net_output) * y_onehot + tn = (1 - net_output) * (1 - y_onehot) + + if mask is not None: + with torch.no_grad(): + mask_here = torch.tile(mask, (1, tp.shape[1], *[1 for i in range(2, len(tp.shape))])) + tp *= mask_here + fp *= mask_here + fn *= mask_here + tn *= mask_here + # benchmark whether tiling the mask would be faster (torch.tile). 
It probably is for large batch sizes + # OK it barely makes a difference but the implementation above is a tiny bit faster + uses less vram + # (using nnUNetv2_train 998 3d_fullres 0) + # tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) + # fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) + # fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) + # tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1) + + if square: + tp = tp ** 2 + fp = fp ** 2 + fn = fn ** 2 + tn = tn ** 2 + + if len(axes) > 0: + tp = sum_tensor(tp, axes, keepdim=False) + fp = sum_tensor(fp, axes, keepdim=False) + fn = sum_tensor(fn, axes, keepdim=False) + tn = sum_tensor(tn, axes, keepdim=False) + + return tp, fp, fn, tn + + +if __name__ == '__main__': + from nnunetv2.utilities.helpers import softmax_helper_dim1 + pred = torch.rand((2, 3, 32, 32, 32)) + ref = torch.randint(0, 3, (2, 32, 32, 32)) + + dl_old = SoftDiceLoss(apply_nonlin=softmax_helper_dim1, batch_dice=True, do_bg=False, smooth=0, ddp=False) + dl_new = MemoryEfficientSoftDiceLoss(apply_nonlin=softmax_helper_dim1, batch_dice=True, do_bg=False, smooth=0, ddp=False) + res_old = dl_old(pred, ref) + res_new = dl_new(pred, ref) + print(res_old, res_new) diff --git a/source_code/SegMamba/light_training/loss/helpers.py b/source_code/SegMamba/light_training/loss/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..42448e3f9c3de88ba13568ff7585797ee29607ab --- /dev/null +++ b/source_code/SegMamba/light_training/loss/helpers.py @@ -0,0 +1,27 @@ +import torch + + +def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor: + return torch.softmax(x, 0) + + +def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor: + return torch.softmax(x, 1) + + +def empty_cache(device: torch.device): + if device.type == 'cuda': + torch.cuda.empty_cache() + elif device.type == 'mps': + from torch import mps + 
mps.empty_cache() + else: + pass + + +class dummy_context(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass diff --git a/source_code/SegMamba/light_training/loss/robust_ce_loss.py b/source_code/SegMamba/light_training/loss/robust_ce_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..ad46659193ce1dbbff8ee6829bbf5e4223b6ed8f --- /dev/null +++ b/source_code/SegMamba/light_training/loss/robust_ce_loss.py @@ -0,0 +1,33 @@ +import torch +from torch import nn, Tensor +import numpy as np + + +class RobustCrossEntropyLoss(nn.CrossEntropyLoss): + """ + this is just a compatibility layer because my target tensor is float and has an extra dimension + + input must be logits, not probabilities! + """ + def forward(self, input: Tensor, target: Tensor) -> Tensor: + if len(target.shape) == len(input.shape): + assert target.shape[1] == 1 + target = target[:, 0] + return super().forward(input, target.long()) + + +class TopKLoss(RobustCrossEntropyLoss): + """ + input must be logits, not probabilities! 
+ """ + def __init__(self, weight=None, ignore_index: int = -100, k: float = 10, label_smoothing: float = 0): + self.k = k + super(TopKLoss, self).__init__(weight, False, ignore_index, reduce=False, label_smoothing=label_smoothing) + + def forward(self, inp, target): + target = target[:, 0].long() + res = super(TopKLoss, self).forward(inp, target) + num_voxels = np.prod(res.shape, dtype=np.int64) + res, _ = torch.topk(res.view((-1, )), int(num_voxels * self.k / 100), sorted=False) + return res.mean() + diff --git a/source_code/SegMamba/light_training/loss/tensor_utilities.py b/source_code/SegMamba/light_training/loss/tensor_utilities.py new file mode 100644 index 0000000000000000000000000000000000000000..b16ffcac2e46d93c19522937098f0af5b208aca7 --- /dev/null +++ b/source_code/SegMamba/light_training/loss/tensor_utilities.py @@ -0,0 +1,15 @@ +from typing import Union, List, Tuple + +import numpy as np +import torch + + +def sum_tensor(inp: torch.Tensor, axes: Union[np.ndarray, Tuple, List], keepdim: bool = False) -> torch.Tensor: + axes = np.unique(axes).astype(int) + if keepdim: + for ax in axes: + inp = inp.sum(int(ax), keepdim=True) + else: + for ax in sorted(axes, reverse=True): + inp = inp.sum(int(ax)) + return inp diff --git a/source_code/SegMamba/light_training/prediction.py b/source_code/SegMamba/light_training/prediction.py new file mode 100644 index 0000000000000000000000000000000000000000..191274eb2321f25e6207dbeea471ca8723c2fa08 --- /dev/null +++ b/source_code/SegMamba/light_training/prediction.py @@ -0,0 +1,243 @@ + +import torch +import numpy as np +import SimpleITK as sitk +import os +from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape +from scipy import ndimage +import skimage.measure as measure + +class dummy_context(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + +def large_connected_domain(label): + cd, num = measure.label(label, return_num=True, 
class Predictor:
    """Sliding-window inference wrapper with optional test-time mirror augmentation.

    ``window_infer`` must be a callable invoked as ``window_infer(x, model, **kwargs)``
    (e.g. a MONAI ``SlidingWindowInferer``).  ``mirror_axes`` lists the spatial axes
    to mirror at test time (0/1/2 for a 5D ``(B, C, D, H, W)`` input), or ``None``
    to disable mirroring.
    """

    def __init__(self, window_infer, mirror_axes=None) -> None:
        self.window_infer = window_infer
        self.mirror_axes = mirror_axes

    @staticmethod
    def predict_raw_probability(model_output, properties):
        """Resample per-channel logits to ``shape_after_cropping_before_resample``.

        Accepts ``(C, D, H, W)`` or ``(1, C, D, H, W)``.  Interpolation is attempted
        on the tensor's own device first and falls back to the CPU (fp32) on a
        ``RuntimeError`` — typically CUDA out-of-memory.
        """
        if len(model_output.shape) == 5:
            model_output = model_output[0]

        device = model_output.device
        target_shape = properties["shape_after_cropping_before_resample"]
        d, w, h = target_shape[0], target_shape[1], target_shape[2]
        print(f"resample....")
        channel = model_output.shape[0]

        try:
            with torch.no_grad():
                resample_output = torch.zeros((channel, d, w, h), dtype=torch.half, device=device)
                for c in range(channel):
                    resample_output[c] = torch.nn.functional.interpolate(
                        model_output[c][None, None], mode="trilinear", size=(d, w, h))[0, 0]
                del model_output
        except RuntimeError:
            # Most likely out of GPU memory: redo the interpolation on the CPU.
            with torch.no_grad():
                model_output = model_output.to("cpu")
                resample_output = torch.zeros((channel, d, w, h))
                for c in range(channel):
                    resample_output[c] = torch.nn.functional.interpolate(
                        model_output[c][None, None], mode="trilinear", size=(d, w, h))[0, 0]
                del model_output

        torch.cuda.empty_cache()
        return resample_output

    @staticmethod
    def predict_noncrop_probability(model_output, properties):
        """Write a cropped prediction back into a zero-filled array of the
        original (pre-crop) spatial shape.

        ``model_output`` may be 3D ``(D, H, W)`` or 4D ``(C, D, H, W)``; torch
        tensors are moved to numpy first.

        Raises ``ValueError`` for any other rank.  (BUG FIX: the original
        printed an error and called ``exit(0)``, which reported *success*
        to the calling process.)
        """
        print(f"restoring noncrop region......")
        if isinstance(model_output, torch.Tensor):
            model_output = model_output.cpu().numpy()

        torch.cuda.empty_cache()

        if model_output.ndim not in (3, 4):
            raise ValueError(f"restore crop error: unexpected prediction rank {model_output.ndim}")

        shape_before_cropping = properties["shape_before_cropping"]
        if isinstance(shape_before_cropping[0], torch.Tensor):
            shape_before_cropping = [shape_before_cropping[0].item(),
                                     shape_before_cropping[1].item(),
                                     shape_before_cropping[2].item()]
        bbox = properties["bbox_used_for_cropping"]

        if model_output.ndim == 3:
            none_crop_pred = np.zeros(
                [shape_before_cropping[0], shape_before_cropping[1], shape_before_cropping[2]],
                dtype=np.uint8)
            none_crop_pred[
                bbox[0][0]: bbox[0][1],
                bbox[1][0]: bbox[1][1],
                bbox[2][0]: bbox[2][1]] = model_output
        else:
            none_crop_pred = np.zeros(
                [model_output.shape[0],
                 shape_before_cropping[0], shape_before_cropping[1], shape_before_cropping[2]],
                dtype=np.uint8)
            none_crop_pred[
                :,
                bbox[0][0]: bbox[0][1],
                bbox[1][0]: bbox[1][1],
                bbox[2][0]: bbox[2][1]] = model_output
        del model_output
        return none_crop_pred

    def _mirror_flip_sets(self, x):
        """Every nonempty combination of tensor axes to flip for mirror TTA.

        For ``mirror_axes=[0, 1, 2]`` this reproduces the original branch order:
        (2,), (3,), (4,), (2,3), (2,4), (3,4), (2,3,4).
        """
        from itertools import combinations

        mirror_axes = self.mirror_axes
        # x should be 5d for 3d images and 4d for 2d, so the max value of
        # mirror_axes cannot exceed len(x.shape) - 3
        assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!'
        spatial = [m + 2 for m in mirror_axes]
        return [axes for r in range(1, len(spatial) + 1) for axes in combinations(spatial, r)]

    def maybe_mirror_and_predict(self, x, model, device=torch.device("cpu"), **kwargs) -> torch.Tensor:
        """Predict ``x`` with optional mirror TTA; every partial prediction is
        moved to the CPU immediately to bound GPU memory, and the returned
        (averaged) tensor lives on the CPU.
        """
        from contextlib import nullcontext  # stdlib replacement for the hand-rolled dummy_context

        window_infer = self.window_infer
        if type(device) is str:
            device = torch.device(device)

        model.to(device)
        x = x.to(device)
        with torch.no_grad():
            print(f"predicting....")
            with torch.autocast("cuda", enabled=True) if device.type == "cuda" else nullcontext():
                prediction = window_infer(x, model, **kwargs).cpu()

                if self.mirror_axes is not None:
                    for axes in self._mirror_flip_sets(x):
                        prediction += torch.flip(window_infer(torch.flip(x, axes), model, **kwargs), axes).cpu()
                        torch.cuda.empty_cache()
                    # identity pass + all flip combinations = 2 ** len(mirror_axes) predictions
                    prediction /= 2 ** len(self.mirror_axes)

        torch.cuda.empty_cache()
        del x
        return prediction

    def maybe_mirror_and_predict_cuda(self, x, model, device=torch.device("cpu"), **kwargs) -> torch.Tensor:
        """Same as :meth:`maybe_mirror_and_predict` but keeps every partial
        prediction on the inference device.

        BUG FIX: the original moved only the triple-mirror term to the CPU with
        ``.cpu()`` before adding it to the on-device accumulator, which raises a
        device-mismatch ``RuntimeError`` when running on CUDA.
        """
        from contextlib import nullcontext

        window_infer = self.window_infer
        if type(device) is str:
            device = torch.device(device)

        model.to(device)
        x = x.to(device)
        with torch.no_grad():
            print(f"predicting....")
            with torch.autocast("cuda", enabled=True) if device.type == "cuda" else nullcontext():
                prediction = window_infer(x, model, **kwargs)

                if self.mirror_axes is not None:
                    for axes in self._mirror_flip_sets(x):
                        prediction += torch.flip(window_infer(torch.flip(x, axes), model, **kwargs), axes)
                        torch.cuda.empty_cache()
                    prediction /= 2 ** len(self.mirror_axes)

        torch.cuda.empty_cache()
        del x
        return prediction

    def save_to_nii(self, return_output,
                    raw_spacing,
                    save_dir,
                    case_name,
                    postprocess=False):
        """Save a uint8 prediction (3D ``(Z, Y, X)`` or 4D ``(C, Z, Y, X)``) as
        ``<save_dir>/<case_name>.nii.gz``.

        ``postprocess=True`` keeps only the largest connected component
        (applied per channel for 4D input).
        """
        return_output = return_output.astype(np.uint8)

        if postprocess:
            if return_output.ndim == 3:
                return_output = large_connected_domain(return_output)
            elif return_output.ndim == 4:
                # apply the postprocessing independently to every channel
                for c in range(return_output.shape[0]):
                    return_output[c] = large_connected_domain(return_output[c])

        # IMPORTANT:
        # For 4D arrays with shape (C, Z, Y, X) we must use isVector=False,
        # otherwise SimpleITK will treat the last dimension as vector components.
        if return_output.ndim == 4:
            return_output = sitk.GetImageFromArray(return_output, isVector=False)
        else:
            return_output = sitk.GetImageFromArray(return_output)

        if isinstance(raw_spacing[0], torch.Tensor):
            raw_spacing = [raw_spacing[0].item(), raw_spacing[1].item(), raw_spacing[2].item()]
        raw_spacing = [float(raw_spacing[0]), float(raw_spacing[1]), float(raw_spacing[2])]

        if return_output.GetDimension() == 4:
            return_output.SetSpacing((raw_spacing[0], raw_spacing[1], raw_spacing[2], 1.0))
        else:
            return_output.SetSpacing((raw_spacing[0], raw_spacing[1], raw_spacing[2]))

        sitk.WriteImage(return_output, os.path.join(save_dir, f"{case_name}.nii.gz"))

        print(f"{os.path.join(save_dir, f'{case_name}.nii.gz')} is saved successfully")
class Predictor:
    """fp32 sliding-window inference helper (no autocast, no device juggling)."""

    def __init__(self, window_infer, mirror_axes=None) -> None:
        self.window_infer = window_infer
        self.mirror_axes = mirror_axes

    @staticmethod
    def predict_raw_probability(model_output, properties):
        """Resample logits back to ``shape_after_cropping_before_resample``.

        NOTE(review): ``current_spacing`` is hard-coded to
        ``[0.5, 0.70410156, 0.70410156]`` — this matches one specific
        dataset/plan only; confirm before reusing elsewhere.
        """
        if len(model_output.shape) == 5:
            model_output = model_output[0]

        shape_before_resample = model_output.shape
        if isinstance(model_output, torch.Tensor):
            model_output = model_output.cpu().numpy()

        spacing = properties["spacing"]
        new_spacing = [spacing[0].item(), spacing[1].item(), spacing[2].item()]
        # SimpleITK stores spacing as (x, y, z) while arrays are (z, y, x) -> reverse
        new_spacing_trans = new_spacing[::-1]

        print(f"current spacing is {[0.5, 0.70410156, 0.70410156]}, new_spacing is {new_spacing_trans}")
        shape_after_cropping_before_resample = properties["shape_after_cropping_before_resample"]
        d, w, h = (shape_after_cropping_before_resample[0].item(),
                   shape_after_cropping_before_resample[1].item(),
                   shape_after_cropping_before_resample[2].item())
        model_output = resample_data_or_seg_to_shape(model_output,
                                                     new_shape=(d, w, h),
                                                     current_spacing=[0.5, 0.70410156, 0.70410156],
                                                     new_spacing=new_spacing_trans,
                                                     is_seg=False,
                                                     order=1,
                                                     order_z=0)
        shape_after_resample = model_output.shape
        print(f"before resample shape: {shape_before_resample}, after resample shape: {shape_after_resample}")

        return model_output

    @staticmethod
    def apply_nonlinear(model_output, nonlinear_type="softmax"):
        """Convert ``(C, D, H, W)`` logits to a hard label map (softmax + argmax)
        or to per-channel probabilities (sigmoid); returns a numpy array."""
        if isinstance(model_output, np.ndarray):
            model_output = torch.from_numpy(model_output)
        assert len(model_output.shape) == 4

        assert nonlinear_type in ["softmax", "sigmoid"]

        if nonlinear_type == "softmax":
            model_output = torch.softmax(model_output, dim=0)
            model_output = model_output.argmax(dim=0)
        else:
            model_output = torch.sigmoid(model_output)

        return model_output.numpy()

    @staticmethod
    def predict_noncrop_probability(model_output, properties):
        """Restore a cropped 3D prediction into the original pre-crop volume.

        BUG FIX: the original built ``none_crop_pred`` but returned
        ``model_output`` (the still-cropped array); we return the restored
        full-size array instead.
        """
        assert len(model_output.shape) == 3

        shape_before_cropping = properties["shape_before_cropping"]
        none_crop_pred = np.zeros(
            [shape_before_cropping[0], shape_before_cropping[1], shape_before_cropping[2]],
            dtype=np.uint8)
        bbox_used_for_cropping = properties["bbox_used_for_cropping"]

        none_crop_pred[
            bbox_used_for_cropping[0][0]: bbox_used_for_cropping[0][1],
            bbox_used_for_cropping[1][0]: bbox_used_for_cropping[1][1],
            bbox_used_for_cropping[2][0]: bbox_used_for_cropping[2][1]] = model_output

        return none_crop_pred

    def maybe_mirror_and_predict(self, x, model, **kwargs) -> torch.Tensor:
        """Predict with optional mirror TTA, averaging the identity pass and
        every flip combination of ``mirror_axes`` (nnU-Net style)."""
        from itertools import combinations

        window_infer = self.window_infer

        with torch.no_grad():
            prediction = window_infer(x, model, **kwargs)
            mirror_axes = self.mirror_axes

            if mirror_axes is not None:
                # x should be 5d for 3d images and 4d for 2d, so the max value
                # of mirror_axes cannot exceed len(x.shape) - 3
                assert max(mirror_axes) <= len(x.shape) - 3, 'mirror_axes does not match the dimension of the input!'

                spatial = [m + 2 for m in mirror_axes]
                for r in range(1, len(spatial) + 1):
                    for axes in combinations(spatial, r):
                        prediction += torch.flip(window_infer(torch.flip(x, axes), model, **kwargs), axes)
                # identity pass + all flip combinations = 2 ** len(mirror_axes)
                prediction /= 2 ** len(mirror_axes)

        return prediction

    def save_to_nii(self, return_output,
                    raw_spacing,
                    save_dir,
                    case_name,
                    postprocess=False):
        """Save a 3D uint8 prediction as ``<save_dir>/<case_name>.nii.gz``;
        ``postprocess=True`` keeps only the largest connected component."""
        return_output = return_output.astype(np.uint8)

        if postprocess:
            return_output = large_connected_domain(return_output)

        return_output = sitk.GetImageFromArray(return_output)
        return_output.SetSpacing((raw_spacing[0].item(), raw_spacing[1].item(), raw_spacing[2].item()))

        sitk.WriteImage(return_output, os.path.join(save_dir, f"{case_name}.nii.gz"))
def create_nonzero_mask(data):
    """Boolean mask over the spatial dimensions that is True wherever any
    channel of ``data`` is nonzero, with enclosed holes filled.

    :param data: array of shape (C, X, Y, Z) or (C, X, Y)
    :return: the mask is True where the data is nonzero
    """
    from scipy.ndimage import binary_fill_holes
    assert data.ndim in (3, 4), "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
    any_nonzero = (data != 0).any(axis=0)
    return binary_fill_holes(any_nonzero)


def crop_to_nonzero(data, seg=None, nonzero_label=-1):
    """Crop ``data`` (and optionally ``seg``) to the bounding box of the
    nonzero region.

    :param data: (C, ...) image array
    :param seg: optional (C, ...) segmentation array
    :param nonzero_label: this will be written into the segmentation map
        for the region outside the nonzero mask
    :return: (cropped data, cropped/derived seg, bounding box)
    """
    mask = create_nonzero_mask(data)
    bbox = get_bbox_from_mask(mask)
    slicer = bounding_box_to_slice(bbox)

    data = data[(slice(None), *slicer)]
    if seg is not None:
        seg = seg[(slice(None), *slicer)]

    mask = mask[slicer][None]
    if seg is not None:
        # mark background voxels outside the nonzero mask
        seg[(seg == 0) & (~mask)] = nonzero_label
    else:
        # no segmentation given: derive one from the nonzero mask itself
        mask = mask.astype(np.int8)
        mask[mask == 0] = nonzero_label
        mask[mask > 0] = 0
        seg = mask
    return data, seg, bbox
class ImageNormalization(ABC):
    """Base class for per-channel intensity normalization schemes."""

    # Whether voxels outside the nonzero mask are left at zero when
    # use_mask_for_norm is True; subclasses declare their behavior here.
    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = None

    def __init__(self, use_mask_for_norm: bool = None, intensityproperties: dict = None,
                 target_dtype: Type[number] = np.float32):
        assert use_mask_for_norm is None or isinstance(use_mask_for_norm, bool)
        self.use_mask_for_norm = use_mask_for_norm
        assert isinstance(intensityproperties, dict)
        self.intensityproperties = intensityproperties
        self.target_dtype = target_dtype

    @abstractmethod
    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
        """Normalize ``image`` (optionally using ``seg``); shapes must match."""
        pass


class ZScoreNormalization(ImageNormalization):
    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = True

    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
        """Z-score normalize ``image``.

        ``seg`` encodes the zero-valued "outside" region with negative labels
        (think zero voxels around the brain as in BraTS).  When
        ``use_mask_for_norm`` is set, statistics are computed and applied only
        inside the ``seg >= 0`` region; outside voxels stay untouched.
        """
        image = image.astype(self.target_dtype)
        if self.use_mask_for_norm:
            inside = seg >= 0
            mean = image[inside].mean()
            std = image[inside].std()
            image[inside] = (image[inside] - mean) / (max(std, 1e-8))
        else:
            mean = image.mean()
            std = image.std()
            image = (image - mean) / (max(std, 1e-8))
        return image


class CTNormStandard:
    """Linear rescale from ``[a_min, a_max]`` to ``[b_min, b_max]`` with
    optional clipping of the result to the target range."""

    def __init__(
        self,
        a_min: float,
        a_max: float,
        b_min,
        b_max,
        clip=False,
        dtype=np.float32,
    ):
        self.a_min = a_min
        self.a_max = a_max
        self.b_min = b_min
        self.b_max = b_max
        self.clip = clip
        self.dtype = dtype

    def __call__(self, img):
        """Apply the transform to ``img``."""
        scaled = (img - self.a_min) / (self.a_max - self.a_min)
        if (self.b_min is not None) and (self.b_max is not None):
            scaled = scaled * (self.b_max - self.b_min) + self.b_min
        if self.clip:
            scaled = clip(scaled, self.b_min, self.b_max)
        return scaled


class CTNormalization(ImageNormalization):
    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False

    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
        """Clip to the foreground 0.5/99.5 percentiles, then z-score with the
        dataset-wide foreground mean/std from ``intensityproperties``."""
        assert self.intensityproperties is not None, "CTNormalization requires intensity properties"
        props = self.intensityproperties
        image = image.astype(self.target_dtype)
        image = np.clip(image, props['percentile_00_5'], props['percentile_99_5'])
        return (image - props['mean']) / max(props['std'], 1e-8)


class NoNormalization(ImageNormalization):
    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False

    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
        """Only cast to the target dtype; no intensity change."""
        return image.astype(self.target_dtype)


class RescaleTo01Normalization(ImageNormalization):
    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False

    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
        """Shift to zero minimum and divide by the range, yielding [0, 1]."""
        image = image.astype(self.target_dtype)
        image = image - image.min()
        return image / np.clip(image.max(), a_min=1e-8, a_max=None)


class RGBTo01Normalization(ImageNormalization):
    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False

    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
        """Map uint8 RGB values [0, 255] to [0, 1]."""
        assert image.min() >= 0, "RGB images are uint 8, for whatever reason I found pixel values smaller than 0. " \
                                 "Your images do not seem to be RGB images"
        assert image.max() <= 255, "RGB images are uint 8, for whatever reason I found pixel values greater than 255" \
                                   ". Your images do not seem to be RGB images"
        image = image.astype(self.target_dtype)
        return image / 255.
If it is + not found, use the default (ZScoreNormalization) + """ + norm_scheme = channel_name_to_normalization_mapping.get(channel_name) + if norm_scheme is None: + norm_scheme = ZScoreNormalization + # print('Using %s for image normalization' % norm_scheme.__name__) + return norm_scheme diff --git a/source_code/SegMamba/light_training/preprocessing/normalization/readme.md b/source_code/SegMamba/light_training/preprocessing/normalization/readme.md new file mode 100644 index 0000000000000000000000000000000000000000..7b5439612571240eba0926370bb1fed5044eecce --- /dev/null +++ b/source_code/SegMamba/light_training/preprocessing/normalization/readme.md @@ -0,0 +1,5 @@ +The channel_names entry in dataset.json only determines the normlaization scheme. So if you want to use something different +then you can just +- create a new subclass of ImageNormalization +- map your custom channel identifier to that subclass in channel_name_to_normalization_mapping +- run plan and preprocess again with your custom normlaization scheme \ No newline at end of file diff --git a/source_code/SegMamba/light_training/preprocessing/preprocessors/__init__.py b/source_code/SegMamba/light_training/preprocessing/preprocessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor.py b/source_code/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor.py new file mode 100644 index 0000000000000000000000000000000000000000..15b7f599733069c5542930588f8b5e070fea3c5e --- /dev/null +++ b/source_code/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor.py @@ -0,0 +1,528 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
def create_image(image_arr, spacing):
    """Wrap a numpy array into a SimpleITK image carrying the given spacing."""
    image = sitk.GetImageFromArray(image_arr)
    image.SetSpacing(spacing)
    return image


def get_shape_must_be_divisible_by(net_numpool_per_axis):
    """Per-axis divisibility constraint implied by the pooling counts:
    an axis pooled p times must be divisible by 2**p."""
    return 2 ** np.asarray(net_numpool_per_axis)


def pad_shape(shape, must_be_divisible_by):
    """Round ``shape`` up so every axis is divisible by ``must_be_divisible_by``.

    ``must_be_divisible_by`` may be a scalar (applied to all axes) or a
    per-axis sequence of the same length as ``shape``.

    :param shape:
    :param must_be_divisible_by:
    :return: the padded shape as an int numpy array
    """
    if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
        must_be_divisible_by = [must_be_divisible_by] * len(shape)
    else:
        assert len(must_be_divisible_by) == len(shape)

    padded = []
    for dim, divisor in zip(shape, must_be_divisible_by):
        remainder = dim % divisor
        # already divisible -> unchanged, otherwise round up to next multiple
        padded.append(dim if remainder == 0 else dim + divisor - remainder)
    return np.array(padded).astype(int)
Once they are 3 they remain 3 + for d in range(dim): + if kernel_size[d] == 3: + continue + else: + if spacings_of_axes[d] / min(current_spacing) < 2: + kernel_size[d] = 3 + + other_axes = [i for i in range(dim) if i not in valid_axes_for_pool] + + pool_kernel_sizes = [0] * dim + for v in valid_axes_for_pool: + pool_kernel_sizes[v] = 2 + num_pool_per_axis[v] += 1 + current_spacing[v] *= 2 + current_size[v] = np.ceil(current_size[v] / 2) + for nv in other_axes: + pool_kernel_sizes[nv] = 1 + + pool_op_kernel_sizes.append(pool_kernel_sizes) + conv_kernel_sizes.append(deepcopy(kernel_size)) + #print(conv_kernel_sizes) + + must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis) + patch_size = pad_shape(patch_size, must_be_divisible_by) + + # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here + conv_kernel_sizes.append([3]*dim) + return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by + + +class DefaultPreprocessor(object): + def __init__(self, + base_dir, + image_dir, + # output_dir, + # out_spacing, + label_dir=None, + data_type="CT"): + """ + Everything we need is in the plans. Those are given when run() is called + """ + self.base_dir = base_dir + self.image_dir = image_dir + self.label_dir = label_dir + + self.data_type = data_type + + def run_case_npy(self, data: np.ndarray, seg, properties: dict): + # let's not mess up the inputs! + data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. 
This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + properties['bbox_used_for_cropping'] = bbox + + # crop, remember to store size before cropping! + shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + ) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + # assert len(seg.shape) == 4 + # seg = create_image(seg[0], original_spacing) + # seg = resample_img(seg, out_spacing=self.out_spacing, is_label=True) + # seg = sitk.GetArrayFromImage(seg)[None] + # print(f"all_labels is {np.unique(seg)}") + # if np.max(seg) > 127: + # seg = seg.astype(np.int16) + # else: + # seg = seg.astype(np.int8) + # seg = resample_data_or_seg_to_spacing(seg, current_spacing=original_spacing, + # new_spacing=self.out_spacing, is_seg=True) + + print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, seg + + # need to modify + def get_iterable_list(self): + all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir)) + + return all_cases + + def 
_normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = CTNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel[str(c)]) + data[c] = normalizer.run(data[c], seg[0]) + return data + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + data = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name)) + seg_arr = None + ## 一定要是float32!!!! + data_arr = sitk.GetArrayFromImage(data).astype(np.float32) + data_arr = data_arr[None] + + if self.label_dir is not None: + seg = sitk.ReadImage(os.path.join(self.base_dir, self.label_dir, case_name)) + ## 读出来以后一定转float32!!! + seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + seg_arr = seg_arr[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data_arr) + + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": data.GetSpacing(), + "raw_size": data_arr.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data_arr, seg_arr, properties + + def run_case(self, case_name): + """ + seg file can be none (test cases) + + order of operations is: transpose -> crop -> resample + so when we export we need to run the following order: resample -> crop -> transpose (we could also run + transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner) + """ + data, seg, properties = self.read_data(case_name) + + data, seg = self.run_case_npy(data, seg, properties) + return data, seg, properties + + def run_case_save(self, case_name): + print(case_name + "~~~~~~~~" * 10) + data, seg, properties = self.run_case(case_name) 
+ # print('dtypes', data.dtype, seg.dtype) + case_name = case_name.split(".")[0] + np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, seg=seg) + write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl') + print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}") + + def experiment_plan(self, case_name): + + data, seg, properties = self.read_data(case_name) + print(f"labels is {np.unique(seg)}") + spacing = properties["spacing"] + raw_size = properties["raw_size"] + intensities_per_channel = properties["intensities_per_channel"] + + return spacing, raw_size, intensities_per_channel + + def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray: + # if self.overwrite_target_spacing is not None: + # return np.array(self.overwrite_target_spacing) + + # spacings = self.dataset_fingerprint['spacings'] + # sizes = self.dataset_fingerprint['shapes_after_crop'] + + target = np.percentile(np.vstack(spacings), 50, 0) + target_size = np.percentile(np.vstack(sizes), 50, 0) + # we need to identify datasets for which a different target spacing could be beneficial. 
These datasets have + # the following properties: + # - one axis which much lower resolution than the others + # - the lowres axis has much less voxels than the others + # - (the size in mm of the lowres axis is also reduced) + worst_spacing_axis = np.argmax(target) + other_axes = [i for i in range(len(target)) if i != worst_spacing_axis] + other_spacings = [target[i] for i in other_axes] + other_sizes = [target_size[i] for i in other_axes] + + has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings)) + has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes) + + if has_aniso_spacing and has_aniso_voxels: + spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis] + target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10) + # don't let the spacing of that axis get higher than the other axes + if target_spacing_of_that_axis < max(other_spacings): + target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5 + target[worst_spacing_axis] = target_spacing_of_that_axis + return target + + def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray], + old_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray: + ## spacing need to be transposed + old_spacing = list(old_spacing)[::-1] + new_spacing = list(new_spacing)[::-1] + + assert len(old_spacing) == len(old_shape) + assert len(old_shape) == len(new_spacing) + new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)]) + return new_shape + + def run_plan(self): + all_iter = self.get_iterable_list() + spacings = [] + sizes = [] + intensities_per_channels = [] + print(f"analysing data......") + for case in tqdm(all_iter, total=len(all_iter)): + spacing, size, intensities_per_channel = self.experiment_plan(case) + spacings.append(spacing) + sizes.append(size) + 
    def run_plan(self):
        """Dataset fingerprinting / experiment planning over all cases.

        Collects spacing, size and foreground-intensity samples per case, derives the
        median target spacing and shape, an initial patch size under a 256^3 voxel
        budget, and the network pooling/kernel topology; writes the results as JSON
        to ./data_analysis_result.txt.
        """
        all_iter = self.get_iterable_list()
        spacings = []
        sizes = []
        intensities_per_channels = []
        print(f"analysing data......")
        for case in tqdm(all_iter, total=len(all_iter)):
            spacing, size, intensities_per_channel = self.experiment_plan(case)
            spacings.append(spacing)
            sizes.append(size)
            intensities_per_channels.append(intensities_per_channel)

        print(f"all spacing is {spacings}")
        print(f"all sizes is {sizes}")
        # pool the per-case intensity samples into one flat array per channel
        foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in
                                              range(len(intensities_per_channels[0]))]

        num_channels = len(intensities_per_channels[0])

        intensity_statistics_per_channel = {}
        for i in range(num_channels):
            intensity_statistics_per_channel[i] = {
                'mean': float(np.mean(foreground_intensities_per_channel[i])),
                'median': float(np.median(foreground_intensities_per_channel[i])),
                'std': float(np.std(foreground_intensities_per_channel[i])),
                'min': float(np.min(foreground_intensities_per_channel[i])),
                'max': float(np.max(foreground_intensities_per_channel[i])),
                'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)),
                'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)),
            }

        print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}")

        fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes)
        # printed reversed: (x, y, z) metadata order -> (z, y, x) array order
        print(f"fullres spacing is {fullres_spacing[::-1]}")

        # get transposed new median shape (what we would have after resampling)
        new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in
                      zip(spacings, sizes)]
        new_median_shape = np.median(new_shapes, 0)
        print(f"median_shape is {new_median_shape}")

        # distribute an isotropic 256^3 voxel budget inversely to the spacing
        tmp = 1 / np.array(fullres_spacing)
        initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)]

        print(f"initial_patch_size is {initial_patch_size[::-1]}")

        # bottleneck feature maps must keep an edge length of at least 4;
        # 999999 effectively disables the max-pooling-count cap
        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \
        shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size,
                                                             4,
                                                             999999)
        print(f"target medium patch size is {patch_size[::-1]}")

        analysis_path = "./data_analysis_result.txt"
        with open(analysis_path, "w") as f:

            f.write(json.dumps({
                "intensity_statistics_per_channel": intensity_statistics_per_channel,
                "fullres spacing": fullres_spacing.tolist(),
                "median_shape": new_median_shape.tolist(),
                "initial_patch_size": initial_patch_size,
                "target medium patch size": patch_size[::-1].tolist()
            }))
        print(f"Analysis done, save to {analysis_path}")
We could also just sample less in those cases but that would than cause these + # training cases to be underrepresented + intensities_per_channel.append( + rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else []) + intensity_statistics_per_channel.append({ + 'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan, + 'median': np.median(foreground_pixels) if num_fg > 0 else np.nan, + 'min': np.min(foreground_pixels) if num_fg > 0 else np.nan, + 'max': np.max(foreground_pixels) if num_fg > 0 else np.nan, + 'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan, + 'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan, + + }) + + return intensities_per_channel, intensity_statistics_per_channel + + @staticmethod + def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]], + seed: int = 1234, verbose: bool = False): + num_samples = 10000 + min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too + # sparse + rndst = np.random.RandomState(seed) + class_locs = {} + for c in classes_or_regions: + k = c if not isinstance(c, list) else tuple(c) + if isinstance(c, (tuple, list)): + ## region + mask = seg == c[0] + for cc in c[1:]: + mask = mask | (seg == cc) + all_locs = np.argwhere(mask) + else: + all_locs = np.argwhere(seg == c) + if len(all_locs) == 0: + class_locs[k] = [] + continue + target_num_samples = min(num_samples, len(all_locs)) + target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage))) + + selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)] + class_locs[k] = selected + if verbose: + print(c, target_num_samples) + + return class_locs + + def run(self, output_spacing, + output_dir, + all_labels, + foreground_intensity_properties_per_channel=None, + num_processes=8): + self.out_spacing = output_spacing + 
    def run(self, output_spacing,
            output_dir,
            all_labels,
            foreground_intensity_properties_per_channel=None,
            num_processes=8):
        """Preprocess every case and save <case>.npz / <case>.pkl into output_dir.

        :param output_spacing: target spacing, SimpleITK (x, y, z) order
        :param output_dir: destination folder (created if missing)
        :param all_labels: label values/regions forwarded to _sample_foreground_locations
        :param foreground_intensity_properties_per_channel: stats dict used by CTNormalization
        :param num_processes: size of the multiprocessing worker pool
        """
        self.out_spacing = output_spacing
        self.all_labels = all_labels
        self.output_dir = output_dir
        self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel

        all_iter = self.get_iterable_list()

        maybe_mkdir_p(self.output_dir)

        # test_run
        # NOTE(review): this smoke-tests the first case in-process; the same case is
        # processed (and saved) a second time by the worker pool below.
        for case_name in all_iter:
            self.run_case_save(case_name)
            break

        # multiprocessing magic.
        r = []
        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
            for case_name in all_iter:
                r.append(p.starmap_async(self.run_case_save,
                                         ((case_name, ),)))
            remaining = list(range(len(all_iter)))
            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
            # So we need to store the original pool of workers.
            # NOTE(review): p._pool is a private multiprocessing attribute and may
            # change across Python versions.
            workers = [j for j in p._pool]
            with tqdm(desc=None, total=len(all_iter)) as pbar:
                while len(remaining) > 0:
                    all_alive = all([j.is_alive() for j in workers])
                    if not all_alive:
                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
                                           'OK jokes aside.\n'
                                           'One of your background processes is missing. This could be because of '
                                           'an error (look for an error message) or because it was killed '
                                           'by your OS due to running out of RAM. If you don\'t see '
                                           'an error message, out of RAM is likely the problem. In that case '
                                           'reducing the number of workers might help')
                    done = [i for i in remaining if r[i].ready()]
                    for _ in done:
                        pbar.update()
                    remaining = [i for i in remaining if i not in done]
                    sleep(0.1)
def create_image(image_arr, spacing):
    """Wrap a numpy array as a SimpleITK image carrying the given (x, y, z) spacing."""
    image = sitk.GetImageFromArray(image_arr)
    image.SetSpacing(spacing)
    return image


def get_shape_must_be_divisible_by(net_numpool_per_axis):
    """Per-axis divisibility constraint implied by the number of 2x poolings on each axis."""
    return 2 ** np.array(net_numpool_per_axis)


def pad_shape(shape, must_be_divisible_by):
    """
    Pads shape so that it is divisible by must_be_divisible_by.

    :param shape: per-axis sizes
    :param must_be_divisible_by: scalar or per-axis divisors
    :return: int numpy array; each axis rounded UP to the next multiple
        (axes that are already divisible are left unchanged)
    """
    if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
        must_be_divisible_by = [must_be_divisible_by] * len(shape)
    else:
        assert len(must_be_divisible_by) == len(shape)

    # (-s) % m is 0 when s is already divisible, otherwise the deficit to the next multiple
    new_shp = [shape[i] + ((-shape[i]) % must_be_divisible_by[i]) for i in range(len(shape))]
    return np.array(new_shp).astype(int)


def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
    """
    this is the same as get_pool_and_conv_props_v2 from old nnunet

    Derives the per-axis pooling counts, pooling kernels, conv kernel sizes and the
    padded patch size from the voxel spacing and an initial patch size. Pooling
    continues per axis while the feature map stays >= 2*min_feature_map_size and the
    axis spacing is within a factor of 2 of the finest spacing.

    :param spacing: voxel spacing, one entry per axis
    :param patch_size: initial patch size, one entry per axis
    :param min_feature_map_size: min edge length of feature maps in bottleneck
    :param max_numpool: maximum number of poolings per axis
    :return: (num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes,
              padded patch_size, shape_must_be_divisible_by)
    """
    dim = len(spacing)

    current_spacing = deepcopy(list(spacing))
    current_size = deepcopy(list(patch_size))

    pool_op_kernel_sizes = [[1] * len(spacing)]
    conv_kernel_sizes = []

    num_pool_per_axis = [0] * dim
    kernel_size = [1] * dim

    while True:
        # exclude axes that we cannot pool further because of min_feature_map_size constraint
        valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2 * min_feature_map_size]
        if len(valid_axes_for_pool) < 1:
            break

        spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]

        # find axes that are within factor of 2 of the smallest spacing
        min_spacing_of_valid = min(spacings_of_axes)
        valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]

        # max_numpool constraint
        valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]

        if len(valid_axes_for_pool) == 1:
            if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
                pass
            else:
                break
        if len(valid_axes_for_pool) < 1:
            break

        # kernel sizes start at 1 and are bumped to 3 once their axis' spacing comes
        # within a factor of 2 of the finest spacing; once they are 3 they remain 3.
        # BUGFIX: index current_spacing (length == dim) instead of spacings_of_axes,
        # which only has entries for the still-poolable axes and therefore raised
        # IndexError (or silently used the wrong axis' spacing) as soon as some axis
        # dropped out of valid_axes_for_pool, e.g. spacing (1,1,8) with size (256,256,6).
        for d in range(dim):
            if kernel_size[d] == 3:
                continue
            else:
                if current_spacing[d] / min(current_spacing) < 2:
                    kernel_size[d] = 3

        other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]

        pool_kernel_sizes = [0] * dim
        for v in valid_axes_for_pool:
            pool_kernel_sizes[v] = 2
            num_pool_per_axis[v] += 1
            current_spacing[v] *= 2
            current_size[v] = np.ceil(current_size[v] / 2)
        for nv in other_axes:
            pool_kernel_sizes[nv] = 1

        pool_op_kernel_sizes.append(pool_kernel_sizes)
        conv_kernel_sizes.append(deepcopy(kernel_size))

    must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
    patch_size = pad_shape(patch_size, must_be_divisible_by)

    # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
    conv_kernel_sizes.append([3] * dim)
    return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by
+ data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + properties['bbox_used_for_cropping'] = bbox + + # crop, remember to store size before cropping! + shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + ) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, seg + + # need to modify + def get_iterable_list(self): + all_cases = os.listdir(os.path.join(self.base_dir)) + + all_cases_2 = [] + for c in all_cases: + if 
os.path.isdir(os.path.join(self.base_dir, c)): + all_cases_2.append(c) + + return all_cases_2 + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = CTNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel[str(c)]) + data[c] = normalizer.run(data[c], seg[0]) + return data + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + try: + data = sitk.ReadImage(os.path.join(self.base_dir, case_name, self.image_name)) + except: + print(f"data read error: {self.base_dir, case_name}") + return None, None, None + seg_arr = None + ## 一定要是float32!!!! + data_arr = sitk.GetArrayFromImage(data).astype(np.float32) + data_arr = data_arr[None] + + if os.path.exists(os.path.join(self.base_dir, case_name, self.seg_dir)): + segs = None + index = 0 + for target in self.seg_list: + index += 1 + seg = sitk.ReadImage(os.path.join(self.base_dir, case_name, self.seg_dir, target)) + ## 读出来以后一定转float32!!! 
+ seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + if segs is None: + segs = seg_arr + else : + segs[seg_arr == 1] = index + + segs = segs[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(segs, data_arr) + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": data.GetSpacing(), + "raw_size": data_arr.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data_arr, segs, properties + + def run_case(self, case_name): + """ + seg file can be none (test cases) + + order of operations is: transpose -> crop -> resample + so when we export we need to run the following order: resample -> crop -> transpose (we could also run + transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner) + """ + data, seg, properties = self.read_data(case_name) + if data is not None: + data, seg = self.run_case_npy(data, seg, properties) + return data, seg, properties + else : + return None, None, None + + def run_case_save(self, case_name): + print(case_name + "~~~~~~~~" * 10) + data, seg, properties = self.run_case(case_name) + if data is not None: + # print('dtypes', data.dtype, seg.dtype) + case_name = case_name.split(".")[0] + np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, seg=seg) + write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl') + print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}") + + def experiment_plan(self, case_name): + + data, seg, properties = self.read_data(case_name) + if data is None: + return None, None, None + + print(f"labels is {np.unique(seg)}") + spacing = properties["spacing"] + raw_size = properties["raw_size"] + intensities_per_channel = properties["intensities_per_channel"] + + return 
spacing, raw_size, intensities_per_channel + + def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray: + # if self.overwrite_target_spacing is not None: + # return np.array(self.overwrite_target_spacing) + + # spacings = self.dataset_fingerprint['spacings'] + # sizes = self.dataset_fingerprint['shapes_after_crop'] + + target = np.percentile(np.vstack(spacings), 50, 0) + target_size = np.percentile(np.vstack(sizes), 50, 0) + # we need to identify datasets for which a different target spacing could be beneficial. These datasets have + # the following properties: + # - one axis which much lower resolution than the others + # - the lowres axis has much less voxels than the others + # - (the size in mm of the lowres axis is also reduced) + worst_spacing_axis = np.argmax(target) + other_axes = [i for i in range(len(target)) if i != worst_spacing_axis] + other_spacings = [target[i] for i in other_axes] + other_sizes = [target_size[i] for i in other_axes] + + has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings)) + has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes) + + if has_aniso_spacing and has_aniso_voxels: + spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis] + target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10) + # don't let the spacing of that axis get higher than the other axes + if target_spacing_of_that_axis < max(other_spacings): + target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5 + target[worst_spacing_axis] = target_spacing_of_that_axis + return target + + def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray], + old_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray: + ## spacing need to be transposed + old_spacing = list(old_spacing)[::-1] + new_spacing = list(new_spacing)[::-1] + + assert len(old_spacing) 
== len(old_shape) + assert len(old_shape) == len(new_spacing) + new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)]) + return new_shape + + def run_plan(self): + all_iter = self.get_iterable_list() + spacings = [] + sizes = [] + intensities_per_channels = [] + print(f"analysing data......") + for case in tqdm(all_iter, total=len(all_iter)): + if os.path.isdir(os.path.join(self.base_dir, case)): + spacing, size, intensities_per_channel = self.experiment_plan(case) + if spacing is None: + continue + + spacings.append(spacing) + sizes.append(size) + intensities_per_channels.append(intensities_per_channel) + + print(f"all spacing is {spacings}") + print(f"all sizes is {sizes}") + foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in + range(len(intensities_per_channels[0]))] + + num_channels = len(intensities_per_channels[0]) + + intensity_statistics_per_channel = {} + for i in range(num_channels): + intensity_statistics_per_channel[i] = { + 'mean': float(np.mean(foreground_intensities_per_channel[i])), + 'median': float(np.median(foreground_intensities_per_channel[i])), + 'std': float(np.std(foreground_intensities_per_channel[i])), + 'min': float(np.min(foreground_intensities_per_channel[i])), + 'max': float(np.max(foreground_intensities_per_channel[i])), + 'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)), + 'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)), + } + + print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}") + + fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes) + print(f"fullres spacing is {fullres_spacing[::-1]}") + + # get transposed new median shape (what we would have after resampling) + new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in + zip(spacings, sizes)] + new_median_shape = np.median(new_shapes, 0) + 
    def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234,
                                       num_samples: int = 10000):
        """
        Sample foreground (seg > 0) intensities per image channel and summarize them.

        images=image with multiple channels = shape (c, x, y(, z));
        segmentation has shape (1, x, y(, z)), channel 0 defines the foreground mask.

        :return: (intensities_per_channel, intensity_statistics_per_channel); the first
            holds num_samples values drawn with replacement per channel (an empty list
            when a case has no foreground), the second holds summary statistics
            (np.nan entries when there is no foreground)
        """
        assert len(images.shape) == 4
        assert len(segmentation.shape) == 4

        assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-("
        assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-("

        rs = np.random.RandomState(seed)  # fixed seed keeps the fingerprint reproducible

        intensities_per_channel = []
        # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have
        intensity_statistics_per_channel = []

        # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work
        foreground_mask = segmentation[0] > 0

        for i in range(len(images)):
            foreground_pixels = images[i][foreground_mask]
            num_fg = len(foreground_pixels)
            # sample with replacement so that we don't get issues with cases that have less than num_samples
            # foreground_pixels. We could also just sample less in those cases but that would then cause these
            # training cases to be underrepresented
            intensities_per_channel.append(
                rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else [])
            intensity_statistics_per_channel.append({
                'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan,
                'median': np.median(foreground_pixels) if num_fg > 0 else np.nan,
                'min': np.min(foreground_pixels) if num_fg > 0 else np.nan,
                'max': np.max(foreground_pixels) if num_fg > 0 else np.nan,
                'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan,
                'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan,

            })

        return intensities_per_channel, intensity_statistics_per_channel
self.all_labels = all_labels + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + # multiprocessing magic. + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. 
In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/source_code/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_liver_2017.py b/source_code/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_liver_2017.py new file mode 100644 index 0000000000000000000000000000000000000000..5fb40d91dd6644d76fad9e1a0efe4b610dc5659b --- /dev/null +++ b/source_code/SegMamba/light_training/preprocessing/preprocessors/default_preprocessor_liver_2017.py @@ -0,0 +1,526 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import multiprocessing
import shutil
from time import sleep
from typing import Union, Tuple
import glob
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from light_training.preprocessing.cropping.cropping import crop_to_nonzero
# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
from tqdm import tqdm
from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization
import SimpleITK as sitk
from tqdm import tqdm
from copy import deepcopy
import json

def create_image(image_arr, spacing):
    # Wrap a numpy array into a SimpleITK image that carries the given spacing.
    image = sitk.GetImageFromArray(image_arr)
    image.SetSpacing(spacing)
    return image

def get_shape_must_be_divisible_by(net_numpool_per_axis):
    # Each pooling step halves an axis, so the network input must be divisible by
    # 2**num_pools along every axis.
    return 2 ** np.array(net_numpool_per_axis)

def pad_shape(shape, must_be_divisible_by):
    """
    pads shape so that it is divisible by must_be_divisible_by
    :param shape: iterable of axis lengths
    :param must_be_divisible_by: scalar or per-axis divisors
    :return: np.ndarray of padded axis lengths (unchanged where already divisible)
    """
    if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
        must_be_divisible_by = [must_be_divisible_by] * len(shape)
    else:
        assert len(must_be_divisible_by) == len(shape)

    # round every axis UP to the next multiple of its divisor ...
    new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))]

    # ... then undo the overshoot for axes that were already divisible
    for i in range(len(shape)):
        if shape[i] % must_be_divisible_by[i] == 0:
            new_shp[i] -= must_be_divisible_by[i]
    new_shp = np.array(new_shp).astype(int)
    return new_shp

def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
    """
    this is the same as get_pool_and_conv_props_v2 from old nnunet

    :param spacing: voxel spacing per axis (array order)
    :param patch_size: initial patch size per axis
    :param min_feature_map_size: min edge length of feature maps in bottleneck
    :param max_numpool: maximum number of pooling operations per axis
    :return: (num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes,
              padded patch_size, shape_must_be_divisible_by)
    """
    # todo review this code
    dim = len(spacing)

    current_spacing = deepcopy(list(spacing))
    current_size = deepcopy(list(patch_size))

    pool_op_kernel_sizes = [[1] * len(spacing)]
    conv_kernel_sizes = []

    num_pool_per_axis = [0] * dim
    kernel_size = [1] * dim

    while True:
        # exclude axes that we cannot pool further because of min_feature_map_size constraint
        valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size]
        if len(valid_axes_for_pool) < 1:
            break

        spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]

        # find axis that are within factor of 2 within smallest spacing
        min_spacing_of_valid = min(spacings_of_axes)
        valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]

        # max_numpool constraint
        valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]

        if len(valid_axes_for_pool) == 1:
            if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
                pass
            else:
                break
        if len(valid_axes_for_pool) < 1:
            break

        # now we need to find kernel sizes
        # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
        # factor 2 of min_spacing.
Once they are 3 they remain 3 + for d in range(dim): + if kernel_size[d] == 3: + continue + else: + if spacings_of_axes[d] / min(current_spacing) < 2: + kernel_size[d] = 3 + + other_axes = [i for i in range(dim) if i not in valid_axes_for_pool] + + pool_kernel_sizes = [0] * dim + for v in valid_axes_for_pool: + pool_kernel_sizes[v] = 2 + num_pool_per_axis[v] += 1 + current_spacing[v] *= 2 + current_size[v] = np.ceil(current_size[v] / 2) + for nv in other_axes: + pool_kernel_sizes[nv] = 1 + + pool_op_kernel_sizes.append(pool_kernel_sizes) + conv_kernel_sizes.append(deepcopy(kernel_size)) + #print(conv_kernel_sizes) + + must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis) + patch_size = pad_shape(patch_size, must_be_divisible_by) + + # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here + conv_kernel_sizes.append([3]*dim) + return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by + + +class DefaultPreprocessor(object): + def __init__(self, + base_dir, + ): + """ + Everything we need is in the plans. Those are given when run() is called + """ + self.base_dir = base_dir + + def run_case_npy(self, data: np.ndarray, seg, properties: dict): + # let's not mess up the inputs! + data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + properties['bbox_used_for_cropping'] = bbox + + # crop, remember to store size before cropping! 
+ shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + ) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, seg + + # need to modify + def get_iterable_list(self): + all_cases = os.listdir(self.base_dir) + + all_cases_2 = [] + for c in all_cases: + if "volume" in c and ".nii" in c: + ## get data id + all_cases_2.append(c.split("-")[-1].split(".")[0]) + + return all_cases_2 + + def _normalize(self, data: np.ndarray, seg: np.ndarray, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + normalizer_class = CTNormalization + normalizer = normalizer_class(use_mask_for_norm=False, + intensityproperties=foreground_intensity_properties_per_channel[str(c)]) + data[c] = normalizer.run(data[c], seg[0]) + return data + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + try: + data = sitk.ReadImage(os.path.join(self.base_dir, f"volume-{case_name}.nii")) + except: + 
            print(f"data read error: {self.base_dir, case_name}")
            return None, None, None
        seg_arr = None
        # must be float32!
        data_arr = sitk.GetArrayFromImage(data).astype(np.float32)
        data_arr = data_arr[None]

        if os.path.exists(os.path.join(self.base_dir, f"segmentation-{case_name}.nii")):
            seg = sitk.ReadImage(os.path.join(self.base_dir, f"segmentation-{case_name}.nii"))
            # convert to float32 right after reading!
            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)[None,]

            intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data_arr)
        else:
            intensities_per_channel = []
            intensity_statistics_per_channel = []

        properties = {"spacing": data.GetSpacing(),
                      "raw_size": data_arr.shape[1:],
                      "name": case_name.split(".")[0],
                      "intensities_per_channel": intensities_per_channel,
                      "intensity_statistics_per_channel": intensity_statistics_per_channel}

        return data_arr, seg_arr, properties

    def run_case(self, case_name):
        """
        seg file can be none (test cases)

        order of operations is: transpose -> crop -> resample
        so when we export we need to run the following order: resample -> crop -> transpose (we could also run
        transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner)
        """
        data, seg, properties = self.read_data(case_name)
        if data is not None:
            data, seg = self.run_case_npy(data, seg, properties)
            return data, seg, properties
        else:
            return None, None, None

    def run_case_save(self, case_name):
        # Preprocess one case and persist it as <output_dir>/<case>.npz plus a .pkl with properties.
        print(case_name + "~~~~~~~~" * 10)
        data, seg, properties = self.run_case(case_name)
        if data is not None:
            # print('dtypes', data.dtype, seg.dtype)
            case_name = case_name.split(".")[0]
            np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, seg=seg)
            write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl')
            print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}")

    def experiment_plan(self, case_name):
        # Read one case and report (spacing, raw size, sampled foreground intensities)
        # for the dataset fingerprint; (None, None, None) if the case failed to read.
        data, seg, properties = self.read_data(case_name)
        if data is None:
            return None, None, None

        print(f"labels is {np.unique(seg)}")
        spacing = properties["spacing"]
        raw_size = properties["raw_size"]
        intensities_per_channel = properties["intensities_per_channel"]

        return spacing, raw_size, intensities_per_channel

    def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray:
        # Median spacing over the dataset; for strongly anisotropic datasets the worst axis
        # is relaxed to its 10th-percentile spacing (same heuristic as nnU-Net).
        # if self.overwrite_target_spacing is not None:
        #     return np.array(self.overwrite_target_spacing)

        # spacings = self.dataset_fingerprint['spacings']
        # sizes = self.dataset_fingerprint['shapes_after_crop']

        target = np.percentile(np.vstack(spacings), 50, 0)
        target_size = np.percentile(np.vstack(sizes), 50, 0)
        # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
        # the following properties:
        # - one axis which much lower resolution than the others
        # - the lowres axis has much less voxels than the others
        # - (the size in mm of the lowres axis is also reduced)
        worst_spacing_axis = np.argmax(target)
        other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
        other_spacings = [target[i] for i in other_axes]
        other_sizes = [target_size[i] for i in other_axes]

        has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings))
        has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes)

        if has_aniso_spacing and has_aniso_voxels:
            spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
            target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
            # don't let the spacing of that axis get higher than the other axes
            if target_spacing_of_that_axis < max(other_spacings):
                target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5
            target[worst_spacing_axis] = target_spacing_of_that_axis
        return target

    def compute_new_shape(self, old_shape: Union[Tuple[int,
                                                ...], List[int], np.ndarray],
                          old_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
                          new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray:
        ## spacing needs to be transposed (SimpleITK x,y,z order -> array z,y,x order)
        old_spacing = list(old_spacing)[::-1]
        new_spacing = list(new_spacing)[::-1]

        assert len(old_spacing) == len(old_shape)
        assert len(old_shape) == len(new_spacing)
        new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)])
        return new_shape

    def run_plan(self):
        """Analyse the dataset (target spacing, median shape, intensity stats, patch size)
        and write the summary to ./data_analysis_result.txt."""
        all_iter = self.get_iterable_list()
        spacings = []
        sizes = []
        intensities_per_channels = []
        print(f"analysing data......")
        for case in tqdm(all_iter, total=len(all_iter)):
            spacing, size, intensities_per_channel = self.experiment_plan(case)
            if spacing is None:
                # unreadable case; skip it
                continue

            spacings.append(spacing)
            sizes.append(size)
            intensities_per_channels.append(intensities_per_channel)

        print(f"all spacing is {spacings}")
        print(f"all sizes is {sizes}")
        # pool the per-case foreground samples into one array per channel
        foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in
                                              range(len(intensities_per_channels[0]))]

        num_channels = len(intensities_per_channels[0])

        intensity_statistics_per_channel = {}
        for i in range(num_channels):
            intensity_statistics_per_channel[i] = {
                'mean': float(np.mean(foreground_intensities_per_channel[i])),
                'median': float(np.median(foreground_intensities_per_channel[i])),
                'std': float(np.std(foreground_intensities_per_channel[i])),
                'min': float(np.min(foreground_intensities_per_channel[i])),
                'max': float(np.max(foreground_intensities_per_channel[i])),
                'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)),
                'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)),
            }

        print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}")

        fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes)
        print(f"fullres spacing is {fullres_spacing[::-1]}")

        # get transposed new median shape (what we would have after resampling)
        new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in
                      zip(spacings, sizes)]
        new_median_shape = np.median(new_shapes, 0)
        print(f"median_shape is {new_median_shape}")

        # seed the patch size with ~256^3 voxels distributed inversely to the spacing
        tmp = 1 / np.array(fullres_spacing)
        initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)]

        print(f"initial_patch_size is {initial_patch_size[::-1]}")

        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \
        shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size,
                                                             4,
                                                             999999)
        print(f"target medium patch size is {patch_size[::-1]}")

        analysis_path = "./data_analysis_result.txt"
        with open(analysis_path, "w") as f:

            f.write(json.dumps({
                "intensity_statistics_per_channel": intensity_statistics_per_channel,
                "fullres spacing": fullres_spacing.tolist(),
                "median_shape": new_median_shape.tolist(),
                "initial_patch_size": initial_patch_size,
                "target medium patch size": patch_size[::-1].tolist()
            }))
        print(f"Analysis done, save to {analysis_path}")


    def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234,
                                       num_samples: int = 10000):
        """
        images=image with multiple channels = shape (c, x, y(, z))
        """
        assert len(images.shape) == 4
        assert len(segmentation.shape) == 4

        assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-("
        assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-("

        rs = np.random.RandomState(seed)

        intensities_per_channel = []
        # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have
        intensity_statistics_per_channel = []

        # segmentation is 4d: 1,x,y,z.
We need to remove the empty dimension for the following code to work + foreground_mask = segmentation[0] > 0 + + for i in range(len(images)): + foreground_pixels = images[i][foreground_mask] + num_fg = len(foreground_pixels) + # sample with replacement so that we don't get issues with cases that have less than num_samples + # foreground_pixels. We could also just sample less in those cases but that would than cause these + # training cases to be underrepresented + intensities_per_channel.append( + rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else []) + intensity_statistics_per_channel.append({ + 'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan, + 'median': np.median(foreground_pixels) if num_fg > 0 else np.nan, + 'min': np.min(foreground_pixels) if num_fg > 0 else np.nan, + 'max': np.max(foreground_pixels) if num_fg > 0 else np.nan, + 'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan, + 'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan, + + }) + + return intensities_per_channel, intensity_statistics_per_channel + + @staticmethod + def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]], + seed: int = 1234, verbose: bool = False): + num_samples = 10000 + min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too + # sparse + rndst = np.random.RandomState(seed) + class_locs = {} + for c in classes_or_regions: + k = c if not isinstance(c, list) else tuple(c) + if isinstance(c, (tuple, list)): + ## region + mask = seg == c[0] + for cc in c[1:]: + mask = mask | (seg == cc) + all_locs = np.argwhere(mask) + else: + all_locs = np.argwhere(seg == c) + if len(all_locs) == 0: + class_locs[k] = [] + continue + target_num_samples = min(num_samples, len(all_locs)) + target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage))) + + selected = 
all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)] + class_locs[k] = selected + if verbose: + print(c, target_num_samples) + + return class_locs + + def run(self, output_spacing, + output_dir, + all_labels, + foreground_intensity_properties_per_channel=None, + num_processes=8): + self.out_spacing = output_spacing + self.all_labels = all_labels + self.output_dir = output_dir + self.foreground_intensity_properties_per_channel = foreground_intensity_properties_per_channel + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + # multiprocessing magic. + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. 
In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/source_code/SegMamba/light_training/preprocessing/preprocessors/preprocessor_brats23_global.py b/source_code/SegMamba/light_training/preprocessing/preprocessors/preprocessor_brats23_global.py new file mode 100644 index 0000000000000000000000000000000000000000..a328ff0eaddda3f179ce99b5d3a5c9b8337c101c --- /dev/null +++ b/source_code/SegMamba/light_training/preprocessing/preprocessors/preprocessor_brats23_global.py @@ -0,0 +1,542 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import multiprocessing
import shutil
from time import sleep
from typing import Union, Tuple
import glob
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from light_training.preprocessing.cropping.cropping import crop_to_nonzero
# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
from tqdm import tqdm
from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization
import SimpleITK as sitk
from tqdm import tqdm
from copy import deepcopy
import json
# from .default_preprocessor import DefaultPreprocessor

def create_image(image_arr, spacing):
    # Wrap a numpy array into a SimpleITK image that carries the given spacing.
    image = sitk.GetImageFromArray(image_arr)
    image.SetSpacing(spacing)
    return image

def get_shape_must_be_divisible_by(net_numpool_per_axis):
    # Each pooling step halves an axis, so the input must be divisible by 2**num_pools per axis.
    return 2 ** np.array(net_numpool_per_axis)

def pad_shape(shape, must_be_divisible_by):
    """
    pads shape so that it is divisible by must_be_divisible_by
    :param shape: iterable of axis lengths
    :param must_be_divisible_by: scalar or per-axis divisors
    :return: np.ndarray of padded axis lengths (unchanged where already divisible)
    """
    if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)):
        must_be_divisible_by = [must_be_divisible_by] * len(shape)
    else:
        assert len(must_be_divisible_by) == len(shape)

    # round every axis UP to the next multiple ...
    new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))]

    # ... then undo the overshoot for axes that were already divisible
    for i in range(len(shape)):
        if shape[i] % must_be_divisible_by[i] == 0:
            new_shp[i] -= must_be_divisible_by[i]
    new_shp = np.array(new_shp).astype(int)
    return new_shp

def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool):
    """
    this is the same as get_pool_and_conv_props_v2 from old nnunet

    :param spacing: voxel spacing per axis (array order)
    :param patch_size: initial patch size per axis
    :param min_feature_map_size: min edge length of feature maps in bottleneck
    :param max_numpool: maximum number of pooling operations per axis
    :return: (num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes,
              padded patch_size, shape_must_be_divisible_by)
    """
    # todo review this code
    dim = len(spacing)

    current_spacing = deepcopy(list(spacing))
    current_size = deepcopy(list(patch_size))

    pool_op_kernel_sizes = [[1] * len(spacing)]
    conv_kernel_sizes = []

    num_pool_per_axis = [0] * dim
    kernel_size = [1] * dim

    while True:
        # exclude axes that we cannot pool further because of min_feature_map_size constraint
        valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size]
        if len(valid_axes_for_pool) < 1:
            break

        spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool]

        # find axis that are within factor of 2 within smallest spacing
        min_spacing_of_valid = min(spacings_of_axes)
        valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2]

        # max_numpool constraint
        valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool]

        if len(valid_axes_for_pool) == 1:
            if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size:
                pass
            else:
                break
        if len(valid_axes_for_pool) < 1:
            break

        # now we need to find kernel sizes
        # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within
        # factor 2 of min_spacing. Once they are 3 they remain 3
        for d in range(dim):
            if kernel_size[d] == 3:
                continue
            else:
                if spacings_of_axes[d] / min(current_spacing) < 2:
                    kernel_size[d] = 3

        other_axes = [i for i in range(dim) if i not in valid_axes_for_pool]

        pool_kernel_sizes = [0] * dim
        for v in valid_axes_for_pool:
            pool_kernel_sizes[v] = 2
            num_pool_per_axis[v] += 1
            current_spacing[v] *= 2
            current_size[v] = np.ceil(current_size[v] / 2)
        for nv in other_axes:
            pool_kernel_sizes[nv] = 1

        pool_op_kernel_sizes.append(pool_kernel_sizes)
        conv_kernel_sizes.append(deepcopy(kernel_size))
        #print(conv_kernel_sizes)

    must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis)
    patch_size = pad_shape(patch_size, must_be_divisible_by)

    # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here
    conv_kernel_sizes.append([3]*dim)
    return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by



class MultiModalityPreprocessor(object):
    def __init__(self,
                 base_dir,
                 global_size=[128, 128, 128],
                 ):
        # global_size: fixed shape of the additional downsampled "global view" volumes
        self.global_size = global_size
        self.base_dir = base_dir
        # self.image_dir = image_dir
        # self.data_filenames = data_filenames
        # self.seg_filename = seg_filename
        # base_dir = "./data/raw_data/BraTS2023/"
        self.base_dir = base_dir
        # BraTS 2023 modality file names; all four are stacked as channels
        self.data_filenames = ["t2w.nii.gz",
                               "t2f.nii.gz",
                               "t1n.nii.gz",
                               "t1c.nii.gz"]
        self.seg_filename = "seg.nii.gz"

    def get_iterable_list(self):
        # One sub-directory per case in base_dir.
        all_cases = os.listdir(os.path.join(self.base_dir))
        return all_cases

    def _normalize(self, data: np.ndarray, seg: np.ndarray,
                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
        # Per-channel z-score normalization (MRI); intensity properties are shared across channels here.
        for c in range(data.shape[0]):
            normalizer_class = ZScoreNormalization
            normalizer = normalizer_class(use_mask_for_norm=False,
                                          intensityproperties=foreground_intensity_properties_per_channel)
            data[c] = normalizer.run(data[c], seg[0])
        return data

    def run_case_npy(self, data:
np.ndarray, seg, properties: dict): + # let's not mess up the inputs! + data = np.copy(data) + old_shape = data.shape + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + properties['bbox_used_for_cropping'] = bbox + + # crop, remember to store size before cropping! + shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + if seg is None : + seg_norm = np.zeros_like(data) + else : + seg_norm = seg + data = self._normalize(data, seg_norm, + self.foreground_intensity_properties_per_channel) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + + ## global view + data_global = resample_data_or_seg_to_shape(data, self.global_size, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + + # print(data.shape, data_global.shape) + # data = np.concatenate([data, data_global], axis=0) + + properties['shape_after_resample'] = new_shape + + if seg is not None : + assert len(seg.shape) == 4 + seg = resample_data_or_seg_to_shape(seg, new_shape, + original_spacing, + self.out_spacing, + is_seg=True, + order=1, + order_z=0) + + properties['class_locations'] = self._sample_foreground_locations(seg, + self.all_labels, + ) + + ## global view + seg_global = resample_data_or_seg_to_shape(seg, self.global_size, + original_spacing, + self.out_spacing, + 
order=1, + order_z=0) + + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + + print(f'old shape: {old_shape}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, ' + f'new_spacing: {self.out_spacing}, boxes is {bbox}') + + return data, data_global, seg, seg_global + + # need to modify + def read_data(self, case_name): + ## only for CT dataset + assert len(self.data_filenames) != 0 + data = [] + for dfname in self.data_filenames: + d = sitk.ReadImage(os.path.join(self.base_dir, case_name, dfname)) + spacing = d.GetSpacing() + data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,]) + + data = np.concatenate(data, axis=0) + + seg_arr = None + ## 一定要是float32!!!! + + if self.seg_filename != "": + seg = sitk.ReadImage(os.path.join(self.base_dir, case_name, self.seg_filename)) + ## 读出来以后一定转float32!!! + seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32) + seg_arr = seg_arr[None] + intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data) + + else : + intensities_per_channel = [] + intensity_statistics_per_channel = [] + + properties = {"spacing": spacing, + "raw_size": data.shape[1:], + "name": case_name.split(".")[0], + "intensities_per_channel": intensities_per_channel, + "intensity_statistics_per_channel": intensity_statistics_per_channel} + + return data, seg_arr, properties + + def run_case(self, case_name): + """ + seg file can be none (test cases) + + order of operations is: transpose -> crop -> resample + so when we export we need to run the following order: resample -> crop -> transpose (we could also run + transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner) + """ + data, seg, properties = self.read_data(case_name) + + data, data_global, seg, seg_global = self.run_case_npy(data, seg, properties) + + return data, data_global, seg, seg_global, properties + + def 
 run_case_save(self, case_name):
        # Preprocess one case and persist arrays (.npz: data, data_global, seg, seg_global)
        # plus the properties dict (.pkl).
        print(case_name + "~~~~~~~~" * 10)
        data, data_global, seg, seg_global, properties = self.run_case(case_name)
        # print('dtypes', data.dtype, seg.dtype)
        case_name = case_name.split(".")[0]
        np.savez_compressed(os.path.join(self.output_dir, case_name) + '.npz', data=data, data_global=data_global, seg=seg, seg_global=seg_global)
        write_pickle(properties, os.path.join(self.output_dir, case_name) + '.pkl')
        print(f"data is saved at: {os.path.join(self.output_dir, case_name) + '.npz'}, data shape is {data.shape}, data_global shape is {data_global.shape}")

    def experiment_plan(self, case_name):
        # Read one case and report (spacing, raw size, sampled foreground intensities)
        # for the dataset fingerprint.
        data, seg, properties = self.read_data(case_name)
        print(f"labels is {np.unique(seg)}")
        spacing = properties["spacing"]
        raw_size = properties["raw_size"]
        intensities_per_channel = properties["intensities_per_channel"]

        return spacing, raw_size, intensities_per_channel

    def determine_fullres_target_spacing(self, spacings, sizes) -> np.ndarray:
        # Median spacing over the dataset; for strongly anisotropic datasets the worst axis
        # is relaxed to its 10th-percentile spacing (same heuristic as nnU-Net).
        # if self.overwrite_target_spacing is not None:
        #     return np.array(self.overwrite_target_spacing)

        # spacings = self.dataset_fingerprint['spacings']
        # sizes = self.dataset_fingerprint['shapes_after_crop']

        target = np.percentile(np.vstack(spacings), 50, 0)
        target_size = np.percentile(np.vstack(sizes), 50, 0)
        # we need to identify datasets for which a different target spacing could be beneficial. These datasets have
        # the following properties:
        # - one axis which much lower resolution than the others
        # - the lowres axis has much less voxels than the others
        # - (the size in mm of the lowres axis is also reduced)
        worst_spacing_axis = np.argmax(target)
        other_axes = [i for i in range(len(target)) if i != worst_spacing_axis]
        other_spacings = [target[i] for i in other_axes]
        other_sizes = [target_size[i] for i in other_axes]

        has_aniso_spacing = target[worst_spacing_axis] > (3 * max(other_spacings))
        has_aniso_voxels = target_size[worst_spacing_axis] * 3 < min(other_sizes)

        if has_aniso_spacing and has_aniso_voxels:
            spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis]
            target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10)
            # don't let the spacing of that axis get higher than the other axes
            if target_spacing_of_that_axis < max(other_spacings):
                target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5
            target[worst_spacing_axis] = target_spacing_of_that_axis
        return target

    def compute_new_shape(self, old_shape: Union[Tuple[int, ...], List[int], np.ndarray],
                          old_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
                          new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray:
        ## spacing needs to be transposed (SimpleITK x,y,z order -> array z,y,x order)
        old_spacing = list(old_spacing)[::-1]
        new_spacing = list(new_spacing)[::-1]

        assert len(old_spacing) == len(old_shape)
        assert len(old_shape) == len(new_spacing)
        new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)])
        return new_shape

    def run_plan(self):
        # Analyse the dataset (target spacing, median shape, intensity stats, patch size)
        # and write the summary to ./data_analysis_result.txt.
        all_iter = self.get_iterable_list()
        spacings = []
        sizes = []
        intensities_per_channels = []
        print(f"analysing data......")
        for case in tqdm(all_iter, total=len(all_iter)):
            spacing, size, intensities_per_channel = self.experiment_plan(case)
            spacings.append(spacing)
            sizes.append(size)
            intensities_per_channels.append(intensities_per_channel)

        print(f"all spacing is {spacings}")
        print(f"all sizes is {sizes}")
        # pool the per-case foreground samples into one array per channel
        foreground_intensities_per_channel = [np.concatenate([r[i] for r in intensities_per_channels]) for i in
                                              range(len(intensities_per_channels[0]))]

        num_channels = len(intensities_per_channels[0])

        intensity_statistics_per_channel = {}
        for i in range(num_channels):
            intensity_statistics_per_channel[i] = {
                'mean': float(np.mean(foreground_intensities_per_channel[i])),
                'median': float(np.median(foreground_intensities_per_channel[i])),
                'std': float(np.std(foreground_intensities_per_channel[i])),
                'min': float(np.min(foreground_intensities_per_channel[i])),
                'max': float(np.max(foreground_intensities_per_channel[i])),
                'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)),
                'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)),
            }

        print(f"intensity_statistics_per_channel is {intensity_statistics_per_channel}")

        fullres_spacing = self.determine_fullres_target_spacing(spacings, sizes)
        print(f"fullres spacing is {fullres_spacing[::-1]}")

        # get transposed new median shape (what we would have after resampling)
        new_shapes = [self.compute_new_shape(j, i, fullres_spacing) for i, j in
                      zip(spacings, sizes)]
        new_median_shape = np.median(new_shapes, 0)
        print(f"median_shape is {new_median_shape}")

        # seed the patch size with ~256^3 voxels distributed inversely to the spacing
        tmp = 1 / np.array(fullres_spacing)
        initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)]

        print(f"initial_patch_size is {initial_patch_size[::-1]}")

        network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \
        shape_must_be_divisible_by = get_pool_and_conv_props(fullres_spacing, initial_patch_size,
                                                             4,
                                                             999999)
        print(f"target medium patch size is {patch_size[::-1]}")

        analysis_path = "./data_analysis_result.txt"
        with open(analysis_path, "w") as f:

            f.write(json.dumps({
"intensity_statistics_per_channel": intensity_statistics_per_channel, + "fullres spacing": fullres_spacing.tolist(), + "median_shape": new_median_shape.tolist(), + "initial_patch_size": initial_patch_size, + "target medium patch size": patch_size[::-1].tolist() + })) + print(f"Analysis done, save to {analysis_path}") + + + def collect_foreground_intensities(self, segmentation: np.ndarray, images: np.ndarray, seed: int = 1234, + num_samples: int = 10000): + """ + images=image with multiple channels = shape (c, x, y(, z)) + """ + assert len(images.shape) == 4 + assert len(segmentation.shape) == 4 + + assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-(" + assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-(" + + rs = np.random.RandomState(seed) + + intensities_per_channel = [] + # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have + intensity_statistics_per_channel = [] + + # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work + foreground_mask = segmentation[0] > 0 + + for i in range(len(images)): + foreground_pixels = images[i][foreground_mask] + num_fg = len(foreground_pixels) + # sample with replacement so that we don't get issues with cases that have less than num_samples + # foreground_pixels. 
We could also just sample less in those cases but that would than cause these + # training cases to be underrepresented + intensities_per_channel.append( + rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else []) + intensity_statistics_per_channel.append({ + 'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan, + 'median': np.median(foreground_pixels) if num_fg > 0 else np.nan, + 'min': np.min(foreground_pixels) if num_fg > 0 else np.nan, + 'max': np.max(foreground_pixels) if num_fg > 0 else np.nan, + 'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan, + 'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan, + + }) + + return intensities_per_channel, intensity_statistics_per_channel + + @staticmethod + def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]], + seed: int = 1234, verbose: bool = False): + num_samples = 10000 + min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too + # sparse + rndst = np.random.RandomState(seed) + class_locs = {} + for c in classes_or_regions: + k = c if not isinstance(c, list) else tuple(c) + if isinstance(c, (tuple, list)): + ## region + mask = seg == c[0] + for cc in c[1:]: + mask = mask | (seg == cc) + all_locs = np.argwhere(mask) + else: + all_locs = np.argwhere(seg == c) + if len(all_locs) == 0: + class_locs[k] = [] + continue + target_num_samples = min(num_samples, len(all_locs)) + target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage))) + + selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)] + class_locs[k] = selected + if verbose: + print(c, target_num_samples) + + return class_locs + + def run(self, + output_spacing, + output_dir, + all_labels, + num_processes=8): + self.out_spacing = output_spacing + self.all_labels = all_labels + self.output_dir = 
output_dir + self.foreground_intensity_properties_per_channel = {} + + all_iter = self.get_iterable_list() + + maybe_mkdir_p(self.output_dir) + + # test_run + for case_name in all_iter: + self.run_case_save(case_name) + break + + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for case_name in all_iter: + r.append(p.starmap_async(self.run_case_save, + ((case_name, ),))) + remaining = list(range(len(all_iter))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(all_iter)) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. 
In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) \ No newline at end of file diff --git a/source_code/SegMamba/light_training/preprocessing/preprocessors/preprocessor_mri.py b/source_code/SegMamba/light_training/preprocessing/preprocessors/preprocessor_mri.py new file mode 100644 index 0000000000000000000000000000000000000000..1dd6283d331372c574e545f371d7755eb3552d56 --- /dev/null +++ b/source_code/SegMamba/light_training/preprocessing/preprocessors/preprocessor_mri.py @@ -0,0 +1,134 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class MultiModalityPreprocessor(DefaultPreprocessor):
    """Preprocessor for multi-modality cases stored as
    ``<base_dir>/<image_dir>/<case>/<modality file>``, one file per modality,
    z-score normalized per channel (MRI-style)."""

    def __init__(self,
                 base_dir,
                 image_dir,
                 data_filenames=None,
                 seg_filename="",
                 ):
        # BUG FIX: data_filenames previously defaulted to a mutable list []
        # shared across all instances; None is the sentinel now (backward
        # compatible — callers passing a list are unaffected).
        self.base_dir = base_dir
        self.image_dir = image_dir
        self.data_filenames = [] if data_filenames is None else data_filenames
        self.seg_filename = seg_filename

    def get_iterable_list(self):
        # every entry of <base_dir>/<image_dir> is one case directory
        all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir))
        return all_cases

    def _normalize(self, data: np.ndarray, seg: np.ndarray,
                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
        """Z-score normalize every channel (no nonzero mask) in place."""
        for c in range(data.shape[0]):
            normalizer = ZScoreNormalization(
                use_mask_for_norm=False,
                intensityproperties=foreground_intensity_properties_per_channel)
            data[c] = normalizer.run(data[c], seg[0])
        return data

    # need to modify
    def read_data(self, case_name):
        """Load all modality files of one case (float32, shape (c, z, y, x))
        plus the optional segmentation (float32, (1, z, y, x)) and a
        properties dict (spacing, raw size, name, intensity samples)."""
        assert len(self.data_filenames) != 0
        data = []
        for dfname in self.data_filenames:
            d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname))
            spacing = d.GetSpacing()
            data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,])

        data = np.concatenate(data, axis=0)

        seg_arr = None
        ## must be float32!
        if self.seg_filename != "":
            seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename))
            ## convert to float32 right after reading!
            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
            seg_arr = seg_arr[None]
            intensities_per_channel, intensity_statistics_per_channel = \
                self.collect_foreground_intensities(seg_arr, data)
        else:
            intensities_per_channel = []
            intensity_statistics_per_channel = []

        properties = {"spacing": spacing,
                      "raw_size": data.shape[1:],
                      "name": case_name.split(".")[0],
                      "intensities_per_channel": intensities_per_channel,
                      "intensity_statistics_per_channel": intensity_statistics_per_channel}

        return data, seg_arr, properties

    def run(self,
            output_spacing,
            output_dir,
            all_labels,
            num_processes=8):
        """Preprocess every case with a spawn-based pool; the first case is
        processed synchronously as a smoke test (and again by the pool,
        overwriting identical output)."""
        self.out_spacing = output_spacing
        self.all_labels = all_labels
        self.output_dir = output_dir
        self.foreground_intensity_properties_per_channel = {}

        all_iter = self.get_iterable_list()

        maybe_mkdir_p(self.output_dir)

        # test_run
        for case_name in all_iter:
            self.run_case_save(case_name)
            break

        r = []
        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
            for case_name in all_iter:
                r.append(p.starmap_async(self.run_case_save,
                                         ((case_name,),)))
            remaining = list(range(len(all_iter)))
            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
            # So we need to store the original pool of workers.
            workers = [j for j in p._pool]
            with tqdm(desc=None, total=len(all_iter)) as pbar:
                while len(remaining) > 0:
                    all_alive = all([j.is_alive() for j in workers])
                    if not all_alive:
                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
                                           'OK jokes aside.\n'
                                           'One of your background processes is missing. This could be because of '
                                           'an error (look for an error message) or because it was killed '
                                           'by your OS due to running out of RAM. If you don\'t see '
                                           'an error message, out of RAM is likely the problem. In that case '
                                           'reducing the number of workers might help')
                    done = [i for i in remaining if r[i].ready()]
                    for _ in done:
                        pbar.update()
                    remaining = [i for i in remaining if i not in done]
                    sleep(0.1)
class MultiInputAndRegionPreprocessor(DefaultPreprocessor):
    """CT preprocessor for multi-input cases with region-style labels
    (e.g. [[1, 2, 3], [2, 3], 3]); normalizes per channel with
    CTNormalization using externally supplied intensity properties."""

    def __init__(self,
                 base_dir,
                 image_dir,
                 data_filenames=None,
                 seg_filename="",
                 ):
        # BUG FIX: mutable default [] for data_filenames was shared across
        # instances; use None as sentinel (backward compatible).
        self.base_dir = base_dir
        self.image_dir = image_dir
        self.data_filenames = [] if data_filenames is None else data_filenames
        self.seg_filename = seg_filename

    def get_iterable_list(self):
        # every entry of <base_dir>/<image_dir> is one case directory
        all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir))
        return all_cases

    def _normalize(self, data: np.ndarray, seg: np.ndarray,
                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
        """CT-normalize every channel in place, using the per-channel
        intensity properties keyed by the (stringified) channel index."""
        for c in range(data.shape[0]):
            normalizer = CTNormalization(
                use_mask_for_norm=False,
                intensityproperties=foreground_intensity_properties_per_channel[str(c)])
            data[c] = normalizer.run(data[c], seg[0])
        return data

    def run_case_npy(self, data: np.ndarray, seg, properties: dict):
        """Crop to nonzero -> normalize -> resample one case.

        NOTE(review): this returns (data, seg) while the inherited
        DefaultPreprocessor.run_case unpacks four values from run_case_npy —
        confirm the base class used at runtime matches this signature.
        """
        # let's not mess up the inputs!
        data = np.copy(data)
        old_shape = data.shape
        original_spacing = list(properties['spacing'])
        # SimpleITK spacing is (x, y, z) but arrays are (z, y, x): reverse it.
        original_spacing_trans = original_spacing[::-1]
        properties["original_spacing_trans"] = original_spacing_trans
        properties["target_spacing_trans"] = self.out_spacing

        shape_before_cropping = data.shape[1:]
        ## crop
        properties['shape_before_cropping'] = shape_before_cropping
        # this command will generate a segmentation. This is important because of the nonzero mask which we may need
        data, seg, bbox = crop_to_nonzero(data, seg)

        properties['bbox_used_for_cropping'] = bbox

        # unlabeled cases get an all-zero dummy seg for normalization only
        seg_norm = np.zeros_like(data) if seg is None else seg
        data = self._normalize(data, seg_norm,
                               self.foreground_intensity_properties_per_channel)

        # crop, remember to store size before cropping!
        shape_before_resample = data.shape[1:]
        properties['shape_after_cropping_before_resample'] = shape_before_resample

        new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)

        assert len(data.shape) == 4

        data = resample_data_or_seg_to_shape(data, new_shape,
                                             original_spacing,
                                             self.out_spacing,
                                             order=3,
                                             order_z=0)
        properties['shape_after_resample'] = new_shape

        if seg is not None:
            assert len(seg.shape) == 4
            seg = resample_data_or_seg_to_shape(seg, new_shape,
                                                original_spacing,
                                                self.out_spacing,
                                                is_seg=True,
                                                order=1,
                                                order_z=0)

            # BUG FIX: `True` used to be passed positionally, landing in the
            # `seed` parameter of _sample_foreground_locations (so the seed
            # silently became 1 and verbose stayed False). Pass it by keyword.
            properties['class_locations'] = self._sample_foreground_locations(seg,
                                                                              self.all_labels,
                                                                              verbose=True)

            # BUG FIX: the dtype cast ran outside this guard and crashed on
            # np.max(None) for unlabeled (test) cases.
            if np.max(seg) > 127:
                seg = seg.astype(np.int16)
            else:
                seg = seg.astype(np.int8)

        print(f'old shape: {old_shape}, shape_after_cropping_before_resample is {shape_before_resample}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, '
              f'new_spacing: {self.out_spacing}, boxes is {bbox}')

        return data, seg

    # need to modify
    def read_data(self, case_name):
        """Load all channel files of one case (float32, (c, z, y, x)), the
        optional segmentation (float32, (1, z, y, x)) and a properties dict."""
        ## only for CT dataset
        assert len(self.data_filenames) != 0
        data = []
        for dfname in self.data_filenames:
            d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname))
            spacing = d.GetSpacing()
            data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,])

        data = np.concatenate(data, axis=0)

        seg_arr = None
        ## must be float32!
        if self.seg_filename != "":
            seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename))
            ## convert to float32 right after reading!
            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
            seg_arr = seg_arr[None]
            intensities_per_channel, intensity_statistics_per_channel = \
                self.collect_foreground_intensities(seg_arr, data)
        else:
            intensities_per_channel = []
            intensity_statistics_per_channel = []

        properties = {"spacing": spacing,
                      "raw_size": data.shape[1:],
                      "name": case_name.split(".")[0],
                      "intensities_per_channel": intensities_per_channel,
                      "intensity_statistics_per_channel": intensity_statistics_per_channel}

        return data, seg_arr, properties

    def run(self,
            output_spacing,
            output_dir,
            all_labels_dict,
            num_processes=8,
            foreground_intensity_properties_per_channel=None
            ):
        """Preprocess every case in parallel.

        all_labels_dict values must be in region format,
        e.g. {"a": [0, 1, 2, 3], "b": [4, 5], ...} or plain ints.
        """
        self.out_spacing = output_spacing

        self.all_labels_dict = all_labels_dict
        self.all_labels = []
        for k, v in all_labels_dict.items():
            self.all_labels.append(v)

        self.output_dir = output_dir
        # BUG FIX: {} as a default argument is mutable and shared; None is
        # the sentinel now (backward compatible).
        self.foreground_intensity_properties_per_channel = \
            {} if foreground_intensity_properties_per_channel is None \
            else foreground_intensity_properties_per_channel

        all_iter = self.get_iterable_list()

        maybe_mkdir_p(self.output_dir)

        # test_run
        for case_name in all_iter:
            self.run_case_save(case_name)
            break

        r = []
        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
            for case_name in all_iter:
                r.append(p.starmap_async(self.run_case_save,
                                         ((case_name,),)))
            remaining = list(range(len(all_iter)))
            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
            # So we need to store the original pool of workers.
            workers = [j for j in p._pool]
            with tqdm(desc=None, total=len(all_iter)) as pbar:
                while len(remaining) > 0:
                    all_alive = all([j.is_alive() for j in workers])
                    if not all_alive:
                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
                                           'OK jokes aside.\n'
                                           'One of your background processes is missing. This could be because of '
                                           'an error (look for an error message) or because it was killed '
                                           'by your OS due to running out of RAM. If you don\'t see '
                                           'an error message, out of RAM is likely the problem. In that case '
                                           'reducing the number of workers might help')
                    done = [i for i in remaining if r[i].ready()]
                    for _ in done:
                        pbar.update()
                    remaining = [i for i in remaining if i not in done]
                    sleep(0.1)
class MultiInputAndRegionPreprocessor(DefaultPreprocessor):
    """Variant of the multi-input/region preprocessor that clips + rescales
    intensities to [0, 1] (CTNormStandard) BEFORE cropping/resampling."""

    def __init__(self,
                 base_dir,
                 image_dir,
                 data_filenames=None,
                 seg_filename="",
                 norm_clip_min=-175,
                 norm_clip_max=250,
                 ):
        # BUG FIX: mutable default [] for data_filenames was shared across
        # instances; use None as sentinel (backward compatible).
        self.base_dir = base_dir
        self.image_dir = image_dir
        self.data_filenames = [] if data_filenames is None else data_filenames
        self.seg_filename = seg_filename
        # HU clip window used by the 0-1 normalization
        self.norm_clip_min = norm_clip_min
        self.norm_clip_max = norm_clip_max

    def get_iterable_list(self):
        # every entry of <base_dir>/<image_dir> is one case directory
        all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir))
        return all_cases

    def _normalize(self, data: np.ndarray, seg: np.ndarray,
                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
        """Clip to [norm_clip_min, norm_clip_max] and rescale to [0, 1];
        applied to all channels at once (seg and the intensity properties
        are ignored by this scheme)."""
        normalizer = CTNormStandard(a_min=self.norm_clip_min,
                                    a_max=self.norm_clip_max,
                                    b_min=0.0,
                                    b_max=1.0, clip=True)
        data = normalizer(data)
        return data

    def run_case_npy(self, data: np.ndarray, seg, properties: dict):
        """Normalize FIRST, then crop to nonzero, then resample.

        Normalizing first means the nonzero crop operates on the rescaled
        volume; the before/after foreground-voxel count is printed as a
        sanity check that cropping did not cut labeled voxels.
        """
        # let's not mess up the inputs!
        data = np.copy(data)
        old_shape = data.shape
        original_spacing = list(properties['spacing'])
        # SimpleITK spacing is (x, y, z) but arrays are (z, y, x): reverse it.
        original_spacing_trans = original_spacing[::-1]
        properties["original_spacing_trans"] = original_spacing_trans
        properties["target_spacing_trans"] = self.out_spacing

        ### norm first
        need_to_check = False
        if seg is None:
            seg_norm = np.zeros_like(data)
        else:
            seg_norm = seg
            before_crop_seg_sum = np.sum(seg.astype(np.uint8))
            need_to_check = True
        data = self._normalize(data, seg_norm,
                               self.foreground_intensity_properties_per_channel)

        shape_before_cropping = data.shape[1:]
        ## crop
        properties['shape_before_cropping'] = shape_before_cropping
        # this command will generate a segmentation. This is important because of the nonzero mask which we may need
        data, seg, bbox = crop_to_nonzero(data, seg)

        if need_to_check:
            # crop_to_nonzero marks outside-mask voxels with -1; zero them
            # before summing so the counts are comparable
            seg_temp = np.copy(seg)
            seg_temp[seg_temp == -1] = 0
            after_crop_seg_sum = np.sum(seg_temp.astype(np.uint8))
            print(f"before crop seg sum is {before_crop_seg_sum}, after is {after_crop_seg_sum}")

        properties['bbox_used_for_cropping'] = bbox

        # crop, remember to store size before cropping!
        shape_before_resample = data.shape[1:]
        properties['shape_after_cropping_before_resample'] = shape_before_resample

        new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing)

        assert len(data.shape) == 4

        data = resample_data_or_seg_to_shape(data, new_shape,
                                             original_spacing,
                                             self.out_spacing,
                                             order=3,
                                             order_z=0)
        properties['shape_after_resample'] = new_shape

        if seg is not None:
            assert len(seg.shape) == 4
            seg = resample_data_or_seg_to_shape(seg, new_shape,
                                                original_spacing,
                                                self.out_spacing,
                                                is_seg=True,
                                                order=1,
                                                order_z=0)

            # BUG FIX: `True` used to be passed positionally into the `seed`
            # slot of _sample_foreground_locations; pass verbose by keyword.
            properties['class_locations'] = self._sample_foreground_locations(seg,
                                                                              self.all_labels,
                                                                              verbose=True)

            # BUG FIX: the dtype cast ran outside this guard and crashed on
            # np.max(None) for unlabeled (test) cases.
            if np.max(seg) > 127:
                seg = seg.astype(np.int16)
            else:
                seg = seg.astype(np.int8)

        print(f'old shape: {old_shape}, shape_after_cropping_before_resample is {shape_before_resample}, new_shape after crop and resample: {new_shape}, old_spacing: {original_spacing}, '
              f'new_spacing: {self.out_spacing}, boxes is {bbox}')

        return data, seg

    # need to modify
    def read_data(self, case_name):
        """Load all channel files of one case (float32, (c, z, y, x)), the
        optional segmentation (float32, (1, z, y, x)) and a properties dict."""
        ## only for CT dataset
        assert len(self.data_filenames) != 0
        data = []
        for dfname in self.data_filenames:
            d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname))
            spacing = d.GetSpacing()
            data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,])

        data = np.concatenate(data, axis=0)

        seg_arr = None
        ## must be float32!
        if self.seg_filename != "":
            seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename))
            ## convert to float32 right after reading!
            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
            seg_arr = seg_arr[None]
            intensities_per_channel, intensity_statistics_per_channel = \
                self.collect_foreground_intensities(seg_arr, data)
        else:
            intensities_per_channel = []
            intensity_statistics_per_channel = []

        properties = {"spacing": spacing,
                      "raw_size": data.shape[1:],
                      "name": case_name.split(".")[0],
                      "intensities_per_channel": intensities_per_channel,
                      "intensity_statistics_per_channel": intensity_statistics_per_channel}

        return data, seg_arr, properties

    def run(self,
            output_spacing,
            output_dir,
            all_labels_dict,
            num_processes=8):
        """Preprocess every case in parallel.

        all_labels_dict values must be in region format,
        e.g. {"a": [0, 1, 2, 3], "b": [4, 5], ...} or plain ints.
        """
        self.out_spacing = output_spacing

        self.all_labels_dict = all_labels_dict
        self.all_labels = []
        for k, v in all_labels_dict.items():
            self.all_labels.append(v)

        self.output_dir = output_dir
        self.foreground_intensity_properties_per_channel = {}

        all_iter = self.get_iterable_list()

        maybe_mkdir_p(self.output_dir)

        # test_run
        for case_name in all_iter:
            self.run_case_save(case_name)
            break

        r = []
        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
            for case_name in all_iter:
                r.append(p.starmap_async(self.run_case_save,
                                         ((case_name,),)))
            remaining = list(range(len(all_iter)))
            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
            # So we need to store the original pool of workers.
            workers = [j for j in p._pool]
            with tqdm(desc=None, total=len(all_iter)) as pbar:
                while len(remaining) > 0:
                    all_alive = all([j.is_alive() for j in workers])
                    if not all_alive:
                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
                                           'OK jokes aside.\n'
                                           'One of your background processes is missing. This could be because of '
                                           'an error (look for an error message) or because it was killed '
                                           'by your OS due to running out of RAM. If you don\'t see '
                                           'an error message, out of RAM is likely the problem. In that case '
                                           'reducing the number of workers might help')
                    done = [i for i in remaining if r[i].ready()]
                    for _ in done:
                        pbar.update()
                    remaining = [i for i in remaining if i not in done]
                    sleep(0.1)
import multiprocessing
import shutil
from time import sleep
from typing import Union, Tuple
import glob
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from light_training.preprocessing.cropping.cropping import crop_to_nonzero
# from .default_resampling import resample_data_or_seg_to_spacing, resample_img
from light_training.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape
from tqdm import tqdm
from light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, ZScoreNormalization
import SimpleITK as sitk
from copy import deepcopy
import json
from .default_preprocessor import DefaultPreprocessor


class Preprocessor(DefaultPreprocessor):
    """Multi-input MRI preprocessor: per-channel z-score normalization,
    no resampling and no cropping (arrays keep the shape they are loaded with).

    Each case is a sub-directory of ``<base_dir>/<image_dir>`` containing one
    file per modality (``data_filenames``) and optionally a segmentation
    (``seg_filename``).
    """

    def __init__(self,
                 base_dir,
                 image_dir,
                 data_filenames=None,
                 seg_filename="",
                 ):
        # NOTE(review): the base-class __init__ is deliberately not called
        # (original behaviour); run() fills in the remaining state.
        self.base_dir = base_dir
        self.image_dir = image_dir
        # FIX: the original used a mutable default argument (data_filenames=[]),
        # which is shared across instances. Use None and build a fresh list.
        self.data_filenames = [] if data_filenames is None else data_filenames
        self.seg_filename = seg_filename

    def get_iterable_list(self):
        """Return the case folder names found under <base_dir>/<image_dir>."""
        all_cases = os.listdir(os.path.join(self.base_dir, self.image_dir))
        return all_cases

    def _normalize(self, data: np.ndarray, seg: np.ndarray,
                   foreground_intensity_properties_per_channel: dict) -> np.ndarray:
        """Z-score normalize each channel independently.

        seg[0] is forwarded to the normalizer but unused because
        use_mask_for_norm=False.
        """
        for c in range(data.shape[0]):
            normalizer_class = ZScoreNormalization
            normalizer = normalizer_class(use_mask_for_norm=False,
                                          intensityproperties=foreground_intensity_properties_per_channel)
            data[c] = normalizer.run(data[c], seg[0])
        return data

    def run_case_npy(self, data: np.ndarray, seg, properties: dict):
        """Normalize one case; no resampling / cropping in this variant.

        Returns (normalized data, seg cast to the smallest sufficient int type).
        """
        # let's not mess up the inputs!
        data = np.copy(data)
        old_shape = data.shape
        original_spacing = list(properties['spacing'])

        if seg is None:
            # dummy mask so _normalize always receives a seg argument
            seg_norm = np.zeros_like(data)
        else:
            seg_norm = seg
        data = self._normalize(data, seg_norm,
                               self.foreground_intensity_properties_per_channel)

        assert len(data.shape) == 4

        if seg is not None:
            assert len(seg.shape) == 4
            properties['class_locations'] = self._sample_foreground_locations(seg,
                                                                              self.all_labels,
                                                                              True)
            # store labels in the smallest integer dtype that can hold them
            if np.max(seg) > 127:
                seg = seg.astype(np.int16)
            else:
                seg = seg.astype(np.int8)

        print(f'old shape: {old_shape}')

        return data, seg

    # need to modify per dataset
    def read_data(self, case_name):
        ## only for CT dataset
        assert len(self.data_filenames) != 0
        data = []
        for dfname in self.data_filenames:
            d = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, dfname))
            # spacing of the last modality read; assumed identical across
            # modalities of one case — TODO confirm
            spacing = d.GetSpacing()
            data.append(sitk.GetArrayFromImage(d).astype(np.float32)[None,])

        data = np.concatenate(data, axis=0)

        seg_arr = None
        ## images must be float32!

        if self.seg_filename != "":
            seg = sitk.ReadImage(os.path.join(self.base_dir, self.image_dir, case_name, self.seg_filename))
            ## convert to float32 immediately after reading!
            seg_arr = sitk.GetArrayFromImage(seg).astype(np.float32)
            seg_arr = seg_arr[None]
            intensities_per_channel, intensity_statistics_per_channel = self.collect_foreground_intensities(seg_arr, data)
        else:
            intensities_per_channel = []
            intensity_statistics_per_channel = []

        properties = {"spacing": spacing,
                      "raw_size": data.shape[1:],
                      "name": case_name.split(".")[0],
                      "intensities_per_channel": intensities_per_channel,
                      "intensity_statistics_per_channel": intensity_statistics_per_channel}

        return data, seg_arr, properties

    def run(self,
            output_dir,
            all_labels_dict,
            num_processes=8):
        """Preprocess every case: one synchronous test run first (so errors
        surface immediately), then the full list in a spawn-based pool with a
        worker-liveness watchdog."""
        self.all_labels_dict = all_labels_dict
        self.all_labels = []

        for k, v in all_labels_dict.items():
            self.all_labels.append(v)

        self.output_dir = output_dir
        self.foreground_intensity_properties_per_channel = {}

        all_iter = self.get_iterable_list()

        maybe_mkdir_p(self.output_dir)

        # test_run: first case in-process (it is processed again by the pool)
        for case_name in all_iter:
            self.run_case_save(case_name)
            break

        r = []
        with multiprocessing.get_context("spawn").Pool(num_processes) as p:
            for case_name in all_iter:
                r.append(p.starmap_async(self.run_case_save,
                                         ((case_name, ),)))
            remaining = list(range(len(all_iter)))
            # p is pretty nifti. If we kill workers they just respawn but don't do any work.
            # So we need to store the original pool of workers.
            workers = [j for j in p._pool]
            with tqdm(desc=None, total=len(all_iter)) as pbar:
                while len(remaining) > 0:
                    all_alive = all([j.is_alive() for j in workers])
                    if not all_alive:
                        raise RuntimeError('Some background worker is 6 feet under. Yuck. \n'
                                           'OK jokes aside.\n'
                                           'One of your background processes is missing. This could be because of '
                                           'an error (look for an error message) or because it was killed '
                                           'by your OS due to running out of RAM. If you don\'t see '
                                           'an error message, out of RAM is likely the problem. In that case '
                                           'reducing the number of workers might help')
                    done = [i for i in remaining if r[i].ready()]
                    for _ in done:
                        pbar.update()
                    remaining = [i for i in remaining if i not in done]
                    sleep(0.1)
from collections import OrderedDict
from typing import Union, Tuple, List

import numpy as np
import pandas as pd
import torch
from batchgenerators.augmentations.utils import resize_segmentation
from scipy.ndimage.interpolation import map_coordinates
from skimage.transform import resize

# spacing-anisotropy ratio above which the out-of-plane axis is resampled separately
ANISO_THRESHOLD = 3


def get_do_separate_z(spacing: Union[Tuple[float, ...], List[float], np.ndarray], anisotropy_threshold=ANISO_THRESHOLD):
    """True when max/min spacing exceeds the anisotropy threshold."""
    do_separate_z = (np.max(spacing) / np.min(spacing)) > anisotropy_threshold
    return do_separate_z


def get_lowres_axis(new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]):
    """Return the index/indices of the axis with the largest spacing."""
    axis = np.where(max(new_spacing) / np.array(new_spacing) == 1)[0]  # find which axis is anisotropic
    return axis


def compute_new_shape(old_shape: Union[Tuple[int, ...], List[int], np.ndarray],
                      old_spacing: Union[Tuple[float, ...], List[float], np.ndarray],
                      new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray:
    """Shape that preserves physical extent when going old_spacing -> new_spacing."""
    assert len(old_spacing) == len(old_shape)
    assert len(old_shape) == len(new_spacing)

new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)]) + return new_shape + + +def resample_data_or_seg_to_spacing(data: np.ndarray, + current_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + is_seg: bool = False, + order: int = 3, order_z: int = 0, + force_separate_z: Union[bool, None] = False, + separate_z_anisotropy_threshold: float = ANISO_THRESHOLD): + if force_separate_z is not None: + do_separate_z = force_separate_z + if force_separate_z: + axis = get_lowres_axis(current_spacing) + else: + axis = None + else: + if get_do_separate_z(current_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(current_spacing) + elif get_do_separate_z(new_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(new_spacing) + else: + do_separate_z = False + axis = None + + if axis is not None: + if len(axis) == 3: + # every axis has the same spacing, this should never happen, why is this code here? + do_separate_z = False + elif len(axis) == 2: + # this happens for spacings like (0.24, 1.25, 1.25) for example. 
In that case we do not want to resample + # separately in the out of plane axis + do_separate_z = False + else: + pass + + if data is not None: + assert len(data.shape) == 4, "data must be c x y z" + + shape = np.array(data[0].shape) + new_shape = compute_new_shape(shape[1:], current_spacing, new_spacing) + + data_reshaped = resample_data_or_seg(data, new_shape, is_seg, axis, order, do_separate_z, order_z=order_z) + return data_reshaped + + +def resample_data_or_seg_to_shape(data: Union[torch.Tensor, np.ndarray], + new_shape: Union[Tuple[int, ...], List[int], np.ndarray], + current_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + is_seg: bool = False, + order: int = 3, order_z: int = 0, + force_separate_z: Union[bool, None] = False, + separate_z_anisotropy_threshold: float = ANISO_THRESHOLD): + """ + needed for segmentation export. Stupid, I know. Maybe we can fix that with Leos new resampling functions + """ + if isinstance(data, torch.Tensor): + data = data.cpu().numpy() + if force_separate_z is not None: + do_separate_z = force_separate_z + if force_separate_z: + axis = get_lowres_axis(current_spacing) + else: + axis = None + else: + if get_do_separate_z(current_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(current_spacing) + elif get_do_separate_z(new_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(new_spacing) + else: + do_separate_z = False + axis = None + + if axis is not None: + if len(axis) == 3: + # every axis has the same spacing, this should never happen, why is this code here? + do_separate_z = False + elif len(axis) == 2: + # this happens for spacings like (0.24, 1.25, 1.25) for example. 
In that case we do not want to resample + # separately in the out of plane axis + do_separate_z = False + else: + pass + + if data is not None: + assert len(data.shape) == 4, "data must be c x y z" + + data_reshaped = resample_data_or_seg(data, new_shape, is_seg, axis, order, do_separate_z, order_z=order_z) + return data_reshaped + + +def resample_data_or_seg(data: np.ndarray, new_shape: Union[Tuple[float, ...], List[float], np.ndarray], + is_seg: bool = False, axis: Union[None, int] = None, order: int = 3, + do_separate_z: bool = False, order_z: int = 0): + """ + separate_z=True will resample with order 0 along z + :param data: + :param new_shape: + :param is_seg: + :param axis: + :param order: + :param do_separate_z: + :param order_z: only applies if do_separate_z is True + :return: + """ + assert len(data.shape) == 4, "data must be (c, x, y, z)" + assert len(new_shape) == len(data.shape) - 1 + + if is_seg: + resize_fn = resize_segmentation + kwargs = OrderedDict() + else: + resize_fn = resize + kwargs = {'mode': 'edge', 'anti_aliasing': False} + dtype_data = data.dtype + shape = np.array(data[0].shape) + new_shape = np.array(new_shape) + if np.any(shape != new_shape): + data = data.astype(float) + if do_separate_z: + # print("separate z, order in z is", order_z, "order inplane is", order) + assert len(axis) == 1, "only one anisotropic axis supported" + axis = axis[0] + if axis == 0: + new_shape_2d = new_shape[1:] + elif axis == 1: + new_shape_2d = new_shape[[0, 2]] + else: + new_shape_2d = new_shape[:-1] + + reshaped_final_data = [] + for c in range(data.shape[0]): + reshaped_data = [] + for slice_id in range(shape[axis]): + if axis == 0: + reshaped_data.append(resize_fn(data[c, slice_id], new_shape_2d, order, **kwargs)) + elif axis == 1: + reshaped_data.append(resize_fn(data[c, :, slice_id], new_shape_2d, order, **kwargs)) + else: + reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order, **kwargs)) + reshaped_data = np.stack(reshaped_data, 
axis) + if shape[axis] != new_shape[axis]: + + # The following few lines are blatantly copied and modified from sklearn's resize() + rows, cols, dim = new_shape[0], new_shape[1], new_shape[2] + orig_rows, orig_cols, orig_dim = reshaped_data.shape + + row_scale = float(orig_rows) / rows + col_scale = float(orig_cols) / cols + dim_scale = float(orig_dim) / dim + + map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim] + map_rows = row_scale * (map_rows + 0.5) - 0.5 + map_cols = col_scale * (map_cols + 0.5) - 0.5 + map_dims = dim_scale * (map_dims + 0.5) - 0.5 + + coord_map = np.array([map_rows, map_cols, map_dims]) + if not is_seg or order_z == 0: + reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z, + mode='nearest')[None]) + else: + unique_labels = np.sort(pd.unique(reshaped_data.ravel())) # np.unique(reshaped_data) + reshaped = np.zeros(new_shape, dtype=dtype_data) + + for i, cl in enumerate(unique_labels): + reshaped_multihot = np.round( + map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z, + mode='nearest')) + reshaped[reshaped_multihot > 0.5] = cl + reshaped_final_data.append(reshaped[None]) + else: + reshaped_final_data.append(reshaped_data[None]) + reshaped_final_data = np.vstack(reshaped_final_data) + else: + # print("no separate z, order", order) + reshaped = [] + for c in range(data.shape[0]): + reshaped.append(resize_fn(data[c], new_shape, order, **kwargs)[None]) + reshaped_final_data = np.vstack(reshaped) + return reshaped_final_data.astype(dtype_data) + else: + # print("no resampling necessary") + return data diff --git a/source_code/SegMamba/light_training/process_framework/norm.py b/source_code/SegMamba/light_training/process_framework/norm.py new file mode 100644 index 0000000000000000000000000000000000000000..d294fe5c9d81054b8dae218aa61927e276c673a6 --- /dev/null +++ b/source_code/SegMamba/light_training/process_framework/norm.py @@ -0,0 +1,16 @@ + + + +from 
light_training.preprocessing.normalization.default_normalization_schemes import CTNormalization, CTNormStandard + + +## need to custom, this example is about Segrap competition. +def norm_func(data, seg=None, **kwargs): + normalizer = CTNormStandard(a_min=-175, + a_max=250, + b_min=0.0, + b_max=1.0, clip=True) + + data = normalizer(data) + + return data diff --git a/source_code/SegMamba/light_training/process_framework/process.py b/source_code/SegMamba/light_training/process_framework/process.py new file mode 100644 index 0000000000000000000000000000000000000000..12e81de1130168b02ca31e90d5dce463249d851c --- /dev/null +++ b/source_code/SegMamba/light_training/process_framework/process.py @@ -0,0 +1,235 @@ +import torch +import numpy as np +import SimpleITK +import os +import sys +from monai.inferers import SlidingWindowInferer + +class Customalgorithm(): # SegmentationAlgorithm is not inherited in this class anymore + def __init__(self): + """ + Do not modify the `self.input_dir` and `self.output_dir`. 
+ (Check https://grand-challenge.org/algorithms/interfaces/) + """ + self.input_dir = "/input/" + self.output_dir = "/output/images/head-neck-segmentation/" + + # self.out_spacing = [3.0, 0.54199219, 0.54199219] + self.out_spacing = [3.0, 1.0, 1.0] + + # self.device = "cpu" + + self.device = torch.device("cuda") + + self.patch_size = [64, 128, 128] + + def filte_state_dict(self, sd): + if "module" in sd : + sd = sd["module"] + new_sd = {} + for k, v in sd.items(): + k = str(k) + new_k = k[7:] if k.startswith("module") else k + new_sd[new_k] = v + del sd + return new_sd + + def convert_mha_to_nii(self, mha_input_path, nii_out_path): # nnUNet specific + img = SimpleITK.ReadImage(mha_input_path) + print(img.GetSize()) + SimpleITK.WriteImage(img, nii_out_path, True) + + def convert_nii_to_mha(self, nii_input_path, mha_out_path): # nnUNet specific + img = SimpleITK.ReadImage(nii_input_path) + SimpleITK.WriteImage(img, mha_out_path, True) + + def read(self, mha_path): + img = SimpleITK.ReadImage(mha_path) + spacing = img.GetSpacing() + raw_size = SimpleITK.GetArrayFromImage(img).shape + img = SimpleITK.GetArrayFromImage(img)[None,].astype(np.float32) + properties = { + "spacing": spacing, + "raw_size": raw_size + } + return img, properties + + def check_gpu(self): + """ + Check if GPU is available. Note that the Grand Challenge only has one available GPU. + """ + print('Checking GPU availability') + is_available = torch.cuda.is_available() + print('Available: ' + str(is_available)) + print(f'Device count: {torch.cuda.device_count()}') + if is_available: + print(f'Current device: {torch.cuda.current_device()}') + print('Device name: ' + torch.cuda.get_device_name(0)) + print('Device memory: ' + + str(torch.cuda.get_device_properties(0).total_memory)) + + def load_inputs(self): # use two modalities input data + """ + Read input data (two modalities) from `self.input_dir` (/input/). + Please do not modify the path for CT and contrast-CT images. 
+ """ + ct_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-ct/'))[0] + ctc_mha = os.listdir(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/'))[0] + uuid = os.path.splitext(ct_mha)[0] + + img, properties = self.read(os.path.join(self.input_dir, 'images/head-neck-ct/', ct_mha)) + img_c, _ = self.read(os.path.join(self.input_dir, 'images/head-neck-contrast-enhanced-ct/', ctc_mha)) + + data = np.concatenate([img, img_c], axis=0) + del img + del img_c + # data is (2, d, w, h) + return uuid, data, properties + + def crop(self, data, properties): + from light_training.preprocessing.cropping.cropping import crop_to_nonzero + + seg = np.zeros_like(data) + + shape_before_cropping = data.shape[1:] + ## crop + properties['shape_before_cropping'] = shape_before_cropping + # this command will generate a segmentation. This is important because of the nonzero mask which we may need + data, seg, bbox = crop_to_nonzero(data, seg) + del seg + + properties['bbox_used_for_cropping'] = bbox + + return data, properties + + def resample(self, data, properties): + from light_training.preprocessing.resampling.default_resampling import compute_new_shape, resample_data_or_seg_to_shape + # crop, remember to store size before cropping! 
+ shape_before_resample = data.shape[1:] + properties['shape_after_cropping_before_resample'] = shape_before_resample + new_shape = compute_new_shape(data.shape[1:], original_spacing_trans, self.out_spacing) + + assert len(data.shape) == 4 + + data = resample_data_or_seg_to_shape(data, new_shape, + original_spacing, + self.out_spacing, + order=3, + order_z=0) + properties['shape_after_resample'] = new_shape + + return data, properties + + def preprocess(self, data, properties, crop_first=True): + from light_training.process_framework.norm import norm_func + + original_spacing = list(properties['spacing']) + ## 由于old spacing读出来是反的,因此这里需要转置一下 + original_spacing_trans = original_spacing[::-1] + properties["original_spacing_trans"] = original_spacing_trans + properties["target_spacing_trans"] = self.out_spacing + + if crop_first: + data, properties = self.crop(data, properties) + + data = norm_func(data) + + if not crop_first: + data, properties = self.crop(data, properties) + + + data, properties = self.resample(data, properties) + + data = data[None,] + + data = torch.from_numpy(data) + + return data, properties + + def predict(self, data, properties, uid): + torch.cuda.empty_cache() + + from models.nnunet3d import NNUNetWrapper + model = NNUNetWrapper(norm="ins") + + new_sd = self.filte_state_dict(torch.load("./weight/unet3d_0_addaug_bs2_ep1000_ds_gpu4/final_model_0.8552.pt", map_location="cpu")) + model.load_state_dict(new_sd) + + del new_sd + torch.cuda.empty_cache() + # data = data.to(self.deivce) + # model.to(self.device) + model.eval() + window_infer = SlidingWindowInferer(roi_size=self.patch_size, + sw_batch_size=1, + overlap=0.5, + progress=True, + mode="gaussian") + + predictor = Predictor(window_infer, mirror_axes=None) + try: + ensemble_output = predictor.maybe_mirror_and_predict(data, model, self.device) + + except RuntimeError: + ensemble_output = predictor.maybe_mirror_and_predict(data, model, torch.device("cpu")) + torch.cuda.empty_cache() + del model 
+ del data + + print(f"prediction done") + ensemble_output = predictor.predict_raw_probability(ensemble_output, properties) + print(f"non linear....") + # ensemble_output = predictor.apply_nonlinear(ensemble_output, nonlinear_type="sigmoid") + ensemble_output = ensemble_output > 0 + + print(f"restore crop...") + ensemble_output = predictor.predict_noncrop_probability(ensemble_output, properties) + + raw_spacing = properties["spacing"] + case_name = uid + print(f"uuid is {uid}") + os.makedirs(os.path.dirname(self.output_dir), exist_ok=True) + + print(f"saving....") + predictor.save_to_nii_multi_organ(ensemble_output, + raw_spacing, + save_dir=self.output_dir, + case_name=case_name, + postprocess=False) + + # """ + # load the model and checkpoint, and generate the predictions. You can replace this part with your own model. + # """ + # predict_from_folder_segrap2023(self.weight, self.nii_path, self.result_path, 0, 0, 1) + # print("nnUNet segmentation done!") + # if not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): + # print('waiting for nnUNet segmentation to be created') + + # while not os.path.exists(os.path.join(self.result_path, self.nii_seg_file)): + # import time + # print('.', end='') + # time.sleep(5) + # # print(cproc) # since nnUNet_predict call is split into prediction and postprocess, a pre-mature exit code is received but segmentation file not yet written. This hack ensures that all spawned subprocesses are finished before being printed. 
+ # print('Prediction finished !') + + def post_process(self): + self.check_gpu() + print('Start processing') + uuid, data, properties = self.load_inputs() + + data, properties = self.preprocess(data, properties) + print(properties) + print('Start prediction') + self.predict(data, properties, uuid) + # print('Start output writing') + # self.write_outputs(uuid) + + def process(self): + """ + Read inputs from /input, process with your algorithm and write to /output + """ + self.post_process() + + +if __name__ == "__main__": + Customalgorithm().process() diff --git a/source_code/SegMamba/light_training/sampler.py b/source_code/SegMamba/light_training/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..b46a80bddbbadf4532b206fd06e62e608909765e --- /dev/null +++ b/source_code/SegMamba/light_training/sampler.py @@ -0,0 +1,48 @@ +import torch +import math +import numpy as np + +class SequentialDistributedSampler(torch.utils.data.sampler.Sampler): + """ + Distributed Sampler that subsamples indicies sequentially, + making it easier to collate all results at the end. + Even though we only use this sampler for eval and predict (no training), + which means that the model params won't have to be synced (i.e. will not hang + for synchronization even if varied number of forward passes), we still add extra + samples to the sampler to make it evenly divisible (like in `DistributedSampler`) + to make it easy to `gather` or `reduce` resulting tensors at the end of the loop. 
+ """ + + def __init__(self, dataset, batch_size, rank=None, num_replicas=None): + if num_replicas is None: + if not torch.distributed.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = torch.distributed.get_world_size() + if rank is None: + if not torch.distributed.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = torch.distributed.get_rank() + self.dataset = dataset + self.num_replicas = num_replicas + self.rank = rank + self.batch_size = batch_size + self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.batch_size / self.num_replicas)) * self.batch_size + self.total_size = self.num_samples * self.num_replicas + + def __iter__(self): + indices = list(range(len(self.dataset))) + # add extra samples to make it evenly divisible + indices += [indices[-1]] * (self.total_size - len(indices)) + # subsample + indices = indices[self.rank * self.num_samples : (self.rank + 1) * self.num_samples] + return iter(indices) + + def __len__(self): + return self.num_samples + + +def distributed_concat(tensor, num_total_examples): + output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())] + torch.distributed.all_gather(output_tensors, tensor) + concat = torch.cat(output_tensors, dim=0) + return concat[:num_total_examples] \ No newline at end of file diff --git a/source_code/SegMamba/light_training/trainer.py b/source_code/SegMamba/light_training/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..01be9834d717632ab90d0155cc6b2720951aae21 --- /dev/null +++ b/source_code/SegMamba/light_training/trainer.py @@ -0,0 +1,517 @@ +import os +from tqdm import tqdm +import numpy as np +import torch +import torch.nn.parallel +import torch.utils.data.distributed +from light_training.utils.lr_scheduler import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup +from monai.data 
import DataLoader +import argparse +from .launch import launch_dist +from monai.utils import set_determinism +from .sampler import SequentialDistributedSampler, distributed_concat +from torch.utils.tensorboard import SummaryWriter +from torch.cuda.amp import GradScaler +from torch import autocast, nn +import time + +class dummy_context(object): + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + +class Trainer: + def __init__(self, env_type, + max_epochs, + batch_size, + device="cpu", + val_every=1, + num_gpus=1, + logdir="./logs/", + master_ip='localhost', + master_port=17750, + training_script="train.py", + train_process=12, + ): + assert env_type in ["pytorch", "ddp", "DDP"], f"not support this env_type: {env_type}" + self.env_type = env_type + self.val_every = val_every + self.max_epochs = max_epochs + self.ddp = False + self.num_gpus = num_gpus + self.device = device + self.local_rank = 0 + self.batch_size = batch_size + self.not_call_launch = True + self.logdir = logdir + self.scheduler = None + self.model = None + self.auto_optim = True + self.warmup = 0.0 + self.scheduler_type = None + + self.optimizer = None + self.patch_size = None + + self.num_step_per_epoch = 250 // self.num_gpus + self.val_number = 100 // self.num_gpus + self.augmentation = True + self.train_process = train_process + self.print_time = False + + if self.device == "cpu": + self.grad_scaler = None + else : + self.grad_scaler = GradScaler() + + torch.backends.cudnn.enabled = True + + gpu_count = torch.cuda.device_count() + if num_gpus > gpu_count: + print("gpu数量不符") + os._exit(0) + + if env_type == "DDP" or env_type == "ddp": + self.ddp = True + self.get_dist_args() + if not self.not_call_launch: + launch_dist(env_type=env_type, + num_nodes=1, + gpus_per_node=num_gpus, + master_addr=master_ip, + master_port=master_port, + training_script=training_script, + ) + os._exit(1) + self.initialize_distributed() + + def initialize_distributed(self): + 
"""Initialize torch.distributed.""" + if self.env_type == 'pytorch': + self.print_rank_0('No need to initialize') + return + if self.env_type == 'DDP' or "deepspeed" in self.env_type: + + if self.local_rank is not None: + device = self.local_rank + torch.cuda.set_device(device) + # Call the init process + init_method = 'env://' + torch.distributed.init_process_group( + backend='nccl', + init_method=init_method) + self.world_size = torch.distributed.get_world_size() + + print(f"world size is {self.world_size}") + + def get_dataloader(self, dataset, shuffle=False, batch_size=1, train=True): + if dataset is None : + return None + if self.env_type == 'pytorch': + return DataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle, + num_workers=12) + else : + if not train: + sampler = SequentialDistributedSampler(dataset, batch_size=batch_size) + + else : + sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True) + return DataLoader(dataset, + batch_size=batch_size, + num_workers=12, + sampler=sampler, + drop_last=True) + + def get_multi_processor_loader(self, train_ds, val_ds): + from .augment.multi_processor import LimitedLenWrapper + from .augment.train_augment import get_train_transforms, get_validation_transforms, get_train_transforms_noaug, get_train_transforms_nomirror, get_train_transforms_onlymirror, get_train_transforms_onlyspatial + from light_training.dataloading.base_data_loader import DataLoaderMultiProcess + + assert self.patch_size != None + if self.augmentation: + if self.augmentation == "nomirror": + print(f"use augmentation: no mirror") + tr_transforms = get_train_transforms_nomirror(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + elif self.augmentation == "onlymirror": + print(f"use augmentation: only mirror") + tr_transforms = get_train_transforms_onlymirror(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + elif self.augmentation == "onlyspatial": + print(f"use augmentation: only spatial") + tr_transforms = 
get_train_transforms_onlyspatial(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + + else : + tr_transforms = get_train_transforms(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + else: + tr_transforms = get_train_transforms_noaug(patch_size=self.patch_size, mirror_axes=[0, 1, 2]) + + val_transforms = get_validation_transforms() + + # train_loader = DataLoader(train_ds, num_workers=1, drop_last=True, shuffle=True, batch_size=self.batch_size) + train_loader = DataLoaderMultiProcess(train_ds, + batch_size=self.batch_size, + patch_size=self.patch_size, + print_time=self.print_time) + + data_generator = LimitedLenWrapper(self.num_step_per_epoch, data_loader=train_loader, + transform=tr_transforms, + num_processes=self.train_process, num_cached=6, seeds=None, + pin_memory=True, wait_time=0.02) + if val_ds is None: + val_data_generator = None + else : + val_loader = DataLoaderMultiProcess(val_ds, + batch_size=1, + patch_size=self.patch_size, + oversample_foreground_percent=1.0) + + val_data_generator = LimitedLenWrapper(self.val_number, data_loader=val_loader, transform=val_transforms, + num_processes=6, num_cached=3, seeds=None, + pin_memory=True, wait_time=0.02) + return data_generator, val_data_generator + + + def get_dist_args(self): + parser = argparse.ArgumentParser() + # parser.add_argument('--local_rank', type=int, default = 0, help="local_rank") + parser.add_argument('--not_call_launch', + action='store_true', + help="not call launch!") + # allow training scripts to define their own CLI flags without breaking DDP init + ds_args, _ = parser.parse_known_args() + self.local_rank = int(os.environ.get("LOCAL_RANK", 0)) + + print(f"self.local_rank is {self.local_rank}") + self.not_call_launch = ds_args.not_call_launch + self.device = self.local_rank + + def to_device(self, batch): + if isinstance(batch, dict): + for k, v in batch.items(): + if isinstance(batch[k], np.ndarray): + batch[k] = torch.from_numpy(batch[k]) + + if (isinstance(batch[k], torch.Tensor) or 
isinstance(batch[k], torch.FloatTensor)): + batch[k] = batch[k].to(self.device).contiguous() + + elif isinstance(batch, list) : + batch = [torch.from_numpy(x) for x in batch if isinstance(x, np.ndarray)] + batch = [x.to(self.device).contiguous() for x in batch if (isinstance(x, torch.Tensor) or isinstance(x, torch.FloatTensor))] + + elif isinstance(batch, np.ndarray): + batch = torch.from_numpy(batch) + batch = batch.to(self.device).contiguous() + + else : + print("not support data type") + exit(0) + + return batch + + def validation_single_gpu(self, val_dataset,): + if self.ddp: + print(f"single gpu model not support the ddp") + exit(0) + val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, pin_memory=True) + if self.model is not None: + self.model.to(self.device) + self.model.eval() + val_outputs = [] + + for idx, batch in tqdm(enumerate(val_loader), total=len(val_loader)): + batch = self.before_data_to_device(batch) + batch = self.to_device(batch) + + with torch.no_grad(): + val_out = self.validation_step(batch) + assert val_out is not None + + return_list = False + val_outputs.append(val_out) + if isinstance(val_out, list) or isinstance(val_out, tuple): + return_list = True + + val_outputs = torch.tensor(val_outputs) + if not return_list: + # 说明只有一个变量 + length = 0 + v_sum = 0.0 + for v in val_outputs: + if not torch.isnan(v): + v_sum += v + length += 1 + + if length == 0: + v_sum = 0 + else : + v_sum = v_sum / length + else : + num_val = len(val_outputs[0]) + length = [0.0 for i in range(num_val)] + v_sum = [0.0 for i in range(num_val)] + + for v in val_outputs: + for i in range(num_val): + if not torch.isnan(v[i]): + v_sum[i] += v[i] + length[i] += 1 + + for i in range(num_val): + if length[i] == 0: + v_sum[i] = 0 + else : + v_sum[i] = v_sum[i] / length[i] + return v_sum, val_outputs + + def validate(self): + val_outputs = [] + if self.global_step % self.val_every == 0 \ + and self.val_loader is not None : + if self.model is not None: + 
    def train(self,
              train_dataset,
              val_dataset=None,
              ):
        """Main entry point: build loaders, set up the env (pytorch / DDP), the
        LR scheduler, then run max_epochs of train_epoch() + periodic validate().

        :param train_dataset: dataset passed to get_multi_processor_loader
        :param val_dataset: optional validation dataset (None disables validation loading)
        """
        print(f"augmentation: {self.augmentation}")
        assert self.patch_size is not None, "please define the patch_size"

        # same seed offset per rank keeps initialization reproducible across processes
        set_determinism(42 + self.local_rank)
        if self.model is not None:
            print(f"check model parameter: {next(self.model.parameters()).sum()}, keep model parameters on different processes consistent")
            para = sum([np.prod(list(p.size())) for p in self.model.parameters()])
            if self.local_rank == 0:
                print(f"model parameters is {para / 1000 / 1000}M ")

        self.global_step = 0
        if self.env_type == "pytorch":
            # single-process: move model to the configured device, log from this process
            if self.model is not None:
                self.model.to(self.device)
            os.makedirs(self.logdir, exist_ok=True)
            self.writer = SummaryWriter(self.logdir)

        elif self.ddp:
            # only rank 0 writes tensorboard logs
            if self.local_rank == 0:
                os.makedirs(self.logdir, exist_ok=True)
                self.writer = SummaryWriter(self.logdir)
            else:
                self.writer = None
            if self.model is not None:
                self.model.cuda(self.local_rank)
                self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
                self.model = torch.nn.parallel.DistributedDataParallel(self.model,
                                                                       device_ids=[self.local_rank],
                                                                       output_device=self.local_rank,
                                                                       find_unused_parameters=True)
        else:
            print("not support env_type")
            exit(0)

        self.train_loader, self.val_loader = self.get_multi_processor_loader(train_dataset, val_dataset)

        self.max_steps = self.max_epochs * len(self.train_loader)

        print(f"step number is {self.max_steps}")

        # scheduler selection; warmup defaults to 10% of total steps when unset
        if self.scheduler_type == "cosine_with_warmup":
            if self.warmup == 0.0:
                self.warmup = 0.1
            assert self.warmup < 1 and self.warmup > 0
            warmup_steps = self.max_steps * self.warmup
            self.scheduler = get_cosine_schedule_with_warmup(self.optimizer,
                                                             num_warmup_steps=warmup_steps,
                                                             num_training_steps=self.max_steps)
            print(f"warmup steps is {warmup_steps}")
        elif self.scheduler_type == "constant_with_warmup":
            if self.warmup == 0.0:
                self.warmup = 0.1
            assert self.warmup < 1 and self.warmup > 0
            warmup_steps = self.max_steps * self.warmup
            self.scheduler = get_constant_schedule_with_warmup(self.optimizer,
                                                               num_warmup_steps=warmup_steps,
                                                               )
            print(f"warmup steps is {warmup_steps}")

        elif self.scheduler_type == "poly_with_warmup":
            if self.warmup == 0.0:
                self.warmup = 0.1
            assert self.warmup < 1 and self.warmup > 0
            warmup_steps = self.max_steps * self.warmup
            self.scheduler = get_polynomial_decay_schedule_with_warmup(self.optimizer,
                                                                       num_warmup_steps=warmup_steps,
                                                                       num_training_steps=self.max_steps
                                                                       )
            print(f"warmup steps is {warmup_steps}")

        elif self.scheduler_type == "poly":
            from light_training.utils.lr_scheduler import PolyLRScheduler
            lr = self.optimizer.state_dict()['param_groups'][0]['lr']
            print(f"initial lr is {lr}")
            self.scheduler = PolyLRScheduler(self.optimizer, initial_lr=lr, max_steps=self.max_steps)
            print(f"scheduler_type is poly, warmup steps is {0}")

        for epoch in range(0, self.max_epochs):
            self.epoch = epoch
            if self.ddp:
                torch.distributed.barrier()
            self.train_epoch(
                epoch,
            )
            if (self.epoch + 1) % self.val_every == 0:
                self.validate()

            # restore train mode after validate() switched to eval
            # NOTE(review): placement inside the epoch loop reconstructed from a
            # flattened diff — confirm against the original file's indentation.
            if self.model is not None:
                self.model.train()

    def before_data_to_device(self, batch_data):
        """Hook for subclasses: transform a batch before it is moved to the device."""
        return batch_data

    def train_epoch(self,
                    epoch,
                    ):
        """Run num_step_per_epoch optimization steps with optional AMP + grad scaling.

        When auto_optim is False, subclasses own backward/step inside training_step.
        """
        if self.model is not None:
            self.model.train()
        with tqdm(total=self.num_step_per_epoch, disable=(self.local_rank != 0)) as t:
            for i in range(self.num_step_per_epoch):
                self.global_step += 1
                t.set_description('Epoch %i' % epoch)

                if self.print_time:
                    s = time.time()
                batch = next(self.train_loader)
                if self.print_time:
                    e = time.time()
                    print(f"get batch time is {e - s}")

                batch = self.before_data_to_device(batch)

                batch = self.to_device(batch)

                # cheaper than optimizer.zero_grad(): just drop the grad tensors
                if self.model is not None:
                    for param in self.model.parameters(): param.grad = None

                if not self.auto_optim:
                    loss = self.training_step(batch)
                else:
                    # autocast only when on CUDA (DDP always is); dummy_context on CPU
                    with autocast("cuda", enabled=True) if (self.ddp or 'cuda' in self.device) else dummy_context():
                        if self.print_time:
                            s = time.time()
                        loss = self.training_step(batch)
                        if self.print_time:
                            e = time.time()
                            print(f"training step time is {e - s}")

                    if self.print_time:
                        s = time.time()

                    if self.grad_scaler is not None:
                        # AMP path: scale, unscale before clipping so the norm is real
                        self.grad_scaler.scale(loss).backward()
                        self.grad_scaler.unscale_(self.optimizer)
                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 12)
                        self.grad_scaler.step(self.optimizer)
                        self.grad_scaler.update()
                    else:
                        loss.backward()
                        torch.nn.utils.clip_grad_norm_(self.model.parameters(), 12)
                        self.optimizer.step()

                    if self.print_time:
                        e = time.time()
                        print(f"backward time is {e - s}")

                # NOTE(review): scheduler/lr placement reconstructed from a flattened
                # diff; at loop level, lr is defined for set_postfix below — confirm.
                if self.scheduler is not None:
                    self.scheduler.step()
                lr = self.optimizer.state_dict()['param_groups'][0]['lr']
                self.log("lr", lr, self.global_step)

                t.set_postfix(loss=loss.item(), lr=lr)

                t.update(1)

    def training_step(self, batch):
        """Compute and return the loss for one batch. Must be overridden."""
        raise NotImplementedError

    def validation_step(self, batch):
        """Compute and return metric(s) for one validation batch. Must be overridden."""
        raise NotImplementedError

    def validation_end(self, mean_val_outputs, val_outputs):
        """Hook called after validation.

        NOTE(review): the validate() path above this block calls
        self.validation_end(val_outputs_merge) with a SINGLE positional argument,
        which would raise TypeError against this two-argument signature unless a
        subclass overrides it — confirm and consider defaulting val_outputs=None.
        """
        pass

    def log(self, k, v, step):
        # only rank 0 owns a SummaryWriter
        if self.local_rank == 0:
            self.writer.add_scalar(k, scalar_value=v, global_step=step)

    def log_dict(self, dict_, step):
        # scalar-per-key convenience wrapper around log()
        if self.local_rank == 0:
            for k, v in dict_.items():
                self.writer.add_scalar(k, scalar_value=v, global_step=step)

    def load_state_dict(self, weight_path, strict=True):
        """Load a checkpoint from weight_path, unwrapping a {"module": ...} container
        and stripping the DDP "module." prefix from parameter names."""
        sd = torch.load(weight_path, map_location="cpu")
        if "module" in sd:
            sd = sd["module"]
        new_sd = {}
        for k, v in sd.items():
            k = str(k)
            # "module." is 7 characters, hence k[7:]
            new_k = k[7:] if k.startswith("module") else k
            new_sd[new_k] = v

        self.model.load_state_dict(new_sd, strict=strict)

        print(f"model parameters are loaded successed.")
import os
from tqdm import tqdm
import numpy as np
import torch
import torch.nn.parallel
import torch.utils.data.distributed
from light_training.utils.lr_scheduler import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup
from monai.data import DataLoader
import argparse
from .launch import launch_dist
from monai.utils import set_determinism
from .sampler import SequentialDistributedSampler, distributed_concat
from torch.utils.tensorboard import SummaryWriter


class dummy_context(object):
    """No-op context manager, stands in for autocast when running on CPU."""

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass


class Trainer:
    """fp32 training loop (no AMP / GradScaler) for single-process ("pytorch")
    or multi-GPU DDP training."""

    def __init__(self, env_type,
                 max_epochs,
                 batch_size,
                 device="cpu",
                 val_every=1,
                 num_gpus=1,
                 logdir="./logs/",
                 master_ip='localhost',
                 master_port=17750,
                 training_script="train.py",
                 ):
        assert env_type in ["pytorch", "ddp", "DDP"], f"not support this env_type: {env_type}"
        self.env_type = env_type
        self.val_every = val_every
        self.max_epochs = max_epochs
        self.ddp = False
        self.num_gpus = num_gpus
        self.device = device
        self.local_rank = 0
        self.batch_size = batch_size
        self.not_call_launch = True
        self.logdir = logdir
        self.scheduler = None
        self.model = None
        self.auto_optim = True
        self.warmup = 0.0
        self.scheduler_type = None

        self.optimizer = None
        self.patch_size = None

        # NOTE(review): self.all_labels is read by get_multi_processor_loader()
        # and asserted in train(), but never initialized here — subclasses must
        # set it before calling train().
        self.num_step_per_epoch = 250 // self.num_gpus
        self.val_number = 100 // self.num_gpus
        self.augmentation = True

        torch.backends.cudnn.enabled = True

        gpu_count = torch.cuda.device_count()
        if num_gpus > gpu_count:
            # message means "GPU count mismatch"
            print("gpu数量不符")
            os._exit(0)

        if env_type == "DDP" or env_type == "ddp":
            self.ddp = True
            self.get_dist_args()
            # the initial (launcher) process spawns the workers and exits
            if not self.not_call_launch:
                launch_dist(env_type=env_type,
                            num_nodes=1,
                            gpus_per_node=num_gpus,
                            master_addr=master_ip,
                            master_port=master_port,
                            training_script=training_script,
                            )
                os._exit(1)
            self.initialize_distributed()

    def initialize_distributed(self):
        """Initialize torch.distributed (NCCL backend, env:// rendezvous)."""
        if self.env_type == 'pytorch':
            # NOTE(review): print_rank_0 is not defined in this file — this branch
            # would raise AttributeError if reached; confirm intent.
            self.print_rank_0('No need to initialize')
            return
        if self.env_type == 'DDP' or "deepspeed" in self.env_type:

            if self.local_rank is not None:
                device = self.local_rank
                torch.cuda.set_device(device)
            # Call the init process
            init_method = 'env://'
            torch.distributed.init_process_group(
                backend='nccl',
                init_method=init_method)
            self.world_size = torch.distributed.get_world_size()

            print(f"world size is {self.world_size}")

    def get_dataloader(self, dataset, shuffle=False, batch_size=1, train=True):
        """Plain (monai) DataLoader; under DDP uses a distributed sampler
        (sequential for validation so results can be concatenated in order)."""
        if dataset is None:
            return None
        if self.env_type == 'pytorch':
            return DataLoader(dataset,
                              batch_size=batch_size,
                              shuffle=shuffle,
                              num_workers=12)
        else:
            if not train:
                sampler = SequentialDistributedSampler(dataset, batch_size=batch_size)
            else:
                sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=True)
            return DataLoader(dataset,
                              batch_size=batch_size,
                              num_workers=12,
                              sampler=sampler,
                              drop_last=True)

    def get_multi_processor_loader(self, train_ds, val_ds):
        """Build nnU-Net-style multi-process patch loaders for train and (optional) val."""
        from .augment.multi_processor import LimitedLenWrapper
        from .augment.train_augment import get_train_transforms, get_validation_transforms, get_train_transforms_noaug
        from light_training.dataloading.base_data_loader import DataLoaderMultiProcess

        assert self.patch_size != None
        if self.augmentation:
            tr_transforms = get_train_transforms(patch_size=self.patch_size, mirror_axes=[0, 1, 2])
        else:
            tr_transforms = get_train_transforms_noaug(patch_size=self.patch_size, mirror_axes=[0, 1, 2])

        val_transforms = get_validation_transforms()

        train_loader = DataLoaderMultiProcess(train_ds, annotated_classes_key=self.all_labels,
                                              batch_size=self.batch_size,
                                              patch_size=self.patch_size)

        data_generator = LimitedLenWrapper(self.num_step_per_epoch, data_loader=train_loader,
                                           transform=tr_transforms,
                                           num_processes=12, num_cached=6, seeds=None,
                                           pin_memory=True, wait_time=0.02)
        if val_ds is None:
            val_data_generator = None
        else:
            # always sample foreground patches for validation
            val_loader = DataLoaderMultiProcess(val_ds, annotated_classes_key=self.all_labels,
                                                batch_size=1,
                                                patch_size=self.patch_size,
                                                oversample_foreground_percent=1.0)

            val_data_generator = LimitedLenWrapper(self.val_number, data_loader=val_loader, transform=val_transforms,
                                                   num_processes=6, num_cached=3, seeds=None,
                                                   pin_memory=True, wait_time=0.02)
        return data_generator, val_data_generator

    def get_dist_args(self):
        """Read rank info: LOCAL_RANK from the env, --not_call_launch from the CLI."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--not_call_launch',
                            action='store_true',
                            help="not call launch!")
        # allow training scripts to define their own CLI flags without breaking DDP init
        ds_args, _ = parser.parse_known_args()
        self.local_rank = int(os.environ.get("LOCAL_RANK", 0))

        print(f"self.local_rank is {self.local_rank}")
        self.not_call_launch = ds_args.not_call_launch
        # device becomes the integer CUDA index of this rank
        self.device = self.local_rank

    def to_device(self, batch):
        """Move a batch (dict / list / ndarray) to self.device, converting ndarrays
        to tensors first.

        NOTE(review): the list branch filters twice — elements that are already
        torch.Tensors are dropped by the first comprehension; confirm callers
        never pass mixed lists.
        """
        if isinstance(batch, dict):
            for k, v in batch.items():
                if isinstance(batch[k], np.ndarray):
                    batch[k] = torch.from_numpy(batch[k])

                if (isinstance(batch[k], torch.Tensor) or isinstance(batch[k], torch.FloatTensor)):
                    batch[k] = batch[k].to(self.device).contiguous()

        elif isinstance(batch, list):
            batch = [torch.from_numpy(x) for x in batch if isinstance(x, np.ndarray)]
            batch = [x.to(self.device).contiguous() for x in batch if (isinstance(x, torch.Tensor) or isinstance(x, torch.FloatTensor))]

        elif isinstance(batch, np.ndarray):
            batch = torch.from_numpy(batch)
            batch = batch.to(self.device).contiguous()

        else:
            print("not support data type")
            exit(0)

        return batch

    def validation_single_gpu(self, val_dataset,):
        """Run validation on one GPU and return (per-metric NaN-skipping means, raw outputs)."""
        if self.ddp:
            print(f"single gpu model not support the ddp")
            exit(0)
        val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)
        self.model.to(self.device)
        val_outputs = []
        self.model.eval()
        for idx, batch in tqdm(enumerate(val_loader), total=len(val_loader)):
            batch = self.to_device(batch)

            with torch.no_grad():
                val_out = self.validation_step(batch)
                assert val_out is not None

            return_list = False
            val_outputs.append(val_out)
            if isinstance(val_out, list) or isinstance(val_out, tuple):
                return_list = True

        val_outputs = torch.tensor(val_outputs)
        if not return_list:
            # only a single metric was returned per step
            length = 0
            v_sum = 0.0
            for v in val_outputs:
                if not torch.isnan(v):
                    v_sum += v
                    length += 1

            if length == 0:
                v_sum = 0
            else:
                v_sum = v_sum / length
        else:
            # one mean per metric, skipping NaNs independently
            num_val = len(val_outputs[0])
            length = [0.0 for i in range(num_val)]
            v_sum = [0.0 for i in range(num_val)]

            for v in val_outputs:
                for i in range(num_val):
                    if not torch.isnan(v[i]):
                        v_sum[i] += v[i]
                        length[i] += 1

            for i in range(num_val):
                if length[i] == 0:
                    v_sum[i] = 0
                else:
                    v_sum[i] = v_sum[i] / length[i]
        return v_sum, val_outputs

    def validate(self):
        """Validate during training; under DDP, gathers outputs from all ranks and
        reports NaN-skipping means to validation_end() on rank 0 only."""
        val_outputs = []
        if self.global_step % self.val_every == 0 \
                and self.val_loader is not None:
            if self.model is not None:
                self.model.eval()
            if self.ddp:
                torch.distributed.barrier()
            for i in tqdm(range(len(self.val_loader)), total=len(self.val_loader)):
                batch = next(self.val_loader)

                batch = self.to_device(batch)

                with torch.no_grad():
                    val_out = self.validation_step(batch)
                    assert val_out is not None

                return_list = False
                val_outputs.append(val_out)
                if isinstance(val_out, list) or isinstance(val_out, tuple):
                    return_list = True

            # aggregate the results first (original comment: 先汇总结果)
            if self.ddp:
                val_outputs = torch.tensor(val_outputs).cuda(self.local_rank)
                torch.distributed.barrier()
                val_outputs = distributed_concat(val_outputs, num_total_examples=len(self.val_loader) * self.num_gpus)
            else:
                val_outputs = torch.tensor(val_outputs)

            if self.local_rank == 0:
                if not return_list:
                    # only a single metric was returned per step
                    length = 0
                    v_sum = 0.0
                    for v in val_outputs:
                        if not torch.isnan(v):
                            v_sum += v
                            length += 1

                    if length == 0:
                        v_sum = 0
                    else:
                        v_sum = v_sum / length
                    self.validation_end(mean_val_outputs=v_sum, val_outputs=val_outputs)

                else:
                    num_val = len(val_outputs[0])
                    length = [0.0 for i in range(num_val)]
                    v_sum = [0.0 for i in range(num_val)]

                    for v in val_outputs:
                        for i in range(num_val):
                            if not torch.isnan(v[i]):
                                v_sum[i] += v[i]
                                length[i] += 1

                    for i in range(num_val):
                        if length[i] == 0:
                            v_sum[i] = 0
                        else:
                            v_sum[i] = v_sum[i] / length[i]

                    self.validation_end(mean_val_outputs=v_sum, val_outputs=val_outputs)

    def train(self,
              train_dataset,
              val_dataset=None,
              ):
        """Main entry point: build loaders, set up env + LR scheduler, run epochs."""
        print(f"augmentation: {self.augmentation}")
        assert self.patch_size is not None, "please define the patch_size"
        assert self.all_labels is not None, "please define all the labels, for example, [1, 2, 3, ]"

        set_determinism(42 + self.local_rank)
        if self.model is not None:
            print(f"check model parameter: {next(self.model.parameters()).sum()}, keep model parameters on different processes consistent")
            para = sum([np.prod(list(p.size())) for p in self.model.parameters()])
            if self.local_rank == 0:
                # * 4: bytes per fp32 parameter
                print(f"model parameters is {para * 4 / 1000 / 1000}M ")

        self.global_step = 0
        if self.env_type == "pytorch":
            if self.model is not None:
                self.model.to(self.device)
            os.makedirs(self.logdir, exist_ok=True)
            self.writer = SummaryWriter(self.logdir)

        elif self.ddp:
            if self.local_rank == 0:
                os.makedirs(self.logdir, exist_ok=True)
                self.writer = SummaryWriter(self.logdir)
            else:
                self.writer = None
            if self.model is not None:
                self.model.cuda(self.local_rank)
                # self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
                self.model = torch.nn.parallel.DistributedDataParallel(self.model,
                                                                       device_ids=[self.local_rank],
                                                                       output_device=self.local_rank,
                                                                       find_unused_parameters=True)
        else:
            print("not support env_type")
            exit(0)

        self.train_loader, self.val_loader = self.get_multi_processor_loader(train_dataset, val_dataset)

        self.max_steps = self.max_epochs * len(self.train_loader)

        print(f"step number is {self.max_steps}")

        # scheduler selection; warmup defaults to 10% of total steps when unset
        if self.scheduler_type == "cosine_with_warmup":
            if self.warmup == 0.0:
                self.warmup = 0.1
            assert self.warmup < 1 and self.warmup > 0
            warmup_steps = self.max_steps * self.warmup
            self.scheduler = get_cosine_schedule_with_warmup(self.optimizer,
                                                             num_warmup_steps=warmup_steps,
                                                             num_training_steps=self.max_steps)
            print(f"warmup steps is {warmup_steps}")
        elif self.scheduler_type == "constant_with_warmup":
            if self.warmup == 0.0:
                self.warmup = 0.1
            assert self.warmup < 1 and self.warmup > 0
            warmup_steps = self.max_steps * self.warmup
            self.scheduler = get_constant_schedule_with_warmup(self.optimizer,
                                                               num_warmup_steps=warmup_steps,
                                                               )
            print(f"warmup steps is {warmup_steps}")

        elif self.scheduler_type == "poly_with_warmup":
            if self.warmup == 0.0:
                self.warmup = 0.1
            assert self.warmup < 1 and self.warmup > 0
            warmup_steps = self.max_steps * self.warmup
            self.scheduler = get_polynomial_decay_schedule_with_warmup(self.optimizer,
                                                                       num_warmup_steps=warmup_steps,
                                                                       num_training_steps=self.max_steps
                                                                       )
            print(f"warmup steps is {warmup_steps}")

        elif self.scheduler_type == "poly":
            from light_training.utils.lr_scheduler import PolyLRScheduler
            lr = self.optimizer.state_dict()['param_groups'][0]['lr']
            print(f"initial lr is {lr}")
            self.scheduler = PolyLRScheduler(self.optimizer, initial_lr=lr, max_steps=self.max_steps)
            print(f"scheduler_type is poly, warmup steps is {0}")

        for epoch in range(0, self.max_epochs):
            self.epoch = epoch
            if self.ddp:
                torch.distributed.barrier()
            self.train_epoch(
                epoch,
            )
            if (self.epoch + 1) % self.val_every == 0:
                self.validate()

            # restore train mode after validate() switched to eval
            # NOTE(review): placement inside the epoch loop reconstructed from a
            # flattened diff — confirm against the original file's indentation.
            if self.model is not None:
                self.model.train()

    def train_epoch(self,
                    epoch,
                    ):
        """Run num_step_per_epoch fp32 optimization steps; subclasses own the
        optimizer when auto_optim is False."""
        if self.model is not None:
            self.model.train()
        with tqdm(total=self.num_step_per_epoch, disable=(self.local_rank != 0)) as t:
            for i in range(self.num_step_per_epoch):
                self.global_step += 1
                t.set_description('Epoch %i' % epoch)

                batch = next(self.train_loader)

                batch = self.to_device(batch)

                # cheaper than optimizer.zero_grad(): just drop the grad tensors
                if self.model is not None:
                    for param in self.model.parameters(): param.grad = None

                if not self.auto_optim:
                    loss = self.training_step(batch)
                else:
                    loss = self.training_step(batch)
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), 12)
                    self.optimizer.step()

                # NOTE(review): scheduler/lr placement reconstructed from a flattened
                # diff; at loop level, lr is defined for set_postfix below — confirm.
                if self.scheduler is not None:
                    self.scheduler.step()
                lr = self.optimizer.state_dict()['param_groups'][0]['lr']
                self.log("lr", lr, self.global_step)

                t.set_postfix(loss=loss.item(), lr=lr)

                t.update(1)

    def training_step(self, batch):
        """Compute and return the loss for one batch. Must be overridden."""
        raise NotImplementedError

    def validation_step(self, batch):
        """Compute and return metric(s) for one validation batch. Must be overridden."""
        raise NotImplementedError

    def validation_end(self, mean_val_outputs, val_outputs):
        """Hook: receives per-metric means and the raw gathered outputs."""
        pass

    def log(self, k, v, step):
        # only rank 0 owns a SummaryWriter
        if self.local_rank == 0:
            self.writer.add_scalar(k, scalar_value=v, global_step=step)

    def log_dict(self, dict_, step):
        if self.local_rank == 0:
            for k, v in dict_.items():
                self.writer.add_scalar(k, scalar_value=v, global_step=step)

    def load_state_dict(self, weight_path, strict=True):
        """Load a checkpoint from weight_path, unwrapping a {"module": ...} container
        and stripping the DDP "module." prefix from parameter names."""
        sd = torch.load(weight_path, map_location="cpu")
        if "module" in sd:
            sd = sd["module"]
        new_sd = {}
        for k, v in sd.items():
            k = str(k)
            # "module." is 7 characters, hence k[7:]
            new_k = k[7:] if k.startswith("module") else k
            new_sd[new_k] = v

        self.model.load_state_dict(new_sd, strict=strict)

        print(f"model parameters are loaded successed.")
def collate_outputs(outputs: List[dict]):
    """
    Collate default train_step / validation_step outputs into a single dict.

    Every dict in *outputs* is expected to have the same set of keys. Scalar
    values are collected into a list, np.ndarrays are stacked along a new
    leading axis, and lists are concatenated.

    :param outputs: list of per-step output dicts (may be empty)
    :return: one dict with the same keys; {} when *outputs* is empty
    :raises ValueError: for value types other than scalar / ndarray / list
    """
    if not outputs:
        # guard: the original indexed outputs[0] and raised IndexError on []
        return {}
    collated = {}
    for k in outputs[0].keys():
        if np.isscalar(outputs[0][k]):
            collated[k] = [o[k] for o in outputs]
        elif isinstance(outputs[0][k], np.ndarray):
            # o[k][None] adds the new leading axis that vstack stacks along
            collated[k] = np.vstack([o[k][None] for o in outputs])
        elif isinstance(outputs[0][k], list):
            collated[k] = [item for o in outputs for item in o[k]]
        else:
            raise ValueError(f'Cannot collate input of type {type(outputs[0][k])}. '
                             f'Modify collate_outputs to add this functionality')
    return collated
def find_candidate_datasets(dataset_id: int):
    """Return the unique DatasetXXX folder names matching *dataset_id* across the
    nnU-Net preprocessed / raw / results directories (whichever exist)."""
    startswith = "Dataset%03.0d" % dataset_id
    if nnUNet_preprocessed is not None and isdir(nnUNet_preprocessed):
        candidates_preprocessed = subdirs(nnUNet_preprocessed, prefix=startswith, join=False)
    else:
        candidates_preprocessed = []

    if nnUNet_raw is not None and isdir(nnUNet_raw):
        candidates_raw = subdirs(nnUNet_raw, prefix=startswith, join=False)
    else:
        candidates_raw = []

    candidates_trained_models = []
    if nnUNet_results is not None and isdir(nnUNet_results):
        candidates_trained_models += subdirs(nnUNet_results, prefix=startswith, join=False)

    all_candidates = candidates_preprocessed + candidates_raw + candidates_trained_models
    unique_candidates = np.unique(all_candidates)
    return unique_candidates


def convert_id_to_dataset_name(dataset_id: int):
    """Resolve a numeric dataset id to its unique DatasetXXX_name folder name.

    :raises RuntimeError: if zero or more than one dataset matches the id.
    """
    unique_candidates = find_candidate_datasets(dataset_id)
    if len(unique_candidates) > 1:
        # fixed: the original message was missing the closing parenthesis
        raise RuntimeError("More than one dataset name found for dataset id %d. Please correct that. (I looked in the "
                           "following folders:\n%s\n%s\n%s)" % (dataset_id, nnUNet_raw, nnUNet_preprocessed, nnUNet_results))
    if len(unique_candidates) == 0:
        raise RuntimeError(f"Could not find a dataset with the ID {dataset_id}. Make sure the requested dataset ID "
                           f"exists and that nnU-Net knows where raw and preprocessed data are located "
                           f"(see Documentation - Installation). Here are your currently defined folders:\n"
                           f"nnUNet_preprocessed={os.environ.get('nnUNet_preprocessed', 'None')}\n"
                           f"nnUNet_results={os.environ.get('nnUNet_results', 'None')}\n"
                           f"nnUNet_raw={os.environ.get('nnUNet_raw', 'None')}\n"
                           f"If something is not right, adapt your environment variables.")
    return unique_candidates[0]


def convert_dataset_name_to_id(dataset_name: str):
    """Extract the 3-digit numeric id from a 'DatasetXXX_...' name."""
    assert dataset_name.startswith("Dataset")
    # characters 7..9 hold the zero-padded 3-digit id
    dataset_id = int(dataset_name[7:10])
    return dataset_id


def maybe_convert_to_dataset_name(dataset_name_or_id: Union[int, str]) -> str:
    """Return a DatasetXXX_name for either a dataset name (passed through) or a
    numeric id (looked up on disk).

    :raises ValueError: if a string input is neither a Dataset name nor an integer.
    """
    if isinstance(dataset_name_or_id, str) and dataset_name_or_id.startswith("Dataset"):
        return dataset_name_or_id
    if isinstance(dataset_name_or_id, str):
        try:
            dataset_name_or_id = int(dataset_name_or_id)
        except ValueError:
            # fixed typo: "tast" -> "task"
            raise ValueError("dataset_name_or_id was a string and did not start with 'Dataset' so we tried to "
                             "convert it to a dataset ID (int). That failed, however. Please give an integer number "
                             "('1', '2', etc) or a correct task name. Your input: %s" % dataset_name_or_id)
    return convert_id_to_dataset_name(dataset_name_or_id)
def print_if_rank0(*args):
    """Print *args* only on the rank-0 process."""
    if distributed.get_rank() == 0:
        print(*args)


class AllGatherGrad(torch.autograd.Function):
    """Differentiable all_gather (adapted from PyTorch Lightning).

    Forward gathers the tensor from every rank and stacks the results along a
    new leading dimension; backward all-reduces (SUM) the incoming gradients
    and hands each rank back its own slice.
    """

    @staticmethod
    def forward(
        ctx: Any,
        tensor: torch.Tensor,
        group: Optional["torch.distributed.ProcessGroup"] = None,
    ) -> torch.Tensor:
        # remember the group so backward reduces over the same set of ranks
        ctx.group = group

        world_size = torch.distributed.get_world_size()
        buckets = [torch.zeros_like(tensor) for _ in range(world_size)]
        torch.distributed.all_gather(buckets, tensor, group=group)

        return torch.stack(buckets, dim=0)

    @staticmethod
    def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]:
        summed = torch.cat(grad_output)

        torch.distributed.all_reduce(summed, op=torch.distributed.ReduceOp.SUM,
                                     async_op=False, group=ctx.group)

        # slice out this rank's gradient; None matches the `group` input
        return summed[torch.distributed.get_rank()], None
def get_allowed_n_proc_DA():
    """
    Number of data-augmentation worker processes to use PER GPU.

    Resolution order:
      1. the ``nnUNet_n_proc_DA`` environment variable, if set (overrides everything);
      2. a lookup table keyed on the hostname (specific to the DKFZ cluster —
         modify to suit your infrastructure);
      3. a default of 12.
    The result is always capped at the machine's CPU count.

    :return: int number of DA processes, >= value capped by os.cpu_count()
    """
    if 'nnUNet_n_proc_DA' in os.environ.keys():
        use_this = int(os.environ['nnUNet_n_proc_DA'])
    else:
        # fixed: subprocess.getoutput expects a command *string* (it runs through
        # the shell); the original passed a one-element list, which only worked
        # by accident on POSIX.
        hostname = subprocess.getoutput('hostname')
        if hostname in ['Fabian', ]:
            use_this = 12
        elif hostname in ['hdf19-gpu16', 'hdf19-gpu17', 'hdf19-gpu18', 'hdf19-gpu19', 'e230-AMDworkstation']:
            use_this = 16
        elif hostname.startswith('e230-dgx1'):
            use_this = 10
        elif hostname.startswith('hdf18-gpu') or hostname.startswith('e132-comp'):
            use_this = 16
        elif hostname.startswith('e230-dgx2'):
            use_this = 6
        elif hostname.startswith('e230-dgxa100-'):
            use_this = 28
        elif hostname.startswith('lsf22-gpu'):
            use_this = 28
        elif hostname.startswith('hdf19-gpu') or hostname.startswith('e071-gpu'):
            use_this = 12
        else:
            use_this = 12  # default value

    # fixed: os.cpu_count() may return None, which made min() raise TypeError
    cpu_count = os.cpu_count()
    if cpu_count is not None:
        use_this = min(use_this, cpu_count)
    return use_this
+from nnunetv2.configuration import default_num_processes +from nnunetv2.paths import nnUNet_results +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + +def convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration): + return f'{trainer_name}__{plans_identifier}__{configuration}' + + +def convert_identifier_to_trainer_plans_config(identifier: str): + return os.path.basename(identifier).split('__') + + +def get_output_folder(dataset_name_or_id: Union[str, int], trainer_name: str = 'nnUNetTrainer', + plans_identifier: str = 'nnUNetPlans', configuration: str = '3d_fullres', + fold: Union[str, int] = None) -> str: + tmp = join(nnUNet_results, maybe_convert_to_dataset_name(dataset_name_or_id), + convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration)) + if fold is not None: + tmp = join(tmp, f'fold_{fold}') + return tmp + + +def parse_dataset_trainer_plans_configuration_from_path(path: str): + folders = split_path(path) + # this here can be a little tricky because we are making assumptions. Let's hope this never fails lol + + # safer to make this depend on two conditions, the fold_x and the DatasetXXX + # first let's see if some fold_X is present + fold_x_present = [i.startswith('fold_') for i in folders] + if any(fold_x_present): + idx = fold_x_present.index(True) + # OK now two entries before that there should be DatasetXXX + assert len(folders[:idx]) >= 2, 'Bad path, cannot extract what I need. Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + if folders[idx - 2].startswith('Dataset'): + splitted = folders[idx - 1].split('__') + assert len(splitted) == 3, 'Bad path, cannot extract what I need. 
Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + return folders[idx - 2], *splitted + else: + # we can only check for dataset followed by a string that is separable into three strings by splitting with '__' + # look for DatasetXXX + dataset_folder = [i.startswith('Dataset') for i in folders] + if any(dataset_folder): + idx = dataset_folder.index(True) + assert len(folders) >= (idx + 1), 'Bad path, cannot extract what I need. Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + splitted = folders[idx + 1].split('__') + assert len(splitted) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + return folders[idx], *splitted + + +def get_ensemble_name(model1_folder, model2_folder, folds: Tuple[int, ...]): + identifier = 'ensemble___' + os.path.basename(model1_folder) + '___' + \ + os.path.basename(model2_folder) + '___' + folds_tuple_to_string(folds) + return identifier + + +def get_ensemble_name_from_d_tr_c(dataset, tr1, p1, c1, tr2, p2, c2, folds: Tuple[int, ...]): + model1_folder = get_output_folder(dataset, tr1, p1, c1) + model2_folder = get_output_folder(dataset, tr2, p2, c2) + + get_ensemble_name(model1_folder, model2_folder, folds) + + +def convert_ensemble_folder_to_model_identifiers_and_folds(ensemble_folder: str): + prefix, *models, folds = os.path.basename(ensemble_folder).split('___') + return models, folds + + +def folds_tuple_to_string(folds: Union[List[int], Tuple[int, ...]]): + s = str(folds[0]) + for f in folds[1:]: + s += f"_{f}" + return s + + +def folds_string_to_tuple(folds_string: str): + folds = folds_string.split('_') + res = [] + for f in folds: + try: + res.append(int(f)) + except ValueError: + res.append(f) + return res + + +def check_workers_alive_and_busy(export_pool: Pool, worker_list: List, results_list: List, allowed_num_queued: int = 0): + """ + + returns 
if __name__ == '__main__':
    ### well at this point I could just write tests...
    path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres'
    print(parse_dataset_trainer_plans_configuration_from_path(path))
    path = 'Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres'
    print(parse_dataset_trainer_plans_configuration_from_path(path))
    path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres/fold_all'
    print(parse_dataset_trainer_plans_configuration_from_path(path))
    try:
        path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/'
        print(parse_dataset_trainer_plans_configuration_from_path(path))
    except AssertionError:
        print('yayy, assertion works')


def recursive_find_python_class(folder: str, class_name: str, current_module: str):
    """Search `folder` (importable as `current_module`) for a class named
    `class_name`, descending into subpackages depth-first.

    Returns the class object, or None if no module in the tree defines it.
    Plain modules are searched before subpackages.
    """
    tr = None
    for importer, modname, ispkg in pkgutil.iter_modules([folder]):
        if not ispkg:
            m = importlib.import_module(current_module + "." + modname)
            if hasattr(m, class_name):
                tr = getattr(m, class_name)
                break

    if tr is None:
        # not found at this level -> recurse into subpackages
        for importer, modname, ispkg in pkgutil.iter_modules([folder]):
            if ispkg:
                next_current_module = current_module + "." + modname
                tr = recursive_find_python_class(join(folder, modname), class_name,
                                                 current_module=next_current_module)
                if tr is not None:
                    break
    return tr


def get_network_from_plans(plans_manager: PlansManager,
                           dataset_json: dict,
                           configuration_manager: ConfigurationManager,
                           num_input_channels: int,
                           deep_supervision: bool = True):
    """
    we may have to change this in the future to accommodate other plans -> network mappings

    num_input_channels can differ depending on whether we do cascade. Its best to make this info available in the
    trainer rather than inferring it again from the plans here.
    """
    num_stages = len(configuration_manager.conv_kernel_sizes)

    dim = len(configuration_manager.conv_kernel_sizes[0])
    conv_op = convert_dim_to_conv_op(dim)

    label_manager = plans_manager.get_label_manager(dataset_json)

    segmentation_network_class_name = configuration_manager.UNet_class_name
    mapping = {
        'PlainConvUNet': PlainConvUNet,
        'ResidualEncoderUNet': ResidualEncoderUNet
    }
    # both architectures share the same conv/norm/nonlin configuration
    kwargs = {
        'PlainConvUNet': {
            'conv_bias': True,
            'norm_op': get_matching_instancenorm(conv_op),
            'norm_op_kwargs': {'eps': 1e-5, 'affine': True},
            'dropout_op': None, 'dropout_op_kwargs': None,
            'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},
        },
        'ResidualEncoderUNet': {
            'conv_bias': True,
            'norm_op': get_matching_instancenorm(conv_op),
            'norm_op_kwargs': {'eps': 1e-5, 'affine': True},
            'dropout_op': None, 'dropout_op_kwargs': None,
            'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},
        }
    }
    # FIX: corrected typos in the user-facing error message ("Yo'll" -> "You'll",
    # "accomodate" -> "accommodate")
    assert segmentation_network_class_name in mapping.keys(), 'The network architecture specified by the plans file ' \
                                                              'is non-standard (maybe your own?). You\'ll have to dive ' \
                                                              'into either this ' \
                                                              'function (get_network_from_plans) or ' \
                                                              'the init of your nnUNetModule to accommodate that.'
    network_class = mapping[segmentation_network_class_name]

    # ResidualEncoderUNet names this parameter differently
    conv_or_blocks_per_stage = {
        'n_conv_per_stage'
        if network_class != ResidualEncoderUNet else 'n_blocks_per_stage': configuration_manager.n_conv_per_stage_encoder,
        'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder
    }
    # network class name!!
    model = network_class(
        input_channels=num_input_channels,
        n_stages=num_stages,
        features_per_stage=[min(configuration_manager.UNet_base_num_features * 2 ** i,
                                configuration_manager.unet_max_num_features) for i in range(num_stages)],
        conv_op=conv_op,
        kernel_sizes=configuration_manager.conv_kernel_sizes,
        strides=configuration_manager.pool_op_kernel_sizes,
        num_classes=label_manager.num_segmentation_heads,
        deep_supervision=deep_supervision,
        **conv_or_blocks_per_stage,
        **kwargs[segmentation_network_class_name]
    )
    model.apply(InitWeights_He(1e-2))
    if network_class == ResidualEncoderUNet:
        model.apply(init_last_bn_before_add_to_0)
    return model


def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor:
    """Softmax over dim 0 (channel-first logits without a batch axis)."""
    return torch.softmax(x, 0)


def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor:
    """Softmax over dim 1 (batched channel logits)."""
    return torch.softmax(x, 1)


def empty_cache(device: torch.device):
    """Release cached allocator memory on the given device (cuda/mps); no-op on cpu."""
    if device.type == 'cuda':
        torch.cuda.empty_cache()
    elif device.type == 'mps':
        from torch import mps
        mps.empty_cache()
    else:
        pass


class dummy_context(object):
    """No-op context manager, usable where e.g. torch.autocast may or may not be wanted."""

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
def recursive_fix_for_json_export(my_dict: dict):
    """Fix a dict in place so that json.dumps can serialize it.

    Converts numpy scalar keys/values, numpy 1d arrays, lists/tuples (recursively)
    and torch.device objects to plain Python types. Anything unrecognized is left
    untouched in the hope that it is already serializable.
    """
    # json is stupid. 'cannot serialize object of type bool_/int64/float64'. Come on bro.
    keys = list(my_dict.keys())  # cannot iterate over keys() if we change keys....
    for k in keys:
        # FIX: use isinstance against np.integer instead of an explicit tuple of
        # widths — this also covers np.int16/np.uint32 etc. which the old
        # (np.int64, np.int32, np.int8, np.uint8) check silently missed
        if isinstance(k, np.integer):
            tmp = my_dict[k]
            del my_dict[k]
            my_dict[int(k)] = tmp
            del tmp
            k = int(k)

        if isinstance(my_dict[k], dict):
            recursive_fix_for_json_export(my_dict[k])
        elif isinstance(my_dict[k], np.ndarray):
            assert len(my_dict[k].shape) == 1, 'only 1d arrays are supported'
            my_dict[k] = fix_types_iterable(my_dict[k], output_type=list)
        elif isinstance(my_dict[k], np.bool_):
            my_dict[k] = bool(my_dict[k])
        elif isinstance(my_dict[k], np.integer):
            my_dict[k] = int(my_dict[k])
        elif isinstance(my_dict[k], np.floating):
            my_dict[k] = float(my_dict[k])
        elif isinstance(my_dict[k], list):
            my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k]))
        elif isinstance(my_dict[k], tuple):
            my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple)
        elif isinstance(my_dict[k], torch.device):
            my_dict[k] = str(my_dict[k])
        else:
            pass  # pray it can be serialized


def fix_types_iterable(iterable, output_type):
    """Return `output_type(...)` of `iterable` with numpy scalars converted to
    Python scalars; dicts are fixed in place, nested iterables recursively.

    This sh!t is hacky as hell and will break if you use it for anything outside
    nnunet. Keep you hands off of this.
    """
    out = []
    for i in iterable:
        # FIX: isinstance instead of `type(i) in (...)` — idiomatic and covers
        # all numpy integer/float widths
        if isinstance(i, np.integer):
            out.append(int(i))
        elif isinstance(i, dict):
            recursive_fix_for_json_export(i)
            out.append(i)
        elif isinstance(i, np.floating):
            out.append(float(i))
        elif isinstance(i, np.bool_):
            out.append(bool(i))
        elif isinstance(i, str):
            out.append(i)
        elif isinstance(i, Iterable):
            # strings were handled above, so this is a genuine nested container
            out.append(fix_types_iterable(i, type(i)))
        else:
            out.append(i)
    return output_type(out)
class LabelManager(object):
    """Central authority for interpreting the dataset's label dict.

    Handles both plain label training and region-based training (where one output
    channel represents a union of labels), plus an optional 'ignore' label.
    NOTE(review): the class header sits on the preceding chunk of this mangled
    diff; reconstructed here so the definition is complete.
    """

    def __init__(self, label_dict: dict, regions_class_order: Union[List[int], None], force_use_labels: bool = False,
                 inference_nonlin=None):
        self._sanity_check(label_dict)
        self.label_dict = label_dict
        self.regions_class_order = regions_class_order
        self._force_use_labels = force_use_labels

        # region-based training is detected by any label value being a multi-element
        # tuple/list; force_use_labels overrides that detection
        if force_use_labels:
            self._has_regions = False
        else:
            self._has_regions: bool = any(
                [isinstance(i, (tuple, list)) and len(i) > 1 for i in self.label_dict.values()])

        self._ignore_label: Union[None, int] = self._determine_ignore_label()
        self._all_labels: List[int] = self._get_all_labels()

        self._regions: Union[None, List[Union[int, Tuple[int, ...]]]] = self._get_regions()

        if self.has_ignore_label:
            assert self.ignore_label == max(
                self.all_labels) + 1, 'If you use the ignore label it must have the highest ' \
                                      'label value! It cannot be 0 or in between other labels. ' \
                                      'Sorry bro.'

        # regions are independent -> sigmoid; mutually exclusive labels -> softmax
        if inference_nonlin is None:
            self.inference_nonlin = torch.sigmoid if self.has_regions else softmax_helper_dim0
        else:
            self.inference_nonlin = inference_nonlin

    def _sanity_check(self, label_dict: dict):
        # 'background' must exist and be exactly label 0 (not a region)
        if not 'background' in label_dict.keys():
            raise RuntimeError('Background label not declared (remeber that this should be label 0!)')
        bg_label = label_dict['background']
        if isinstance(bg_label, (tuple, list)):
            raise RuntimeError(f"Background label must be 0. Not a list. Not a tuple. Your background label: {bg_label}")
        assert int(bg_label) == 0, f"Background label must be 0. Your background label: {bg_label}"
        # not sure if we want to allow regions that contain background. I don't immediately see how this could cause
        # problems so we allow it for now. That doesn't mean that this is explicitly supported. It could be that this
        # just crashes.

    def _get_all_labels(self) -> List[int]:
        """Sorted, de-duplicated list of all integer labels (regions flattened, 'ignore' excluded)."""
        all_labels = []
        for k, r in self.label_dict.items():
            # ignore label is not going to be used, hence the name. Duh.
            if k == 'ignore':
                continue
            if isinstance(r, (tuple, list)):
                for ri in r:
                    all_labels.append(int(ri))
            else:
                all_labels.append(int(r))
        all_labels = list(np.unique(all_labels))
        all_labels.sort()
        return all_labels

    def _get_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]:
        """Region definitions (as tuples/ints) or None when training on plain labels."""
        if not self._has_regions or self._force_use_labels:
            return None
        else:
            assert self.regions_class_order is not None, 'if region-based training is requested then you need to ' \
                                                         'define regions_class_order!'
            regions = []
            for k, r in self.label_dict.items():
                # ignore ignore label
                if k == 'ignore':
                    continue
                # ignore regions that are background
                if (np.isscalar(r) and r == 0) \
                        or \
                        (isinstance(r, (tuple, list)) and len(np.unique(r)) == 1 and np.unique(r)[0] == 0):
                    continue
                if isinstance(r, list):
                    r = tuple(r)
                regions.append(r)
            assert len(self.regions_class_order) == len(regions), 'regions_class_order must have as ' \
                                                                  'many entries as there are ' \
                                                                  'regions'
            return regions

    def _determine_ignore_label(self) -> Union[None, int]:
        """Integer 'ignore' label from the label dict, or None if absent."""
        ignore_label = self.label_dict.get('ignore')
        if ignore_label is not None:
            assert isinstance(ignore_label, int), f'Ignore label has to be an integer. It cannot be a region ' \
                                                  f'(list/tuple). Got {type(ignore_label)}.'
        return ignore_label

    @property
    def has_regions(self) -> bool:
        return self._has_regions

    @property
    def has_ignore_label(self) -> bool:
        return self.ignore_label is not None

    @property
    def all_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]:
        return self._regions

    @property
    def all_labels(self) -> List[int]:
        return self._all_labels

    @property
    def ignore_label(self) -> Union[None, int]:
        return self._ignore_label

    def apply_inference_nonlin(self, logits: Union[np.ndarray, torch.Tensor]) -> \
            Union[np.ndarray, torch.Tensor]:
        """
        logits has to have shape (c, x, y(, z)) where c is the number of classes/regions
        """
        if isinstance(logits, np.ndarray):
            logits = torch.from_numpy(logits)

        with torch.no_grad():
            # softmax etc is not implemented for half
            logits = logits.float()
            probabilities = self.inference_nonlin(logits)

        return probabilities

    def convert_probabilities_to_segmentation(self, predicted_probabilities: Union[np.ndarray, torch.Tensor]) -> \
            Union[np.ndarray, torch.Tensor]:
        """
        assumes that inference_nonlinearity was already applied!

        predicted_probabilities has to have shape (c, x, y(, z)) where c is the number of classes/regions
        """
        if not isinstance(predicted_probabilities, (np.ndarray, torch.Tensor)):
            raise RuntimeError(f"Unexpected input type. Expected np.ndarray or torch.Tensor,"
                               f" got {type(predicted_probabilities)}")

        if self.has_regions:
            assert self.regions_class_order is not None, 'if region-based training is requested then you need to ' \
                                                         'define regions_class_order!'
            # check correct number of outputs
            assert predicted_probabilities.shape[0] == self.num_segmentation_heads, \
                f'unexpected number of channels in predicted_probabilities. Expected {self.num_segmentation_heads}, ' \
                f'got {predicted_probabilities.shape[0]}. Remeber that predicted_probabilities should have shape ' \
                f'(c, x, y(, z)).'

        if self.has_regions:
            # regions are thresholded independently; later entries of
            # regions_class_order overwrite earlier ones on overlap
            if isinstance(predicted_probabilities, np.ndarray):
                segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.uint16)
            else:
                # no uint16 in torch
                segmentation = torch.zeros(predicted_probabilities.shape[1:], dtype=torch.int16,
                                           device=predicted_probabilities.device)
            for i, c in enumerate(self.regions_class_order):
                segmentation[predicted_probabilities[i] > 0.5] = c
        else:
            segmentation = predicted_probabilities.argmax(0)

        return segmentation

    def convert_logits_to_segmentation(self, predicted_logits: Union[np.ndarray, torch.Tensor]) -> \
            Union[np.ndarray, torch.Tensor]:
        # convenience: nonlinearity + argmax/threshold in one call
        probabilities = self.apply_inference_nonlin(predicted_logits)
        return self.convert_probabilities_to_segmentation(probabilities)

    def revert_cropping_on_probabilities(self, predicted_probabilities: Union[torch.Tensor, np.ndarray],
                                         bbox: List[List[int]],
                                         original_shape: Union[List[int], Tuple[int, ...]]):
        """
        ONLY USE THIS WITH PROBABILITIES, DO NOT USE LOGITS AND DO NOT USE FOR SEGMENTATION MAPS!!!

        predicted_probabilities must be (c, x, y(, z))

        Why do we do this here? Well if we pad probabilities we need to make sure that convert_logits_to_segmentation
        correctly returns background in the padded areas. Also we want to ba able to look at the padded probabilities
        and not have strange artifacts.
        Only LabelManager knows how this needs to be done. So let's let him/her do it, ok?
        """
        # revert cropping
        probs_reverted_cropping = np.zeros((predicted_probabilities.shape[0], *original_shape),
                                           dtype=predicted_probabilities.dtype) \
            if isinstance(predicted_probabilities, np.ndarray) else \
            torch.zeros((predicted_probabilities.shape[0], *original_shape), dtype=predicted_probabilities.dtype)

        # with softmax-style channels the padded area must be certain background
        if not self.has_regions:
            probs_reverted_cropping[0] = 1

        slicer = bounding_box_to_slice(bbox)
        probs_reverted_cropping[tuple([slice(None)] + list(slicer))] = predicted_probabilities
        return probs_reverted_cropping

    @staticmethod
    def filter_background(classes_or_regions: Union[List[int], List[Union[int, Tuple[int, ...]]]]):
        # heck yeah
        # This is definitely taking list comprehension too far. Enjoy.
        return [i for i in classes_or_regions if
                ((not isinstance(i, (tuple, list))) and i != 0)
                or
                (isinstance(i, (tuple, list)) and not (
                        len(np.unique(i)) == 1 and np.unique(i)[0] == 0))]

    @property
    def foreground_regions(self):
        return self.filter_background(self.all_regions)

    @property
    def foreground_labels(self):
        return self.filter_background(self.all_labels)

    @property
    def num_segmentation_heads(self):
        # number of network output channels: one per region, or one per label (incl. background)
        if self.has_regions:
            return len(self.foreground_regions)
        else:
            return len(self.all_labels)
def get_labelmanager_class_from_plans(plans: dict) -> Type[LabelManager]:
    """Resolve the label-manager class named in the plans, defaulting to LabelManager."""
    if 'label_manager' not in plans:
        print('No label manager specified in plans. Using default: LabelManager')
        return LabelManager
    return recursive_find_python_class(join(nnunetv2.__path__[0], "utilities", "label_handling"),
                                       plans['label_manager'],
                                       current_module="nnunetv2.utilities.label_handling")


def convert_labelmap_to_one_hot(segmentation: Union[np.ndarray, torch.Tensor],
                                all_labels: Union[List, torch.Tensor, np.ndarray, tuple],
                                output_dtype=None) -> Union[np.ndarray, torch.Tensor]:
    """One-hot encode a label map.

    Output has shape (len(all_labels), *segmentation.shape) and dtype uint8 unless
    output_dtype is given. Torch inputs produce a tensor on the same device; numpy
    inputs produce an ndarray (the numpy path is the faster of the two).

    IMPORTANT: labels must be consecutive integers 0, 1, 2, ... — remap anything
    like 0, 32, 255 before calling this.
    """
    onehot_shape = (len(all_labels), *segmentation.shape)
    if isinstance(segmentation, torch.Tensor):
        onehot = torch.zeros(onehot_shape,
                             dtype=torch.uint8 if output_dtype is None else output_dtype,
                             device=segmentation.device)
        # scatter_ requires int64 indices; measured ~2x faster than a per-label loop
        onehot.scatter_(0, segmentation[None].long(), 1)
    else:
        onehot = np.zeros(onehot_shape,
                          dtype=np.uint8 if output_dtype is None else output_dtype)
        # per-label comparison was the fastest numpy variant in testing
        for channel, label in enumerate(all_labels):
            onehot[channel] = segmentation == label
    return onehot


def determine_num_input_channels(plans_manager: PlansManager,
                                 configuration_or_config_manager: Union[str, ConfigurationManager],
                                 dataset_json: dict) -> int:
    """Number of network input channels: one per image modality, plus one per
    foreground label when this configuration is a cascade stage (the previous
    stage's segmentation is fed in one-hot)."""
    if isinstance(configuration_or_config_manager, str):
        config_manager = plans_manager.get_configuration(configuration_or_config_manager)
    else:
        config_manager = configuration_or_config_manager

    label_manager = plans_manager.get_label_manager(dataset_json)
    modality_key = 'modality' if 'modality' in dataset_json.keys() else 'channel_names'
    num_modalities = len(dataset_json[modality_key])

    if config_manager.previous_stage_name is not None:
        return num_modalities + len(label_manager.foreground_labels)
    return num_modalities


if __name__ == '__main__':
    # micro-benchmark: numpy vs torch one-hot paths (results compared for equality)
    num_labels = 7
    seg = np.random.randint(0, num_labels, size=(256, 256, 256), dtype=np.uint8)
    seg_torch = torch.from_numpy(seg)
    st = time()
    onehot_npy = convert_labelmap_to_one_hot(seg, np.arange(num_labels))
    time_1 = time()
    onehot_npy2 = convert_labelmap_to_one_hot(seg, np.arange(num_labels))
    time_2 = time()
    onehot_torch = convert_labelmap_to_one_hot(seg_torch, np.arange(num_labels))
    time_torch = time()
    onehot_torch2 = convert_labelmap_to_one_hot(seg_torch, np.arange(num_labels))
    time_torch2 = time()
    print(
        f'np: {time_1 - st}, np2: {time_2 - time_1}, torch: {time_torch - time_2}, torch2: {time_torch2 - time_torch}')
    onehot_torch = onehot_torch.numpy()
    onehot_torch2 = onehot_torch2.numpy()
    print(np.all(onehot_torch == onehot_npy))
    print(np.all(onehot_torch2 == onehot_npy))
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# distinct colors for segmentation overlays; index 0 (black) maps to background
color_cycle = (
    "000000",
    "4363d8",
    "f58231",
    "3cb44b",
    "e6194B",
    "911eb4",
    "ffe119",
    "bfef45",
    "42d4f4",
    "f032e6",
    "000075",
    "9A6324",
    "808000",
    "800000",
    "469990",
)


def hex_to_rgb(hex: str):
    """'ff0080' -> (255, 0, 128). Expects exactly 6 hex digits, no '#' prefix."""
    assert len(hex) == 6
    return tuple(int(hex[i:i + 2], 16) for i in (0, 2, 4))


def generate_overlay(input_image: np.ndarray, segmentation: np.ndarray, mapping: dict = None,
                     color_cycle: Tuple[str, ...] = color_cycle,
                     overlay_intensity: float = 0.6):
    """
    image can be 2d greyscale or 2d RGB (color channel in last dimension!)

    Segmentation must be label map of same shape as image (w/o color channels)

    mapping can be label_id -> idx_in_cycle or None

    returned image is scaled to [0, 255] (uint8)!!!
    """
    # create a copy of image
    image = np.copy(input_image)

    if len(image.shape) == 2:
        image = np.tile(image[:, :, None], (1, 1, 3))
    elif len(image.shape) == 3:
        if image.shape[2] == 1:
            image = np.tile(image, (1, 1, 3))
        elif image.shape[2] != 3:
            raise RuntimeError(f'if 3d image is given the last dimension must be the color channels (3 channels). '
                               f'Only 2D images are supported. Your image shape: {image.shape}')
        # BUG FIX: shape[2] == 3 (a proper 2D RGB image, as promised by the
        # docstring) previously raised; it is now used as-is
    else:
        raise RuntimeError("unexpected image shape. only 2D images and 2D images with color channels (color in "
                           "last dimension) are supported")

    # rescale image to [0, 255]
    image = image - image.min()
    image = image / image.max() * 255

    # create output
    if mapping is None:
        uniques = np.sort(pd.unique(segmentation.ravel()))  # np.unique(segmentation)
        mapping = {i: c for c, i in enumerate(uniques)}

    for l in mapping.keys():
        image[segmentation == l] += overlay_intensity * np.array(hex_to_rgb(color_cycle[mapping[l]]))

    # rescale result to [0, 255]
    image = image / image.max() * 255
    return image.astype(np.uint8)


def select_slice_to_plot(image: np.ndarray, segmentation: np.ndarray) -> int:
    """
    image and segmentation are expected to be 3D

    selects the slice with the largest amount of fg (regardless of label)

    we give image so that we can easily replace this function if needed
    """
    fg_mask = segmentation != 0
    fg_per_slice = fg_mask.sum((1, 2))
    selected_slice = int(np.argmax(fg_per_slice))
    return selected_slice


def select_slice_to_plot2(image: np.ndarray, segmentation: np.ndarray) -> int:
    """
    image and segmentation are expected to be 3D (or 1, x, y)

    selects the slice with the largest amount of fg (how much percent of each class are in each slice? pick slice
    with highest avg percent)

    we give image so that we can easily replace this function if needed
    """
    classes = [i for i in np.sort(pd.unique(segmentation.ravel())) if i != 0]
    fg_per_slice = np.zeros((image.shape[0], len(classes)))
    for i, c in enumerate(classes):
        fg_mask = segmentation == c
        fg_per_slice[:, i] = fg_mask.sum((1, 2))
        fg_per_slice[:, i] /= fg_per_slice.sum()
    fg_per_slice = fg_per_slice.mean(1)
    return int(np.argmax(fg_per_slice))


def plot_overlay(image_file: str, segmentation_file: str, image_reader_writer: BaseReaderWriter, output_file: str,
                 overlay_intensity: float = 0.6):
    """Read an image + segmentation pair, overlay the best slice and save it as png."""
    import matplotlib.pyplot as plt

    image, props = image_reader_writer.read_images((image_file, ))
    image = image[0]
    seg, props_seg = image_reader_writer.read_seg(segmentation_file)
    seg = seg[0]

    assert all([i == j for i, j in zip(image.shape, seg.shape)]), "image and seg do not have the same shape: %s, %s" % (
        image_file, segmentation_file)

    assert len(image.shape) == 3, 'only 3D images/segs are supported'

    selected_slice = select_slice_to_plot2(image, seg)
    # print(image.shape, selected_slice)

    overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity)

    plt.imsave(output_file, overlay)


def plot_overlay_preprocessed(case_file: str, output_file: str, overlay_intensity: float = 0.6, channel_idx=0):
    """Same as plot_overlay but for an nnUNet preprocessed .npz case file."""
    import matplotlib.pyplot as plt
    data = np.load(case_file)['data']
    seg = np.load(case_file)['seg'][0]

    assert channel_idx < (data.shape[0]), 'This dataset only supports channel index up to %d' % (data.shape[0] - 1)

    image = data[channel_idx]
    # preprocessed segs use negative values for 'outside mask'; clamp to background
    seg[seg < 0] = 0

    selected_slice = select_slice_to_plot2(image, seg)

    overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity)

    plt.imsave(output_file, overlay)
list_of_output_files, overlay_intensity, + num_processes=8): + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + r = p.starmap_async(plot_overlay, zip( + list_of_image_files, list_of_seg_files, [image_reader_writer] * len(list_of_output_files), + list_of_output_files, [overlay_intensity] * len(list_of_output_files) + )) + r.get() + + +def multiprocessing_plot_overlay_preprocessed(list_of_case_files, list_of_output_files, overlay_intensity, + num_processes=8, channel_idx=0): + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + r = p.starmap_async(plot_overlay_preprocessed, zip( + list_of_case_files, list_of_output_files, [overlay_intensity] * len(list_of_output_files), + [channel_idx] * len(list_of_output_files) + )) + r.get() + + +def generate_overlays_from_raw(dataset_name_or_id: Union[int, str], output_folder: str, + num_processes: int = 8, channel_idx: int = 0, overlay_intensity: float = 0.6): + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + folder = join(nnUNet_raw, dataset_name) + dataset_json = load_json(join(folder, 'dataset.json')) + identifiers = get_identifiers_from_splitted_dataset_folder(join(folder, 'imagesTr'), dataset_json['file_ending']) + + image_files = [join(folder, 'imagesTr', i + "_%04.0d.nii.gz" % channel_idx) for i in identifiers] + seg_files = [join(folder, 'labelsTr', i + ".nii.gz") for i in identifiers] + + assert all([isfile(i) for i in image_files]) + assert all([isfile(i) for i in seg_files]) + + maybe_mkdir_p(output_folder) + output_files = [join(output_folder, i + '.png') for i in identifiers] + + image_reader_writer = determine_reader_writer_from_dataset_json(dataset_json, image_files[0])() + multiprocessing_plot_overlay(image_files, seg_files, image_reader_writer, output_files, overlay_intensity, num_processes) + + +def generate_overlays_from_preprocessed(dataset_name_or_id: Union[int, str], output_folder: str, + num_processes: int = 8, channel_idx: int = 0, + configuration: 
str = None, + plans_identifier: str = 'nnUNetPlans', + overlay_intensity: float = 0.6): + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + folder = join(nnUNet_preprocessed, dataset_name) + if not isdir(folder): raise RuntimeError("run preprocessing for that task first") + + plans = load_json(join(folder, plans_identifier + '.json')) + if configuration is None: + if '3d_fullres' in plans['configurations'].keys(): + configuration = '3d_fullres' + else: + configuration = '2d' + data_identifier = plans['configurations'][configuration]["data_identifier"] + preprocessed_folder = join(folder, data_identifier) + + if not isdir(preprocessed_folder): + raise RuntimeError(f"Preprocessed data folder for configuration {configuration} of plans identifier " + f"{plans_identifier} ({dataset_name}) does not exist. Run preprocessing for this " + f"configuration first!") + + identifiers = [i[:-4] for i in subfiles(preprocessed_folder, suffix='.npz', join=False)] + + output_files = [join(output_folder, i + '.png') for i in identifiers] + image_files = [join(preprocessed_folder, i + ".npz") for i in identifiers] + + maybe_mkdir_p(output_folder) + multiprocessing_plot_overlay_preprocessed(image_files, output_files, overlay_intensity=overlay_intensity, + num_processes=num_processes, channel_idx=channel_idx) + + +def entry_point_generate_overlay(): + import argparse + parser = argparse.ArgumentParser("Plots png overlays of the slice with the most foreground. Note that this " + "disregards spacing information!") + parser.add_argument('-d', type=str, help="Dataset name or id", required=True) + parser.add_argument('-o', type=str, help="output folder", required=True) + parser.add_argument('-np', type=int, default=default_num_processes, required=False, + help=f"number of processes used. Default: {default_num_processes}") + parser.add_argument('-channel_idx', type=int, default=0, required=False, + help="channel index used (0 = _0000). 
def entry_point_generate_overlay():
    """CLI wrapper: parse arguments and dispatch to raw or preprocessed overlay generation."""
    import argparse
    parser = argparse.ArgumentParser("Plots png overlays of the slice with the most foreground. Note that this "
                                     "disregards spacing information!")
    parser.add_argument('-d', type=str, required=True, help="Dataset name or id")
    parser.add_argument('-o', type=str, required=True, help="output folder")
    parser.add_argument('-np', type=int, required=False, default=default_num_processes,
                        help=f"number of processes used. Default: {default_num_processes}")
    parser.add_argument('-channel_idx', type=int, required=False, default=0,
                        help="channel index used (0 = _0000). Default: 0")
    parser.add_argument('--use_raw', action='store_true', required=False,
                        help="if set then we use raw data. else we use preprocessed")
    parser.add_argument('-p', type=str, required=False, default='nnUNetPlans',
                        help='plans identifier. Only used if --use_raw is not set! Default: nnUNetPlans')
    parser.add_argument('-c', type=str, required=False, default=None,
                        help='configuration name. Only used if --use_raw is not set! Default: None = '
                             '3d_fullres if available, else 2d')
    parser.add_argument('-overlay_intensity', type=float, required=False, default=0.6,
                        help='overlay intensity. Higher = brighter/less transparent')

    args = parser.parse_args()

    # dispatch on data source; both paths forward the shared arguments unchanged
    if args.use_raw:
        generate_overlays_from_raw(args.d, args.o, args.np, args.channel_idx,
                                   overlay_intensity=args.overlay_intensity)
    else:
        generate_overlays_from_preprocessed(args.d, args.o, args.np, args.channel_idx, args.c, args.p,
                                            overlay_intensity=args.overlay_intensity)


if __name__ == '__main__':
    entry_point_generate_overlay()
# Signature shared by all resampling callables:
# (data, new_shape, current_spacing, new_spacing) -> resampled data
_ResamplingFn = Callable[
    [Union[torch.Tensor, np.ndarray],
     Union[Tuple[int, ...], List[int], np.ndarray],
     Union[Tuple[float, ...], List[float], np.ndarray],
     Union[Tuple[float, ...], List[float], np.ndarray]],
    Union[torch.Tensor, np.ndarray]]


class ConfigurationManager(object):
    """
    Read-only convenience wrapper around a single (already inheritance-resolved) configuration
    dict of an nnU-Net plans file. Each plans entry is exposed as a typed property; lookups that
    are expensive to resolve (classes, resampling functions) are cached with lru_cache.
    """

    def __init__(self, configuration_dict: dict):
        # raw configuration entries; the properties below are simple keyed reads of this dict
        self.configuration = configuration_dict

    def __repr__(self):
        return self.configuration.__repr__()

    # ------------------------------ plain keyed reads ------------------------------
    @property
    def data_identifier(self) -> str:
        return self.configuration['data_identifier']

    @property
    def preprocessor_name(self) -> str:
        return self.configuration['preprocessor_name']

    @property
    def batch_size(self) -> int:
        return self.configuration['batch_size']

    @property
    def patch_size(self) -> List[int]:
        return self.configuration['patch_size']

    @property
    def median_image_size_in_voxels(self) -> List[int]:
        return self.configuration['median_image_size_in_voxels']

    @property
    def spacing(self) -> List[float]:
        return self.configuration['spacing']

    @property
    def normalization_schemes(self) -> List[str]:
        return self.configuration['normalization_schemes']

    @property
    def use_mask_for_norm(self) -> List[bool]:
        return self.configuration['use_mask_for_norm']

    @property
    def UNet_class_name(self) -> str:
        return self.configuration['UNet_class_name']

    @property
    def UNet_base_num_features(self) -> int:
        return self.configuration['UNet_base_num_features']

    @property
    def n_conv_per_stage_encoder(self) -> List[int]:
        return self.configuration['n_conv_per_stage_encoder']

    @property
    def n_conv_per_stage_decoder(self) -> List[int]:
        return self.configuration['n_conv_per_stage_decoder']

    @property
    def num_pool_per_axis(self) -> List[int]:
        return self.configuration['num_pool_per_axis']

    @property
    def pool_op_kernel_sizes(self) -> List[List[int]]:
        return self.configuration['pool_op_kernel_sizes']

    @property
    def conv_kernel_sizes(self) -> List[List[int]]:
        return self.configuration['conv_kernel_sizes']

    @property
    def unet_max_num_features(self) -> int:
        return self.configuration['unet_max_num_features']

    @property
    def batch_dice(self) -> bool:
        return self.configuration['batch_dice']

    # ------------------------- cached class / function lookups -------------------------
    @property
    @lru_cache(maxsize=1)
    def preprocessor_class(self) -> Type[DefaultPreprocessor]:
        # resolved by name from the nnunetv2.preprocessing package
        return recursive_find_python_class(join(nnunetv2.__path__[0], "preprocessing"),
                                           self.preprocessor_name,
                                           current_module="nnunetv2.preprocessing")

    @property
    @lru_cache(maxsize=1)
    def UNet_class(self) -> Type[nn.Module]:
        found = recursive_find_python_class(join(dynamic_network_architectures.__path__[0], "architectures"),
                                            self.UNet_class_name,
                                            current_module="dynamic_network_architectures.architectures")
        if found is None:
            raise RuntimeError('The network architecture specified by the plans file '
                               'is non-standard (maybe your own?). Fix this by not using '
                               'ConfigurationManager.UNet_class to instantiate '
                               'it (probably just overwrite build_network_architecture of your trainer.')
        return found

    @property
    @lru_cache(maxsize=1)
    def resampling_fn_data(self) -> _ResamplingFn:
        fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_data'])
        return partial(fn, **self.configuration['resampling_fn_data_kwargs'])

    @property
    @lru_cache(maxsize=1)
    def resampling_fn_probabilities(self) -> _ResamplingFn:
        fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_probabilities'])
        return partial(fn, **self.configuration['resampling_fn_probabilities_kwargs'])

    @property
    @lru_cache(maxsize=1)
    def resampling_fn_seg(self) -> _ResamplingFn:
        fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_seg'])
        return partial(fn, **self.configuration['resampling_fn_seg_kwargs'])

    # ------------------------------ cascade topology ------------------------------
    @property
    def next_stage_names(self) -> Union[List[str], None]:
        # plans may store a single name or a list; normalize a single name to a one-element list
        nxt = self.configuration.get('next_stage')
        if isinstance(nxt, str):
            return [nxt]
        return nxt

    @property
    def previous_stage_name(self) -> Union[str, None]:
        return self.configuration.get('previous_stage')
class PlansManager(object):
    """
    Convenience wrapper around an nnU-Net plans dict (or plans json file). It

    1) resolves 'inherits_from' inheritance between configurations,
    2) resolves IO / planner / label-manager classes from their string names,
    3) exposes frequently used plans entries as typed properties,
    4) caches the expensive lookups.

    The raw dict remains accessible as PlansManager.plans for anything not wrapped here.
    """

    def __init__(self, plans_file_or_dict: Union[str, dict]):
        # accept either an already-loaded dict or a path to a plans json
        self.plans = plans_file_or_dict if isinstance(plans_file_or_dict, dict) else load_json(plans_file_or_dict)

    def __repr__(self):
        return self.plans.__repr__()

    def _internal_resolve_configuration_inheritance(self, configuration_name: str,
                                                    visited: Tuple[str, ...] = None) -> dict:
        # Recursively merge a configuration onto its 'inherits_from' parent. 'visited' records the
        # chain walked so far so that inheritance cycles raise instead of recursing forever.
        if configuration_name not in self.plans['configurations'].keys():
            raise ValueError(f'The configuration {configuration_name} does not exist in the plans I have. Valid '
                             f'configuration names are {list(self.plans["configurations"].keys())}.')
        configuration = deepcopy(self.plans['configurations'][configuration_name])

        if 'inherits_from' not in configuration:
            return configuration

        parent_config_name = configuration['inherits_from']
        if visited is None:
            visited = (configuration_name,)
        elif parent_config_name in visited:
            raise RuntimeError(f"Circular dependency detected. The following configurations were visited "
                               f"while solving inheritance (in that order!): {visited}. "
                               f"Current configuration: {configuration_name}. Its parent configuration "
                               f"is {parent_config_name}.")
        else:
            visited = (*visited, configuration_name)

        # parent first, then overwrite with the child's own entries
        merged = self._internal_resolve_configuration_inheritance(parent_config_name, visited)
        merged.update(configuration)
        return merged

    @lru_cache(maxsize=10)
    def get_configuration(self, configuration_name: str):
        # cached so repeated requests for the same configuration return the same manager instance
        if configuration_name not in self.plans['configurations'].keys():
            raise RuntimeError(f"Requested configuration {configuration_name} not found in plans. "
                               f"Available configurations: {list(self.plans['configurations'].keys())}")
        configuration_dict = self._internal_resolve_configuration_inheritance(configuration_name)
        return ConfigurationManager(configuration_dict)

    # ------------------------------ plain keyed reads ------------------------------
    @property
    def dataset_name(self) -> str:
        return self.plans['dataset_name']

    @property
    def plans_name(self) -> str:
        return self.plans['plans_name']

    @property
    def original_median_spacing_after_transp(self) -> List[float]:
        return self.plans['original_median_spacing_after_transp']

    @property
    def original_median_shape_after_transp(self) -> List[float]:
        return self.plans['original_median_shape_after_transp']

    @property
    def transpose_forward(self) -> List[int]:
        return self.plans['transpose_forward']

    @property
    def transpose_backward(self) -> List[int]:
        return self.plans['transpose_backward']

    @property
    def available_configurations(self) -> List[str]:
        return list(self.plans['configurations'].keys())

    @property
    def experiment_planner_name(self) -> str:
        return self.plans['experiment_planner_used']

    # ------------------------- cached class lookups -------------------------
    @property
    @lru_cache(maxsize=1)
    def image_reader_writer_class(self) -> Type[BaseReaderWriter]:
        return recursive_find_reader_writer_by_name(self.plans['image_reader_writer'])

    @property
    @lru_cache(maxsize=1)
    def experiment_planner_class(self) -> Type[ExperimentPlanner]:
        return recursive_find_python_class(join(nnunetv2.__path__[0], "experiment_planning"),
                                           self.experiment_planner_name,
                                           current_module="nnunetv2.experiment_planning")

    @property
    @lru_cache(maxsize=1)
    def label_manager_class(self) -> Type[LabelManager]:
        return get_labelmanager_class_from_plans(self.plans)

    def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelManager:
        return self.label_manager_class(label_dict=dataset_json['labels'],
                                        regions_class_order=dataset_json.get('regions_class_order'),
                                        **kwargs)

    @property
    def foreground_intensity_properties_per_channel(self) -> dict:
        # older plans files used the key '..._by_modality'; fall back to it for backwards compatibility
        if 'foreground_intensity_properties_per_channel' not in self.plans.keys():
            if 'foreground_intensity_properties_by_modality' in self.plans.keys():
                return self.plans['foreground_intensity_properties_by_modality']
        return self.plans['foreground_intensity_properties_per_channel']
def sum_tensor(inp: torch.Tensor, axes: Union[np.ndarray, Tuple, List], keepdim: bool = False) -> torch.Tensor:
    """
    Sum *inp* over every axis listed in *axes* (duplicates are ignored).

    :param inp: tensor to reduce
    :param axes: iterable of axis indices to sum over
    :param keepdim: if True, the reduced axes are kept with size 1
    :return: the reduced tensor
    """
    reduce_axes = np.unique(axes).astype(int)
    if keepdim:
        # axes keep their positions, so ascending order is fine here
        for axis in reduce_axes:
            inp = inp.sum(int(axis), keepdim=True)
    else:
        # collapse from the highest axis down so the remaining indices stay valid
        for axis in sorted(reduce_axes, reverse=True):
            inp = inp.sum(int(axis))
    return inp
b/source_code/SegMamba/light_training/utilities/utils.py @@ -0,0 +1,56 @@ +# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center +# (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Union + +from batchgenerators.utilities.file_and_folder_operations import * +import numpy as np +import re + +def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None: + try: + a = np.load(npz_file) # inexpensive, no compression is done here. 
def delete_last_model(model_dir, symbol):
    """Remove the first checkpoint in *model_dir* whose filename starts with *symbol* (if any)."""
    last_model = glob.glob(f"{model_dir}/{symbol}*.pt")
    if len(last_model) != 0:
        os.remove(last_model[0])


def save_new_model_and_delete_last(model, save_path, delete_symbol=None):
    """
    Save *model*'s state_dict to *save_path*, optionally deleting the previous checkpoint whose
    filename starts with *delete_symbol* first.

    :param model: torch module whose state_dict is saved
    :param save_path: destination path (parent directories are created)
    :param delete_symbol: filename prefix of the old checkpoint to delete; None disables deletion

    Bug fix: the original guard was `if delete_last_model is not None:`, which tests the
    *function object* (always true) instead of the *delete_symbol* argument. With
    delete_symbol=None it therefore globbed for "None*.pt" and would delete an unrelated file
    matching that pattern. We now test delete_symbol itself.
    """
    save_dir = os.path.dirname(save_path)
    os.makedirs(save_dir, exist_ok=True)

    # only clean up when the caller actually provided a prefix to delete
    if delete_symbol is not None:
        delete_last_model(save_dir, delete_symbol)

    torch.save(model.state_dict(), save_path)

    print(f"model is saved in {save_path}")
class PolyLRScheduler(_LRScheduler):
    """
    Polynomial ('poly') learning-rate decay: lr(step) = initial_lr * (1 - step / max_steps) ** exponent.

    step() can be driven either by an explicit step index or, when called without one, by an
    internal counter that advances on every call. The computed lr is written directly into the
    optimizer's param groups.
    """

    def __init__(self, optimizer, initial_lr: float, max_steps: int, exponent: float = 0.9, current_step: int = None):
        self.optimizer = optimizer
        self.initial_lr = initial_lr
        self.max_steps = max_steps
        self.exponent = exponent
        self.ctr = 0  # fallback step counter used when step() gets no explicit index
        super().__init__(optimizer, current_step if current_step is not None else -1)

    def step(self, current_step=None):
        # no explicit index given -> use (and then advance) the internal counter
        if current_step is None or current_step == -1:
            current_step = self.ctr
            self.ctr += 1

        decay = (1 - current_step / self.max_steps) ** self.exponent
        lr = self.initial_lr * decay
        for group in self.optimizer.param_groups:
            group['lr'] = lr
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """
    Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the
    optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        lr_end (`float`, *optional*, defaults to 1e-7):
            The end LR.
        power (`float`, *optional*, defaults to 1.0):
            Power factor.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
    implementation at
    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.

    Raises:
        ValueError: if *lr_end* is not smaller than the optimizer's initial lr.
    """

    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        # fixed typo in the error message ("must be be" -> "must be")
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # linear warmup from 0 to lr_init
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`float`, *optional*, defaults to 0.5):
            The number of periods of the cosine function in a schedule (the default is to just decrease from the max
            value to 0 following a half-cosine).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """

    def lr_lambda(current_step):
        # linear warmup phase
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        # cosine phase; clamp at 0 so extra steps past the schedule end stay non-negative
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        cosine = math.cos(math.pi * float(num_cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1.0 + cosine))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
class LinearWarmupCosineAnnealingLR(_LRScheduler):
    """
    Linear warmup from warmup_start_lr to each param group's base lr over warmup_epochs epochs,
    followed by cosine annealing down to eta_min over the remaining (max_epochs - warmup_epochs)
    epochs. get_lr() implements the chainable formulation; _get_closed_form_lr() the closed form
    used when an explicit epoch is passed to step().
    """

    def __init__(
        self,
        optimizer: Optimizer,
        warmup_epochs: int,
        max_epochs: int,
        warmup_start_lr: float = 0.0,
        eta_min: float = 0.0,
        last_epoch: int = -1,
    ) -> None:
        """
        Args:
            optimizer (Optimizer): Wrapped optimizer.
            warmup_epochs (int): Maximum number of iterations for linear warmup
            max_epochs (int): Maximum number of iterations
            warmup_start_lr (float): Learning rate to start the linear warmup. Default: 0.
            eta_min (float): Minimum learning rate. Default: 0.
            last_epoch (int): The index of last epoch. Default: -1.
        """
        self.warmup_epochs = warmup_epochs
        self.max_epochs = max_epochs
        self.warmup_start_lr = warmup_start_lr
        self.eta_min = eta_min

        super(LinearWarmupCosineAnnealingLR, self).__init__(optimizer, last_epoch)

    def get_lr(self) -> List[float]:
        """Compute learning rate using the chainable form of the scheduler."""
        if not self._get_lr_called_within_step:
            warnings.warn(
                "To get the last learning rate computed by the scheduler, "
                "please use `get_last_lr()`.",
                UserWarning,
            )

        epoch = self.last_epoch
        warm = self.warmup_epochs
        span = self.max_epochs - warm
        groups = self.optimizer.param_groups

        if epoch == 0:
            # very first step: everything starts at the warmup floor
            return [self.warmup_start_lr] * len(self.base_lrs)
        if epoch < warm:
            # constant additive warmup increment per epoch
            return [
                g["lr"] + (base - self.warmup_start_lr) / (warm - 1)
                for base, g in zip(self.base_lrs, groups)
            ]
        if epoch == warm:
            return self.base_lrs
        if (epoch - 1 - self.max_epochs) % (2 * span) == 0:
            # restart point of the cosine cycle
            return [
                g["lr"] + (base - self.eta_min) * (1 - math.cos(math.pi / span)) / 2
                for base, g in zip(self.base_lrs, groups)
            ]

        # generic chainable cosine update
        return [
            (1 + math.cos(math.pi * (epoch - warm) / span)) /
            (1 + math.cos(math.pi * (epoch - warm - 1) / span)) *
            (g["lr"] - self.eta_min) + self.eta_min
            for g in groups
        ]

    def _get_closed_form_lr(self) -> List[float]:
        """Called when epoch is passed as a param to the `step` function of the scheduler."""
        if self.last_epoch < self.warmup_epochs:
            return [
                self.warmup_start_lr + self.last_epoch * (base - self.warmup_start_lr) / (self.warmup_epochs - 1)
                for base in self.base_lrs
            ]

        return [
            self.eta_min + 0.5 * (base - self.eta_min) *
            (1 + math.cos(math.pi * (self.last_epoch - self.warmup_epochs) / (self.max_epochs - self.warmup_epochs)))
            for base in self.base_lrs
        ]
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Tri Dao, Albert Gu + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/source_code/SegMamba/mamba/README.md b/source_code/SegMamba/mamba/README.md new file mode 100644 index 0000000000000000000000000000000000000000..754cefd7f862a90bad8fbdff71e3793a4e7849e3 --- /dev/null +++ b/source_code/SegMamba/mamba/README.md @@ -0,0 +1,149 @@ +# Mamba + +![Mamba](assets/selection.png "Selective State Space") +> **Mamba: Linear-Time Sequence Modeling with Selective State Spaces**\ +> Albert Gu*, Tri Dao*\ +> Paper: https://arxiv.org/abs/2312.00752 + +## About + +Mamba is a new state space model architecture showing promising performance on information-dense data such as language modeling, where previous subquadratic models fall short of Transformers. 
+It is based on the line of progress on [structured state space models](https://github.com/state-spaces/s4), +with an efficient hardware-aware design and implementation in the spirit of [FlashAttention](https://github.com/Dao-AILab/flash-attention). + +## Installation + +- `pip install causal-conv1d`: an efficient implementation of a simple causal Conv1d layer used inside the Mamba block. +- `pip install mamba-ssm`: the core Mamba package. + +It can also be built from source with `pip install .` from this repository. + +If `pip` complains about PyTorch versions, try passing `--no-build-isolation` to `pip`. + +Other requirements: +- Linux +- NVIDIA GPU +- PyTorch 1.12+ +- CUDA 11.6+ + +## Usage + +We expose several levels of interface with the Mamba model. + +### Selective SSM + +Mamba is based on a selective SSM layer, which is the focus of the paper (Section 3; Algorithm 2). + +Source: [ops/selective_scan_interface.py](mamba_ssm/ops/selective_scan_interface.py). + +### Mamba Block + +The main module of this repository is the Mamba architecture block wrapping the selective SSM. + +Source: [modules/mamba_simple.py](mamba_ssm/modules/mamba_simple.py). + +Usage: +``` +from mamba_ssm import Mamba + +batch, length, dim = 2, 64, 16 +x = torch.randn(batch, length, dim).to("cuda") +model = Mamba( + # This module uses roughly 3 * expand * d_model^2 parameters + d_model=dim, # Model dimension d_model + d_state=16, # SSM state expansion factor + d_conv=4, # Local convolution width + expand=2, # Block expansion factor +).to("cuda") +y = model(x) +assert y.shape == x.shape +``` + +### Mamba Language Model + +Finally, we provide an example of a complete language model: a deep sequence model backbone (with repeating Mamba blocks) + language model head. + +Source: [models/mixer_seq_simple.py](mamba_ssm/models/mixer_seq_simple.py). + +This is an example of how to integrate Mamba into an end-to-end neural network. +This example is used in the generation scripts below. 
+ + + +## Pretrained Models + +Pretrained models are uploaded to +[HuggingFace](https://huggingface.co/state-spaces): `mamba-130m`, `mamba-370m`, +`mamba-790m`, `mamba-1.4b`, `mamba-2.8b`. + +The models will be autodownloaded by the generation script below. + +These models were trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), and follow the standard model dimensions described by GPT-3 and followed by many open source models: + +| Parameters | Layers | Model dim. | +|------------|--------|------------| +| 130M | 12 | 768 | +| 370M | 24 | 1024 | +| 790M | 24 | 1536 | +| 1.4B | 24 | 2048 | +| 2.8B | 32 | 2560 | + +(The layer count of Mamba should be doubled, as two Mamba blocks are needed for each "layer" (MHA block + MLP block) of a Transformer.) + +Note: these are base models trained only for 300B tokens, without any form of downstream modification (instruction tuning, etc.). +Performance is expected to be comparable or better than other architectures trained on similar data, but not to match larger or fine-tuned models. + + +## Evaluations + +To run zero-shot evaluations of models (corresponding to Table 3 of the paper), +we use the +[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) +library. + +1. Pull the `lm-evaluation-harness` repo by `git submodule update --init + --recursive`. We use the `big-refactor` branch. +2. Install `lm-evaluation-harness`: `pip install -e 3rdparty/lm-evaluation-harness` +3. 
Run evaluation with (more documentation at the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) repo): +``` +python evals/lm_harness_eval.py --model mamba --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +python evals/lm_harness_eval.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +``` + +Note that the result of each task might differ from reported values by 0.1-0.3 due to noise in the evaluation process. + +## Inference + +The script [benchmarks/benchmark_generation_mamba_simple.py](benchmarks/benchmark_generation_mamba_simple.py) +1. autoloads a model from the HuggingFace Hub, +2. generates completions of a user-specified prompt, +3. benchmarks the inference speed of this generation. + +Other configurable options include the top-p (nucleus sampling) probability, and the softmax temperature. + +### Examples + +To test generation latency (e.g. batch size = 1) with different sampling strategies: + +``` +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5 +``` + +To test generation throughput with random prompts (e.g. 
large batch size): +``` +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --batch 128 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --batch 128 +``` + +## Citation + +If you use this codebase, or otherwise found our work valuable, please cite Mamba: +``` +@article{mamba, + title={Mamba: Linear-Time Sequence Modeling with Selective State Spaces}, + author={Gu, Albert and Dao, Tri}, + journal={arXiv preprint arXiv:2312.00752}, + year={2023} +} +``` diff --git a/source_code/SegMamba/mamba/benchmarks/benchmark_generation_mamba_simple.py b/source_code/SegMamba/mamba/benchmarks/benchmark_generation_mamba_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..8f2943cb4bde6f25eddb82b7b999c5c5f8b39acc --- /dev/null +++ b/source_code/SegMamba/mamba/benchmarks/benchmark_generation_mamba_simple.py @@ -0,0 +1,88 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. + +import argparse +import time +import json + +import torch +import torch.nn.functional as F + +from einops import rearrange + +from transformers import AutoTokenizer, AutoModelForCausalLM + +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel + + +parser = argparse.ArgumentParser(description="Generation benchmarking") +parser.add_argument("--model-name", type=str, default="state-spaces/mamba-130m") +parser.add_argument("--prompt", type=str, default=None) +parser.add_argument("--promptlen", type=int, default=100) +parser.add_argument("--genlen", type=int, default=100) +parser.add_argument("--temperature", type=float, default=1.0) +parser.add_argument("--topk", type=int, default=1) +parser.add_argument("--topp", type=float, default=1.0) +parser.add_argument("--batch", type=int, default=1) +args = parser.parse_args() + +repeats = 3 +device = "cuda" +dtype = torch.float16 + +print(f"Loading model {args.model_name}") +is_mamba = args.model_name.startswith("state-spaces/mamba-") or "mamba" in 
args.model_name + +if is_mamba: + tokenizer = AutoTokenizer.from_pretrained("/home/zhulianghui/VisionProjects/mamba/ckpts/gpt-neox-20b-tokenizer") + model = MambaLMHeadModel.from_pretrained(args.model_name, device=device, dtype=dtype) +else: + tokenizer = AutoTokenizer.from_pretrained(args.model_name) + model = AutoModelForCausalLM.from_pretrained(args.model_name, device_map={"": device}, torch_dtype=dtype) +model.eval() +print(f"Number of parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad)}") + +torch.random.manual_seed(0) +if args.prompt is None: + input_ids = torch.randint(1, 1000, (args.batch, args.promptlen), dtype=torch.long, device="cuda") + attn_mask = torch.ones_like(input_ids, dtype=torch.long, device="cuda") +else: + tokens = tokenizer(args.prompt, return_tensors="pt") + input_ids = tokens.input_ids.to(device=device) + attn_mask = tokens.attention_mask.to(device=device) +max_length = input_ids.shape[1] + args.genlen + +if is_mamba: + fn = lambda: model.generate( + input_ids=input_ids, + max_length=max_length, + cg=True, + return_dict_in_generate=True, + output_scores=True, + enable_timing=False, + temperature=args.temperature, + top_k=args.topk, + top_p=args.topp, + ) +else: + fn = lambda: model.generate( + input_ids=input_ids, + attention_mask=attn_mask, + max_length=max_length, + return_dict_in_generate=True, + pad_token_id=tokenizer.eos_token_id, + do_sample=True, + temperature=args.temperature, + top_k=args.topk, + top_p=args.topp, + ) +out = fn() +if args.prompt is not None: + print(tokenizer.batch_decode(out.sequences.tolist())) + +torch.cuda.synchronize() +start = time.time() +for _ in range(repeats): + fn() +torch.cuda.synchronize() +print(f"Prompt length: {len(input_ids[0])}, generation length: {len(out.sequences[0]) - len(input_ids[0])}") +print(f"{args.model_name} prompt processing + decoding time: {(time.time() - start) / repeats * 1000:.0f}ms") diff --git 
a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/__init__.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ecd144db5dbec72bcfcdcea28c624a7e2bf053b --- /dev/null +++ b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/__init__.py @@ -0,0 +1,5 @@ +__version__ = "1.0.1" + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn +from mamba_ssm.modules.mamba_simple import Mamba +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/__init__.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/mixer_seq_simple.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/mixer_seq_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..383f773f1f700cd53176e51327a5d8dc58158da0 --- /dev/null +++ b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/models/mixer_seq_simple.py @@ -0,0 +1,233 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. 
+ +import math +from functools import partial + +from collections import namedtuple + +import torch +import torch.nn as nn + +from mamba_ssm.modules.mamba_simple import Mamba, Block +from mamba_ssm.utils.generation import GenerationMixin +from mamba_ssm.utils.hf import load_config_hf, load_state_dict_hf + +try: + from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +def create_block( + d_model, + ssm_cfg=None, + norm_epsilon=1e-5, + rms_norm=False, + residual_in_fp32=False, + fused_add_norm=False, + layer_idx=None, + device=None, + dtype=None, +): + if ssm_cfg is None: + ssm_cfg = {} + factory_kwargs = {"device": device, "dtype": dtype} + mixer_cls = partial(Mamba, layer_idx=layer_idx, **ssm_cfg, **factory_kwargs) + norm_cls = partial( + nn.LayerNorm if not rms_norm else RMSNorm, eps=norm_epsilon, **factory_kwargs + ) + block = Block( + d_model, + mixer_cls, + norm_cls=norm_cls, + fused_add_norm=fused_add_norm, + residual_in_fp32=residual_in_fp32, + ) + block.layer_idx = layer_idx + return block + + +# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454 +def _init_weights( + module, + n_layer, + initializer_range=0.02, # Now only used for embedding layer. + rescale_prenorm_residual=True, + n_residuals_per_layer=1, # Change to 2 if we have MLP +): + if isinstance(module, nn.Linear): + if module.bias is not None: + if not getattr(module.bias, "_no_reinit", False): + nn.init.zeros_(module.bias) + elif isinstance(module, nn.Embedding): + nn.init.normal_(module.weight, std=initializer_range) + + if rescale_prenorm_residual: + # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme: + # > A modified initialization which accounts for the accumulation on the residual path with model depth. 
Scale + # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. + # > -- GPT-2 :: https://openai.com/blog/better-language-models/ + # + # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py + for name, p in module.named_parameters(): + if name in ["out_proj.weight", "fc2.weight"]: + # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block + # Following Pytorch init, except scale by 1/sqrt(2 * n_layer) + # We need to reinit p since this code could be called multiple times + # Having just p *= scale would repeatedly scale it down + nn.init.kaiming_uniform_(p, a=math.sqrt(5)) + with torch.no_grad(): + p /= math.sqrt(n_residuals_per_layer * n_layer) + + +class MixerModel(nn.Module): + def __init__( + self, + d_model: int, + n_layer: int, + vocab_size: int, + ssm_cfg=None, + norm_epsilon: float = 1e-5, + rms_norm: bool = False, + initializer_cfg=None, + fused_add_norm=False, + residual_in_fp32=False, + device=None, + dtype=None, + ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + + self.embedding = nn.Embedding(vocab_size, d_model, **factory_kwargs) + + # We change the order of residual and layer norm: + # Instead of LN -> Attn / MLP -> Add, we do: + # Add -> LN -> Attn / MLP / Mixer, returning both the residual branch (output of Add) and + # the main branch (output of MLP / Mixer). The model definition is unchanged. + # This is for performance reason: we can fuse add + layer_norm. 
+ self.fused_add_norm = fused_add_norm + if self.fused_add_norm: + if layer_norm_fn is None or rms_norm_fn is None: + raise ImportError("Failed to import Triton LayerNorm / RMSNorm kernels") + + self.layers = nn.ModuleList( + [ + create_block( + d_model, + ssm_cfg=ssm_cfg, + norm_epsilon=norm_epsilon, + rms_norm=rms_norm, + residual_in_fp32=residual_in_fp32, + fused_add_norm=fused_add_norm, + layer_idx=i, + **factory_kwargs, + ) + for i in range(n_layer) + ] + ) + + self.norm_f = (nn.LayerNorm if not rms_norm else RMSNorm)( + d_model, eps=norm_epsilon, **factory_kwargs + ) + + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + ) + ) + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return { + i: layer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + for i, layer in enumerate(self.layers) + } + + def forward(self, input_ids, inference_params=None): + hidden_states = self.embedding(input_ids) + residual = None + for layer in self.layers: + hidden_states, residual = layer( + hidden_states, residual, inference_params=inference_params + ) + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm_f(residual.to(dtype=self.norm_f.weight.dtype)) + else: + # Set prenorm=False here since we don't need the residual + fused_add_norm_fn = rms_norm_fn if isinstance(self.norm_f, RMSNorm) else layer_norm_fn + hidden_states = fused_add_norm_fn( + hidden_states, + self.norm_f.weight, + self.norm_f.bias, + eps=self.norm_f.eps, + residual=residual, + prenorm=False, + residual_in_fp32=self.residual_in_fp32, + ) + return hidden_states + + +class MambaLMHeadModel(nn.Module, GenerationMixin): + + def __init__( + self, + d_model: int, + n_layer: int, + vocab_size: int, + initializer_cfg=None, + pad_vocab_size_multiple: int = 1, + device=None, + dtype=None, + **backbone_kwargs, 
+ ) -> None: + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + if vocab_size % pad_vocab_size_multiple != 0: + vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple) + self.backbone = MixerModel( + d_model=d_model, + n_layer=n_layer, + vocab_size=vocab_size, + initializer_cfg=initializer_cfg, + **backbone_kwargs, + **factory_kwargs, + ) + self.lm_head = nn.Linear(d_model, vocab_size, bias=False, **factory_kwargs) + + # Initialize weights and apply final processing + self.apply( + partial( + _init_weights, + n_layer=n_layer, + **(initializer_cfg if initializer_cfg is not None else {}), + ) + ) + self.tie_weights() + + def tie_weights(self): + self.lm_head.weight = self.backbone.embedding.weight + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.backbone.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) + + def forward(self, input_ids, position_ids=None, inference_params=None, num_last_tokens=0): + """ + "position_ids" is just to be compatible with Transformer generation. We don't use it. 
+ num_last_tokens: if > 0, only return the logits for the last n tokens + """ + hidden_states = self.backbone(input_ids, inference_params=inference_params) + if num_last_tokens > 0: + hidden_states = hidden_states[:, -num_last_tokens:] + lm_logits = self.lm_head(hidden_states) + CausalLMOutput = namedtuple("CausalLMOutput", ["logits"]) + return CausalLMOutput(logits=lm_logits) + + @classmethod + def from_pretrained(cls, pretrained_model_name, device=None, dtype=None, **kwargs): + config = load_config_hf(pretrained_model_name) + model = cls(**config, device=device, dtype=dtype, **kwargs) + model.load_state_dict(load_state_dict_hf(pretrained_model_name, device=device, dtype=dtype)) + return model diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/__init__.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/mamba_simple.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/mamba_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffc53d24110bc39651d086f7f3969cf5069f196 --- /dev/null +++ b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/modules/mamba_simple.py @@ -0,0 +1,501 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. 
+ +import math +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None, None + +try: + from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj +except ImportError: + selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj = None, None, None, None + +try: + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +except ImportError: + selective_state_update = None + +try: + from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +class Mamba(nn.Module): + def __init__( + self, + d_model, + d_state=16, + d_conv=4, + expand=2, + dt_rank="auto", + dt_min=0.001, + dt_max=0.1, + dt_init="random", + dt_scale=1.0, + dt_init_floor=1e-4, + conv_bias=True, + bias=False, + use_fast_path=True, # Fused kernel options + layer_idx=None, + device=None, + dtype=None, + bimamba_type="none", + nslices=5 + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + self.d_state = d_state + self.d_conv = d_conv + self.expand = expand + self.d_inner = int(self.expand * self.d_model) + self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank + self.use_fast_path = use_fast_path + self.layer_idx = layer_idx + self.bimamba_type = bimamba_type + self.nslices = nslices + + self.in_proj = nn.Linear(self.d_model, self.d_inner * 2, bias=bias, **factory_kwargs) + + self.conv1d = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + 
self.activation = "silu" + self.act = nn.SiLU() + + self.x_proj = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + # Initialize special dt projection to preserve variance at initialization + dt_init_std = self.dt_rank**-0.5 * dt_scale + if dt_init == "constant": + nn.init.constant_(self.dt_proj.weight, dt_init_std) + elif dt_init == "random": + nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std) + else: + raise NotImplementedError + + # Initialize dt bias so that F.softplus(dt_bias) is between dt_min and dt_max + dt = torch.exp( + torch.rand(self.d_inner, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ).clamp(min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + with torch.no_grad(): + self.dt_proj.bias.copy_(inv_dt) + # Our initialization would set all Linear.bias to zero, need to mark this one as _no_reinit + self.dt_proj.bias._no_reinit = True + + # S4D real initialization + A = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_log = torch.log(A) # Keep A_log in fp32 + self.A_log = nn.Parameter(A_log) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D._no_weight_decay = True + + # bidirectional + assert bimamba_type == "v3" + + A_b = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_b_log = torch.log(A_b) # Keep A_b_log in fp32 + self.A_b_log = nn.Parameter(A_b_log) + self.A_b_log._no_weight_decay = True + + self.conv1d_b = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + 
    def forward(self, hidden_states, inference_params=None):
        """Run the (optionally multi-directional) selective-scan mixer.

        hidden_states: (B, L, D)
        Returns: same shape as hidden_states

        If `inference_params` is given and `seqlen_offset > 0`, a single
        decoding step is performed against the cached conv/SSM states
        (see `step`) instead of a full-sequence scan.
        """
        batch, seqlen, dim = hidden_states.shape

        conv_state, ssm_state = None, None
        if inference_params is not None:
            conv_state, ssm_state = self._get_states_from_cache(inference_params, batch)
            if inference_params.seqlen_offset > 0:
                # The states are updated inplace
                out, _, _ = self.step(hidden_states, conv_state, ssm_state)
                return out

        # We do matmul and transpose BLH -> HBL at the same time
        xz = rearrange(
            self.in_proj.weight @ rearrange(hidden_states, "b l d -> d (b l)"),
            "d (b l) -> b d l",
            l=seqlen,
        )
        if self.in_proj.bias is not None:
            xz = xz + rearrange(self.in_proj.bias.to(dtype=xz.dtype), "d -> d 1")

        # A is stored as log for numerical stability; negate after exp.
        A = -torch.exp(self.A_log.float())  # (d_inner, d_state)
        # In the backward pass we write dx and dz next to each other to avoid torch.cat
        if self.use_fast_path and inference_params is None:  # Doesn't support outputting the states
            if self.bimamba_type == "v3":
                # Three scan orientations whose outputs are summed before the
                # shared out_proj: forward, sequence-flipped (backward), and a
                # slice-interleaved ("spatial") ordering built below.
                A_b = -torch.exp(self.A_b_log.float())
                out = mamba_inner_fn_no_out_proj(
                    xz,
                    self.conv1d.weight,
                    self.conv1d.bias,
                    self.x_proj.weight,
                    self.dt_proj.weight,
                    A,
                    None,  # input-dependent B
                    None,  # input-dependent C
                    self.D.float(),
                    delta_bias=self.dt_proj.bias.float(),
                    delta_softplus=True,
                )
                out_b = mamba_inner_fn_no_out_proj(
                    xz.flip([-1]),
                    self.conv1d_b.weight,
                    self.conv1d_b.bias,
                    self.x_proj_b.weight,
                    self.dt_proj_b.weight,
                    A_b,
                    None,
                    None,
                    self.D_b.float(),
                    delta_bias=self.dt_proj_b.bias.float(),
                    delta_softplus=True,
                )
                A_s = -torch.exp(self.A_s_log.float())

                # Re-order the sequence so that corresponding positions of the
                # nslices chunks become adjacent, scan, then invert the
                # permutation afterwards.
                # NOTE(review): this assumes seqlen % self.nslices == 0 —
                # TODO confirm callers guarantee divisibility.
                xz_s = xz.chunk(self.nslices, dim=-1)
                xz_s = torch.stack(xz_s, dim=-1)
                xz_s = xz_s.flatten(-2)
                out_s = mamba_inner_fn_no_out_proj(
                    xz_s,
                    self.conv1d_s.weight,
                    self.conv1d_s.bias,
                    self.x_proj_s.weight,
                    self.dt_proj_s.weight,
                    A_s,
                    None,
                    None,
                    self.D_s.float(),
                    delta_bias=self.dt_proj_s.bias.float(),
                    delta_softplus=True,
                )
                # Undo the interleaving so out_s lines up with out / out_b.
                out_s = out_s.reshape(batch, self.d_inner, seqlen // self.nslices, self.nslices).permute(0, 1, 3, 2).flatten(-2)

                # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)
                out = F.linear(rearrange(out + out_b.flip([-1]) + out_s, "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias)
            elif self.bimamba_type == "v2":
                # Bidirectional only: forward scan plus flipped scan.
                A_b = -torch.exp(self.A_b_log.float())
                out = mamba_inner_fn_no_out_proj(
                    xz,
                    self.conv1d.weight,
                    self.conv1d.bias,
                    self.x_proj.weight,
                    self.dt_proj.weight,
                    A,
                    None,  # input-dependent B
                    None,  # input-dependent C
                    self.D.float(),
                    delta_bias=self.dt_proj.bias.float(),
                    delta_softplus=True,
                )
                out_b = mamba_inner_fn_no_out_proj(
                    xz.flip([-1]),
                    self.conv1d_b.weight,
                    self.conv1d_b.bias,
                    self.x_proj_b.weight,
                    self.dt_proj_b.weight,
                    A_b,
                    None,
                    None,
                    self.D_b.float(),
                    delta_bias=self.dt_proj_b.bias.float(),
                    delta_softplus=True,
                )
                # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)
                out = F.linear(rearrange(out + out_b.flip([-1]), "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias)
            else:
                # Plain unidirectional Mamba, fused including out_proj.
                out = mamba_inner_fn(
                    xz,
                    self.conv1d.weight,
                    self.conv1d.bias,
                    self.x_proj.weight,
                    self.dt_proj.weight,
                    self.out_proj.weight,
                    self.out_proj.bias,
                    A,
                    None,  # input-dependent B
                    None,  # input-dependent C
                    self.D.float(),
                    delta_bias=self.dt_proj.bias.float(),
                    delta_softplus=True,
                )
        else:
            # Slow (non-fused) path: explicit conv -> projections -> scan.
            # Only the forward direction is supported here.
            x, z = xz.chunk(2, dim=1)
            # Compute short convolution
            if conv_state is not None:
                conv_state.copy_(x[:, :, -self.d_conv :])  # Update state (B D W)
            if causal_conv1d_fn is None:
                x = self.act(self.conv1d(x)[..., :seqlen])
            else:
                assert self.activation in ["silu", "swish"]
                x = causal_conv1d_fn(
                    x,
                    rearrange(self.conv1d.weight, "d 1 w -> d w"),
                    self.conv1d.bias,
                    self.activation,
                )

            # We're careful here about the layout, to avoid extra transposes.
            # We want dt to have d as the slowest moving dimension
            # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
            x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d"))  # (bl d)
            dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1)
            dt = self.dt_proj.weight @ dt.t()
            dt = rearrange(dt, "d (b l) -> b d l", l=seqlen)
            B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
            C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous()
            assert self.activation in ["silu", "swish"]
            y = selective_scan_fn(
                x,
                dt,
                A,
                B,
                C,
                self.D.float(),
                z=z,
                delta_bias=self.dt_proj.bias.float(),
                delta_softplus=True,
                return_last_state=ssm_state is not None,
            )
            if ssm_state is not None:
                y, last_state = y
                ssm_state.copy_(last_state)
            y = rearrange(y, "b d l -> b l d")
            out = self.out_proj(y)
        return out
    def step(self, hidden_states, conv_state, ssm_state):
        """Single-token decoding step; updates conv_state/ssm_state in place.

        hidden_states: (B, 1, D) — exactly one new token.
        Returns: (out (B, 1, D), conv_state, ssm_state).

        NOTE(review): only the forward-direction parameters (conv1d, x_proj,
        dt_proj, A_log, D) are used here — the b/s branches of the v2/v3
        multi-directional scan are not applied during decoding; confirm this
        is intended for bimamba models.
        """
        dtype = hidden_states.dtype
        assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now"
        xz = self.in_proj(hidden_states.squeeze(1))  # (B 2D)
        x, z = xz.chunk(2, dim=-1)  # (B D)

        # Conv step: shift the rolling window left and append the new token.
        if causal_conv1d_update is None:
            conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1))  # Update state (B D W)
            conv_state[:, :, -1] = x
            x = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1)  # (B D)
            if self.conv1d.bias is not None:
                x = x + self.conv1d.bias
            x = self.act(x).to(dtype=dtype)
        else:
            # Fused CUDA kernel does the roll + conv + activation in one pass.
            x = causal_conv1d_update(
                x,
                conv_state,
                rearrange(self.conv1d.weight, "d 1 w -> d w"),
                self.conv1d.bias,
                self.activation,
            )

        x_db = self.x_proj(x)  # (B dt_rank+2*d_state)
        dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1)
        # Don't add dt_bias here — it is applied (with softplus) below or
        # inside selective_state_update.
        dt = F.linear(dt, self.dt_proj.weight)  # (B d_inner)
        A = -torch.exp(self.A_log.float())  # (d_inner, d_state)

        # SSM step: one recurrence update of the hidden state.
        if selective_state_update is None:
            # Discretize A and B (zero-order hold style: dA = exp(dt*A)).
            dt = F.softplus(dt + self.dt_proj.bias.to(dtype=dt.dtype))
            dA = torch.exp(torch.einsum("bd,dn->bdn", dt, A))
            dB = torch.einsum("bd,bn->bdn", dt, B)
            ssm_state.copy_(ssm_state * dA + rearrange(x, "b d -> b d 1") * dB)
            y = torch.einsum("bdn,bn->bd", ssm_state.to(dtype), C)
            y = y + self.D.to(dtype) * x  # skip connection scaled by D
            y = y * self.act(z)  # (B D) — gate with silu(z)
        else:
            y = selective_state_update(
                ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True
            )

        out = self.out_proj(y)
        return out.unsqueeze(1), conv_state, ssm_state

    def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
        """Allocate zeroed (conv_state, ssm_state) buffers for decoding.

        conv_state: (batch, d_inner, d_conv); ssm_state: (batch, d_inner, d_state).
        dtype defaults to the corresponding weights' dtype.
        """
        device = self.out_proj.weight.device
        conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype
        conv_state = torch.zeros(
            batch_size, self.d_model * self.expand, self.d_conv, device=device, dtype=conv_dtype
        )
        ssm_dtype = self.dt_proj.weight.dtype if dtype is None else dtype
        # ssm_dtype = torch.float32
        ssm_state = torch.zeros(
            batch_size, self.d_model * self.expand, self.d_state, device=device, dtype=ssm_dtype
        )
        return conv_state, ssm_state

    def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False):
        """Fetch (or lazily create) this layer's cached decoding states.

        States are keyed by `self.layer_idx` in
        `inference_params.key_value_memory_dict`; `initialize_states=True`
        zeroes existing states instead of reusing them.
        """
        assert self.layer_idx is not None
        if self.layer_idx not in inference_params.key_value_memory_dict:
            batch_shape = (batch_size,)
            conv_state = torch.zeros(
                batch_size,
                self.d_model * self.expand,
                self.d_conv,
                device=self.conv1d.weight.device,
                dtype=self.conv1d.weight.dtype,
            )
            ssm_state = torch.zeros(
                batch_size,
                self.d_model * self.expand,
                self.d_state,
                device=self.dt_proj.weight.device,
                dtype=self.dt_proj.weight.dtype,
                # dtype=torch.float32,
            )
            inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state)
        else:
            conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx]
            # TODO: What if batch size changes between generation, and we reuse the same states?
            if initialize_states:
                conv_state.zero_()
                ssm_state.zero_()
        return conv_state, ssm_state
class Block(nn.Module):
    def __init__(
        self, dim, mixer_cls, norm_cls=nn.LayerNorm, fused_add_norm=False, residual_in_fp32=False
    ):
        """
        Simple block wrapping a mixer class with LayerNorm/RMSNorm and residual connection.

        This Block has a slightly different structure compared to a regular
        prenorm Transformer block.
        The standard block is: LN -> MHA/MLP -> Add.
        [Ref: https://arxiv.org/abs/2002.04745]
        Here we have: Add -> LN -> Mixer, returning both
        the hidden_states (output of the mixer) and the residual.
        This is purely for performance reasons, as we can fuse add and LayerNorm.
        The residual needs to be provided (except for the very first block).
        """
        super().__init__()
        self.residual_in_fp32 = residual_in_fp32
        self.fused_add_norm = fused_add_norm
        self.mixer = mixer_cls(dim)
        self.norm = norm_cls(dim)
        if self.fused_add_norm:
            # The fused kernel only implements these two norm types.
            assert RMSNorm is not None, "RMSNorm import fails"
            assert isinstance(
                self.norm, (nn.LayerNorm, RMSNorm)
            ), "Only LayerNorm and RMSNorm are supported for fused_add_norm"

    def forward(
        self, hidden_states: Tensor, residual: Optional[Tensor] = None, inference_params=None
    ):
        r"""Pass the input through the encoder layer.

        Args:
            hidden_states: the sequence to the encoder layer (required).
            residual: hidden_states = Mixer(LN(residual))

        Returns:
            (hidden_states, residual): mixer output and the pre-norm residual
            to be threaded into the next Block.
        """
        if not self.fused_add_norm:
            # Unfused path: explicit add, then norm in the norm weight's dtype.
            residual = (hidden_states + residual) if residual is not None else hidden_states
            hidden_states = self.norm(residual.to(dtype=self.norm.weight.dtype))
            if self.residual_in_fp32:
                residual = residual.to(torch.float32)
        else:
            # Fused add+norm kernel; prenorm=True makes it also return the
            # updated residual.
            fused_add_norm_fn = rms_norm_fn if isinstance(self.norm, RMSNorm) else layer_norm_fn
            hidden_states, residual = fused_add_norm_fn(
                hidden_states,
                self.norm.weight,
                self.norm.bias,
                residual=residual,
                prenorm=True,
                residual_in_fp32=self.residual_in_fp32,
                eps=self.norm.eps,
            )
        hidden_states = self.mixer(hidden_states, inference_params=inference_params)
        return hidden_states, residual

    def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
        """Delegate decoding-cache allocation to the wrapped mixer."""
        return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)
class SelectiveScanFn(torch.autograd.Function):
    """Autograd wrapper around the selective_scan_cuda fwd/bwd kernels.

    Accepts B/C as either (b, dstate, l) (a group dim of 1 is inserted) or
    already-grouped 4-D tensors; the inserted dim is squeezed back out of the
    gradients in backward.
    """

    @staticmethod
    def forward(ctx, u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False,
                return_last_state=False):
        # The CUDA kernel requires the last dim to be contiguous.
        if u.stride(-1) != 1:
            u = u.contiguous()
        if delta.stride(-1) != 1:
            delta = delta.contiguous()
        if D is not None:
            D = D.contiguous()
        if B.stride(-1) != 1:
            B = B.contiguous()
        if C.stride(-1) != 1:
            C = C.contiguous()
        if z is not None and z.stride(-1) != 1:
            z = z.contiguous()
        if B.dim() == 3:
            B = rearrange(B, "b dstate l -> b 1 dstate l")
            ctx.squeeze_B = True
        if C.dim() == 3:
            C = rearrange(C, "b dstate l -> b 1 dstate l")
            ctx.squeeze_C = True
        out, x, *rest = selective_scan_cuda.fwd(u, delta, A, B, C, D, z, delta_bias, delta_softplus)
        ctx.delta_softplus = delta_softplus
        ctx.has_z = z is not None
        # x packs scan intermediates; the final states live at the odd slots
        # of the last checkpoint column.
        last_state = x[:, :, -1, 1::2]  # (batch, dim, dstate)
        if not ctx.has_z:
            ctx.save_for_backward(u, delta, A, B, C, D, delta_bias, x)
            return out if not return_last_state else (out, last_state)
        else:
            # With a gate z the kernel also returns the gated output out_z.
            ctx.save_for_backward(u, delta, A, B, C, D, z, delta_bias, x, out)
            out_z = rest[0]
            return out_z if not return_last_state else (out_z, last_state)

    @staticmethod
    def backward(ctx, dout, *args):
        if not ctx.has_z:
            u, delta, A, B, C, D, delta_bias, x = ctx.saved_tensors
            z = None
            out = None
        else:
            u, delta, A, B, C, D, z, delta_bias, x, out = ctx.saved_tensors
        if dout.stride(-1) != 1:
            dout = dout.contiguous()
        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
        # backward of selective_scan_cuda with the backward of chunk).
        # Here we just pass in None and dz will be allocated in the C++ code.
        du, ddelta, dA, dB, dC, dD, ddelta_bias, *rest = selective_scan_cuda.bwd(
            u, delta, A, B, C, D, z, delta_bias, dout, x, out, None, ctx.delta_softplus,
            False  # option to recompute out_z, not used here
        )
        dz = rest[0] if ctx.has_z else None
        # Undo the group dim inserted in forward.
        dB = dB.squeeze(1) if getattr(ctx, "squeeze_B", False) else dB
        dC = dC.squeeze(1) if getattr(ctx, "squeeze_C", False) else dC
        return (du, ddelta, dA, dB, dC,
                dD if D is not None else None,
                dz,
                ddelta_bias if delta_bias is not None else None,
                None,
                None)


def selective_scan_fn(u, delta, A, B, C, D=None, z=None, delta_bias=None, delta_softplus=False,
                      return_last_state=False):
    """if return_last_state is True, returns (out, last_state)
    last_state has shape (batch, dim, dstate). Note that the gradient of the last state is
    not considered in the backward pass.
    """
    return SelectiveScanFn.apply(u, delta, A, B, C, D, z, delta_bias, delta_softplus, return_last_state)
L two", two=2)) + else: + B = B.float() + C = C.float() + x = A.new_zeros((batch, dim, dstate)) + ys = [] + deltaA = torch.exp(torch.einsum('bdl,dn->bdln', delta, A)) + if not is_variable_B: + deltaB_u = torch.einsum('bdl,dn,bdl->bdln', delta, B, u) + else: + if B.dim() == 3: + deltaB_u = torch.einsum('bdl,bnl,bdl->bdln', delta, B, u) + else: + B = repeat(B, "B G N L -> B (G H) N L", H=dim // B.shape[1]) + deltaB_u = torch.einsum('bdl,bdnl,bdl->bdln', delta, B, u) + if is_variable_C and C.dim() == 4: + C = repeat(C, "B G N L -> B (G H) N L", H=dim // C.shape[1]) + last_state = None + for i in range(u.shape[2]): + x = deltaA[:, :, i] * x + deltaB_u[:, :, i] + if not is_variable_C: + y = torch.einsum('bdn,dn->bd', x, C) + else: + if C.dim() == 3: + y = torch.einsum('bdn,bn->bd', x, C[:, :, i]) + else: + y = torch.einsum('bdn,bdn->bd', x, C[:, :, :, i]) + if i == u.shape[2] - 1: + last_state = x + if y.is_complex(): + y = y.real * 2 + ys.append(y) + y = torch.stack(ys, dim=2) # (batch dim L) + out = y if D is None else y + u * rearrange(D, "d -> d 1") + if z is not None: + out = out * F.silu(z) + out = out.to(dtype=dtype_in) + return out if not return_last_state else (out, last_state) + + +class MambaInnerFnNoOutProj(torch.autograd.Function): + + @staticmethod + @custom_fwd + def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1): + """ + xz: (batch, dim, seqlen) + """ + assert checkpoint_lvl in [0, 1] + L = xz.shape[-1] + delta_rank = delta_proj_weight.shape[1] + d_state = A.shape[-1] * (1 if not A.is_complex() else 2) + if torch.is_autocast_enabled(): + x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype()) + if xz.stride(-1) != 1: + xz = xz.contiguous() + conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w") + x, z = 
class MambaInnerFnNoOutProj(torch.autograd.Function):
    """Fused Mamba inner pass (conv1d -> projections -> selective scan),
    stopping before the output projection; returns the gated scan output
    out_z with shape (batch, dim, seqlen).
    """

    @staticmethod
    @custom_fwd
    def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
                A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
                C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1):
        """
        xz: (batch, dim, seqlen)

        checkpoint_lvl=1 drops conv1d_out/delta after forward and recomputes
        them in backward (memory for compute trade-off).
        B/C left as None are derived from x_proj ("input-dependent" / variable).
        """
        assert checkpoint_lvl in [0, 1]
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        # Complex A packs (re, im), so the projected width is doubled.
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        if torch.is_autocast_enabled():
            x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
        if xz.stride(-1) != 1:
            xz = xz.contiguous()
        conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w")
        x, z = xz.chunk(2, dim=1)
        conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None
        conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
        # We're being very careful here about the layout, to avoid extra transposes.
        # We want delta to have d as the slowest moving dimension
        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
        x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
        delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L)
        ctx.is_variable_B = B is None
        ctx.is_variable_C = C is None
        ctx.B_proj_bias_is_None = B_proj_bias is None
        ctx.C_proj_bias_is_None = C_proj_bias is None
        if B is None:  # variable B
            B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl dstate)
            if B_proj_bias is not None:
                B = B + B_proj_bias.to(dtype=B.dtype)
            if not A.is_complex():
                # B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
                B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if B.stride(-1) != 1:
                B = B.contiguous()
        if C is None:  # variable C
            C = x_dbl[:, -d_state:]  # (bl dstate)
            if C_proj_bias is not None:
                C = C + C_proj_bias.to(dtype=C.dtype)
            if not A.is_complex():
                # C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
                C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if C.stride(-1) != 1:
                C = C.contiguous()
        if D is not None:
            D = D.contiguous()
        out, scan_intermediates, out_z = selective_scan_cuda.fwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus
        )
        ctx.delta_softplus = delta_softplus
        ctx.checkpoint_lvl = checkpoint_lvl
        if checkpoint_lvl >= 1:  # Will recompute conv1d_out and delta in the backward pass
            conv1d_out, delta = None, None
        ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight,
                              delta_proj_weight, conv1d_out, delta,
                              A, B, C, D, delta_bias, scan_intermediates, out)
        # return rearrange(out_z, "b d l -> b l d")
        return out_z

    @staticmethod
    @custom_bwd
    def backward(ctx, dout):
        # dout: (batch, dim, seqlen) — same layout as forward's out_z.
        (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight,
         conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        x, z = xz.chunk(2, dim=1)
        if dout.stride(-1) != 1:
            dout = dout.contiguous()
        if ctx.checkpoint_lvl == 1:
            # Recompute the activations dropped after forward.
            conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
            delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(),
                              "d (b l) -> b d l", l = L)
        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
        # backward of selective_scan_cuda with the backward of chunk).
        dxz = torch.empty_like(xz)  # (batch, dim, seqlen)
        dx, dz = dxz.chunk(2, dim=1)
        # dout_y = rearrange(dout, "b l d -> b d l") # because no arrange at end of forward, so dout shape is b d l
        dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, dout, scan_intermediates, out, dz,
            ctx.delta_softplus,
            True  # option to recompute out_z
        )
        dD = dD if D is not None else None
        dx_dbl = torch.empty_like(x_dbl)
        dB_proj_bias = None
        if ctx.is_variable_B:
            if not A.is_complex():
                dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None
            # Scatter dB into the x_proj gradient slot it was sliced from.
            dx_dbl[:, delta_rank:delta_rank + d_state] = dB  # (bl d)
            dB = None
        dC_proj_bias = None
        if ctx.is_variable_C:
            if not A.is_complex():
                dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None
            dx_dbl[:, -d_state:] = dC  # (bl d)
            dC = None
        ddelta = rearrange(ddelta, "b d l -> d (b l)")
        ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank])
        dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight)
        dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)")
        dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d"))
        # Accumulate the x_proj branch's gradient into dconv1d_out in place.
        dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out)
        dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1])
        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
        # backward of conv1d with the backward of chunk).
        dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd(
            x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True
        )
        dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None
        dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w")
        return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight,
                dA, dB, dC, dD,
                ddelta_bias if delta_bias is not None else None,
                dB_proj_bias, dC_proj_bias, None)
class MambaInnerFn(torch.autograd.Function):
    """Fused Mamba inner pass (conv1d -> projections -> selective scan ->
    output projection); returns (batch, seqlen, d_model).
    """

    @staticmethod
    @custom_fwd
    def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
                out_proj_weight, out_proj_bias,
                A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
                C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1):
        """
        xz: (batch, dim, seqlen)

        checkpoint_lvl=1 drops conv1d_out/delta after forward and recomputes
        them in backward. B/C left as None are derived from x_proj.
        """
        assert checkpoint_lvl in [0, 1]
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        # Complex A packs (re, im), so the projected width is doubled.
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        if torch.is_autocast_enabled():
            x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype())
                             if out_proj_bias is not None else None)
        if xz.stride(-1) != 1:
            xz = xz.contiguous()
        conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w")
        x, z = xz.chunk(2, dim=1)
        conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None
        conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
        # We're being very careful here about the layout, to avoid extra transposes.
        # We want delta to have d as the slowest moving dimension
        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
        x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
        delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L)
        ctx.is_variable_B = B is None
        ctx.is_variable_C = C is None
        ctx.B_proj_bias_is_None = B_proj_bias is None
        ctx.C_proj_bias_is_None = C_proj_bias is None
        if B is None:  # variable B
            B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl dstate)
            if B_proj_bias is not None:
                B = B + B_proj_bias.to(dtype=B.dtype)
            if not A.is_complex():
                B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if B.stride(-1) != 1:
                B = B.contiguous()
        if C is None:  # variable C
            C = x_dbl[:, -d_state:]  # (bl dstate)
            if C_proj_bias is not None:
                C = C + C_proj_bias.to(dtype=C.dtype)
            if not A.is_complex():
                C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if C.stride(-1) != 1:
                C = C.contiguous()
        if D is not None:
            D = D.contiguous()
        out, scan_intermediates, out_z = selective_scan_cuda.fwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus
        )
        ctx.delta_softplus = delta_softplus
        ctx.out_proj_bias_is_None = out_proj_bias is None
        ctx.checkpoint_lvl = checkpoint_lvl
        if checkpoint_lvl >= 1:  # Will recompute conv1d_out and delta in the backward pass
            conv1d_out, delta = None, None
        ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight,
                              delta_proj_weight, out_proj_weight, conv1d_out, delta,
                              A, B, C, D, delta_bias, scan_intermediates, out)
        return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)

    @staticmethod
    @custom_bwd
    def backward(ctx, dout):
        # dout: (batch, seqlen, dim)
        (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight,
         conv1d_out, delta, A, B, C, D, delta_bias, scan_intermediates, out) = ctx.saved_tensors
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        x, z = xz.chunk(2, dim=1)
        if dout.stride(-1) != 1:
            dout = dout.contiguous()
        if ctx.checkpoint_lvl == 1:
            # Recompute the activations dropped after forward.
            conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
            delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(),
                              "d (b l) -> b d l", l = L)
        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
        # backward of selective_scan_cuda with the backward of chunk).
        dxz = torch.empty_like(xz)  # (batch, dim, seqlen)
        dx, dz = dxz.chunk(2, dim=1)
        dout = rearrange(dout, "b l e -> e (b l)")
        dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L)
        dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z = selective_scan_cuda.bwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates, out, dz,
            ctx.delta_softplus,
            True  # option to recompute out_z
        )
        dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)"))
        # BUG FIX: dout here is already reshaped to (e, b*l); the previous
        # sum(dim=(0, 1)) collapsed it to a scalar, but the out_proj bias
        # gradient must have shape (e,). Sum only the flattened batch*length
        # dimension. (Latent upstream: out_proj defaults to bias=False.)
        dout_proj_bias = dout.sum(dim=1) if not ctx.out_proj_bias_is_None else None
        dD = dD if D is not None else None
        dx_dbl = torch.empty_like(x_dbl)
        dB_proj_bias = None
        if ctx.is_variable_B:
            if not A.is_complex():
                dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None
            # Scatter dB into the x_proj gradient slot it was sliced from.
            dx_dbl[:, delta_rank:delta_rank + d_state] = dB  # (bl d)
            dB = None
        dC_proj_bias = None
        if ctx.is_variable_C:
            if not A.is_complex():
                dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None
            dx_dbl[:, -d_state:] = dC  # (bl d)
            dC = None
        ddelta = rearrange(ddelta, "b d l -> d (b l)")
        ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank])
        dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight)
        dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)")
        dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d"))
        # Accumulate the x_proj branch's gradient into dconv1d_out in place.
        dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out)
        dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1])
        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
        # backward of conv1d with the backward of chunk).
        dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd(
            x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True
        )
        dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None
        dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w")
        return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight,
                dout_proj_weight, dout_proj_bias,
                dA, dB, dC, dD,
                ddelta_bias if delta_bias is not None else None,
                dB_proj_bias, dC_proj_bias, None)
class BiMambaInnerFn(torch.autograd.Function):
    """Bidirectional fused Mamba inner pass: a forward scan plus a
    sequence-flipped scan (with its own A_b), summed before the shared
    output projection. Returns (batch, seqlen, d_model).
    """

    @staticmethod
    @custom_fwd
    def forward(ctx, xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
                out_proj_weight, out_proj_bias,
                A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
                C_proj_bias=None, delta_softplus=True, checkpoint_lvl=1):
        """
        xz: (batch, dim, seqlen)

        checkpoint_lvl=1 drops conv1d_out/delta after forward and recomputes
        them in backward. B/C left as None are derived from x_proj.
        """
        assert checkpoint_lvl in [0, 1]
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        # Complex A packs (re, im), so the projected width is doubled.
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        if torch.is_autocast_enabled():
            x_proj_weight = x_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            delta_proj_weight = delta_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            out_proj_weight = out_proj_weight.to(dtype=torch.get_autocast_gpu_dtype())
            out_proj_bias = (out_proj_bias.to(dtype=torch.get_autocast_gpu_dtype())
                             if out_proj_bias is not None else None)
        if xz.stride(-1) != 1:
            xz = xz.contiguous()
        conv1d_weight = rearrange(conv1d_weight, "d 1 w -> d w")
        x, z = xz.chunk(2, dim=1)
        conv1d_bias = conv1d_bias.contiguous() if conv1d_bias is not None else None
        conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
        # We're being very careful here about the layout, to avoid extra transposes.
        # We want delta to have d as the slowest moving dimension
        # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
        x_dbl = F.linear(rearrange(conv1d_out, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
        delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(), "d (b l) -> b d l", l = L)
        ctx.is_variable_B = B is None
        ctx.is_variable_C = C is None
        ctx.B_proj_bias_is_None = B_proj_bias is None
        ctx.C_proj_bias_is_None = C_proj_bias is None
        if B is None:  # variable B
            B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl dstate)
            if B_proj_bias is not None:
                B = B + B_proj_bias.to(dtype=B.dtype)
            if not A.is_complex():
                B = rearrange(B, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                B = rearrange(B, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if B.stride(-1) != 1:
                B = B.contiguous()
        if C is None:  # variable C
            C = x_dbl[:, -d_state:]  # (bl dstate)
            if C_proj_bias is not None:
                C = C + C_proj_bias.to(dtype=C.dtype)
            if not A.is_complex():
                C = rearrange(C, "(b l) dstate -> b 1 dstate l", l=L).contiguous()
            else:
                C = rearrange(C, "(b l) (dstate two) -> b 1 dstate (l two)", l=L, two=2).contiguous()
        else:
            if C.stride(-1) != 1:
                C = C.contiguous()
        if D is not None:
            D = D.contiguous()
        out_f, scan_intermediates_f, out_z_f = selective_scan_cuda.fwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, delta_softplus
        )
        assert not A_b.is_complex(), "A should not be complex!!"
        # Backward-direction scan: every sequence-indexed tensor is flipped.
        out_b, scan_intermediates_b, out_z_b = selective_scan_cuda.fwd(
            conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus,
        )

        out_z = out_z_f + out_z_b.flip([-1])

        ctx.delta_softplus = delta_softplus
        ctx.out_proj_bias_is_None = out_proj_bias is None
        ctx.checkpoint_lvl = checkpoint_lvl
        if checkpoint_lvl >= 1:  # Will recompute conv1d_out and delta in the backward pass
            conv1d_out, delta = None, None
        ctx.save_for_backward(xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight,
                              delta_proj_weight, out_proj_weight, conv1d_out, delta,
                              A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b)
        return F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias)

    @staticmethod
    @custom_bwd
    def backward(ctx, dout):
        # dout: (batch, seqlen, dim)
        (xz, conv1d_weight, conv1d_bias, x_dbl, x_proj_weight, delta_proj_weight, out_proj_weight,
         conv1d_out, delta, A, A_b, B, C, D, delta_bias, scan_intermediates_f, scan_intermediates_b, out_f, out_b) = ctx.saved_tensors
        L = xz.shape[-1]
        delta_rank = delta_proj_weight.shape[1]
        d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
        x, z = xz.chunk(2, dim=1)
        if dout.stride(-1) != 1:
            dout = dout.contiguous()
        if ctx.checkpoint_lvl == 1:
            # Recompute the activations dropped after forward.
            conv1d_out = causal_conv1d_cuda.causal_conv1d_fwd(x, conv1d_weight, conv1d_bias, True)
            delta = rearrange(delta_proj_weight @ x_dbl[:, :delta_rank].t(),
                              "d (b l) -> b d l", l = L)
        # The kernel supports passing in a pre-allocated dz (e.g., in case we want to fuse the
        # backward of selective_scan_cuda with the backward of chunk).
        dxz = torch.empty_like(xz)  # (batch, dim, seqlen)
        dx, dz = dxz.chunk(2, dim=1)
        dout = rearrange(dout, "b l e -> e (b l)")
        dout_y = rearrange(out_proj_weight.t() @ dout, "d (b l) -> b d l", l=L)
        dconv1d_out, ddelta, dA, dB, dC, dD, ddelta_bias, dz, out_z_f = selective_scan_cuda.bwd(
            conv1d_out, delta, A, B, C, D, z, delta_bias, dout_y, scan_intermediates_f, out_f, dz,
            ctx.delta_softplus,
            True  # option to recompute out_z
        )
        # flip one: backward-direction gradients, then un-flip and accumulate.
        dz_b = torch.empty_like(dz)
        dconv1d_out_f_b, ddelta_f_b, dA_b, dB_f_b, dC_f_b, dD_b, ddelta_bias_b, dz_b, out_z_b = selective_scan_cuda.bwd(
            conv1d_out.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, dout_y.flip([-1]), scan_intermediates_b, out_b, dz_b,
            ctx.delta_softplus,
            True  # option to recompute out_z
        )

        dconv1d_out = dconv1d_out + dconv1d_out_f_b.flip([-1])
        ddelta = ddelta + ddelta_f_b.flip([-1])
        dB = dB + dB_f_b.flip([-1])
        dC = dC + dC_f_b.flip([-1])
        dD = dD + dD_b
        ddelta_bias = ddelta_bias + ddelta_bias_b
        dz = dz + dz_b.flip([-1])
        out_z = out_z_f + out_z_b.flip([-1])

        dout_proj_weight = torch.einsum("eB,dB->ed", dout, rearrange(out_z, "b d l -> d (b l)"))
        # BUG FIX: dout here is already reshaped to (e, b*l); the previous
        # sum(dim=(0, 1)) collapsed it to a scalar, but the out_proj bias
        # gradient must have shape (e,). Sum only the flattened batch*length
        # dimension. (Latent upstream: out_proj defaults to bias=False.)
        dout_proj_bias = dout.sum(dim=1) if not ctx.out_proj_bias_is_None else None
        dD = dD if D is not None else None
        dx_dbl = torch.empty_like(x_dbl)
        dB_proj_bias = None
        if ctx.is_variable_B:
            if not A.is_complex():
                dB = rearrange(dB, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dB = rearrange(dB, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dB_proj_bias = dB.sum(0) if not ctx.B_proj_bias_is_None else None
            # Scatter dB into the x_proj gradient slot it was sliced from.
            dx_dbl[:, delta_rank:delta_rank + d_state] = dB  # (bl d)
            dB = None
        dC_proj_bias = None
        if ctx.is_variable_C:
            if not A.is_complex():
                dC = rearrange(dC, "b 1 dstate l -> (b l) dstate").contiguous()
            else:
                dC = rearrange(dC, "b 1 dstate (l two) -> (b l) (dstate two)", two=2).contiguous()
            dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None
            dx_dbl[:, -d_state:] = dC  # (bl d)
            dC = None
        ddelta = rearrange(ddelta, "b d l -> d (b l)")
        ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank])
        dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight)
        dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)")
        dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d"))
        # Accumulate the x_proj branch's gradient into dconv1d_out in place.
        dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out)
        dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1])
        # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the
        # backward of conv1d with the backward of chunk).
        dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd(
            x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True
        )
        dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None
        dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w")
        return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight,
                dout_proj_weight, dout_proj_bias,
                dA, dA_b, dB, dC, dD,
                ddelta_bias if delta_bias is not None else None,
                dB_proj_bias, dC_proj_bias, None)
two=2).contiguous() + dC_proj_bias = dC.sum(0) if not ctx.C_proj_bias_is_None else None + dx_dbl[:, -d_state:] = dC # (bl d) + dC = None + ddelta = rearrange(ddelta, "b d l -> d (b l)") + ddelta_proj_weight = torch.einsum("dB,Br->dr", ddelta, x_dbl[:, :delta_rank]) + dx_dbl[:, :delta_rank] = torch.einsum("dB,dr->Br", ddelta, delta_proj_weight) + dconv1d_out = rearrange(dconv1d_out, "b d l -> d (b l)") + dx_proj_weight = torch.einsum("Br,Bd->rd", dx_dbl, rearrange(conv1d_out, "b d l -> (b l) d")) + dconv1d_out = torch.addmm(dconv1d_out, x_proj_weight.t(), dx_dbl.t(), out=dconv1d_out) + dconv1d_out = rearrange(dconv1d_out, "d (b l) -> b d l", b=x.shape[0], l=x.shape[-1]) + # The kernel supports passing in a pre-allocated dx (e.g., in case we want to fuse the + # backward of conv1d with the backward of chunk). + dx, dconv1d_weight, dconv1d_bias = causal_conv1d_cuda.causal_conv1d_bwd( + x, conv1d_weight, conv1d_bias, dconv1d_out, dx, True + ) + dconv1d_bias = dconv1d_bias if conv1d_bias is not None else None + dconv1d_weight = rearrange(dconv1d_weight, "d w -> d 1 w") + return (dxz, dconv1d_weight, dconv1d_bias, dx_proj_weight, ddelta_proj_weight, + dout_proj_weight, dout_proj_bias, + dA, dA_b, dB, dC, dD, + ddelta_bias if delta_bias is not None else None, + dB_proj_bias, dC_proj_bias, None) + + +def mamba_inner_fn( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return MambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus) + +def bimamba_inner_fn( + xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight, + out_proj_weight, out_proj_bias, + A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None, + C_proj_bias=None, delta_softplus=True +): + return 
BiMambaInnerFn.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+                                out_proj_weight, out_proj_bias,
+                                A, A_b, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus)
+
+
+def mamba_inner_fn_no_out_proj(
+    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+    A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+    C_proj_bias=None, delta_softplus=True
+):
+    # Same as mamba_inner_fn but without the final output projection
+    # (no out_proj_weight / out_proj_bias arguments).
+    return MambaInnerFnNoOutProj.apply(xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+                                       A, B, C, D, delta_bias, B_proj_bias, C_proj_bias, delta_softplus)
+
+
+def mamba_inner_ref(
+    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+    out_proj_weight, out_proj_bias,
+    A, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+    C_proj_bias=None, delta_softplus=True
+):
+    # Unfused reference implementation of MambaInnerFn (conv1d + projections + selective scan).
+    L = xz.shape[-1]
+    delta_rank = delta_proj_weight.shape[1]
+    d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+    x, z = xz.chunk(2, dim=1)
+    x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu")
+    # We're being very careful here about the layout, to avoid extra transposes.
+    # We want delta to have d as the slowest moving dimension
+    # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
+    x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
+    delta = delta_proj_weight @ x_dbl[:, :delta_rank].t()
+    delta = rearrange(delta, "d (b l) -> b d l", l=L)
+    if B is None:  # variable B
+        B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl d)
+        if B_proj_bias is not None:
+            B = B + B_proj_bias.to(dtype=B.dtype)
+        if not A.is_complex():
+            B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
+        else:
+            B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
+    if C is None:  # variable C
+        C = x_dbl[:, -d_state:]  # (bl d)
+        if C_proj_bias is not None:
+            C = C + C_proj_bias.to(dtype=C.dtype)
+        if not A.is_complex():
+            C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
+        else:
+            C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
+    # NOTE(review): the delta_softplus parameter is ignored here; True is hard-coded
+    # below (matches the fused path's default) — confirm intent.
+    y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True)
+    return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias)
+
+
+def bimamba_inner_ref(
+    xz, conv1d_weight, conv1d_bias, x_proj_weight, delta_proj_weight,
+    out_proj_weight, out_proj_bias,
+    A, A_b, B=None, C=None, D=None, delta_bias=None, B_proj_bias=None,
+    C_proj_bias=None, delta_softplus=True
+):
+    # Unfused reference implementation of BiMambaInnerFn (forward + reverse scans).
+    L = xz.shape[-1]
+    delta_rank = delta_proj_weight.shape[1]
+    d_state = A.shape[-1] * (1 if not A.is_complex() else 2)
+    x, z = xz.chunk(2, dim=1)
+    x = causal_conv1d_fn(x, rearrange(conv1d_weight, "d 1 w -> d w"), conv1d_bias, "silu")
+    # We're being very careful here about the layout, to avoid extra transposes.
+    # We want delta to have d as the slowest moving dimension
+    # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects.
+    x_dbl = F.linear(rearrange(x, 'b d l -> (b l) d'), x_proj_weight)  # (bl d)
+    delta = delta_proj_weight @ x_dbl[:, :delta_rank].t()
+    delta = rearrange(delta, "d (b l) -> b d l", l=L)
+    if B is None:  # variable B
+        B = x_dbl[:, delta_rank:delta_rank + d_state]  # (bl d)
+        if B_proj_bias is not None:
+            B = B + B_proj_bias.to(dtype=B.dtype)
+        if not A.is_complex():
+            B = rearrange(B, "(b l) dstate -> b dstate l", l=L).contiguous()
+        else:
+            B = rearrange(B, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
+    if C is None:  # variable C
+        C = x_dbl[:, -d_state:]  # (bl d)
+        if C_proj_bias is not None:
+            C = C + C_proj_bias.to(dtype=C.dtype)
+        if not A.is_complex():
+            C = rearrange(C, "(b l) dstate -> b dstate l", l=L).contiguous()
+        else:
+            C = rearrange(C, "(b l) (dstate two) -> b dstate (l two)", l=L, two=2).contiguous()
+    # Forward scan plus a flipped reverse scan (with A_b); results are summed.
+    # NOTE(review): delta_softplus parameter is ignored; True is hard-coded — confirm intent.
+    y = selective_scan_fn(x, delta, A, B, C, D, z=z, delta_bias=delta_bias, delta_softplus=True)
+    y_b = selective_scan_fn(x.flip([-1]), delta.flip([-1]), A_b, B.flip([-1]), C.flip([-1]), D, z.flip([-1]), delta_bias, delta_softplus=True)
+    y = y + y_b.flip([-1])
+    return F.linear(rearrange(y, "b d l -> b l d"), out_proj_weight, out_proj_bias)
diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/__init__.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/layernorm.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/layernorm.py
new file mode 100644
index 0000000000000000000000000000000000000000..8df9d042a34b6584196f218f5ffeeb104799bd5e
--- /dev/null
+++ b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/layernorm.py
@@ -0,0 +1,636 @@
+# Copyright (c) 2023, Tri Dao.
+# Implement residual + layer_norm / rms_norm. + +# Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html +# For the backward pass, we keep weight_grad and bias_grad in registers and accumulate. +# This is faster for dimensions up to 8k, but after that it's much slower due to register spilling. +# The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine. + +import math + +import torch +import torch.nn.functional as F +from torch.cuda.amp import custom_fwd, custom_bwd + +import triton +import triton.language as tl + + +def layer_norm_ref(x, weight, bias, residual=None, eps=1e-6, prenorm=False, upcast=False): + dtype = x.dtype + if upcast: + weight = weight.float() + bias = bias.float() if bias is not None else None + if upcast: + x = x.float() + residual = residual.float() if residual is not None else residual + if residual is not None: + x = (x + residual).to(x.dtype) + out = F.layer_norm(x.to(weight.dtype), x.shape[-1:], weight=weight, bias=bias, eps=eps).to( + dtype + ) + return out if not prenorm else (out, x) + + +def rms_norm_ref(x, weight, bias, residual=None, eps=1e-6, prenorm=False, upcast=False): + dtype = x.dtype + if upcast: + weight = weight.float() + bias = bias.float() if bias is not None else None + if upcast: + x = x.float() + residual = residual.float() if residual is not None else residual + if residual is not None: + x = (x + residual).to(x.dtype) + rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps) + out = (x * rstd * weight) + bias if bias is not None else (x * rstd * weight) + out = out.to(dtype) + return out if not prenorm else (out, x) + + +@triton.autotune( + configs=[ + triton.Config({}, num_warps=1), + triton.Config({}, num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + triton.Config({}, num_warps=16), + triton.Config({}, num_warps=32), + ], + key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", 
"IS_RMS_NORM", "HAS_BIAS"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None}) +@triton.jit +def _layer_norm_fwd_1pass_kernel( + X, # pointer to the input + Y, # pointer to the output + W, # pointer to the weights + B, # pointer to the biases + RESIDUAL, # pointer to the residual + RESIDUAL_OUT, # pointer to the residual + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_res_row, + stride_res_out_row, + N, # number of columns in X + eps, # epsilon to avoid division by zero + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_RESIDUAL: tl.constexpr, + STORE_RESIDUAL_OUT: tl.constexpr, + HAS_BIAS: tl.constexpr, +): + # Map the program id to the row of X and Y it should compute. + row = tl.program_id(0) + X += row * stride_x_row + Y += row * stride_y_row + if HAS_RESIDUAL: + RESIDUAL += row * stride_res_row + if STORE_RESIDUAL_OUT: + RESIDUAL_OUT += row * stride_res_out_row + # Compute mean and variance + cols = tl.arange(0, BLOCK_N) + x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32) + if HAS_RESIDUAL: + residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32) + x += residual + if STORE_RESIDUAL_OUT: + tl.store(RESIDUAL_OUT + cols, x, mask=cols < N) + if not IS_RMS_NORM: + mean = tl.sum(x, axis=0) / N + tl.store(Mean + row, mean) + xbar = tl.where(cols < N, x - mean, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + else: + xbar = tl.where(cols < N, x, 0.0) + var = tl.sum(xbar * xbar, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + tl.store(Rstd + row, rstd) + # Normalize and apply linear transformation + mask = cols < N + w = tl.load(W + cols, mask=mask).to(tl.float32) + if HAS_BIAS: + b = tl.load(B + cols, mask=mask).to(tl.float32) + x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + y = x_hat * w + b if 
HAS_BIAS else x_hat * w + # Write output + tl.store(Y + cols, y, mask=mask) + + +def _layer_norm_fwd( + x, weight, bias, eps, residual=None, out_dtype=None, residual_dtype=None, is_rms_norm=False +): + if residual is not None: + residual_dtype = residual.dtype + M, N = x.shape + assert x.stride(-1) == 1 + if residual is not None: + assert residual.stride(-1) == 1 + assert residual.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + y = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype) + assert y.stride(-1) == 1 + if residual is not None or (residual_dtype is not None and residual_dtype != x.dtype): + residual_out = torch.empty(M, N, device=x.device, dtype=residual_dtype) + assert residual_out.stride(-1) == 1 + else: + residual_out = None + mean = torch.empty((M,), dtype=torch.float32, device="cuda") if not is_rms_norm else None + rstd = torch.empty((M,), dtype=torch.float32, device="cuda") + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + # heuristics for number of warps + with torch.cuda.device(x.device.index): + _layer_norm_fwd_1pass_kernel[(M,)]( + x, + y, + weight, + bias, + residual, + residual_out, + mean, + rstd, + x.stride(0), + y.stride(0), + residual.stride(0) if residual is not None else 0, + residual_out.stride(0) if residual_out is not None else 0, + N, + eps, + is_rms_norm, + BLOCK_N, + residual is not None, + residual_out is not None, + bias is not None, + ) + # residual_out is None if residual is None and residual_dtype == input_dtype + return y, mean, rstd, residual_out if residual_out is not None else x + + +@triton.autotune( + configs=[ + triton.Config({}, num_warps=1), + triton.Config({}, 
num_warps=2), + triton.Config({}, num_warps=4), + triton.Config({}, num_warps=8), + triton.Config({}, num_warps=16), + triton.Config({}, num_warps=32), + ], + key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS"], +) +# @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None}) +# @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None}) +# @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None}) +@triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None}) +@triton.jit +def _layer_norm_bwd_kernel( + X, # pointer to the input + W, # pointer to the weights + B, # pointer to the biases + Y, # pointer to the output to be recomputed + DY, # pointer to the output gradient + DX, # pointer to the input gradient + DW, # pointer to the partial sum of weights gradient + DB, # pointer to the partial sum of biases gradient + DRESIDUAL, + DRESIDUAL_IN, + Mean, # pointer to the mean + Rstd, # pointer to the 1/std + stride_x_row, # how much to increase the pointer when moving by 1 row + stride_y_row, + stride_dy_row, + stride_dx_row, + stride_dres_row, + stride_dres_in_row, + M, # number of rows in X + N, # number of columns in X + eps, # epsilon to avoid division by zero + rows_per_program, + IS_RMS_NORM: tl.constexpr, + BLOCK_N: tl.constexpr, + HAS_DRESIDUAL: tl.constexpr, + STORE_DRESIDUAL: tl.constexpr, + HAS_BIAS: tl.constexpr, + RECOMPUTE_OUTPUT: tl.constexpr, +): + # Map the program id to the elements of X, DX, and DY it should compute. 
+ row_block_id = tl.program_id(0) + row_start = row_block_id * rows_per_program + cols = tl.arange(0, BLOCK_N) + mask = cols < N + X += row_start * stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += row_start * stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += row_start * stride_dres_in_row + DY += row_start * stride_dy_row + DX += row_start * stride_dx_row + if RECOMPUTE_OUTPUT: + Y += row_start * stride_y_row + w = tl.load(W + cols, mask=mask).to(tl.float32) + if RECOMPUTE_OUTPUT and HAS_BIAS: + b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32) + dw = tl.zeros((BLOCK_N,), dtype=tl.float32) + if HAS_BIAS: + db = tl.zeros((BLOCK_N,), dtype=tl.float32) + row_end = min((row_block_id + 1) * rows_per_program, M) + for row in range(row_start, row_end): + # Load data to SRAM + x = tl.load(X + cols, mask=mask, other=0).to(tl.float32) + dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) + if not IS_RMS_NORM: + mean = tl.load(Mean + row) + rstd = tl.load(Rstd + row) + # Compute dx + xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd + xhat = tl.where(mask, xhat, 0.0) + if RECOMPUTE_OUTPUT: + y = xhat * w + b if HAS_BIAS else xhat * w + tl.store(Y + cols, y, mask=mask) + wdy = w * dy + dw += dy * xhat + if HAS_BIAS: + db += dy + if not IS_RMS_NORM: + c1 = tl.sum(xhat * wdy, axis=0) / N + c2 = tl.sum(wdy, axis=0) / N + dx = (wdy - (xhat * c1 + c2)) * rstd + else: + c1 = tl.sum(xhat * wdy, axis=0) / N + dx = (wdy - xhat * c1) * rstd + if HAS_DRESIDUAL: + dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32) + dx += dres + # Write dx + if STORE_DRESIDUAL: + tl.store(DRESIDUAL_IN + cols, dx, mask=mask) + tl.store(DX + cols, dx, mask=mask) + + X += stride_x_row + if HAS_DRESIDUAL: + DRESIDUAL += stride_dres_row + if STORE_DRESIDUAL: + DRESIDUAL_IN += stride_dres_in_row + if RECOMPUTE_OUTPUT: + Y += stride_y_row + DY += stride_dy_row + DX += stride_dx_row + tl.store(DW + row_block_id * N + cols, dw, mask=mask) + if HAS_BIAS: + tl.store(DB + 
row_block_id * N + cols, db, mask=mask) + + +def _layer_norm_bwd( + dy, + x, + weight, + bias, + eps, + mean, + rstd, + dresidual=None, + has_residual=False, + is_rms_norm=False, + x_dtype=None, + recompute_output=False, +): + M, N = x.shape + assert x.stride(-1) == 1 + assert dy.stride(-1) == 1 + assert dy.shape == (M, N) + if dresidual is not None: + assert dresidual.stride(-1) == 1 + assert dresidual.shape == (M, N) + assert weight.shape == (N,) + assert weight.stride(-1) == 1 + if bias is not None: + assert bias.stride(-1) == 1 + assert bias.shape == (N,) + # allocate output + dx = ( + torch.empty_like(x) + if x_dtype is None + else torch.empty(M, N, dtype=x_dtype, device=x.device) + ) + dresidual_in = torch.empty_like(x) if has_residual and dx.dtype != x.dtype else None + y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None + + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // x.element_size() + BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + if N > BLOCK_N: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") + sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count + _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device) + _db = ( + torch.empty((sm_count, N), dtype=torch.float32, device=bias.device) + if bias is not None + else None + ) + rows_per_program = math.ceil(M / sm_count) + grid = (sm_count,) + with torch.cuda.device(x.device.index): + _layer_norm_bwd_kernel[grid]( + x, + weight, + bias, + y, + dy, + dx, + _dw, + _db, + dresidual, + dresidual_in, + mean, + rstd, + x.stride(0), + 0 if not recompute_output else y.stride(0), + dy.stride(0), + dx.stride(0), + dresidual.stride(0) if dresidual is not None else 0, + dresidual_in.stride(0) if dresidual_in is not None else 0, + M, + N, + eps, + rows_per_program, + is_rms_norm, + BLOCK_N, + dresidual is not None, + dresidual_in is not None, + bias is not None, + ) + dw = 
_dw.sum(0).to(weight.dtype) + db = _db.sum(0).to(bias.dtype) if bias is not None else None + # Don't need to compute dresidual_in separately in this case + if has_residual and dx.dtype == x.dtype: + dresidual_in = dx + return (dx, dw, db, dresidual_in) if not recompute_output else (dx, dw, db, dresidual_in, y) + + +class LayerNormFn(torch.autograd.Function): + @staticmethod + def forward( + ctx, + x, + weight, + bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = residual.contiguous() + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, mean, rstd, residual_out = _layer_norm_fwd( + x, weight, bias, eps, residual, residual_dtype=residual_dtype, is_rms_norm=is_rms_norm + ) + ctx.save_for_backward(residual_out, weight, bias, mean, rstd) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + y = y.reshape(x_shape_og) + return y if not prenorm else (y, residual_out.reshape(x_shape_og)) + + @staticmethod + def backward(ctx, dy, *args): + x, weight, bias, mean, rstd = ctx.saved_tensors + dy = dy.reshape(-1, dy.shape[-1]) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dw, db, dresidual_in = 
_layer_norm_bwd( + dy, + x, + weight, + bias, + ctx.eps, + mean, + rstd, + dresidual, + ctx.has_residual, + ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + ) + return ( + dx.reshape(ctx.x_shape_og), + dw, + db, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + None, + None, + None, + None, + ) + + +def layer_norm_fn( + x, + weight, + bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, +): + return LayerNormFn.apply(x, weight, bias, residual, eps, prenorm, residual_in_fp32, is_rms_norm) + + +def rms_norm_fn(x, weight, bias, residual=None, prenorm=False, residual_in_fp32=False, eps=1e-6): + return LayerNormFn.apply(x, weight, bias, residual, eps, prenorm, residual_in_fp32, True) + + +class RMSNorm(torch.nn.Module): + def __init__(self, hidden_size, eps=1e-5, device=None, dtype=None): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.eps = eps + self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs)) + self.register_parameter("bias", None) + self.reset_parameters() + + def reset_parameters(self): + torch.nn.init.ones_(self.weight) + + def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False): + return rms_norm_fn( + x, + self.weight, + self.bias, + residual=residual, + eps=self.eps, + prenorm=prenorm, + residual_in_fp32=residual_in_fp32, + is_rms_norm=True, + ) + + +class LayerNormLinearFn(torch.autograd.Function): + @staticmethod + @custom_fwd + def forward( + ctx, + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, + ): + x_shape_og = x.shape + # reshape input data into 2D tensor + x = x.reshape(-1, x.shape[-1]) + if x.stride(-1) != 1: + x = x.contiguous() + if residual is not None: + assert residual.shape == x_shape_og + residual = residual.reshape(-1, residual.shape[-1]) + if residual.stride(-1) != 1: + residual = 
residual.contiguous() + norm_weight = norm_weight.contiguous() + if norm_bias is not None: + norm_bias = norm_bias.contiguous() + residual_dtype = ( + residual.dtype + if residual is not None + else (torch.float32 if residual_in_fp32 else None) + ) + y, mean, rstd, residual_out = _layer_norm_fwd( + x, + norm_weight, + norm_bias, + eps, + residual, + out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype(), + residual_dtype=residual_dtype, + is_rms_norm=is_rms_norm, + ) + y = y.reshape(x_shape_og) + dtype = torch.get_autocast_gpu_dtype() if torch.is_autocast_enabled() else y.dtype + linear_weight = linear_weight.to(dtype) + linear_bias = linear_bias.to(dtype) if linear_bias is not None else None + out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias) + # We don't store y, will be recomputed in the backward pass to save memory + ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd) + ctx.x_shape_og = x_shape_og + ctx.eps = eps + ctx.is_rms_norm = is_rms_norm + ctx.has_residual = residual is not None + ctx.prenorm = prenorm + ctx.x_dtype = x.dtype + ctx.linear_bias_is_none = linear_bias is None + return out if not prenorm else (out, residual_out.reshape(x_shape_og)) + + @staticmethod + @custom_bwd + def backward(ctx, dout, *args): + x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors + dout = dout.reshape(-1, dout.shape[-1]) + dy = F.linear(dout, linear_weight.t()) + dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0) + if dy.stride(-1) != 1: + dy = dy.contiguous() + assert dy.shape == x.shape + if ctx.prenorm: + dresidual = args[0] + dresidual = dresidual.reshape(-1, dresidual.shape[-1]) + if dresidual.stride(-1) != 1: + dresidual = dresidual.contiguous() + assert dresidual.shape == x.shape + else: + dresidual = None + dx, dnorm_weight, dnorm_bias, dresidual_in, y = _layer_norm_bwd( + dy, + x, + norm_weight, + norm_bias, + ctx.eps, + mean, + rstd, + 
dresidual, + ctx.has_residual, + ctx.is_rms_norm, + x_dtype=ctx.x_dtype, + recompute_output=True, + ) + dlinear_weight = torch.einsum("bo,bi->oi", dout, y) + return ( + dx.reshape(ctx.x_shape_og), + dnorm_weight, + dnorm_bias, + dlinear_weight, + dlinear_bias, + dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None, + None, + None, + None, + None, + ) + + +def layer_norm_linear_fn( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual=None, + eps=1e-6, + prenorm=False, + residual_in_fp32=False, + is_rms_norm=False, +): + return LayerNormLinearFn.apply( + x, + norm_weight, + norm_bias, + linear_weight, + linear_bias, + residual, + eps, + prenorm, + residual_in_fp32, + is_rms_norm, + ) diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/selective_state_update.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/selective_state_update.py new file mode 100644 index 0000000000000000000000000000000000000000..fa95de73f173292914c5f00fbe9426937d00e502 --- /dev/null +++ b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/ops/triton/selective_state_update.py @@ -0,0 +1,192 @@ +# Copyright (c) 2023, Tri Dao. 
+ +"""We want triton==2.1.0 for this +""" + +import math +import torch +import torch.nn.functional as F + +import triton +import triton.language as tl + +from einops import rearrange, repeat + + +@triton.heuristics({"HAS_DT_BIAS": lambda args: args["dt_bias_ptr"] is not None}) +@triton.heuristics({"HAS_D": lambda args: args["D_ptr"] is not None}) +@triton.heuristics({"HAS_Z": lambda args: args["z_ptr"] is not None}) +@triton.heuristics({"BLOCK_SIZE_DSTATE": lambda args: triton.next_power_of_2(args["dstate"])}) +@triton.jit +def _selective_scan_update_kernel( + # Pointers to matrices + state_ptr, x_ptr, dt_ptr, dt_bias_ptr, A_ptr, B_ptr, C_ptr, D_ptr, z_ptr, out_ptr, + # Matrix dimensions + batch, dim, dstate, + # Strides + stride_state_batch, stride_state_dim, stride_state_dstate, + stride_x_batch, stride_x_dim, + stride_dt_batch, stride_dt_dim, + stride_dt_bias_dim, + stride_A_dim, stride_A_dstate, + stride_B_batch, stride_B_dstate, + stride_C_batch, stride_C_dstate, + stride_D_dim, + stride_z_batch, stride_z_dim, + stride_out_batch, stride_out_dim, + # Meta-parameters + DT_SOFTPLUS: tl.constexpr, + BLOCK_SIZE_M: tl.constexpr, + HAS_DT_BIAS: tl.constexpr, + HAS_D: tl.constexpr, + HAS_Z: tl.constexpr, + BLOCK_SIZE_DSTATE: tl.constexpr, +): + pid_m = tl.program_id(axis=0) + pid_b = tl.program_id(axis=1) + state_ptr += pid_b * stride_state_batch + x_ptr += pid_b * stride_x_batch + dt_ptr += pid_b * stride_dt_batch + B_ptr += pid_b * stride_B_batch + C_ptr += pid_b * stride_C_batch + if HAS_Z: + z_ptr += pid_b * stride_z_batch + out_ptr += pid_b * stride_out_batch + + offs_m = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) + offs_n = tl.arange(0, BLOCK_SIZE_DSTATE) + state_ptrs = state_ptr + (offs_m[:, None] * stride_state_dim + offs_n[None, :] * stride_state_dstate) + x_ptrs = x_ptr + offs_m * stride_x_dim + dt_ptrs = dt_ptr + offs_m * stride_dt_dim + if HAS_DT_BIAS: + dt_bias_ptrs = dt_bias_ptr + offs_m * stride_dt_bias_dim + A_ptrs = A_ptr + (offs_m[:, None] * 
stride_A_dim + offs_n[None, :] * stride_A_dstate) + B_ptrs = B_ptr + offs_n * stride_B_dstate + C_ptrs = C_ptr + offs_n * stride_C_dstate + if HAS_D: + D_ptrs = D_ptr + offs_m * stride_D_dim + if HAS_Z: + z_ptrs = z_ptr + offs_m * stride_z_dim + out_ptrs = out_ptr + offs_m * stride_out_dim + + state = tl.load(state_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0) + x = tl.load(x_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + dt = tl.load(dt_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_DT_BIAS: + dt += tl.load(dt_bias_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if DT_SOFTPLUS: + dt = tl.log(1.0 + tl.exp(dt)) + A = tl.load(A_ptrs, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate), other=0.0).to(tl.float32) + dA = tl.exp(A * dt[:, None]) + B = tl.load(B_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + C = tl.load(C_ptrs, mask=offs_n < dstate, other=0.0).to(tl.float32) + if HAS_D: + D = tl.load(D_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + if HAS_Z: + z = tl.load(z_ptrs, mask=offs_m < dim, other=0.0).to(tl.float32) + + dB = B[None, :] * dt[:, None] + state = state * dA + dB * x[:, None] + tl.store(state_ptrs, state, mask=(offs_m[:, None] < dim) & (offs_n[None, :] < dstate)) + out = tl.sum(state * C[None, :], axis=1) + if HAS_D: + out += x * D + if HAS_Z: + out *= z * tl.sigmoid(z) + tl.store(out_ptrs, out, mask=offs_m < dim) + + +def selective_state_update(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + state: (batch, dim, dstate) + x: (batch, dim) + dt: (batch, dim) + A: (dim, dstate) + B: (batch, dstate) + C: (batch, dstate) + D: (dim,) + z: (batch, dim) + dt_bias: (dim,) + Return: + out: (batch, dim) + """ + batch, dim, dstate = state.shape + assert x.shape == (batch, dim) + assert dt.shape == x.shape + assert A.shape == (dim, dstate) + assert B.shape == (batch, dstate) + assert C.shape == B.shape + if D is not None: + assert D.shape == 
(dim,) + if z is not None: + assert z.shape == x.shape + if dt_bias is not None: + assert dt_bias.shape == (dim,) + out = torch.empty_like(x) + grid = lambda META: (triton.cdiv(dim, META['BLOCK_SIZE_M']), batch) + z_strides = ((z.stride(0), z.stride(1)) if z is not None else (0, 0)) + # We don't want autotune since it will overwrite the state + # We instead tune by hand. + BLOCK_SIZE_M, num_warps = ((32, 4) if dstate <= 16 + else ((16, 4) if dstate <= 32 else + ((8, 4) if dstate <= 64 else + ((4, 4) if dstate <= 128 else + ((4, 8)))))) + with torch.cuda.device(x.device.index): + _selective_scan_update_kernel[grid]( + state, x, dt, dt_bias, A, B, C, D, z, out, + batch, dim, dstate, + state.stride(0), state.stride(1), state.stride(2), + x.stride(0), x.stride(1), + dt.stride(0), dt.stride(1), + dt_bias.stride(0) if dt_bias is not None else 0, + A.stride(0), A.stride(1), + B.stride(0), B.stride(1), + C.stride(0), C.stride(1), + D.stride(0) if D is not None else 0, + z_strides[0], z_strides[1], + out.stride(0), out.stride(1), + dt_softplus, + BLOCK_SIZE_M, + num_warps=num_warps, + ) + return out + + +def selective_state_update_ref(state, x, dt, A, B, C, D=None, z=None, dt_bias=None, dt_softplus=False): + """ + Argument: + state: (batch, dim, dstate) + x: (batch, dim) + dt: (batch, dim) + A: (dim, dstate) + B: (batch, dstate) + C: (batch, dstate) + D: (dim,) + z: (batch, dim) + dt_bias: (dim,) + Return: + out: (batch, dim) + """ + batch, dim, dstate = state.shape + assert x.shape == (batch, dim) + assert dt.shape == x.shape + assert A.shape == (dim, dstate) + assert B.shape == (batch, dstate) + assert C.shape == B.shape + if D is not None: + assert D.shape == (dim,) + if z is not None: + assert z.shape == x.shape + if dt_bias is not None: + assert dt_bias.shape == (dim,) + dt = dt + dt_bias + dt = F.softplus(dt) if dt_softplus else dt + dA = torch.exp(rearrange(dt, "b d -> b d 1") * A) # (batch, dim, dstate) + dB = rearrange(dt, "b d -> b d 1") * rearrange(B, "b n -> b 1 
n") # (batch, dim, dstate) + state.copy_(state * dA + dB * rearrange(x, "b d -> b d 1")) # (batch, dim, dstate + out = torch.einsum("bdn,bn->bd", state.to(C.dtype), C) + if D is not None: + out += (x * D).to(out.dtype) + return (out if z is None else out * F.silu(z)).to(x.dtype) diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/__init__.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/generation.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/generation.py new file mode 100644 index 0000000000000000000000000000000000000000..9d766b29ac28a388a7d77b22aa2cb1eda733c0f4 --- /dev/null +++ b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/generation.py @@ -0,0 +1,377 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. 
+import gc
+import time
+from collections import namedtuple
+from dataclasses import dataclass, field
+from functools import partial
+from typing import Callable, Optional, Sequence, Union
+
+import torch
+import torch.nn.functional as F
+from einops import rearrange, repeat
+from torch import Tensor
+from torch.profiler import ProfilerActivity, profile, record_function
+from transformers.generation import GreedySearchDecoderOnlyOutput, SampleDecoderOnlyOutput
+
+
+@dataclass
+class InferenceParams:
+    """Inference parameters that are passed to the main model in order
+    to efficiently calculate and store the context during inference."""
+
+    max_seqlen: int
+    max_batch_size: int
+    seqlen_offset: int = 0
+    batch_size_offset: int = 0
+    key_value_memory_dict: dict = field(default_factory=dict)
+    lengths_per_sample: Optional[Tensor] = None
+
+    def reset(self, max_seqlen, max_batch_size):
+        self.max_seqlen = max_seqlen
+        self.max_batch_size = max_batch_size
+        self.seqlen_offset = 0
+        if self.lengths_per_sample is not None:
+            self.lengths_per_sample.zero_()
+
+
+# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py
+# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L231
+def modify_logits_for_top_k_filtering(logits, top_k):
+    """Set the logits for non-top-k values to -inf. Done in-place."""
+    indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
+    logits.masked_fill_(indices_to_remove, float("-Inf"))
+
+
+# https://github.com/NVIDIA/Megatron-LM/blob/0bb597b42c53355a567aba2a1357cc34b9d99ddd/megatron/text_generation/sampling.py
+# https://github.com/huggingface/transformers/blob/a44985b41cfa2de48a5e1de7f1f93b7483da25d1/src/transformers/generation/logits_process.py#L170
+def modify_logits_for_top_p_filtering(logits, top_p):
+    """Set the logits for non-top-p values to -inf. 
Done in-place.""" + if top_p <= 0.0 or top_p >= 1.0: + return + # First sort and calculate cumulative sum of probabilities. + sorted_logits, sorted_indices = torch.sort(logits, descending=False) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs <= (1 - top_p) + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter( + 1, sorted_indices, sorted_indices_to_remove + ) + logits.masked_fill_(indices_to_remove, float("-inf")) + + +def sample(logits, top_k=1, top_p=0.0, temperature=1.0): + """Sample from top-k logits. + Arguments: + logits: Tensor of shape (batch_size, vocab_size) + """ + if top_k == 1: # Short-circuit for greedy decoding + return logits.argmax(dim=-1) + else: + if top_p > 0.0: + assert top_p <= 1.0, "top-p should be in (0, 1]." + if top_k > 0: + top_k = min(top_k, logits.size(-1)) # Safety check + logits_top, indices = torch.topk(logits, top_k, dim=-1) + if temperature != 1.0: + logits_top /= temperature + modify_logits_for_top_p_filtering(logits_top, top_p) + return indices[ + torch.arange(indices.shape[0], device=indices.device), + torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze(dim=-1), + ] + else: + # Clone so that when we modify for top_p we don't change the original logits + logits_top = logits / temperature if temperature != 1.0 else logits.clone() + modify_logits_for_top_p_filtering(logits_top, top_p) + return torch.multinomial(torch.softmax(logits_top, dim=-1), num_samples=1).squeeze( + dim=-1 + ) + + +@torch.inference_mode() +def decode( + input_ids, + model, + max_length, + top_k=1, + top_p=0.0, + temperature=1.0, + eos_token_id=None, + teacher_outputs=None, + vocab_size=None, + tensor_parallel=1, + cg=False, + enable_timing=False, +): + """Decoding, either greedy or with top-k or top-p sampling. 
+ If top-k = 0, don't limit the number of candidates (pure sampling). + Top-k and top-p can be used together. If top_k > 0 and top_p > 0, then top-k is applied first, + then top-p. + We assume that all sequences in the same batch have the same length. + + Arguments: + input_ids: (batch, seq_len) + max_length: int + teacher_outputs (optional): (batch, seq_len). If provided, instead of sampling from the + logits, the next token is taken from the teacher_outputs. Useful for testing. + Returns: GreedySearchDecoderOnlyOutput or SampleDecoderOnlyOutput, with the following fields: + sequences: (batch, max_length) + scores: tuples of (batch, vocab_size) + """ + batch_size, seqlen_og = input_ids.shape + teacher_output_len = teacher_outputs.shape[1] if teacher_outputs is not None else 0 + if cg: + if not hasattr(model, "_decoding_cache"): + model._decoding_cache = None + model._decoding_cache = update_graph_cache( + model, + model._decoding_cache, + batch_size, + seqlen_og, + max_length, + tensor_parallel=tensor_parallel, + ) + inference_params = model._decoding_cache.inference_params + inference_params.reset(max_length, batch_size) + else: + inference_params = InferenceParams(max_seqlen=max_length, max_batch_size=batch_size) + + def get_logits(input_ids, inference_params): + decoding = inference_params.seqlen_offset > 0 + if decoding: + position_ids = torch.full( + (batch_size, 1), + inference_params.seqlen_offset, + dtype=torch.long, + device=input_ids.device, + ) + else: + position_ids = None + if not cg or not decoding: + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=1, + ).logits.squeeze(dim=1) + else: + logits = model._decoding_cache.run( + input_ids, position_ids, inference_params.seqlen_offset + ).squeeze(dim=1) + return logits[..., :vocab_size] if vocab_size is not None else logits + + def sample_tokens(logits, inference_params): + if teacher_outputs is None or teacher_output_len <= 
inference_params.seqlen_offset: + token = sample(logits, top_k=top_k, top_p=top_p, temperature=temperature) + else: + token = teacher_outputs[:, inference_params.seqlen_offset] + # return rearrange(token, "b -> b 1") + return token.unsqueeze(1) + + def should_stop(current_token, inference_params): + if inference_params.seqlen_offset == 0: + return False + if eos_token_id is not None and (current_token == eos_token_id).all(): + return True + if inference_params.seqlen_offset >= max_length - 1: + return True + return False + + start = torch.cuda.Event(enable_timing=enable_timing) + end = torch.cuda.Event(enable_timing=enable_timing) + + if enable_timing: + if tensor_parallel > 1: + torch.distributed.barrier() + start.record() + scores, sequences = [], [input_ids] + while not should_stop(sequences[-1], inference_params): + scores.append(get_logits(sequences[-1], inference_params)) + inference_params.seqlen_offset += sequences[-1].shape[1] + sequences.append(sample_tokens(scores[-1], inference_params)) + if enable_timing: + end.record() + if tensor_parallel > 1: + torch.distributed.barrier() + torch.cuda.synchronize() + print(f"Prompt processing + decoding time: {(start.elapsed_time(end)):.0f}ms") + output_cls = GreedySearchDecoderOnlyOutput if top_k == 1 else SampleDecoderOnlyOutput + return output_cls(sequences=torch.cat(sequences, dim=1), scores=tuple(scores)) + + +class GenerationMixin: + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + raise NotImplementedError + + def generate( + self, + input_ids, + max_length, + top_k=1, + top_p=0.0, + temperature=1.0, + return_dict_in_generate=False, + output_scores=False, + **kwargs, + ): + output = decode( + input_ids, self, max_length, top_k=top_k, top_p=top_p, temperature=temperature, **kwargs + ) + if not output_scores: + output.scores = None + return output if return_dict_in_generate else output.sequences + + +def allocate_inference_cache( + max_batch_size, + max_seqlen, + nheads, + 
headdim, + layers: Union[int, Sequence], + device, + dtype=torch.float16, +): + assert dtype in [torch.float16, torch.bfloat16, torch.float32] + kv_cache_shape = (max_batch_size, max_seqlen, 2, nheads, headdim) + if isinstance(layers, int): + layers = range(layers) + return {i: torch.empty(kv_cache_shape, device=device, dtype=dtype) for i in layers} + + +@dataclass +class DecodingCGCache: + max_batch_size: int = 0 + max_seqlen: int = 0 + device = None + dtype = None + callables: dict = field(default_factory=dict) + mempool = None + inference_params: Optional[InferenceParams] = None + run: Optional[Callable] = None + + +@torch.inference_mode() +def update_graph_cache( + model, + cache, + batch_size, + seqlen_og, + max_seqlen, + decoding_seqlens=(1,), + tensor_parallel=1, + dtype=None, + n_warmups=2, +): + if cache is None: + cache = DecodingCGCache() + param_example = next(iter(model.parameters())) + device = param_example.device + if dtype is None: + dtype = param_example.dtype + if ( + (device, dtype) != (cache.device, cache.dtype) + or batch_size > cache.max_batch_size + or max_seqlen > cache.max_seqlen + ): # Invalidate the cache + cache.callables = {} + cache.mempool = None + cache.inference_params = None + gc.collect() + cache.device, cache.dtype = device, dtype + cache.max_batch_size, cache.max_seqlen = batch_size, max_seqlen + if hasattr(model, "allocate_inference_cache"): + inf_cache = model.allocate_inference_cache(batch_size, max_seqlen, dtype) + else: + headdim = getattr( + model.config, + "head_dim", + model.config.hidden_size // model.config.num_attention_heads, + ) + inf_cache = allocate_inference_cache( + batch_size, + max_seqlen, + model.config.num_attention_heads // tensor_parallel, + headdim, + model.config.num_hidden_layers, + device, + dtype, + ) + lengths_per_sample = torch.full((batch_size,), seqlen_og, dtype=torch.int32, device=device) + cache.inference_params = InferenceParams( + max_seqlen=max_seqlen, + max_batch_size=batch_size, + 
seqlen_offset=seqlen_og, + key_value_memory_dict=inf_cache, + lengths_per_sample=lengths_per_sample, + ) + cache.mempool = torch.cuda.graphs.graph_pool_handle() + for decoding_seqlen in decoding_seqlens: + if (batch_size, decoding_seqlen) not in cache.callables: + cache.callables[batch_size, decoding_seqlen] = capture_graph( + model, + cache.inference_params, + batch_size, + max_seqlen, + decoding_seqlen=decoding_seqlen, + mempool=cache.mempool, + n_warmups=n_warmups, + ) + + def dispatch(input_ids, position_ids, seqlen): + batch_size, decoding_seqlen = input_ids.shape[:2] + return cache.callables[batch_size, decoding_seqlen](input_ids, position_ids, seqlen) + + cache.run = dispatch + cache.inference_params.seqlen_offset = 0 # Reset so it's not confusing + return cache + + +def capture_graph( + model, inference_params, batch_size, max_seqlen, decoding_seqlen=1, mempool=None, n_warmups=2 +): + device = next(iter(model.parameters())).device + input_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + position_ids = torch.full((batch_size, decoding_seqlen), 0, dtype=torch.long, device=device) + seqlen_offset_og = inference_params.seqlen_offset + inference_params.seqlen_offset = max_seqlen - decoding_seqlen + inference_params.lengths_per_sample[:] = inference_params.seqlen_offset + + # Warmup before capture + s = torch.cuda.Stream() + s.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(s): + for _ in range(n_warmups): + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=decoding_seqlen, + ).logits + s.synchronize() + # This might be needed for correctness if we run with NCCL_GRAPH_MIXING_SUPPORT=0, + # which requires that graph launch and non-captured launch to not overlap (I think, + # that's how I interpret the documentation). I'm not sure if this is required. 
+ if torch.distributed.is_initialized(): + torch.distributed.barrier() + torch.cuda.current_stream().wait_stream(s) + # Captures the graph + # To allow capture, automatically sets a side stream as the current stream in the context + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph, pool=mempool): + logits = model( + input_ids, + position_ids=position_ids, + inference_params=inference_params, + num_last_tokens=decoding_seqlen, + ).logits + + def run(new_input_ids, new_position_ids, seqlen): + inference_params.lengths_per_sample[:] = seqlen + input_ids.copy_(new_input_ids) + position_ids.copy_(new_position_ids) + graph.replay() + return logits.clone() + + inference_params.seqlen_offset = seqlen_offset_og + return run diff --git a/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/hf.py b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/hf.py new file mode 100644 index 0000000000000000000000000000000000000000..0d7555acddbd260636d1d14d5bd6324f6af0056a --- /dev/null +++ b/source_code/SegMamba/mamba/build/lib.linux-x86_64-cpython-312/mamba_ssm/utils/hf.py @@ -0,0 +1,23 @@ +import json + +import torch + +from transformers.utils import WEIGHTS_NAME, CONFIG_NAME +from transformers.utils.hub import cached_file + + +def load_config_hf(model_name): + resolved_archive_file = cached_file(model_name, CONFIG_NAME, _raise_exceptions_for_missing_entries=False) + return json.load(open(resolved_archive_file)) + + +def load_state_dict_hf(model_name, device=None, dtype=None): + # If not fp32, then we don't want to load directly to the GPU + mapped_device = "cpu" if dtype not in [torch.float32, None] else device + resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False) + return torch.load(resolved_archive_file, map_location=mapped_device) + # Convert dtype before moving to GPU to save memory + if dtype is not None: + state_dict = {k: v.to(dtype=dtype) for k, v in 
state_dict.items()} + state_dict = {k: v.to(device=device) for k, v in state_dict.items()} + return state_dict diff --git a/source_code/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/build.ninja b/source_code/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/build.ninja new file mode 100644 index 0000000000000000000000000000000000000000..4ab36d358cc366ff9246782e75c879b6f2b8c77f --- /dev/null +++ b/source_code/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/build.ninja @@ -0,0 +1,46 @@ +ninja_required_version = 1.3 +cxx = c++ +nvcc = /usr/local/cuda/bin/nvcc + +cflags = -pthread -B /root/miniforge/compiler_compat -fno-strict-overflow -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /root/miniforge/include -fPIC -O2 -isystem /root/miniforge/include -fPIC -I/root/githubs/SegMamba/mamba/csrc/selective_scan -I/root/miniforge/lib/python3.12/site-packages/torch/include -I/root/miniforge/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/root/miniforge/include/python3.12 -c +post_cflags = -O3 -std=c++17 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=selective_scan_cuda +cuda_cflags = -I/root/githubs/SegMamba/mamba/csrc/selective_scan -I/root/miniforge/lib/python3.12/site-packages/torch/include -I/root/miniforge/lib/python3.12/site-packages/torch/include/torch/csrc/api/include -I/usr/local/cuda/include -I/root/miniforge/include/python3.12 -c +cuda_post_cflags = -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options ''"'"'-fPIC'"'"'' -O3 -std=c++17 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_BFLOAT16_OPERATORS__ -U__CUDA_NO_BFLOAT16_CONVERSIONS__ -U__CUDA_NO_BFLOAT162_OPERATORS__ -U__CUDA_NO_BFLOAT162_CONVERSIONS__ --expt-relaxed-constexpr --expt-extended-lambda --use_fast_math --ptxas-options=-v -lineinfo -gencode arch=compute_70,code=sm_70 -gencode 
arch=compute_80,code=sm_80 -gencode arch=compute_90,code=sm_90 --threads 4 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=selective_scan_cuda +cuda_dlink_post_cflags = +sycl_dlink_post_cflags = +ldflags = + +rule compile + command = $cxx -MMD -MF $out.d $cflags -c $in -o $out $post_cflags + depfile = $out.d + deps = gcc + +rule cuda_compile + depfile = $out.d + deps = gcc + command = $nvcc --generate-dependencies-with-compile --dependency-output $out.d $cuda_cflags -c $in -o $out $cuda_post_cflags + + + + + + + +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan.o: compile /root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan.cpp +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_complex.o: cuda_compile /root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_bf16_real.o: cuda_compile /root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_complex.o: cuda_compile /root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp16_real.o: cuda_compile /root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_complex.o: cuda_compile /root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_bwd_fp32_real.o: cuda_compile 
/root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_bf16.o: cuda_compile /root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp16.o: cuda_compile /root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp16.cu +build /root/githubs/SegMamba/mamba/build/temp.linux-x86_64-cpython-312/csrc/selective_scan/selective_scan_fwd_fp32.o: cuda_compile /root/githubs/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_fp32.cu + + + + + + + + diff --git a/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan.cpp b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f51af402a190dc14247ef8185a7d01b697313f02 --- /dev/null +++ b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan.cpp @@ -0,0 +1,497 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include +#include + +#include "selective_scan.h" + +#define CHECK_SHAPE(x, ...) TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), #x " must have shape (" #__VA_ARGS__ ")") + +#define DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(ITYPE, NAME, ...) 
\ + if (ITYPE == at::ScalarType::Half) { \ + using input_t = at::Half; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::BFloat16) { \ + using input_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (ITYPE == at::ScalarType::Float) { \ + using input_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for input type '", toString(ITYPE), "'"); \ + } + +#define DISPATCH_WTYPE_FLOAT_AND_HALF_AND_BF16(WTYPE, NAME, ...) \ + if (WTYPE == at::ScalarType::Half) { \ + using weight_t = at::Half; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::BFloat16) { \ + using weight_t = at::BFloat16; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::Float) { \ + using weight_t = float; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \ + } + +#define DISPATCH_WTYPE_FLOAT_AND_COMPLEX(WTYPE, NAME, ...) \ + if (WTYPE == at::ScalarType::Float) { \ + using weight_t = float; \ + __VA_ARGS__(); \ + } else if (WTYPE == at::ScalarType::ComplexFloat) { \ + using weight_t = c10::complex; \ + __VA_ARGS__(); \ + } else { \ + AT_ERROR(#NAME, " not implemented for weight type '", toString(WTYPE), "'"); \ + } + +template +void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); + +template +void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); + +void set_ssm_params_fwd(SSMParamsBase ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t dstate, + const size_t n_groups, + const size_t n_chunks, + const bool is_variable_B, + const bool is_variable_C, + // device pointers + const at::Tensor u, + const at::Tensor delta, + const at::Tensor A, + const at::Tensor B, + const at::Tensor C, + const at::Tensor out, + const at::Tensor z, + const at::Tensor out_z, + void* D_ptr, + void* delta_bias_ptr, + void* x_ptr, + bool has_z, + bool delta_softplus) { + + // Reset the parameters + memset(¶ms, 0, sizeof(params)); + + 
params.batch = batch; + params.dim = dim; + params.seqlen = seqlen; + params.dstate = dstate; + params.n_groups = n_groups; + params.n_chunks = n_chunks; + params.dim_ngroups_ratio = dim / n_groups; + + params.delta_softplus = delta_softplus; + + params.is_variable_B = is_variable_B; + params.is_variable_C = is_variable_C; + + // Set the pointers and strides. + params.u_ptr = u.data_ptr(); + params.delta_ptr = delta.data_ptr(); + params.A_ptr = A.data_ptr(); + params.B_ptr = B.data_ptr(); + params.C_ptr = C.data_ptr(); + params.D_ptr = D_ptr; + params.delta_bias_ptr = delta_bias_ptr; + params.out_ptr = out.data_ptr(); + params.x_ptr = x_ptr; + params.z_ptr = has_z ? z.data_ptr() : nullptr; + params.out_z_ptr = has_z ? out_z.data_ptr() : nullptr; + // All stride are in elements, not bytes. + params.A_d_stride = A.stride(0); + params.A_dstate_stride = A.stride(1); + if (!is_variable_B) { + params.B_d_stride = B.stride(0); + } else { + params.B_batch_stride = B.stride(0); + params.B_group_stride = B.stride(1); + } + params.B_dstate_stride = !is_variable_B ? B.stride(1) : B.stride(2); + if (!is_variable_C) { + params.C_d_stride = C.stride(0); + } else { + params.C_batch_stride = C.stride(0); + params.C_group_stride = C.stride(1); + } + params.C_dstate_stride = !is_variable_C ? 
C.stride(1) : C.stride(2); + params.u_batch_stride = u.stride(0); + params.u_d_stride = u.stride(1); + params.delta_batch_stride = delta.stride(0); + params.delta_d_stride = delta.stride(1); + if (has_z) { + params.z_batch_stride = z.stride(0); + params.z_d_stride = z.stride(1); + params.out_z_batch_stride = out_z.stride(0); + params.out_z_d_stride = out_z.stride(1); + } + params.out_batch_stride = out.stride(0); + params.out_d_stride = out.stride(1); +} + +void set_ssm_params_bwd(SSMParamsBwd ¶ms, + // sizes + const size_t batch, + const size_t dim, + const size_t seqlen, + const size_t dstate, + const size_t n_groups, + const size_t n_chunks, + const bool is_variable_B, + const bool is_variable_C, + // device pointers + const at::Tensor u, + const at::Tensor delta, + const at::Tensor A, + const at::Tensor B, + const at::Tensor C, + const at::Tensor z, + const at::Tensor out, + const at::Tensor out_z, + void* D_ptr, + void* delta_bias_ptr, + void* x_ptr, + const at::Tensor dout, + const at::Tensor du, + const at::Tensor ddelta, + const at::Tensor dA, + const at::Tensor dB, + const at::Tensor dC, + const at::Tensor dz, + void* dD_ptr, + void* ddelta_bias_ptr, + bool has_z, + bool delta_softplus, + bool recompute_out_z) { + // Pass in "dout" instead of "out", we're not gonna use "out" unless we have z + set_ssm_params_fwd(params, batch, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C, + u, delta, A, B, C, has_z ? out : dout, + has_z ? z : dout, + // If not recompute_out_z, pass dout instead of out_z. + // This won't be used by the bwd kernel + recompute_out_z ? out_z : dout, + D_ptr, delta_bias_ptr, x_ptr, has_z, delta_softplus); + if (!recompute_out_z) { params.out_z_ptr = nullptr; } + + // Set the pointers and strides. 
+ params.dout_ptr = dout.data_ptr(); + params.du_ptr = du.data_ptr(); + params.dA_ptr = dA.data_ptr(); + params.dB_ptr = dB.data_ptr(); + params.dC_ptr = dC.data_ptr(); + params.dD_ptr = dD_ptr; + params.ddelta_ptr = ddelta.data_ptr(); + params.ddelta_bias_ptr = ddelta_bias_ptr; + params.dz_ptr = has_z ? dz.data_ptr() : nullptr; + // All stride are in elements, not bytes. + params.dout_batch_stride = dout.stride(0); + params.dout_d_stride = dout.stride(1); + params.dA_d_stride = dA.stride(0); + params.dA_dstate_stride = dA.stride(1); + if (!is_variable_B) { + params.dB_d_stride = dB.stride(0); + } else { + params.dB_batch_stride = dB.stride(0); + params.dB_group_stride = dB.stride(1); + } + params.dB_dstate_stride = !is_variable_B ? dB.stride(1) : dB.stride(2); + if (!is_variable_C) { + params.dC_d_stride = dC.stride(0); + } else { + params.dC_batch_stride = dC.stride(0); + params.dC_group_stride = dC.stride(1); + } + params.dC_dstate_stride = !is_variable_C ? dC.stride(1) : dC.stride(2); + params.du_batch_stride = du.stride(0); + params.du_d_stride = du.stride(1); + params.ddelta_batch_stride = ddelta.stride(0); + params.ddelta_d_stride = ddelta.stride(1); + if (has_z) { + params.dz_batch_stride = dz.stride(0); + params.dz_d_stride = dz.stride(1); + } +} + +std::vector +selective_scan_fwd(const at::Tensor &u, const at::Tensor &delta, + const at::Tensor &A, const at::Tensor &B, const at::Tensor &C, + const c10::optional &D_, + const c10::optional &z_, + const c10::optional &delta_bias_, + bool delta_softplus) { + auto input_type = u.scalar_type(); + auto weight_type = A.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::ComplexFloat); + + const bool is_variable_B = B.dim() >= 3; + const bool is_variable_C = C.dim() >= 3; + const bool is_complex = weight_type == 
at::ScalarType::ComplexFloat; + + TORCH_CHECK(delta.scalar_type() == input_type); + TORCH_CHECK(B.scalar_type() == (!is_variable_B ? weight_type : input_type)); + TORCH_CHECK(C.scalar_type() == (!is_variable_C ? weight_type : input_type)); + + TORCH_CHECK(u.is_cuda()); + TORCH_CHECK(delta.is_cuda()); + TORCH_CHECK(A.is_cuda()); + TORCH_CHECK(B.is_cuda()); + TORCH_CHECK(C.is_cuda()); + + TORCH_CHECK(u.stride(-1) == 1); + TORCH_CHECK(delta.stride(-1) == 1); + + const auto sizes = u.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int dstate = A.size(1); + const int n_groups = is_variable_B ? B.size(1) : 1; + + TORCH_CHECK(dstate <= 256, "selective_scan only supports state dimension <= 256"); + + CHECK_SHAPE(u, batch_size, dim, seqlen); + CHECK_SHAPE(delta, batch_size, dim, seqlen); + CHECK_SHAPE(A, dim, dstate); + if (!is_variable_B) { + CHECK_SHAPE(B, dim, dstate); + } else { + CHECK_SHAPE(B, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2); + TORCH_CHECK(B.stride(-1) == 1); + } + if (!is_variable_C) { + CHECK_SHAPE(C, dim, dstate); + } else { + CHECK_SHAPE(C, batch_size, n_groups, dstate, !is_complex ? 
seqlen: seqlen * 2); + TORCH_CHECK(C.stride(-1) == 1); + } + + if (D_.has_value()) { + auto D = D_.value(); + TORCH_CHECK(D.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(D.is_cuda()); + TORCH_CHECK(D.stride(-1) == 1); + CHECK_SHAPE(D, dim); + } + + if (delta_bias_.has_value()) { + auto delta_bias = delta_bias_.value(); + TORCH_CHECK(delta_bias.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(delta_bias.is_cuda()); + TORCH_CHECK(delta_bias.stride(-1) == 1); + CHECK_SHAPE(delta_bias, dim); + } + + at::Tensor z, out_z; + const bool has_z = z_.has_value(); + if (has_z) { + z = z_.value(); + TORCH_CHECK(z.scalar_type() == input_type); + TORCH_CHECK(z.is_cuda()); + TORCH_CHECK(z.stride(-1) == 1); + CHECK_SHAPE(z, batch_size, dim, seqlen); + out_z = torch::empty_like(z); + } + + const int n_chunks = (seqlen + 2048 - 1) / 2048; + // const int n_chunks = (seqlen + 1024 - 1) / 1024; + // at::Tensor out = torch::empty_like(u); + // Right now u has BHL layout and delta has HBL layout, and we want out to have HBL layout + at::Tensor out = torch::empty_like(delta); + at::Tensor x; + x = torch::empty({batch_size, dim, n_chunks, dstate * 2}, u.options().dtype(weight_type)); + + SSMParamsBase params; + set_ssm_params_fwd(params, batch_size, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C, + u, delta, A, B, C, out, z, out_z, + D_.has_value() ? D_.value().data_ptr() : nullptr, + delta_bias_.has_value() ? 
delta_bias_.value().data_ptr() : nullptr, + x.data_ptr(), + has_z, + delta_softplus); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)u.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_fwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_COMPLEX(A.scalar_type(), "selective_scan_fwd", [&] { + selective_scan_fwd_cuda(params, stream); + }); + }); + std::vector result = {out, x}; + if (has_z) { result.push_back(out_z); } + return result; +} + +std::vector +selective_scan_bwd(const at::Tensor &u, const at::Tensor &delta, + const at::Tensor &A, const at::Tensor &B, const at::Tensor &C, + const c10::optional &D_, + const c10::optional &z_, + const c10::optional &delta_bias_, + const at::Tensor &dout, + const c10::optional &x_, + const c10::optional &out_, + c10::optional &dz_, + bool delta_softplus, + bool recompute_out_z) { + auto input_type = u.scalar_type(); + auto weight_type = A.scalar_type(); + TORCH_CHECK(input_type == at::ScalarType::Float || input_type == at::ScalarType::Half || input_type == at::ScalarType::BFloat16); + TORCH_CHECK(weight_type == at::ScalarType::Float || weight_type == at::ScalarType::ComplexFloat); + + const bool is_variable_B = B.dim() >= 3; + const bool is_variable_C = C.dim() >= 3; + const bool is_complex = weight_type == at::ScalarType::ComplexFloat; + + TORCH_CHECK(delta.scalar_type() == input_type); + TORCH_CHECK(B.scalar_type() == (!is_variable_B ? weight_type : input_type)); + TORCH_CHECK(C.scalar_type() == (!is_variable_C ? 
weight_type : input_type)); + TORCH_CHECK(dout.scalar_type() == input_type); + + TORCH_CHECK(u.is_cuda()); + TORCH_CHECK(delta.is_cuda()); + TORCH_CHECK(A.is_cuda()); + TORCH_CHECK(B.is_cuda()); + TORCH_CHECK(C.is_cuda()); + TORCH_CHECK(dout.is_cuda()); + + TORCH_CHECK(u.stride(-1) == 1); + TORCH_CHECK(delta.stride(-1) == 1); + TORCH_CHECK(dout.stride(-1) == 1); + + const auto sizes = u.sizes(); + const int batch_size = sizes[0]; + const int dim = sizes[1]; + const int seqlen = sizes[2]; + const int dstate = A.size(1); + const int n_groups = is_variable_B ? B.size(1) : 1; + + TORCH_CHECK(dstate <= 256, "selective_scan only supports state dimension <= 256"); + + CHECK_SHAPE(u, batch_size, dim, seqlen); + CHECK_SHAPE(delta, batch_size, dim, seqlen); + CHECK_SHAPE(A, dim, dstate); + if (!is_variable_B) { + CHECK_SHAPE(B, dim, dstate); + } else { + CHECK_SHAPE(B, batch_size, n_groups, dstate, !is_complex ? seqlen : seqlen * 2); + TORCH_CHECK(B.stride(-1) == 1); + } + if (!is_variable_C) { + CHECK_SHAPE(C, dim, dstate); + } else { + CHECK_SHAPE(C, batch_size, n_groups, dstate, !is_complex ? 
seqlen: seqlen * 2); + TORCH_CHECK(C.stride(-1) == 1); + } + CHECK_SHAPE(dout, batch_size, dim, seqlen); + + if (D_.has_value()) { + auto D = D_.value(); + TORCH_CHECK(D.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(D.is_cuda()); + TORCH_CHECK(D.stride(-1) == 1); + CHECK_SHAPE(D, dim); + } + + if (delta_bias_.has_value()) { + auto delta_bias = delta_bias_.value(); + TORCH_CHECK(delta_bias.scalar_type() == at::ScalarType::Float); + TORCH_CHECK(delta_bias.is_cuda()); + TORCH_CHECK(delta_bias.stride(-1) == 1); + CHECK_SHAPE(delta_bias, dim); + } + + at::Tensor z, out, dz, out_z; + const bool has_z = z_.has_value(); + if (has_z) { + z = z_.value(); + TORCH_CHECK(z.scalar_type() == input_type); + TORCH_CHECK(z.is_cuda()); + TORCH_CHECK(z.stride(-1) == 1); + CHECK_SHAPE(z, batch_size, dim, seqlen); + + TORCH_CHECK(out_.has_value()); + out = out_.value(); + TORCH_CHECK(out.scalar_type() == input_type); + TORCH_CHECK(out.is_cuda()); + TORCH_CHECK(out.stride(-1) == 1); + CHECK_SHAPE(out, batch_size, dim, seqlen); + + if (dz_.has_value()) { + dz = dz_.value(); + TORCH_CHECK(dz.scalar_type() == input_type); + TORCH_CHECK(dz.is_cuda()); + TORCH_CHECK(dz.stride(-1) == 1); + CHECK_SHAPE(dz, batch_size, dim, seqlen); + } else { + dz = torch::empty_like(z); + } + if (recompute_out_z) { + out_z = torch::empty_like(out); + } + } + + const int n_chunks = (seqlen + 2048 - 1) / 2048; + // const int n_chunks = (seqlen + 1024 - 1) / 1024; + if (n_chunks > 1) { TORCH_CHECK(x_.has_value()); } + if (x_.has_value()) { + auto x = x_.value(); + TORCH_CHECK(x.scalar_type() == weight_type); + TORCH_CHECK(x.is_cuda()); + TORCH_CHECK(x.is_contiguous()); + CHECK_SHAPE(x, batch_size, dim, n_chunks, 2 * dstate); + } + + at::Tensor du = torch::empty_like(u); + at::Tensor ddelta = torch::empty_like(delta); + at::Tensor dA = torch::zeros_like(A); + at::Tensor dB = !is_variable_B ? torch::zeros_like(B) : torch::zeros_like(B, B.options().dtype(torch::kFloat32)); + at::Tensor dC = !is_variable_C ? 
torch::zeros_like(C) : torch::zeros_like(C, C.options().dtype(torch::kFloat32)); + at::Tensor dD; + if (D_.has_value()) { dD = torch::zeros_like(D_.value()); } + at::Tensor ddelta_bias; + if (delta_bias_.has_value()) { ddelta_bias = torch::zeros_like(delta_bias_.value()); } + + SSMParamsBwd params; + set_ssm_params_bwd(params, batch_size, dim, seqlen, dstate, n_groups, n_chunks, is_variable_B, is_variable_C, + u, delta, A, B, C, z, out, out_z, + D_.has_value() ? D_.value().data_ptr() : nullptr, + delta_bias_.has_value() ? delta_bias_.value().data_ptr() : nullptr, + x_.has_value() ? x_.value().data_ptr() : nullptr, + dout, du, ddelta, dA, dB, dC, dz, + D_.has_value() ? dD.data_ptr() : nullptr, + delta_bias_.has_value() ? ddelta_bias.data_ptr() : nullptr, + has_z, delta_softplus, recompute_out_z); + + // Otherwise the kernel will be launched from cuda:0 device + // Cast to char to avoid compiler warning about narrowing + at::cuda::CUDAGuard device_guard{(char)u.get_device()}; + auto stream = at::cuda::getCurrentCUDAStream().stream(); + DISPATCH_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_bwd", [&] { + DISPATCH_WTYPE_FLOAT_AND_COMPLEX(A.scalar_type(), "selective_scan_bwd", [&] { + selective_scan_bwd_cuda(params, stream); + }); + }); + std::vector result = {du, ddelta, dA, dB.to(B.dtype()), dC.to(C.dtype()), dD, ddelta_bias}; + if (has_z) { result.push_back(dz); } + if (recompute_out_z) { result.push_back(out_z); } + return result; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("fwd", &selective_scan_fwd, "Selective scan forward"); + m.def("bwd", &selective_scan_bwd, "Selective scan backward"); +} diff --git a/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu new file mode 100644 index 0000000000000000000000000000000000000000..c55f0e858af4ebd246a5d251308ab920b4e01a50 --- /dev/null +++ 
b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_complex.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu new file mode 100644 index 0000000000000000000000000000000000000000..72adaf5cb13c6429e2f345a0a823c6bc3722b95a --- /dev/null +++ b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_bf16_real.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu new file mode 100644 index 0000000000000000000000000000000000000000..df126d7c8d5f9f0862273d2fe21ea15b35757b64 --- /dev/null +++ b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_complex.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
+ ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu new file mode 100644 index 0000000000000000000000000000000000000000..3ff271b50eaff208ae33c16c87ab7aaee76dfd76 --- /dev/null +++ b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp16_real.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu new file mode 100644 index 0000000000000000000000000000000000000000..5554902342785b289b81c060a71a51734fc1e6bf --- /dev/null +++ b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_complex.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
+ ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu new file mode 100644 index 0000000000000000000000000000000000000000..a7ed642231da80c455c0499702cc8a1cb4536ec2 --- /dev/null +++ b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_fp32_real.cu @@ -0,0 +1,9 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_bwd_kernel.cuh" + +template void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_kernel.cuh b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_kernel.cuh new file mode 100644 index 0000000000000000000000000000000000000000..2ed101148a4b32560111e5a832fc8b5881a4b243 --- /dev/null +++ b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_bwd_kernel.cuh @@ -0,0 +1,531 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
+ ******************************************************************************/ + +#pragma once + +#include +#include +#include // For C10_CUDA_CHECK and C10_CUDA_KERNEL_LAUNCH_CHECK +#include // For atomicAdd on complex + +#include +#include +#include +#include + +#include "selective_scan.h" +#include "selective_scan_common.h" +#include "reverse_scan.cuh" +#include "static_switch.h" + +template __device__ __forceinline__ scalar_t conj(scalar_t x); +template<> __device__ __forceinline__ float conj(float x) { return x; } +template<> __device__ __forceinline__ complex_t conj(complex_t x) { return std::conj(x); } + +template +struct Selective_Scan_bwd_kernel_traits { + static_assert(kNItems_ % 4 == 0); + using input_t = input_t_; + using weight_t = weight_t_; + static constexpr int kNThreads = kNThreads_; + static constexpr int kNItems = kNItems_; + static constexpr int kNBytes = sizeof(input_t); + static_assert(kNBytes == 2 || kNBytes == 4); + static constexpr int kNElts = kNBytes == 4 ? 4 : std::min(8, kNItems); + static_assert(kNItems % kNElts == 0); + static constexpr int kNLoads = kNItems / kNElts; + static constexpr bool kIsComplex = std::is_same_v; + static constexpr bool kIsEvenLen = kIsEvenLen_; + static constexpr bool kIsVariableB = kIsVariableB_; + static constexpr bool kIsVariableC = kIsVariableC_; + static constexpr bool kDeltaSoftplus = kDeltaSoftplus_; + static constexpr bool kHasZ = kHasZ_; + // Setting MinBlocksPerMP to be 3 (instead of 2) for 128 threads with float improves occupancy. + // For complex this would lead to massive register spilling, so we keep it at 2. + static constexpr int kMinBlocks = kNThreads == 128 && !kIsComplex ? 
3 : 2; + using vec_t = typename BytesToType::Type; + using scan_t = std::conditional_t; + using BlockLoadT = cub::BlockLoad; + using BlockLoadVecT = cub::BlockLoad; + using BlockLoadWeightT = cub::BlockLoad; + using BlockLoadWeightVecT = cub::BlockLoad; + using BlockStoreT = cub::BlockStore; + using BlockStoreVecT = cub::BlockStore; + // using BlockScanT = cub::BlockScan; + using BlockScanT = cub::BlockScan; + // using BlockScanT = cub::BlockScan; + using BlockReverseScanT = BlockReverseScan; + using BlockReduceT = cub::BlockReduce; + using BlockReduceFloatT = cub::BlockReduce; + using BlockReduceComplexT = cub::BlockReduce; + using BlockExchangeT = cub::BlockExchange; + static constexpr int kSmemIOSize = std::max({sizeof(typename BlockLoadT::TempStorage), + sizeof(typename BlockLoadVecT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightT::TempStorage), + (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockLoadWeightVecT::TempStorage), + sizeof(typename BlockStoreT::TempStorage), + sizeof(typename BlockStoreVecT::TempStorage)}); + static constexpr int kSmemExchangeSize = (int(kIsVariableB) + int(kIsVariableC)) * sizeof(typename BlockExchangeT::TempStorage); + static constexpr int kSmemReduceSize = sizeof(typename BlockReduceT::TempStorage); + static constexpr int kSmemSize = kSmemIOSize + kSmemExchangeSize + kSmemReduceSize + sizeof(typename BlockScanT::TempStorage) + sizeof(typename BlockReverseScanT::TempStorage); +}; + +template +__global__ __launch_bounds__(Ktraits::kNThreads, Ktraits::kMinBlocks) +void selective_scan_bwd_kernel(SSMParamsBwd params) { + constexpr bool kIsComplex = Ktraits::kIsComplex; + constexpr bool kIsVariableB = Ktraits::kIsVariableB; + constexpr bool kIsVariableC = Ktraits::kIsVariableC; + constexpr bool kDeltaSoftplus = Ktraits::kDeltaSoftplus; + constexpr bool kHasZ = Ktraits::kHasZ; + constexpr int kNThreads = Ktraits::kNThreads; + constexpr int kNItems = Ktraits::kNItems; + using 
input_t = typename Ktraits::input_t; + using weight_t = typename Ktraits::weight_t; + using scan_t = typename Ktraits::scan_t; + + // Shared memory. + extern __shared__ char smem_[]; + // cast to lvalue reference of expected type + // char *smem_loadstorescan = smem_ + 2 * MAX_DSTATE * sizeof(weight_t); + // auto& smem_load = reinterpret_cast(smem_ + 2 * MAX_DSTATE * sizeof(weight_t)); + // auto& smem_load = reinterpret_cast(smem_loadstorescan); + auto& smem_load = reinterpret_cast(smem_); + auto& smem_load_weight = reinterpret_cast(smem_); + auto& smem_load_weight1 = *reinterpret_cast(smem_ + sizeof(typename Ktraits::BlockLoadWeightT::TempStorage)); + auto& smem_store = reinterpret_cast(smem_); + auto& smem_exchange = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize); + auto& smem_exchange1 = *reinterpret_cast(smem_ + Ktraits::kSmemIOSize + sizeof(typename Ktraits::BlockExchangeT::TempStorage)); + auto& smem_reduce = *reinterpret_cast(reinterpret_cast(&smem_exchange) + Ktraits::kSmemExchangeSize); + auto& smem_reduce_float = *reinterpret_cast(&smem_reduce); + auto& smem_reduce_complex = *reinterpret_cast(&smem_reduce); + auto& smem_scan = *reinterpret_cast(reinterpret_cast(&smem_reduce) + Ktraits::kSmemReduceSize); + auto& smem_reverse_scan = *reinterpret_cast(reinterpret_cast(&smem_scan) + sizeof(typename Ktraits::BlockScanT::TempStorage)); + weight_t *smem_delta_a = reinterpret_cast(smem_ + Ktraits::kSmemSize); + scan_t *smem_running_postfix = reinterpret_cast(smem_delta_a + 2 * MAX_DSTATE + kNThreads); + weight_t *smem_da = reinterpret_cast(smem_running_postfix + MAX_DSTATE); + weight_t *smem_dbc = reinterpret_cast(smem_da + MAX_DSTATE); + + const int batch_id = blockIdx.x; + const int dim_id = blockIdx.y; + const int group_id = dim_id / (params.dim_ngroups_ratio); + input_t *u = reinterpret_cast(params.u_ptr) + batch_id * params.u_batch_stride + + dim_id * params.u_d_stride; + input_t *delta = reinterpret_cast(params.delta_ptr) + batch_id * 
params.delta_batch_stride + + dim_id * params.delta_d_stride; + input_t *dout = reinterpret_cast(params.dout_ptr) + batch_id * params.dout_batch_stride + + dim_id * params.dout_d_stride; + weight_t *A = reinterpret_cast(params.A_ptr) + dim_id * params.A_d_stride; + weight_t *B = reinterpret_cast(params.B_ptr) + dim_id * params.B_d_stride; + input_t *Bvar = reinterpret_cast(params.B_ptr) + batch_id * params.B_batch_stride + group_id * params.B_group_stride; + weight_t *C = reinterpret_cast(params.C_ptr) + dim_id * params.C_d_stride; + input_t *Cvar = reinterpret_cast(params.C_ptr) + batch_id * params.C_batch_stride + group_id * params.C_group_stride; + weight_t *dA = reinterpret_cast(params.dA_ptr) + dim_id * params.dA_d_stride; + weight_t *dB = reinterpret_cast(params.dB_ptr) + + (!kIsVariableB ? dim_id * params.dB_d_stride : batch_id * (!kIsComplex ? params.dB_batch_stride : params.dB_batch_stride / 2) + group_id * params.dB_group_stride); + weight_t *dC = reinterpret_cast(params.dC_ptr) + + (!kIsVariableC ? dim_id * params.dC_d_stride : batch_id * (!kIsComplex ? params.dC_batch_stride : params.dC_batch_stride / 2) + group_id * params.dC_group_stride); + float *dD = params.dD_ptr == nullptr ? nullptr : reinterpret_cast(params.dD_ptr) + dim_id; + float D_val = params.D_ptr == nullptr ? 0 : reinterpret_cast(params.D_ptr)[dim_id]; + float *ddelta_bias = params.ddelta_bias_ptr == nullptr ? nullptr : reinterpret_cast(params.ddelta_bias_ptr) + dim_id; + float delta_bias = params.delta_bias_ptr == nullptr ? 0 : reinterpret_cast(params.delta_bias_ptr)[dim_id]; + scan_t *x = params.x_ptr == nullptr + ? 
nullptr + : reinterpret_cast(params.x_ptr) + (batch_id * params.dim + dim_id) * (params.n_chunks) * params.dstate; + float dD_val = 0; + float ddelta_bias_val = 0; + + constexpr int kChunkSize = kNThreads * kNItems; + u += (params.n_chunks - 1) * kChunkSize; + delta += (params.n_chunks - 1) * kChunkSize; + dout += (params.n_chunks - 1) * kChunkSize; + Bvar += (params.n_chunks - 1) * kChunkSize * (!kIsComplex ? 1 : 2); + Cvar += (params.n_chunks - 1) * kChunkSize * (!kIsComplex ? 1 : 2); + for (int chunk = params.n_chunks - 1; chunk >= 0; --chunk) { + input_t u_vals[kNItems]; + input_t delta_vals_load[kNItems]; + input_t dout_vals_load[kNItems]; + __syncthreads(); + load_input(u, u_vals, smem_load, params.seqlen - chunk * kChunkSize); + u -= kChunkSize; + __syncthreads(); + load_input(delta, delta_vals_load, smem_load, params.seqlen - chunk * kChunkSize); + // Will reload delta at the same location if kDeltaSoftplus + if constexpr (!kDeltaSoftplus) { delta -= kChunkSize; } + __syncthreads(); + load_input(dout, dout_vals_load, smem_load, params.seqlen - chunk * kChunkSize); + dout -= kChunkSize; + + float dout_vals[kNItems], delta_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + dout_vals[i] = float(dout_vals_load[i]); + delta_vals[i] = float(delta_vals_load[i]) + delta_bias; + if constexpr (kDeltaSoftplus) { + delta_vals[i] = delta_vals[i] <= 20.f ? 
log1pf(expf(delta_vals[i])) : delta_vals[i]; + } + } + + if constexpr (kHasZ) { + input_t *z = reinterpret_cast(params.z_ptr) + batch_id * params.z_batch_stride + + dim_id * params.z_d_stride + chunk * kChunkSize; + input_t *out = reinterpret_cast(params.out_ptr) + batch_id * params.out_batch_stride + + dim_id * params.out_d_stride + chunk * kChunkSize; + input_t *dz = reinterpret_cast(params.dz_ptr) + batch_id * params.dz_batch_stride + + dim_id * params.dz_d_stride + chunk * kChunkSize; + input_t z_vals[kNItems], out_vals[kNItems]; + __syncthreads(); + load_input(z, z_vals, smem_load, params.seqlen - chunk * kChunkSize); + __syncthreads(); + load_input(out, out_vals, smem_load, params.seqlen - chunk * kChunkSize); + float dz_vals[kNItems], z_silu_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float z_val = z_vals[i]; + float z_sigmoid_val = 1.0f / (1.0f + expf(-z_val)); + z_silu_vals[i] = z_val * z_sigmoid_val; + dz_vals[i] = dout_vals[i] * float(out_vals[i]) * z_sigmoid_val + * (1.0f + z_val * (1.0f - z_sigmoid_val)); + dout_vals[i] *= z_silu_vals[i]; + } + __syncthreads(); + store_output(dz, dz_vals, smem_store, params.seqlen - chunk * kChunkSize); + if (params.out_z_ptr != nullptr) { // Recompute and store out_z + float out_z_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { out_z_vals[i] = float(out_vals[i]) * z_silu_vals[i]; } + // if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0) { + // printf("out_val=%f, z_silu_val = %f, out_z_val = %f\n", float(out_vals[0]), z_silu_vals[0], out_z_vals[0]); + // } + input_t *out_z = reinterpret_cast(params.out_z_ptr) + batch_id * params.out_z_batch_stride + + dim_id * params.out_z_d_stride + chunk * kChunkSize; + __syncthreads(); + store_output(out_z, out_z_vals, smem_store, params.seqlen - chunk * kChunkSize); + } + } + + float du_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { du_vals[i] = D_val * dout_vals[i]; } + #pragma unroll + for (int i = 
0; i < kNItems; ++i) { dD_val += dout_vals[i] * float(u_vals[i]); } + + float ddelta_vals[kNItems] = {0}; + __syncthreads(); + for (int state_idx = 0; state_idx < params.dstate; ++state_idx) { + const weight_t A_val = A[state_idx * params.A_dstate_stride]; + // Multiply the real part of A with LOG2E so we can use exp2f instead of expf. + weight_t A_scaled; + constexpr float kLog2e = M_LOG2E; + if constexpr (!kIsComplex) { + A_scaled = A_val * kLog2e; + } else { + A_scaled = complex_t(A_val.real_ * kLog2e, A_val.imag_); + } + weight_t B_val, C_val; + weight_t B_vals[kNItems], C_vals[kNItems]; + if constexpr (!kIsVariableB) { + B_val = B[state_idx * params.B_dstate_stride]; + } else { + load_weight(Bvar + state_idx * params.B_dstate_stride, B_vals, + smem_load_weight, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + } + if constexpr (!kIsVariableC) { + C_val = C[state_idx * params.C_dstate_stride]; + } else { + auto &smem_load_weight_C = !kIsVariableB ? smem_load_weight : smem_load_weight1; + load_weight(Cvar + state_idx * params.C_dstate_stride, C_vals, + smem_load_weight_C, (params.seqlen - chunk * kChunkSize) * (!kIsComplex ? 1 : 2)); + } + // const weight_t A_val = smem_a[state_idx]; + scan_t thread_data[kNItems], thread_reverse_data[kNItems]; + if constexpr (!kIsComplex) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + const float delta_a_exp = exp2f(delta_vals[i] * A_scaled); + thread_data[i] = make_float2(delta_a_exp, !kIsVariableB ? delta_vals[i] * float(u_vals[i]) : delta_vals[i] * float(u_vals[i]) * B_vals[i]); + if (i == 0) { + smem_delta_a[threadIdx.x == 0 ? state_idx + (chunk % 2) * MAX_DSTATE : threadIdx.x + 2 * MAX_DSTATE] = delta_a_exp; + } else { + thread_reverse_data[i - 1].x = delta_a_exp; + } + thread_reverse_data[i].y = dout_vals[i] * + (!kIsVariableC + ? (!kIsVariableB ? B_val * C_val : C_val) + : (!kIsVariableB ? 
B_val * C_vals[i] : C_vals[i])); + } + __syncthreads(); + thread_reverse_data[kNItems - 1].x = threadIdx.x == kNThreads - 1 + ? (chunk == params.n_chunks - 1 ? 1.f : smem_delta_a[state_idx + ((chunk + 1) % 2) * MAX_DSTATE]) + : smem_delta_a[threadIdx.x + 1 + 2 * MAX_DSTATE]; + // Initialize running total + scan_t running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? x[(chunk - 1) * params.dstate + state_idx] : make_float2(1.f, 0.f); + SSMScanPrefixCallbackOp prefix_op(running_prefix); + Ktraits::BlockScanT(smem_scan).InclusiveScan( + thread_data, thread_data, SSMScanOp(), prefix_op + ); + scan_t running_postfix = chunk < params.n_chunks - 1 && threadIdx.x % 32 == 0 ? smem_running_postfix[state_idx] : make_float2(1.f, 0.f); + SSMScanPrefixCallbackOp postfix_op(running_postfix); + Ktraits::BlockReverseScanT(smem_reverse_scan).InclusiveReverseScan( + thread_reverse_data, thread_reverse_data, SSMScanOp(), postfix_op + ); + if (threadIdx.x == 0) { smem_running_postfix[state_idx] = postfix_op.running_prefix; } + weight_t dA_val = 0, dBC_val = 0; + weight_t dB_vals[kNItems], dC_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + const float dx = thread_reverse_data[i].y; + const float ddelta_u = !kIsVariableB ? dx : dx * B_vals[i]; + du_vals[i] += ddelta_u * delta_vals[i]; + const float a = thread_data[i].y - (!kIsVariableB ? delta_vals[i] * float(u_vals[i]) : delta_vals[i] * float(u_vals[i]) * B_vals[i]); + ddelta_vals[i] += ddelta_u * float(u_vals[i]) + dx * A_val * a; + dA_val += dx * delta_vals[i] * a; + if constexpr (!kIsVariableB || !kIsVariableC) { + if constexpr (!kIsVariableB) { // dBC_val is dB_val + dBC_val += dout_vals[i] * (!kIsVariableC ? thread_data[i].y : thread_data[i].y * C_vals[i]); + } else { // dBC_val is dC_val + dBC_val += dout_vals[i] * thread_data[i].y; + } + } + if constexpr (kIsVariableB) { dB_vals[i] = dx * delta_vals[i] * float(u_vals[i]); } + if constexpr (kIsVariableC) { + dC_vals[i] = dout_vals[i] * (!kIsVariableB ? 
thread_data[i].y * B_val : thread_data[i].y); + } + } + // Block-exchange to make the atomicAdd's coalesced, otherwise they're much slower + if constexpr (kIsVariableB || kIsVariableC) { + if constexpr (kIsVariableB) { + Ktraits::BlockExchangeT(smem_exchange).BlockedToStriped(dB_vals, dB_vals); + } + if constexpr (kIsVariableC) { + auto &smem_exchange_C = !kIsVariableB ? smem_exchange : smem_exchange1; + Ktraits::BlockExchangeT(smem_exchange_C).BlockedToStriped(dC_vals, dC_vals); + } + const int seqlen_remaining = params.seqlen - chunk * kChunkSize - threadIdx.x; + weight_t *dB_cur = dB + state_idx * params.dB_dstate_stride + chunk * kChunkSize + threadIdx.x; + weight_t *dC_cur = dC + state_idx * params.dC_dstate_stride + chunk * kChunkSize + threadIdx.x; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + if (i * kNThreads < seqlen_remaining) { + if constexpr (kIsVariableB) { gpuAtomicAdd(dB_cur + i * kNThreads, dB_vals[i]); } + if constexpr (kIsVariableC) { gpuAtomicAdd(dC_cur + i * kNThreads, dC_vals[i]); } + } + } + } + if constexpr (!kIsVariableB || !kIsVariableC) { + float2 dA_dBC_val = make_float2(dA_val, dBC_val); + dA_dBC_val = Ktraits::BlockReduceT(smem_reduce).Sum(dA_dBC_val); + dA_val = dA_dBC_val.x; + if (threadIdx.x == 0) { + smem_dbc[state_idx] = chunk == params.n_chunks - 1 ? dA_dBC_val.y : dA_dBC_val.y + smem_dbc[state_idx]; + } + } else { + dA_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dA_val); + } + if (threadIdx.x == 0) { + smem_da[state_idx] = chunk == params.n_chunks - 1 ? dA_val : dA_val + smem_da[state_idx]; + } + } else { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + // Pytorch's implementation of complex exp (which calls thrust) is very slow + complex_t delta_a_exp = cexp2f(delta_vals[i] * A_scaled); + weight_t B_delta_u_val = !kIsVariableB ? 
delta_vals[i] * float(u_vals[i]) : B_vals[i] * delta_vals[i] * float(u_vals[i]); + thread_data[i] = make_float4(delta_a_exp.real_, delta_a_exp.imag_, B_delta_u_val.real_, B_delta_u_val.imag_); + if (i == 0) { + smem_delta_a[threadIdx.x == 0 ? state_idx + (chunk % 2) * MAX_DSTATE : threadIdx.x + 2 * MAX_DSTATE] = delta_a_exp; + } else { + thread_reverse_data[i - 1].x = delta_a_exp.real_; + thread_reverse_data[i - 1].y = -delta_a_exp.imag_; + } + complex_t dout_BC = 2 * dout_vals[i] + * conj(!kIsVariableC + ? (!kIsVariableB ? B_val * C_val : C_val) + : (!kIsVariableB ? B_val * C_vals[i] : C_vals[i])); + thread_reverse_data[i].z = dout_BC.real_; + thread_reverse_data[i].w = dout_BC.imag_; + } + __syncthreads(); + complex_t delta_a_exp = threadIdx.x == kNThreads - 1 + ? (chunk == params.n_chunks - 1 ? 1.f : smem_delta_a[state_idx + ((chunk + 1) % 2) * MAX_DSTATE]) + : smem_delta_a[threadIdx.x + 1 + 2 * MAX_DSTATE]; + thread_reverse_data[kNItems - 1].x = delta_a_exp.real_; + thread_reverse_data[kNItems - 1].y = -delta_a_exp.imag_; + // Initialize running total + scan_t running_prefix = chunk > 0 && threadIdx.x % 32 == 0 ? x[(chunk - 1) * params.dstate + state_idx] : make_float4(1.f, 0.f, 0.f, 0.f); + SSMScanPrefixCallbackOp prefix_op(running_prefix); + Ktraits::BlockScanT(smem_scan).InclusiveScan( + thread_data, thread_data, SSMScanOp(), prefix_op + ); + scan_t running_postfix = chunk < params.n_chunks - 1 && threadIdx.x % 32 == 0 ? 
smem_running_postfix[state_idx] : make_float4(1.f, 0.f, 0.f, 0.f); + SSMScanPrefixCallbackOp postfix_op(running_postfix); + Ktraits::BlockReverseScanT(smem_reverse_scan).InclusiveReverseScan( + thread_reverse_data, thread_reverse_data, SSMScanOp(), postfix_op + ); + if (threadIdx.x == 0) { smem_running_postfix[state_idx] = postfix_op.running_prefix; } + weight_t dA_val = 0, dBC_val = 0; + weight_t dB_vals[kNItems], dC_vals[kNItems]; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + complex_t x = complex_t(thread_data[i].z, thread_data[i].w); + complex_t dx = complex_t(thread_reverse_data[i].z, thread_reverse_data[i].w); + float ddelta_u = !kIsVariableB ? dx.real_ : (dx * conj(B_vals[i])).real_; + if constexpr (!kIsVariableB || !kIsVariableC) { + if constexpr (!kIsVariableB) { // dBC_val is dB_val + dBC_val += (2 * dout_vals[i]) * conj(!kIsVariableC ? x : x * C_vals[i]); + } else { // dBC_val is dC_val + dBC_val += (2 * dout_vals[i]) * conj(x); + } + } + const complex_t a_conj = conj(x - (!kIsVariableB ? delta_vals[i] * float(u_vals[i]) : delta_vals[i] * float(u_vals[i]) * B_vals[i])); + du_vals[i] += ddelta_u * delta_vals[i]; + ddelta_vals[i] += ddelta_u * float(u_vals[i]) + (dx * conj(A_val) * a_conj).real_; + dA_val += delta_vals[i] * dx * a_conj; + if constexpr (kIsVariableB) { dB_vals[i] = dx * delta_vals[i] * float(u_vals[i]); } + if constexpr (kIsVariableC) { + dC_vals[i] = (2 * dout_vals[i]) * conj(!kIsVariableB ? 
x * B_val : x); + } + } + // Block-exchange to make the atomicAdd's coalesced, otherwise they're much slower + if constexpr (kIsVariableB || kIsVariableC) { + float dB_vals_f[kNItems * 2], dC_vals_f[kNItems * 2]; + if constexpr (kIsVariableB) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + dB_vals_f[i * 2] = dB_vals[i].real_; + dB_vals_f[i * 2 + 1] = dB_vals[i].imag_; + } + Ktraits::BlockExchangeT(smem_exchange).BlockedToStriped(dB_vals_f, dB_vals_f); + } + if constexpr (kIsVariableC) { + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + dC_vals_f[i * 2] = dC_vals[i].real_; + dC_vals_f[i * 2 + 1] = dC_vals[i].imag_; + } + auto &smem_exchange_C = !kIsVariableB ? smem_exchange : smem_exchange1; + Ktraits::BlockExchangeT(smem_exchange_C).BlockedToStriped(dC_vals_f, dC_vals_f); + } + const int seqlen_remaining = (params.seqlen - chunk * kChunkSize) * 2 - threadIdx.x; + float *dB_cur = reinterpret_cast(dB) + state_idx * params.dB_dstate_stride + chunk * kChunkSize * 2 + threadIdx.x; + float *dC_cur = reinterpret_cast(dC) + state_idx * params.dC_dstate_stride + chunk * kChunkSize * 2 + threadIdx.x; + #pragma unroll + for (int i = 0; i < kNItems * 2; ++i) { + if (i * kNThreads < seqlen_remaining) { + if constexpr (kIsVariableB) { gpuAtomicAdd(dB_cur + i * kNThreads, dB_vals_f[i]); } + if constexpr (kIsVariableC) { gpuAtomicAdd(dC_cur + i * kNThreads, dC_vals_f[i]); } + } + } + } + if constexpr (!kIsVariableB || !kIsVariableC) { + float4 dA_dBC_val = make_float4(dA_val.real_, dA_val.imag_, dBC_val.real_, dBC_val.imag_); + dA_dBC_val = Ktraits::BlockReduceT(smem_reduce).Sum(dA_dBC_val); + dA_val = complex_t(dA_dBC_val.x, dA_dBC_val.y); + dBC_val = complex_t(dA_dBC_val.z, dA_dBC_val.w); + if (threadIdx.x == 0) { + smem_dbc[state_idx] = chunk == params.n_chunks - 1 ? 
dBC_val : dBC_val + smem_dbc[state_idx]; + } + } else { + dA_val = Ktraits::BlockReduceComplexT(smem_reduce_complex).Sum(dA_val); + } + if (threadIdx.x == 0) { + smem_da[state_idx] = chunk == params.n_chunks - 1 ? dA_val : dA_val + smem_da[state_idx]; + } + } + } + + if constexpr (kDeltaSoftplus) { + __syncthreads(); + input_t delta_vals_load[kNItems]; + load_input(delta, delta_vals_load, smem_load, params.seqlen - chunk * kChunkSize); + delta -= kChunkSize; + #pragma unroll + for (int i = 0; i < kNItems; ++i) { + float delta_val = float(delta_vals_load[i]) + delta_bias; + float delta_val_neg_exp = expf(-delta_val); + ddelta_vals[i] = delta_val <= 20.f + ? ddelta_vals[i] / (1.f + delta_val_neg_exp) + : ddelta_vals[i]; + } + } + for (int i = 0; i < kNItems; ++i) { ddelta_bias_val += ddelta_vals[i]; } + + input_t *du = reinterpret_cast(params.du_ptr) + batch_id * params.du_batch_stride + + dim_id * params.du_d_stride + chunk * kChunkSize; + input_t *ddelta = reinterpret_cast(params.ddelta_ptr) + batch_id * params.ddelta_batch_stride + + dim_id * params.ddelta_d_stride + chunk * kChunkSize; + __syncthreads(); + store_output(du, du_vals, smem_store, params.seqlen - chunk * kChunkSize); + __syncthreads(); + store_output(ddelta, ddelta_vals, smem_store, params.seqlen - chunk * kChunkSize); + + Bvar -= kChunkSize * (!kIsComplex ? 1 : 2); + Cvar -= kChunkSize * (!kIsComplex ? 
1 : 2); + } + if (params.dD_ptr != nullptr) { + dD_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(dD_val); + if (threadIdx.x == 0) { gpuAtomicAdd(dD, dD_val); } + } + if (params.ddelta_bias_ptr != nullptr) { + __syncthreads(); + ddelta_bias_val = Ktraits::BlockReduceFloatT(smem_reduce_float).Sum(ddelta_bias_val); + if (threadIdx.x == 0) { gpuAtomicAdd(ddelta_bias, ddelta_bias_val); } + } + for (int state_idx = threadIdx.x; state_idx < params.dstate; state_idx += blockDim.x) { + gpuAtomicAdd(&(dA[state_idx * params.dA_dstate_stride]), smem_da[state_idx]); + weight_t dBC_val; + if (!kIsVariableB || !kIsVariableC) { dBC_val = smem_dbc[state_idx]; } + if constexpr (!kIsVariableB) { + gpuAtomicAdd(&(dB[state_idx * params.dB_dstate_stride]), + !kIsVariableC ? dBC_val * conj(C[state_idx * params.C_dstate_stride]) : dBC_val); + } + if constexpr (!kIsVariableC) { + gpuAtomicAdd(&(dC[state_idx * params.dC_dstate_stride]), + !kIsVariableB ? dBC_val * conj(B[state_idx * params.B_dstate_stride]) : dBC_val); + } + } +} + +template +void selective_scan_bwd_launch(SSMParamsBwd ¶ms, cudaStream_t stream) { + BOOL_SWITCH(params.seqlen % (kNThreads * kNItems) == 0, kIsEvenLen, [&] { + BOOL_SWITCH(params.is_variable_B, kIsVariableB, [&] { + BOOL_SWITCH(params.is_variable_C, kIsVariableC, [&] { + BOOL_SWITCH(params.delta_softplus, kDeltaSoftplus, [&] { + BOOL_SWITCH(params.z_ptr != nullptr , kHasZ, [&] { + using Ktraits = Selective_Scan_bwd_kernel_traits; + // using Ktraits = Selective_Scan_bwd_kernel_traits; + // TODO: check this + constexpr int kSmemSize = Ktraits::kSmemSize + MAX_DSTATE * sizeof(typename Ktraits::scan_t) + (kNThreads + 4 * MAX_DSTATE) * sizeof(typename Ktraits::weight_t); + // printf("smem_size = %d\n", kSmemSize); + dim3 grid(params.batch, params.dim); + auto kernel = &selective_scan_bwd_kernel; + if (kSmemSize >= 48 * 1024) { + C10_CUDA_CHECK(cudaFuncSetAttribute( + kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, kSmemSize)); + } + kernel<<>>(params); 
+ C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); + }); + }); + }); + }); +} + +template +void selective_scan_bwd_cuda(SSMParamsBwd ¶ms, cudaStream_t stream) { + if (params.seqlen <= 128) { + selective_scan_bwd_launch<32, 4, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 256) { + selective_scan_bwd_launch<32, 8, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 512) { + selective_scan_bwd_launch<32, 16, input_t, weight_t>(params, stream); + } else if (params.seqlen <= 1024) { + selective_scan_bwd_launch<64, 16, input_t, weight_t>(params, stream); + } else { + selective_scan_bwd_launch<128, 16, input_t, weight_t>(params, stream); + } +} \ No newline at end of file diff --git a/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu new file mode 100644 index 0000000000000000000000000000000000000000..2b8615b1d522c119125d4cb6ff3dce42f2bd4659 --- /dev/null +++ b/source_code/SegMamba/mamba/csrc/selective_scan/selective_scan_fwd_bf16.cu @@ -0,0 +1,10 @@ +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. 
+ ******************************************************************************/ + +// Split into multiple files to compile in paralell + +#include "selective_scan_fwd_kernel.cuh" + +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); +template void selective_scan_fwd_cuda(SSMParamsBase ¶ms, cudaStream_t stream); \ No newline at end of file diff --git a/source_code/SegMamba/mamba/evals/lm_harness_eval.py b/source_code/SegMamba/mamba/evals/lm_harness_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..d09d40534cf53be4d1387666697c82aa53add625 --- /dev/null +++ b/source_code/SegMamba/mamba/evals/lm_harness_eval.py @@ -0,0 +1,39 @@ +import torch + +import transformers +from transformers import AutoTokenizer + +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel + +from lm_eval.api.model import LM +from lm_eval.models.huggingface import HFLM +from lm_eval.api.registry import register_model +from lm_eval.__main__ import cli_evaluate + + +@register_model("mamba") +class MambaEvalWrapper(HFLM): + + AUTO_MODEL_CLASS = transformers.AutoModelForCausalLM + + def __init__(self, pretrained="state-spaces/mamba-2.8b", max_length=2048, batch_size=None, device="cuda", + dtype=torch.float16): + LM.__init__(self) + self._model = MambaLMHeadModel.from_pretrained(pretrained, device=device, dtype=dtype) + self.tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") + self.tokenizer.pad_token_id = self.tokenizer.eos_token_id + self.vocab_size = self.tokenizer.vocab_size + self._batch_size = batch_size if batch_size is None else 64 + self._max_length = max_length + self._device = torch.device(device) + + @property + def batch_size(self): + return self._batch_size + + def _model_generate(self, context, max_length, stop, **generation_kwargs): + raise NotImplementedError() + + +if __name__ == "__main__": + cli_evaluate() diff --git a/source_code/SegMamba/mamba/mamba_ssm.egg-info/PKG-INFO 
b/source_code/SegMamba/mamba/mamba_ssm.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..ed8c6e96be3e40e08d2015c80de632e3000fd561 --- /dev/null +++ b/source_code/SegMamba/mamba/mamba_ssm.egg-info/PKG-INFO @@ -0,0 +1,181 @@ +Metadata-Version: 2.4 +Name: mamba_ssm +Version: 1.0.1 +Summary: Mamba state-space model +Home-page: https://github.com/state-spaces/mamba +Author: Tri Dao, Albert Gu +Author-email: tri@tridao.me, agu@cs.cmu.edu +Classifier: Programming Language :: Python :: 3 +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: Unix +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +License-File: AUTHORS +Requires-Dist: torch +Requires-Dist: packaging +Requires-Dist: ninja +Requires-Dist: einops +Requires-Dist: triton +Requires-Dist: transformers +Requires-Dist: causal_conv1d +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: description-content-type +Dynamic: home-page +Dynamic: license-file +Dynamic: requires-dist +Dynamic: requires-python +Dynamic: summary + +# Mamba + +![Mamba](assets/selection.png "Selective State Space") +> **Mamba: Linear-Time Sequence Modeling with Selective State Spaces**\ +> Albert Gu*, Tri Dao*\ +> Paper: https://arxiv.org/abs/2312.00752 + +## About + +Mamba is a new state space model architecture showing promising performance on information-dense data such as language modeling, where previous subquadratic models fall short of Transformers. +It is based on the line of progress on [structured state space models](https://github.com/state-spaces/s4), +with an efficient hardware-aware design and implementation in the spirit of [FlashAttention](https://github.com/Dao-AILab/flash-attention). + +## Installation + +- `pip install causal-conv1d`: an efficient implementation of a simple causal Conv1d layer used inside the Mamba block. +- `pip install mamba-ssm`: the core Mamba package. 
+ +It can also be built from source with `pip install .` from this repository. + +If `pip` complains about PyTorch versions, try passing `--no-build-isolation` to `pip`. + +Other requirements: +- Linux +- NVIDIA GPU +- PyTorch 1.12+ +- CUDA 11.6+ + +## Usage + +We expose several levels of interface with the Mamba model. + +### Selective SSM + +Mamba is based on a selective SSM layer, which is the focus of the paper (Section 3; Algorithm 2). + +Source: [ops/selective_scan_interface.py](mamba_ssm/ops/selective_scan_interface.py). + +### Mamba Block + +The main module of this repository is the Mamba architecture block wrapping the selective SSM. + +Source: [modules/mamba_simple.py](mamba_ssm/modules/mamba_simple.py). + +Usage: +``` +from mamba_ssm import Mamba + +batch, length, dim = 2, 64, 16 +x = torch.randn(batch, length, dim).to("cuda") +model = Mamba( + # This module uses roughly 3 * expand * d_model^2 parameters + d_model=dim, # Model dimension d_model + d_state=16, # SSM state expansion factor + d_conv=4, # Local convolution width + expand=2, # Block expansion factor +).to("cuda") +y = model(x) +assert y.shape == x.shape +``` + +### Mamba Language Model + +Finally, we provide an example of a complete language model: a deep sequence model backbone (with repeating Mamba blocks) + language model head. + +Source: [models/mixer_seq_simple.py](mamba_ssm/models/mixer_seq_simple.py). + +This is an example of how to integrate Mamba into an end-to-end neural network. +This example is used in the generation scripts below. + + + +## Pretrained Models + +Pretrained models are uploaded to +[HuggingFace](https://huggingface.co/state-spaces): `mamba-130m`, `mamba-370m`, +`mamba-790m`, `mamba-1.4b`, `mamba-2.8b`. + +The models will be autodownloaded by the generation script below. 
+ +These models were trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), and follow the standard model dimensions described by GPT-3 and followed by many open source models: + +| Parameters | Layers | Model dim. | +|------------|--------|------------| +| 130M | 12 | 768 | +| 370M | 24 | 1024 | +| 790M | 24 | 1536 | +| 1.4B | 24 | 2048 | +| 2.8B | 32 | 2560 | + +(The layer count of Mamba should be doubled, as two Mamba blocks are needed for each "layer" (MHA block + MLP block) of a Transformer.) + +Note: these are base models trained only for 300B tokens, without any form of downstream modification (instruction tuning, etc.). +Performance is expected to be comparable or better than other architectures trained on similar data, but not to match larger or fine-tuned models. + + +## Evaluations + +To run zero-shot evaluations of models (corresponding to Table 3 of the paper), +we use the +[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) +library. + +1. Pull the `lm-evaluation-harness` repo by `git submodule update --init + --recursive`. We use the `big-refactor` branch. +2. Install `lm-evaluation-harness`: `pip install -e 3rdparty/lm-evaluation-harness` +3. Run evaluation with (more documentation at the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) repo): +``` +python evals/lm_harness_eval.py --model mamba --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +python evals/lm_harness_eval.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64 +``` + +Note that the result of each task might differ from reported values by 0.1-0.3 due to noise in the evaluation process. 
+ +## Inference + +The script [benchmarks/benchmark_generation_mamba_simple.py](benchmarks/benchmark_generation_mamba_simple.py) +1. autoloads a model from the HuggingFace Hub, +2. generates completions of a user-specified prompt, +3. benchmarks the inference speed of this generation. + +Other configurable options include the top-p (nucleus sampling) probability, and the softmax temperature. + +### Examples + +To test generation latency (e.g. batch size = 1) with different sampling strategies: + +``` +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.5 +``` + +To test generation throughput with random prompts (e.g. large batch size): +``` +python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --batch 128 +python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --batch 128 +``` + +## Citation + +If you use this codebase, or otherwise found our work valuable, please cite Mamba: +``` +@article{mamba, + title={Mamba: Linear-Time Sequence Modeling with Selective State Spaces}, + author={Gu, Albert and Dao, Tri}, + journal={arXiv preprint arXiv:2312.00752}, + year={2023} +} +``` diff --git a/source_code/SegMamba/mamba/mamba_ssm.egg-info/SOURCES.txt b/source_code/SegMamba/mamba/mamba_ssm.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..52536c42f43c8e90c131d567ceee396c09076a2e --- /dev/null +++ b/source_code/SegMamba/mamba/mamba_ssm.egg-info/SOURCES.txt @@ -0,0 +1,32 @@ +AUTHORS +LICENSE +README.md +setup.py +csrc/selective_scan/selective_scan.cpp +csrc/selective_scan/selective_scan_bwd_bf16_complex.cu 
+csrc/selective_scan/selective_scan_bwd_bf16_real.cu +csrc/selective_scan/selective_scan_bwd_fp16_complex.cu +csrc/selective_scan/selective_scan_bwd_fp16_real.cu +csrc/selective_scan/selective_scan_bwd_fp32_complex.cu +csrc/selective_scan/selective_scan_bwd_fp32_real.cu +csrc/selective_scan/selective_scan_fwd_bf16.cu +csrc/selective_scan/selective_scan_fwd_fp16.cu +csrc/selective_scan/selective_scan_fwd_fp32.cu +mamba_ssm/__init__.py +mamba_ssm.egg-info/PKG-INFO +mamba_ssm.egg-info/SOURCES.txt +mamba_ssm.egg-info/dependency_links.txt +mamba_ssm.egg-info/requires.txt +mamba_ssm.egg-info/top_level.txt +mamba_ssm/models/__init__.py +mamba_ssm/models/mixer_seq_simple.py +mamba_ssm/modules/__init__.py +mamba_ssm/modules/mamba_simple.py +mamba_ssm/ops/__init__.py +mamba_ssm/ops/selective_scan_interface.py +mamba_ssm/ops/triton/__init__.py +mamba_ssm/ops/triton/layernorm.py +mamba_ssm/ops/triton/selective_state_update.py +mamba_ssm/utils/__init__.py +mamba_ssm/utils/generation.py +mamba_ssm/utils/hf.py \ No newline at end of file diff --git a/source_code/SegMamba/mamba/mamba_ssm.egg-info/dependency_links.txt b/source_code/SegMamba/mamba/mamba_ssm.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/source_code/SegMamba/mamba/mamba_ssm.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/source_code/SegMamba/mamba/mamba_ssm.egg-info/requires.txt b/source_code/SegMamba/mamba/mamba_ssm.egg-info/requires.txt new file mode 100644 index 0000000000000000000000000000000000000000..8bf31e81ea760588be6fc8a4c24a43a599d29eab --- /dev/null +++ b/source_code/SegMamba/mamba/mamba_ssm.egg-info/requires.txt @@ -0,0 +1,7 @@ +torch +packaging +ninja +einops +triton +transformers +causal_conv1d diff --git a/source_code/SegMamba/mamba/mamba_ssm.egg-info/top_level.txt b/source_code/SegMamba/mamba/mamba_ssm.egg-info/top_level.txt new file mode 100644 index 
0000000000000000000000000000000000000000..def7a96b278675080fd2ada3941fd1499491296f --- /dev/null +++ b/source_code/SegMamba/mamba/mamba_ssm.egg-info/top_level.txt @@ -0,0 +1,2 @@ +mamba_ssm +selective_scan_cuda diff --git a/source_code/SegMamba/mamba/mamba_ssm/.DS_Store b/source_code/SegMamba/mamba/mamba_ssm/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..029c7125f94cd01f8ed4fcedbb1636be3bb28de4 Binary files /dev/null and b/source_code/SegMamba/mamba/mamba_ssm/.DS_Store differ diff --git a/source_code/SegMamba/mamba/mamba_ssm/__init__.py b/source_code/SegMamba/mamba/mamba_ssm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2ecd144db5dbec72bcfcdcea28c624a7e2bf053b --- /dev/null +++ b/source_code/SegMamba/mamba/mamba_ssm/__init__.py @@ -0,0 +1,5 @@ +__version__ = "1.0.1" + +from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn +from mamba_ssm.modules.mamba_simple import Mamba +from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel diff --git a/source_code/SegMamba/mamba/mamba_ssm/models/__init__.py b/source_code/SegMamba/mamba/mamba_ssm/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/mamba/mamba_ssm/modules/__init__.py b/source_code/SegMamba/mamba/mamba_ssm/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/mamba/mamba_ssm/modules/mamba_simple.py b/source_code/SegMamba/mamba/mamba_ssm/modules/mamba_simple.py new file mode 100644 index 0000000000000000000000000000000000000000..8ffc53d24110bc39651d086f7f3969cf5069f196 --- /dev/null +++ b/source_code/SegMamba/mamba/mamba_ssm/modules/mamba_simple.py @@ -0,0 +1,501 @@ +# Copyright (c) 2023, Tri Dao, Albert Gu. 
+ +import math +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor + +from einops import rearrange, repeat + +try: + from causal_conv1d import causal_conv1d_fn, causal_conv1d_update +except ImportError: + causal_conv1d_fn, causal_conv1d_update = None + +try: + from mamba_ssm.ops.selective_scan_interface import selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj +except ImportError: + selective_scan_fn, mamba_inner_fn, bimamba_inner_fn, mamba_inner_fn_no_out_proj = None, None, None, None, None + +try: + from mamba_ssm.ops.triton.selective_state_update import selective_state_update +except ImportError: + selective_state_update = None + +try: + from mamba_ssm.ops.triton.layernorm import RMSNorm, layer_norm_fn, rms_norm_fn +except ImportError: + RMSNorm, layer_norm_fn, rms_norm_fn = None, None, None + + +class Mamba(nn.Module): + def __init__( + self, + d_model, + d_state=16, + d_conv=4, + expand=2, + dt_rank="auto", + dt_min=0.001, + dt_max=0.1, + dt_init="random", + dt_scale=1.0, + dt_init_floor=1e-4, + conv_bias=True, + bias=False, + use_fast_path=True, # Fused kernel options + layer_idx=None, + device=None, + dtype=None, + bimamba_type="none", + nslices=5 + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + self.d_model = d_model + self.d_state = d_state + self.d_conv = d_conv + self.expand = expand + self.d_inner = int(self.expand * self.d_model) + self.dt_rank = math.ceil(self.d_model / 16) if dt_rank == "auto" else dt_rank + self.use_fast_path = use_fast_path + self.layer_idx = layer_idx + self.bimamba_type = bimamba_type + self.nslices = nslices + + self.in_proj = nn.Linear(self.d_model, self.d_inner * 2, bias=bias, **factory_kwargs) + + self.conv1d = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + 
self.activation = "silu" + self.act = nn.SiLU() + + self.x_proj = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + # Initialize special dt projection to preserve variance at initialization + dt_init_std = self.dt_rank**-0.5 * dt_scale + if dt_init == "constant": + nn.init.constant_(self.dt_proj.weight, dt_init_std) + elif dt_init == "random": + nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std) + else: + raise NotImplementedError + + # Initialize dt bias so that F.softplus(dt_bias) is between dt_min and dt_max + dt = torch.exp( + torch.rand(self.d_inner, **factory_kwargs) * (math.log(dt_max) - math.log(dt_min)) + + math.log(dt_min) + ).clamp(min=dt_init_floor) + # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759 + inv_dt = dt + torch.log(-torch.expm1(-dt)) + with torch.no_grad(): + self.dt_proj.bias.copy_(inv_dt) + # Our initialization would set all Linear.bias to zero, need to mark this one as _no_reinit + self.dt_proj.bias._no_reinit = True + + # S4D real initialization + A = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_log = torch.log(A) # Keep A_log in fp32 + self.A_log = nn.Parameter(A_log) + self.A_log._no_weight_decay = True + + # D "skip" parameter + self.D = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D._no_weight_decay = True + + # bidirectional + assert bimamba_type == "v3" + + A_b = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_b_log = torch.log(A_b) # Keep A_b_log in fp32 + self.A_b_log = nn.Parameter(A_b_log) + self.A_b_log._no_weight_decay = True + + self.conv1d_b = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + 
groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.x_proj_b = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj_b = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + self.D_b = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D_b._no_weight_decay = True + + # assert bimamba_type == "v3" + # spatial + A_s = repeat( + torch.arange(1, self.d_state + 1, dtype=torch.float32, device=device), + "n -> d n", + d=self.d_inner, + ).contiguous() + A_s_log = torch.log(A_s) # Keep A_b_log in fp32 + self.A_s_log = nn.Parameter(A_s_log) + self.A_s_log._no_weight_decay = True + + self.conv1d_s = nn.Conv1d( + in_channels=self.d_inner, + out_channels=self.d_inner, + bias=conv_bias, + kernel_size=d_conv, + groups=self.d_inner, + padding=d_conv - 1, + **factory_kwargs, + ) + + self.x_proj_s = nn.Linear( + self.d_inner, self.dt_rank + self.d_state * 2, bias=False, **factory_kwargs + ) + self.dt_proj_s = nn.Linear(self.dt_rank, self.d_inner, bias=True, **factory_kwargs) + + self.D_s = nn.Parameter(torch.ones(self.d_inner, device=device)) # Keep in fp32 + self.D_s._no_weight_decay = True + + + + + self.out_proj = nn.Linear(self.d_inner, self.d_model, bias=bias, **factory_kwargs) + + def forward(self, hidden_states, inference_params=None): + """ + hidden_states: (B, L, D) + Returns: same shape as hidden_states + """ + batch, seqlen, dim = hidden_states.shape + + conv_state, ssm_state = None, None + if inference_params is not None: + conv_state, ssm_state = self._get_states_from_cache(inference_params, batch) + if inference_params.seqlen_offset > 0: + # The states are updated inplace + out, _, _ = self.step(hidden_states, conv_state, ssm_state) + return out + + # We do matmul and transpose BLH -> HBL at the same time + xz = rearrange( + self.in_proj.weight @ rearrange(hidden_states, "b l d -> d (b l)"), + "d (b l) -> b d l", + l=seqlen, + ) + if 
self.in_proj.bias is not None: + xz = xz + rearrange(self.in_proj.bias.to(dtype=xz.dtype), "d -> d 1") + + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + # In the backward pass we write dx and dz next to each other to avoid torch.cat + if self.use_fast_path and inference_params is None: # Doesn't support outputting the states + if self.bimamba_type == "v3": + A_b = -torch.exp(self.A_b_log.float()) + out = mamba_inner_fn_no_out_proj( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + out_b = mamba_inner_fn_no_out_proj( + xz.flip([-1]), + self.conv1d_b.weight, + self.conv1d_b.bias, + self.x_proj_b.weight, + self.dt_proj_b.weight, + A_b, + None, + None, + self.D_b.float(), + delta_bias=self.dt_proj_b.bias.float(), + delta_softplus=True, + ) + A_s = -torch.exp(self.A_s_log.float()) + + xz_s = xz.chunk(self.nslices, dim=-1) + xz_s = torch.stack(xz_s,dim=-1) + xz_s = xz_s.flatten(-2) + out_s = mamba_inner_fn_no_out_proj( + xz_s, + self.conv1d_s.weight, + self.conv1d_s.bias, + self.x_proj_s.weight, + self.dt_proj_s.weight, + A_s, + None, + None, + self.D_s.float(), + delta_bias=self.dt_proj_s.bias.float(), + delta_softplus=True, + ) + out_s = out_s.reshape(batch,self.d_inner,seqlen//self.nslices,self.nslices).permute(0,1,3,2).flatten(-2) + + # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + out = F.linear(rearrange(out + out_b.flip([-1]) + out_s, "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias) + elif self.bimamba_type == "v2": + A_b = -torch.exp(self.A_b_log.float()) + out = mamba_inner_fn_no_out_proj( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + 
delta_softplus=True, + ) + out_b = mamba_inner_fn_no_out_proj( + xz.flip([-1]), + self.conv1d_b.weight, + self.conv1d_b.bias, + self.x_proj_b.weight, + self.dt_proj_b.weight, + A_b, + None, + None, + self.D_b.float(), + delta_bias=self.dt_proj_b.bias.float(), + delta_softplus=True, + ) + # F.linear(rearrange(out_z, "b d l -> b l d"), out_proj_weight, out_proj_bias) + out = F.linear(rearrange(out + out_b.flip([-1]), "b d l -> b l d"), self.out_proj.weight, self.out_proj.bias) + else: + out = mamba_inner_fn( + xz, + self.conv1d.weight, + self.conv1d.bias, + self.x_proj.weight, + self.dt_proj.weight, + self.out_proj.weight, + self.out_proj.bias, + A, + None, # input-dependent B + None, # input-dependent C + self.D.float(), + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + ) + else: + x, z = xz.chunk(2, dim=1) + # Compute short convolution + if conv_state is not None: + conv_state.copy_(x[:, :, -self.d_conv :]) # Update state (B D W) + if causal_conv1d_fn is None: + x = self.act(self.conv1d(x)[..., :seqlen]) + else: + assert self.activation in ["silu", "swish"] + x = causal_conv1d_fn( + x, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.activation, + ) + + # We're careful here about the layout, to avoid extra transposes. + # We want dt to have d as the slowest moving dimension + # and L as the fastest moving dimension, since those are what the ssm_scan kernel expects. 
+ x_dbl = self.x_proj(rearrange(x, "b d l -> (b l) d")) # (bl d) + dt, B, C = torch.split(x_dbl, [self.dt_rank, self.d_state, self.d_state], dim=-1) + dt = self.dt_proj.weight @ dt.t() + dt = rearrange(dt, "d (b l) -> b d l", l=seqlen) + B = rearrange(B, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + C = rearrange(C, "(b l) dstate -> b dstate l", l=seqlen).contiguous() + assert self.activation in ["silu", "swish"] + y = selective_scan_fn( + x, + dt, + A, + B, + C, + self.D.float(), + z=z, + delta_bias=self.dt_proj.bias.float(), + delta_softplus=True, + return_last_state=ssm_state is not None, + ) + if ssm_state is not None: + y, last_state = y + ssm_state.copy_(last_state) + y = rearrange(y, "b d l -> b l d") + out = self.out_proj(y) + return out + + def step(self, hidden_states, conv_state, ssm_state): + dtype = hidden_states.dtype + assert hidden_states.shape[1] == 1, "Only support decoding with 1 token at a time for now" + xz = self.in_proj(hidden_states.squeeze(1)) # (B 2D) + x, z = xz.chunk(2, dim=-1) # (B D) + + # Conv step + if causal_conv1d_update is None: + conv_state.copy_(torch.roll(conv_state, shifts=-1, dims=-1)) # Update state (B D W) + conv_state[:, :, -1] = x + x = torch.sum(conv_state * rearrange(self.conv1d.weight, "d 1 w -> d w"), dim=-1) # (B D) + if self.conv1d.bias is not None: + x = x + self.conv1d.bias + x = self.act(x).to(dtype=dtype) + else: + x = causal_conv1d_update( + x, + conv_state, + rearrange(self.conv1d.weight, "d 1 w -> d w"), + self.conv1d.bias, + self.activation, + ) + + x_db = self.x_proj(x) # (B dt_rank+2*d_state) + dt, B, C = torch.split(x_db, [self.dt_rank, self.d_state, self.d_state], dim=-1) + # Don't add dt_bias here + dt = F.linear(dt, self.dt_proj.weight) # (B d_inner) + A = -torch.exp(self.A_log.float()) # (d_inner, d_state) + + # SSM step + if selective_state_update is None: + # Discretize A and B + dt = F.softplus(dt + self.dt_proj.bias.to(dtype=dt.dtype)) + dA = torch.exp(torch.einsum("bd,dn->bdn", dt, A)) + 
dB = torch.einsum("bd,bn->bdn", dt, B) + ssm_state.copy_(ssm_state * dA + rearrange(x, "b d -> b d 1") * dB) + y = torch.einsum("bdn,bn->bd", ssm_state.to(dtype), C) + y = y + self.D.to(dtype) * x + y = y * self.act(z) # (B D) + else: + y = selective_state_update( + ssm_state, x, dt, A, B, C, self.D, z=z, dt_bias=self.dt_proj.bias, dt_softplus=True + ) + + out = self.out_proj(y) + return out.unsqueeze(1), conv_state, ssm_state + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + device = self.out_proj.weight.device + conv_dtype = self.conv1d.weight.dtype if dtype is None else dtype + conv_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_conv, device=device, dtype=conv_dtype + ) + ssm_dtype = self.dt_proj.weight.dtype if dtype is None else dtype + # ssm_dtype = torch.float32 + ssm_state = torch.zeros( + batch_size, self.d_model * self.expand, self.d_state, device=device, dtype=ssm_dtype + ) + return conv_state, ssm_state + + def _get_states_from_cache(self, inference_params, batch_size, initialize_states=False): + assert self.layer_idx is not None + if self.layer_idx not in inference_params.key_value_memory_dict: + batch_shape = (batch_size,) + conv_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_conv, + device=self.conv1d.weight.device, + dtype=self.conv1d.weight.dtype, + ) + ssm_state = torch.zeros( + batch_size, + self.d_model * self.expand, + self.d_state, + device=self.dt_proj.weight.device, + dtype=self.dt_proj.weight.dtype, + # dtype=torch.float32, + ) + inference_params.key_value_memory_dict[self.layer_idx] = (conv_state, ssm_state) + else: + conv_state, ssm_state = inference_params.key_value_memory_dict[self.layer_idx] + # TODO: What if batch size changes between generation, and we reuse the same states? 
+ if initialize_states: + conv_state.zero_() + ssm_state.zero_() + return conv_state, ssm_state + + +class Block(nn.Module): + def __init__( + self, dim, mixer_cls, norm_cls=nn.LayerNorm, fused_add_norm=False, residual_in_fp32=False + ): + """ + Simple block wrapping a mixer class with LayerNorm/RMSNorm and residual connection" + + This Block has a slightly different structure compared to a regular + prenorm Transformer block. + The standard block is: LN -> MHA/MLP -> Add. + [Ref: https://arxiv.org/abs/2002.04745] + Here we have: Add -> LN -> Mixer, returning both + the hidden_states (output of the mixer) and the residual. + This is purely for performance reasons, as we can fuse add and LayerNorm. + The residual needs to be provided (except for the very first block). + """ + super().__init__() + self.residual_in_fp32 = residual_in_fp32 + self.fused_add_norm = fused_add_norm + self.mixer = mixer_cls(dim) + self.norm = norm_cls(dim) + if self.fused_add_norm: + assert RMSNorm is not None, "RMSNorm import fails" + assert isinstance( + self.norm, (nn.LayerNorm, RMSNorm) + ), "Only LayerNorm and RMSNorm are supported for fused_add_norm" + + def forward( + self, hidden_states: Tensor, residual: Optional[Tensor] = None, inference_params=None + ): + r"""Pass the input through the encoder layer. + + Args: + hidden_states: the sequence to the encoder layer (required). 
+ residual: hidden_states = Mixer(LN(residual)) + """ + if not self.fused_add_norm: + residual = (hidden_states + residual) if residual is not None else hidden_states + hidden_states = self.norm(residual.to(dtype=self.norm.weight.dtype)) + if self.residual_in_fp32: + residual = residual.to(torch.float32) + else: + fused_add_norm_fn = rms_norm_fn if isinstance(self.norm, RMSNorm) else layer_norm_fn + hidden_states, residual = fused_add_norm_fn( + hidden_states, + self.norm.weight, + self.norm.bias, + residual=residual, + prenorm=True, + residual_in_fp32=self.residual_in_fp32, + eps=self.norm.eps, + ) + hidden_states = self.mixer(hidden_states, inference_params=inference_params) + return hidden_states, residual + + def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs): + return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs) diff --git a/source_code/SegMamba/mamba/mamba_ssm/ops/__init__.py b/source_code/SegMamba/mamba/mamba_ssm/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/mamba/mamba_ssm/ops/triton/__init__.py b/source_code/SegMamba/mamba/mamba_ssm/ops/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/mamba/mamba_ssm/utils/__init__.py b/source_code/SegMamba/mamba/mamba_ssm/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/SegMamba/mamba/mamba_ssm/utils/hf.py b/source_code/SegMamba/mamba/mamba_ssm/utils/hf.py new file mode 100644 index 0000000000000000000000000000000000000000..0d7555acddbd260636d1d14d5bd6324f6af0056a --- /dev/null +++ b/source_code/SegMamba/mamba/mamba_ssm/utils/hf.py @@ -0,0 +1,23 @@ +import json + +import torch + +from transformers.utils import WEIGHTS_NAME, 
CONFIG_NAME +from transformers.utils.hub import cached_file + + +def load_config_hf(model_name): + resolved_archive_file = cached_file(model_name, CONFIG_NAME, _raise_exceptions_for_missing_entries=False) + return json.load(open(resolved_archive_file)) + + +def load_state_dict_hf(model_name, device=None, dtype=None): + # If not fp32, then we don't want to load directly to the GPU + mapped_device = "cpu" if dtype not in [torch.float32, None] else device + resolved_archive_file = cached_file(model_name, WEIGHTS_NAME, _raise_exceptions_for_missing_entries=False) + return torch.load(resolved_archive_file, map_location=mapped_device) + # Convert dtype before moving to GPU to save memory + if dtype is not None: + state_dict = {k: v.to(dtype=dtype) for k, v in state_dict.items()} + state_dict = {k: v.to(device=device) for k, v in state_dict.items()} + return state_dict diff --git a/source_code/SegMamba/mamba/setup.py b/source_code/SegMamba/mamba/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..2ce0ac045f8b2ae07f39f3d045e997ab362ec4c1 --- /dev/null +++ b/source_code/SegMamba/mamba/setup.py @@ -0,0 +1,276 @@ +# Copyright (c) 2023, Albert Gu, Tri Dao. 
import sys
import warnings
import os
import re
import ast
from pathlib import Path
from packaging.version import parse, Version
import platform
import shutil

from setuptools import setup, find_packages
import subprocess

import urllib.request
import urllib.error
from wheel.bdist_wheel import bdist_wheel as _bdist_wheel

import torch
from torch.utils.cpp_extension import (
    BuildExtension,
    CppExtension,
    CUDAExtension,
    CUDA_HOME,
)


# Long description for PyPI comes straight from the repository README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()


# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))

PACKAGE_NAME = "mamba_ssm"

BASE_WHEEL_URL = "https://github.com/state-spaces/mamba/releases/download/{tag_name}/{wheel_name}"

# FORCE_BUILD: Force a fresh build locally, instead of attempting to find prebuilt wheels
# SKIP_CUDA_BUILD: Intended to allow CI to use a simple `python setup.py sdist` run to copy over raw files, without any cuda compilation
FORCE_BUILD = os.getenv("MAMBA_FORCE_BUILD", "FALSE") == "TRUE"
SKIP_CUDA_BUILD = os.getenv("MAMBA_SKIP_CUDA_BUILD", "FALSE") == "TRUE"
# For CI, we want the option to build with C++11 ABI since the nvcr images use C++11 ABI
FORCE_CXX11_ABI = os.getenv("MAMBA_FORCE_CXX11_ABI", "FALSE") == "TRUE"


def get_platform():
    """
    Returns the platform name as used in wheel filenames.
    """
    if sys.platform.startswith("linux"):
        return "linux_x86_64"
    elif sys.platform == "darwin":
        mac_version = ".".join(platform.mac_ver()[0].split(".")[:2])
        return f"macosx_{mac_version}_x86_64"
    elif sys.platform == "win32":
        return "win_amd64"
    else:
        raise ValueError("Unsupported platform: {}".format(sys.platform))


def get_cuda_bare_metal_version(cuda_dir):
    # Parse the installed toolkit's release number out of `nvcc -V` output
    # (the token after "release", e.g. "11.8,") and return it as a Version.
    raw_output = subprocess.check_output(
        [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True
    )
    output = raw_output.split()
    release_idx = output.index("release") + 1
    bare_metal_version = parse(output[release_idx].split(",")[0])

    return raw_output, bare_metal_version


def check_if_cuda_home_none(global_option: str) -> None:
    if CUDA_HOME is not None:
        return
    # warn instead of error because user could be downloading prebuilt wheels, so nvcc won't be necessary
    # in that case.
    warnings.warn(
        f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
        "If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
        "only images whose names contain 'devel' will provide nvcc."
    )


def append_nvcc_threads(nvcc_extra_args):
    # Compile several CUDA compilation units in parallel.
    return nvcc_extra_args + ["--threads", "4"]


cmdclass = {}
ext_modules = []

if not SKIP_CUDA_BUILD:
    print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
    TORCH_MAJOR = int(torch.__version__.split(".")[0])
    TORCH_MINOR = int(torch.__version__.split(".")[1])

    check_if_cuda_home_none(PACKAGE_NAME)
    # Check, if CUDA11 is installed for compute capability 8.0
    cc_flag = []
    if CUDA_HOME is not None:
        _, bare_metal_version = get_cuda_bare_metal_version(CUDA_HOME)
        if bare_metal_version < Version("11.6"):
            raise RuntimeError(
                f"{PACKAGE_NAME} is only supported on CUDA 11.6 and above. "
                "Note: make sure nvcc has a supported version by running nvcc -V."
            )

    # NOTE(review): `bare_metal_version` is only bound inside the
    # `CUDA_HOME is not None` branch above; if CUDA_HOME is None and
    # SKIP_CUDA_BUILD is unset, the comparison below raises NameError — confirm.
    cc_flag.append("-gencode")
    cc_flag.append("arch=compute_70,code=sm_70")
    cc_flag.append("-gencode")
    cc_flag.append("arch=compute_80,code=sm_80")
    if bare_metal_version >= Version("11.8"):
        cc_flag.append("-gencode")
        cc_flag.append("arch=compute_90,code=sm_90")

    # HACK: The compiler flag -D_GLIBCXX_USE_CXX11_ABI is set to be the same as
    # torch._C._GLIBCXX_USE_CXX11_ABI
    # https://github.com/pytorch/pytorch/blob/8472c24e3b5b60150096486616d98b7bea01500b/torch/utils/cpp_extension.py#L920
    if FORCE_CXX11_ABI:
        torch._C._GLIBCXX_USE_CXX11_ABI = True

    ext_modules.append(
        CUDAExtension(
            name="selective_scan_cuda",
            sources=[
                "csrc/selective_scan/selective_scan.cpp",
                "csrc/selective_scan/selective_scan_fwd_fp32.cu",
                "csrc/selective_scan/selective_scan_fwd_fp16.cu",
                "csrc/selective_scan/selective_scan_fwd_bf16.cu",
                "csrc/selective_scan/selective_scan_bwd_fp32_real.cu",
                "csrc/selective_scan/selective_scan_bwd_fp32_complex.cu",
                "csrc/selective_scan/selective_scan_bwd_fp16_real.cu",
                "csrc/selective_scan/selective_scan_bwd_fp16_complex.cu",
                "csrc/selective_scan/selective_scan_bwd_bf16_real.cu",
                "csrc/selective_scan/selective_scan_bwd_bf16_complex.cu",
            ],
            extra_compile_args={
                "cxx": ["-O3", "-std=c++17"],
                "nvcc": append_nvcc_threads(
                    [
                        "-O3",
                        "-std=c++17",
                        "-U__CUDA_NO_HALF_OPERATORS__",
                        "-U__CUDA_NO_HALF_CONVERSIONS__",
                        "-U__CUDA_NO_BFLOAT16_OPERATORS__",
                        "-U__CUDA_NO_BFLOAT16_CONVERSIONS__",
                        "-U__CUDA_NO_BFLOAT162_OPERATORS__",
                        "-U__CUDA_NO_BFLOAT162_CONVERSIONS__",
                        "--expt-relaxed-constexpr",
                        "--expt-extended-lambda",
                        "--use_fast_math",
                        "--ptxas-options=-v",
                        "-lineinfo",
                    ]
                    + cc_flag
                ),
            },
            include_dirs=[Path(this_dir) / "csrc" / "selective_scan"],
        )
    )


def get_package_version():
    # Read __version__ from mamba_ssm/__init__.py without importing the package
    # (importing would require the compiled CUDA extension to exist already).
    with open(Path(this_dir) / PACKAGE_NAME / "__init__.py", "r") as f:
        version_match = re.search(r"^__version__\s*=\s*(.*)$", f.read(), re.MULTILINE)
    public_version = ast.literal_eval(version_match.group(1))
    local_version = os.environ.get("MAMBA_LOCAL_VERSION")
    if local_version:
        return f"{public_version}+{local_version}"
    else:
        return str(public_version)


def get_wheel_url():
    # Determine the version numbers that will be used to determine the correct wheel
    # We're using the CUDA version used to build torch, not the one currently installed
    # _, cuda_version_raw = get_cuda_bare_metal_version(CUDA_HOME)
    torch_cuda_version = parse(torch.version.cuda)
    torch_version_raw = parse(torch.__version__)
    # For CUDA 11, we only compile for CUDA 11.8, and for CUDA 12 we only compile for CUDA 12.2
    # to save CI time. Minor versions should be compatible.
    torch_cuda_version = parse("11.8") if torch_cuda_version.major == 11 else parse("12.2")
    python_version = f"cp{sys.version_info.major}{sys.version_info.minor}"
    platform_name = get_platform()
    mamba_ssm_version = get_package_version()
    # cuda_version = f"{cuda_version_raw.major}{cuda_version_raw.minor}"
    cuda_version = f"{torch_cuda_version.major}{torch_cuda_version.minor}"
    torch_version = f"{torch_version_raw.major}.{torch_version_raw.minor}"
    cxx11_abi = str(torch._C._GLIBCXX_USE_CXX11_ABI).upper()

    # Determine wheel URL based on CUDA version, torch version, python version and OS
    wheel_filename = f"{PACKAGE_NAME}-{mamba_ssm_version}+cu{cuda_version}torch{torch_version}cxx11abi{cxx11_abi}-{python_version}-{python_version}-{platform_name}.whl"
    wheel_url = BASE_WHEEL_URL.format(
        tag_name=f"v{mamba_ssm_version}", wheel_name=wheel_filename
    )
    return wheel_url, wheel_filename


class CachedWheelsCommand(_bdist_wheel):
    """
    The CachedWheelsCommand plugs into the default bdist wheel, which is ran by pip when it cannot
    find an existing wheel (which is currently the case for all installs). We use
    the environment parameters to detect whether there is already a pre-built version of a compatible
    wheel available and short-circuits the standard full build pipeline.
    """

    def run(self):
        # MAMBA_FORCE_BUILD=TRUE bypasses the prebuilt-wheel lookup entirely.
        if FORCE_BUILD:
            return super().run()

        wheel_url, wheel_filename = get_wheel_url()
        print("Guessing wheel URL: ", wheel_url)
        try:
            urllib.request.urlretrieve(wheel_url, wheel_filename)

            # Make the archive
            # Lifted from the root wheel processing command
            # https://github.com/pypa/wheel/blob/cf71108ff9f6ffc36978069acb28824b44ae028e/src/wheel/bdist_wheel.py#LL381C9-L381C85
            if not os.path.exists(self.dist_dir):
                os.makedirs(self.dist_dir)

            impl_tag, abi_tag, plat_tag = self.get_tag()
            archive_basename = f"{self.wheel_dist_name}-{impl_tag}-{abi_tag}-{plat_tag}"

            wheel_path = os.path.join(self.dist_dir, archive_basename + ".whl")
            print("Raw wheel path", wheel_path)
            shutil.move(wheel_filename, wheel_path)
        except urllib.error.HTTPError:
            print("Precompiled wheel not found. Building from source...")
            # If the wheel could not be downloaded, build from source
            super().run()


setup(
    name=PACKAGE_NAME,
    version=get_package_version(),
    packages=find_packages(
        exclude=(
            "build",
            "csrc",
            "include",
            "tests",
            "dist",
            "docs",
            "benchmarks",
            "mamba_ssm.egg-info",
        )
    ),
    author="Tri Dao, Albert Gu",
    author_email="tri@tridao.me, agu@cs.cmu.edu",
    description="Mamba state-space model",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/state-spaces/mamba",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: Unix",
    ],
    ext_modules=ext_modules,
    # build_ext is only needed when we actually compile the CUDA extension.
    cmdclass={"bdist_wheel": CachedWheelsCommand, "build_ext": BuildExtension}
    if ext_modules
    else {
        "bdist_wheel": CachedWheelsCommand,
    },
    python_requires=">=3.7",
    install_requires=[
        "torch",
        "packaging",
        "ninja",
        "einops",
        "triton",
        "transformers",
        "causal_conv1d",
    ],
)
diff --git a/source_code/SegMamba/mamba/test_mamba_module.py b/source_code/SegMamba/mamba/test_mamba_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..64710e92f7ec4fc0fe88821550e4ecf902a22bfe
--- /dev/null
+++ b/source_code/SegMamba/mamba/test_mamba_module.py
# Smoke test: a single Mamba block must preserve the (batch, length, dim) shape.
import torch
from mamba_ssm import Mamba

batch, length, dim = 2, 64, 768
x = torch.randn(batch, length, dim).to("cuda")
model = Mamba(
    # This module uses roughly 3 * expand * d_model^2 parameters
    d_model=dim,  # Model dimension d_model
    d_state=16,  # SSM state expansion factor # 64
    d_conv=4,  # Local convolution width
    expand=2,  # Block expansion factor
    use_fast_path=False,
).to("cuda")
y = model(x)
assert y.shape == x.shape
diff --git a/source_code/SegMamba/mamba/tests/ops/triton/test_selective_state_update.py b/source_code/SegMamba/mamba/tests/ops/triton/test_selective_state_update.py
new file mode 100644
index
0000000000000000000000000000000000000000..70a8d79d9cad3e4d33897478caf178bd96d0ae5a --- /dev/null +++ b/source_code/SegMamba/mamba/tests/ops/triton/test_selective_state_update.py @@ -0,0 +1,49 @@ +# Copyright (C) 2023, Tri Dao. + +import math + +import torch +import torch.nn.functional as F +import pytest + +from einops import rearrange + +from mamba_ssm.ops.triton.selective_state_update import selective_state_update, selective_state_update_ref + + +@pytest.mark.parametrize("itype", [torch.float32, torch.float16, torch.bfloat16]) +# @pytest.mark.parametrize('itype', [torch.float16]) +@pytest.mark.parametrize("has_z", [False, True]) +# @pytest.mark.parametrize('has_z', [True]) +@pytest.mark.parametrize("dstate", [16, 32, 64]) +# @pytest.mark.parametrize("dstate", [16]) +@pytest.mark.parametrize("dim", [2048, 2048 + 16, 4096]) +# @pytest.mark.parametrize("dim", [2048]) +def test_causal_conv1d_update(dim, dstate, has_z, itype): + device = "cuda" + rtol, atol = (3e-4, 1e-3) if itype == torch.float32 else (5e-3, 1e-2) + if itype == torch.bfloat16: + rtol, atol = 1e-2, 5e-2 + # set seed + torch.random.manual_seed(0) + batch_size = 2 + state = torch.randn(batch_size, dim, dstate, dtype=itype, device=device) + x = torch.randn(batch_size, dim, device=device, dtype=itype) + dt = torch.randn(batch_size, dim, device=device, dtype=itype) + dt_bias = torch.rand(dim, device=device) - 4.0 + A = -torch.rand(dim, dstate, device=device) - 1.0 + B = torch.randn(batch_size, dstate, device=device) + C = torch.randn(batch_size, dstate, device=device) + D = torch.randn(dim, device=device) + if has_z: + z = torch.randn_like(x) + else: + z = None + state_ref = state.detach().clone() + out = selective_state_update(state, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True) + out_ref = selective_state_update_ref(state_ref, x, dt, A, B, C, D=D, z=z, dt_bias=dt_bias, dt_softplus=True) + + print(f"Output max diff: {(out - out_ref).abs().max().item()}") + print(f"Output mean diff: {(out - 
out_ref).abs().mean().item()}") + assert torch.allclose(state, state_ref, rtol=rtol, atol=atol) + assert torch.allclose(out, out_ref, rtol=rtol, atol=atol) diff --git a/source_code/SegMamba/model_segmamba/segmamba.py b/source_code/SegMamba/model_segmamba/segmamba.py new file mode 100644 index 0000000000000000000000000000000000000000..481737f0def0e4ffa41e333e15bf32d2f5a54fda --- /dev/null +++ b/source_code/SegMamba/model_segmamba/segmamba.py @@ -0,0 +1,356 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import annotations +import os +import sys +import torch.nn as nn +import torch +from functools import partial + +# Prefer pip-installed MONAI over the local monai/ folder. +os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1") +_repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +if "" in sys.path: + sys.path.remove("") +if _repo_root in sys.path: + sys.path.remove(_repo_root) +import monai # noqa: E402 +sys.path.insert(0, _repo_root) + +from monai.networks.blocks.dynunet_block import UnetOutBlock +from monai.networks.blocks.unetr_block import UnetrBasicBlock, UnetrUpBlock +from mamba_ssm import Mamba +import torch.nn.functional as F + +class LayerNorm(nn.Module): + r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. 
channels_last corresponds to inputs with + shape (batch_size, height, width, channels) while channels_first corresponds to inputs + with shape (batch_size, channels, height, width). + """ + def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) + self.bias = nn.Parameter(torch.zeros(normalized_shape)) + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError + self.normalized_shape = (normalized_shape, ) + + def forward(self, x): + if self.data_format == "channels_last": + return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None, None] * x + self.bias[:, None, None, None] + + return x + +class MambaLayer(nn.Module): + def __init__(self, dim, d_state = 16, d_conv = 4, expand = 2, num_slices=None): + super().__init__() + self.dim = dim + self.norm = nn.LayerNorm(dim) + self.mamba = Mamba( + d_model=dim, # Model dimension d_model + d_state=d_state, # SSM state expansion factor + d_conv=d_conv, # Local convolution width + expand=expand, # Block expansion factor + bimamba_type="v3", + nslices=num_slices, + ) + + def forward(self, x): + B, C = x.shape[:2] + x_skip = x + assert C == self.dim + n_tokens = x.shape[2:].numel() + img_dims = x.shape[2:] + x_flat = x.reshape(B, C, n_tokens).transpose(-1, -2) + x_norm = self.norm(x_flat) + x_mamba = self.mamba(x_norm) + + out = x_mamba.transpose(-1, -2).reshape(B, C, *img_dims) + out = out + x_skip + + return out + +class MlpChannel(nn.Module): + def __init__(self,hidden_size, mlp_dim, ): + super().__init__() + self.fc1 = nn.Conv3d(hidden_size, mlp_dim, 1) + self.act = nn.GELU() + self.fc2 = nn.Conv3d(mlp_dim, hidden_size, 1) + + def 
forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + +class GSC(nn.Module): + def __init__(self, in_channles) -> None: + super().__init__() + + self.proj = nn.Conv3d(in_channles, in_channles, 3, 1, 1) + self.norm = nn.InstanceNorm3d(in_channles) + self.nonliner = nn.ReLU() + + self.proj2 = nn.Conv3d(in_channles, in_channles, 3, 1, 1) + self.norm2 = nn.InstanceNorm3d(in_channles) + self.nonliner2 = nn.ReLU() + + self.proj3 = nn.Conv3d(in_channles, in_channles, 1, 1, 0) + self.norm3 = nn.InstanceNorm3d(in_channles) + self.nonliner3 = nn.ReLU() + + self.proj4 = nn.Conv3d(in_channles, in_channles, 1, 1, 0) + self.norm4 = nn.InstanceNorm3d(in_channles) + self.nonliner4 = nn.ReLU() + + def forward(self, x): + + x_residual = x + + x1 = self.proj(x) + x1 = self.norm(x1) + x1 = self.nonliner(x1) + + x1 = self.proj2(x1) + x1 = self.norm2(x1) + x1 = self.nonliner2(x1) + + x2 = self.proj3(x) + x2 = self.norm3(x2) + x2 = self.nonliner3(x2) + + x = x1 + x2 + x = self.proj4(x) + x = self.norm4(x) + x = self.nonliner4(x) + + return x + x_residual + +class MambaEncoder(nn.Module): + def __init__(self, in_chans=1, depths=[2, 2, 2, 2], dims=[48, 96, 192, 384], + drop_path_rate=0., layer_scale_init_value=1e-6, out_indices=[0, 1, 2, 3]): + super().__init__() + + self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers + stem = nn.Sequential( + nn.Conv3d(in_chans, dims[0], kernel_size=7, stride=2, padding=3), + ) + self.downsample_layers.append(stem) + for i in range(3): + downsample_layer = nn.Sequential( + # LayerNorm(dims[i], eps=1e-6, data_format="channels_first"), + nn.InstanceNorm3d(dims[i]), + nn.Conv3d(dims[i], dims[i+1], kernel_size=2, stride=2), + ) + self.downsample_layers.append(downsample_layer) + + self.stages = nn.ModuleList() + self.gscs = nn.ModuleList() + num_slices_list = [64, 32, 16, 8] + cur = 0 + for i in range(4): + gsc = GSC(dims[i]) + + stage = nn.Sequential( + *[MambaLayer(dim=dims[i], 
num_slices=num_slices_list[i]) for j in range(depths[i])] + ) + + self.stages.append(stage) + self.gscs.append(gsc) + cur += depths[i] + + self.out_indices = out_indices + + self.mlps = nn.ModuleList() + for i_layer in range(4): + layer = nn.InstanceNorm3d(dims[i_layer]) + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + self.mlps.append(MlpChannel(dims[i_layer], 2 * dims[i_layer])) + + def forward_features(self, x): + outs = [] + for i in range(4): + x = self.downsample_layers[i](x) + x = self.gscs[i](x) + x = self.stages[i](x) + + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + x_out = norm_layer(x) + x_out = self.mlps[i](x_out) + outs.append(x_out) + + return tuple(outs) + + def forward(self, x): + x = self.forward_features(x) + return x + +class SegMamba(nn.Module): + def __init__( + self, + in_chans=1, + out_chans=13, + depths=[2, 2, 2, 2], + feat_size=[48, 96, 192, 384], + drop_path_rate=0, + layer_scale_init_value=1e-6, + hidden_size: int = 768, + norm_name = "instance", + conv_block: bool = True, + res_block: bool = True, + spatial_dims=3, + ) -> None: + super().__init__() + + self.hidden_size = hidden_size + self.in_chans = in_chans + self.out_chans = out_chans + self.depths = depths + self.drop_path_rate = drop_path_rate + self.feat_size = feat_size + self.layer_scale_init_value = layer_scale_init_value + + self.spatial_dims = spatial_dims + self.vit = MambaEncoder(in_chans, + depths=depths, + dims=feat_size, + drop_path_rate=drop_path_rate, + layer_scale_init_value=layer_scale_init_value, + ) + self.encoder1 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.in_chans, + out_channels=self.feat_size[0], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + self.encoder2 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[0], + out_channels=self.feat_size[1], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + 
self.encoder3 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[1], + out_channels=self.feat_size[2], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + self.encoder4 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[2], + out_channels=self.feat_size[3], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + + self.encoder5 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[3], + out_channels=self.hidden_size, + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + + self.decoder5 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=self.hidden_size, + out_channels=self.feat_size[3], + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=res_block, + ) + self.decoder4 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[3], + out_channels=self.feat_size[2], + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=res_block, + ) + self.decoder3 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[2], + out_channels=self.feat_size[1], + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=res_block, + ) + self.decoder2 = UnetrUpBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[1], + out_channels=self.feat_size[0], + kernel_size=3, + upsample_kernel_size=2, + norm_name=norm_name, + res_block=res_block, + ) + self.decoder1 = UnetrBasicBlock( + spatial_dims=spatial_dims, + in_channels=self.feat_size[0], + out_channels=self.feat_size[0], + kernel_size=3, + stride=1, + norm_name=norm_name, + res_block=res_block, + ) + self.out = UnetOutBlock(spatial_dims=spatial_dims, in_channels=48, out_channels=self.out_chans) + + def proj_feat(self, x): + new_view = [x.size(0)] + self.proj_view_shape + x = x.view(new_view) + x = x.permute(self.proj_axes).contiguous() + return x + + def forward(self, 
x_in): + outs = self.vit(x_in) + enc1 = self.encoder1(x_in) + x2 = outs[0] + enc2 = self.encoder2(x2) + x3 = outs[1] + enc3 = self.encoder3(x3) + x4 = outs[2] + enc4 = self.encoder4(x4) + enc_hidden = self.encoder5(outs[3]) + dec3 = self.decoder5(enc_hidden, enc4) + dec2 = self.decoder4(dec3, enc3) + dec1 = self.decoder3(dec2, enc2) + dec0 = self.decoder2(dec1, enc1) + out = self.decoder1(dec0) + + return self.out(out) + diff --git a/source_code/SegMamba/monai/.DS_Store b/source_code/SegMamba/monai/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..931d3855616e373c0bec96373a4b56604bb9b337 Binary files /dev/null and b/source_code/SegMamba/monai/.DS_Store differ diff --git a/source_code/SegMamba/monai/README.md b/source_code/SegMamba/monai/README.md new file mode 100644 index 0000000000000000000000000000000000000000..a1e36c62100cc46ec01c41a4d1bd72aa93705649 --- /dev/null +++ b/source_code/SegMamba/monai/README.md @@ -0,0 +1,38 @@ +# MONAI + +* **apps**: high level medical domain specific deep learning applications. + +* **auto3dseg**: automated machine learning (AutoML) components for volumetric image analysis. + +* **bundle**: components to build the portable self-descriptive model bundle. + +* **config**: for system configuration and diagnostic output. + +* **csrc**: for C++/CUDA extensions. + +* **data**: for the datasets, readers/writers, and synthetic data. + +* **engines**: engine-derived classes for extending Ignite behaviour. + +* **fl**: federated learning components to allow pipeline integration with any federated learning framework. + +* **handlers**: defines handlers for implementing functionality at various stages in the training process. + +* **inferers**: defines model inference methods. + +* **losses**: classes defining loss functions, which follow the pattern of `torch.nn.modules.loss`. + +* **metrics**: defines metric tracking types. 

* **networks**: contains network definitions, component definitions, and PyTorch-specific utilities.

* **optimizers**: classes defining optimizers, which follow the pattern of `torch.optim`.

* **transforms**: defines data transforms for preprocessing and postprocessing.

* **utils**: generic utilities intended to be implemented in pure Python or using NumPy,
and not with PyTorch, such as namespace aliasing, auto module loading.

* **visualize**: utilities for data visualization.

* **_extensions**: C++/CUDA extensions to be loaded in a just-in-time manner using `torch.utils.cpp_extension.load`.
diff --git a/source_code/SegMamba/monai/__init__.py b/source_code/SegMamba/monai/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..695f05d02483e60f8d5f4279994e8a39b622feed
--- /dev/null
+++ b/source_code/SegMamba/monai/__init__.py
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import os
import sys

from ._version import get_versions

# Minimum supported interpreter for this MONAI snapshot.
PY_REQUIRED_MAJOR = 3
PY_REQUIRED_MINOR = 8

version_dict = get_versions()
__version__: str = version_dict.get("version", "0+unknown")
__revision_id__: str = version_dict.get("full-revisionid")
del get_versions, version_dict

__copyright__ = "(c) MONAI Consortium"

__basedir__ = os.path.dirname(__file__)

# Warn (do not fail) on unsupported Python versions.
if sys.version_info.major != PY_REQUIRED_MAJOR or sys.version_info.minor < PY_REQUIRED_MINOR:
    import warnings

    warnings.warn(
        f"MONAI requires Python {PY_REQUIRED_MAJOR}.{PY_REQUIRED_MINOR} or higher. "
        f"But the current Python is: {sys.version}",
        category=RuntimeWarning,
    )

from .utils.module import load_submodules  # noqa: E402

# handlers_* have some external decorators the users may not have installed
# *.so files and folder "_C" may not exist when the cpp extensions are not compiled
excludes = "|".join(
    [
        "(^(monai.handlers))",
        "(^(monai.bundle))",
        "(^(monai.fl))",
        "((\\.so)$)",
        "(^(monai._C))",
        "(.*(__main__)$)",
        "(.*(video_dataset)$)",
        "(.*(nnunet).*$)",
    ]
)

# MONAI_SKIP_SUBMODULES lets embedders (e.g. segmamba.py) avoid the costly
# import-time submodule autoloading.
_skip_submodules = os.environ.get("MONAI_SKIP_SUBMODULES", "").lower() in ("1", "true", "yes")
if not _skip_submodules:
    # load directory modules only, skip loading individual files
    load_submodules(sys.modules[__name__], False, exclude_pattern=excludes)

    # load all modules, this will trigger all export decorations
    load_submodules(sys.modules[__name__], True, exclude_pattern=excludes)

__all__ = [
    "apps",
    "auto3dseg",
    "bundle",
    "config",
    "data",
    "engines",
    "fl",
    "handlers",
    "inferers",
    "losses",
    "metrics",
    "networks",
    "optimizers",
    "transforms",
    "utils",
    "visualize",
]

# Best-effort environment probing; any failure here is swallowed unless
# MONAI debug mode is enabled.
try:
    from .utils.tf32 import detect_default_tf32

    detect_default_tf32()
    import torch

    # workaround related to https://github.com/Project-MONAI/MONAI/issues/7575
    if hasattr(torch.cuda.device_count, "cache_clear"):
        torch.cuda.device_count.cache_clear()
except BaseException:
    from .utils.misc import MONAIEnvVars

    if MONAIEnvVars.debug():
        raise
diff --git a/source_code/SegMamba/monai/_extensions/__init__.py b/source_code/SegMamba/monai/_extensions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..47d0c7021ac5adb355ded3b6c0d68b920b88d6ad
--- /dev/null
+++ b/source_code/SegMamba/monai/_extensions/__init__.py
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from .loader import load_module
diff --git a/source_code/SegMamba/monai/_version.py b/source_code/SegMamba/monai/_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b8b186ff8f27945b836370dae364b7ba759655c
--- /dev/null
+++ b/source_code/SegMamba/monai/_version.py

# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.

# This file is released into the public domain.
# Generated by
# versioneer-0.23 (https://github.com/python-versioneer/python-versioneer)
# NOTE: auto-generated versioneer boilerplate — do not hand-edit beyond comments.

"""Git implementation of _version.py."""

import errno
import os
import re
import subprocess
import sys
from typing import Callable, Dict
import functools


def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = " (HEAD -> dev, refs/pull/7696/head)"
    git_full = "fe733b0ff1951ee752ab87ebfe5c4b7c82d30579"
    git_date = "2024-05-07 11:55:31 +0800"
    keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
    return keywords


class VersioneerConfig:
    """Container for Versioneer configuration parameters."""


def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    cfg.VCS = "git"
    cfg.style = "pep440"
    cfg.tag_prefix = ""
    cfg.parentdir_prefix = ""
    cfg.versionfile_source = "monai/_version.py"
    cfg.verbose = False
    return cfg


class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""


LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}


def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        if vcs not in HANDLERS:
            HANDLERS[vcs] = {}
        HANDLERS[vcs][method] = f
        return f
    return decorate


def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s)."""
    assert isinstance(commands, list)
    process = None

    popen_kwargs = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo

    # Try each candidate executable in turn (e.g. git.cmd then git.exe).
    for command in commands:
        try:
            dispcmd = str([command] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None), **popen_kwargs)
            break
        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode


def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    rootdirs = []

    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {"version": dirname[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        rootdirs.append(root)
        root = os.path.dirname(root)  # up a level

    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(rootdirs), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")


@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except OSError:
        pass
    return keywords


@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line. Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]

        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            # Filter out refs that exactly match prefix or that don't start
            # with a number once the prefix is stripped (mostly a concern
            # when prefix is '')
            if not re.match(r'\d', r):
                continue
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}


@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]

    # GIT_DIR can interfere with correct operation of Versioneer.
    # It may be intended to be passed to the Versioneer-versioned project,
    # but that should not change where we get our version from.
+ env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=True) + if rc != 0: + if verbose: + print("Directory %s not under git control" % root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. 
+ branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. 
+ date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces): + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces): + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_branch(pieces): + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). + + Exceptions: + 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver): + """Split pep440 version string at the post-release segment. 
+ + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces): + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + if pieces["distance"]: + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%d" % (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] + else: + # exception #1 + rendered = "0.post0.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_post_branch(pieces): + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 
Like 'git describe --tags --dirty --always --long'.
HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces, style): + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +def get_versions(): + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. 
+ + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. + for _ in cfg.versionfile_source.split('/'): + root = os.path.dirname(root) + except NameError: + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None} + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", "date": None} diff --git a/source_code/SegMamba/monai/apps/__init__.py b/source_code/SegMamba/monai/apps/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9cc7aeb8e052c23701c7d73a9a98dc37af1de77e --- /dev/null +++ b/source_code/SegMamba/monai/apps/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from .datasets import CrossValidation, DecathlonDataset, MedNISTDataset, TciaDataset +from .mmars import MODEL_DESC, RemoteMMARKeys, download_mmar, get_model_spec, load_from_mmar +from .utils import SUPPORTED_HASH_TYPES, check_hash, download_and_extract, download_url, extractall, get_logger, logger diff --git a/source_code/SegMamba/monai/py.typed b/source_code/SegMamba/monai/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/source_code/gliomasam3_moe/GliomaSAM3D-MoE_updated.tex b/source_code/gliomasam3_moe/GliomaSAM3D-MoE_updated.tex new file mode 100644 index 0000000000000000000000000000000000000000..084c44dc50962731a5329dffdc39fd1197023f24 --- /dev/null +++ b/source_code/gliomasam3_moe/GliomaSAM3D-MoE_updated.tex @@ -0,0 +1,647 @@ +\documentclass[11pt]{article} + +\usepackage[a4paper,margin=1in]{geometry} +\usepackage[T1]{fontenc} +\usepackage{lmodern} +\usepackage{microtype} + +\usepackage{amsmath,amssymb,amsfonts,bm} +\usepackage{graphicx} +\usepackage{booktabs} +\usepackage{multirow} +\usepackage{enumitem} +\usepackage{algorithm} +\usepackage{algorithmic} +\usepackage{hyperref} +\usepackage[nameinlink]{cleveref} + +\input{macros} + +\title{GliomaSAM3D-MoE: Concept-Prompted 3D Glioma Segmentation with Dual-Domain Enhancement and Sparse Mixture-of-Experts} +\author{Anonymous} +\date{} + +\begin{document} +\maketitle + +\begin{abstract} +Accurate delineation of glioma subregions in multi-parametric MRI is central to treatment planning and longitudinal assessment, and has been standardized by the Brain Tumor Segmentation (BraTS) benchmark~\cite{Menze2015BraTS,Baid2021BraTS,SynapseBraTS2023}. 
+While recent 3D CNN/Transformer segmenters achieve strong overlap metrics, they remain brittle to boundary ambiguity, class imbalance, and the frequent absence of enhancing tumor (\ET), where spurious \ET predictions can disproportionately degrade surface-distance criteria (e.g., HD95). +Inspired by the promptable design of the Segment Anything Model (SAM), we propose \textbf{GliomaSAM3D-MoE}, a fully automatic 3D glioma segmentation framework that replaces manual prompts with learned concept tokens. +Our method combines an \ET-existence gate with direction-aware dual-domain enhancement and a task-structured sparse mixture-of-experts decoder to reduce false positives and improve boundary fidelity. +\end{abstract} + +\section{Introduction}\label{sec:intro} +Gliomas are among the most common primary brain tumors, and accurate segmentation of tumor subregions from multi-parametric MRI (mpMRI) is essential for diagnosis, treatment planning, radiotherapy targeting, and longitudinal response assessment. +To foster reproducible evaluation, the Brain Tumor Segmentation (BraTS) challenges provide multi-institutional mpMRI scans with expert annotations and standardized evaluation protocols~\cite{Menze2015BraTS,Baid2021BraTS,SynapseBraTS2023}. +In the conventional BraTS setting, methods segment three clinically-relevant regions---whole tumor (\WT), tumor core (\TC), and enhancing tumor (\ET)---derived from voxel-wise labels of edema, non-enhancing/necrotic core, and enhancing components~\cite{Menze2015BraTS,Baid2021BraTS}. + +Despite steady progress, BraTS glioma segmentation remains challenging for three recurring reasons. +First, tumor boundaries can be ambiguous due to partial volume effects, intensity inhomogeneity, and infiltrative growth patterns, making boundary-sensitive metrics (e.g., HD95) particularly unforgiving. 
+Second, the region hierarchy (\ET $\subseteq$ \TC $\subseteq$ \WT) induces severe class imbalance, where small \ET volumes are easily overwhelmed by \WT/\TC during optimization. +Third, \ET is frequently absent in low-grade cases and in portions of the cohort; in such \ET-absent volumes, even a small number of false-positive \ET voxels can yield large surface-distance penalties and clinically misleading ``enhancing'' findings. +These properties suggest that effective models should (i) emphasize high-frequency boundary cues, (ii) respect the nested region structure, and (iii) explicitly model \emph{existence} (whether a region should appear) separately from \emph{localization} (where it appears). + +Most state-of-the-art BraTS solutions build on volumetric encoder--decoder architectures, including 3D CNN families~\cite{Cicek2016,Milletari2016VNet,Kamnitsas2017DeepMedic,Isensee2021nnUNet,Roy2023MedNeXt} and transformer-based variants~\cite{Wang2021TransBTS,Hatamizadeh2022UNETR,Hatamizadeh2022SwinUNETR}. +While these models can achieve strong Dice scores, they typically treat each subregion as a dense per-voxel classification problem, without an explicit mechanism to prevent anatomically-implausible ``hallucinations'' of \ET in \ET-absent cases, and without a targeted strategy to align optimization with surface-distance behavior. + +In parallel, promptable segmentation foundation models such as SAM~\cite{Kirillov2023SAM} introduce a compelling alternative abstraction: the user (or an upstream module) specifies \emph{what} to segment via prompts, and the model focuses on \emph{where} to segment. +Recent medical adaptations demonstrate that SAM-style pretraining can transfer to medical targets~\cite{Ma2024MedSAM}, and that SAM-inspired designs can be extended to volumetric data~\cite{Bui2023SAM3D,Wang2023SAMMed3D}. 
+However, most promptable approaches are interactive (requiring points/boxes), operate slice-wise, or do not explicitly address BraTS-specific challenges such as \ET absence, nested region constraints, and boundary-driven evaluation. + +To bridge these gaps, we propose \textbf{GliomaSAM3D-MoE}, a fully automatic SAM-style volumetric segmenter tailored to BraTS glioma subregions. +Our key idea is to replace manual prompts with \emph{learned concept tokens} predicted from the 3D volume, and to structure the decoder as a \emph{task-aware sparse mixture-of-experts} so that different experts specialize to \WT/\TC/\ET decoding. +Crucially, we introduce an explicit \ET-existence classifier whose probability gates the \ET mask to reduce false positives in \ET-absent volumes, and we design a direction-aware dual-domain enhancement module that injects high-frequency priors and spectral modulation to improve boundary fidelity and robustness. + +Our main contributions are: +\begin{itemize} + \item \textbf{Concept-prompted automatic segmentation.} We introduce a SAM-style framework that predicts discrete concept tokens from the input volume and uses them as prompts for region-specific mask decoding, enabling fully automatic volumetric glioma segmentation. + \item \textbf{\ET-aware existence gating.} We decouple \emph{existence} from \emph{localization} by adding an \ET-presence predictor and an explicit gating mechanism to suppress spurious \ET predictions in \ET-absent cases, targeting improved boundary-sensitive evaluation. + \item \textbf{Direction-aware dual-domain enhancement.} We combine high-frequency directional priors with calibrated multi-scale fusion and spectral modulation to sharpen boundaries and improve robustness to acquisition/style shifts. 
+ \item \textbf{Task-structured sparse MoE decoding.} We propose a region-aware sparse mixture-of-experts decoder for \WT/\TC/\ET that promotes expert specialization without incurring the full cost of dense multi-branch decoding. +\end{itemize} + +\section{Related Work}\label{sec:related} +\subsection{Glioma segmentation on BraTS} +Early BraTS approaches combined hand-crafted features with classical classifiers, but modern solutions are dominated by deep volumetric segmentation networks. +3D encoder--decoders, such as 3D U-Net~\cite{Cicek2016} and V-Net~\cite{Milletari2016VNet}, established a strong baseline for dense volumetric prediction. +Multi-scale and context-aware designs (e.g., DeepMedic~\cite{Kamnitsas2017DeepMedic}) further improved robustness for heterogeneous lesions. +More recently, nnU-Net~\cite{Isensee2021nnUNet} popularized automated pipeline configuration and remains a widely used competitive baseline in medical challenges, including BraTS. +Transformer-based volumetric segmenters improve global context modeling, exemplified by TransBTS~\cite{Wang2021TransBTS}, UNETR~\cite{Hatamizadeh2022UNETR}, and Swin UNETR~\cite{Hatamizadeh2022SwinUNETR}. +Convolutional architectures inspired by transformer design principles (e.g., MedNeXt~\cite{Roy2023MedNeXt}) also show competitive performance with favorable efficiency. + +Despite strong overlap metrics, BraTS segmentation is still hampered by (i) boundary ambiguity and (ii) rare/absent subregions (notably \ET), where naive voxel-wise decoding can yield false positives that severely affect surface-distance measures. +Our work focuses on explicitly modeling \ET existence and improving boundary fidelity within a SAM-style prompt-conditioned decoding paradigm. + +\subsection{Promptable and foundation models for medical segmentation} +SAM~\cite{Kirillov2023SAM} introduced a promptable segmentation paradigm that generalizes across diverse natural-image targets via point/box/mask prompts. 
+MedSAM~\cite{Ma2024MedSAM} demonstrated that SAM can be adapted to medical images through large-scale fine-tuning, improving performance on typical medical targets. +Extending promptable segmentation to volumetric data is an active research direction: SAM3D~\cite{Bui2023SAM3D} adapts SAM-style features to 3D volumes, and SAM-Med3D~\cite{Wang2023SAMMed3D} constructs a fully learnable 3D promptable model trained on large-scale volumetric masks. +While these approaches advance general-purpose promptable segmentation, BraTS requires \emph{fully automatic} multi-region decoding (\WT/\TC/\ET), careful handling of \ET-absent cases, and improved boundary behavior. +We therefore replace manual prompts with learned concept tokens and add an \ET-existence gate that targets BraTS-specific failure modes. + +\subsection{Boundary-aware learning and frequency-domain robustness} +Region-overlap losses (e.g., Dice) may under-penalize boundary errors, motivating boundary-aware objectives such as boundary loss~\cite{Kervadec2019BoundaryLoss} and losses that explicitly target Hausdorff distance behavior~\cite{Karimi2019HDLoss}. +In parallel, domain shift across scanners, protocols, and institutions has motivated frequency-domain adaptation and augmentation. +FDA~\cite{Yang2020FDA} reduces style discrepancy by swapping low-frequency amplitude components, and AmpMix~\cite{Xu2023AmpMix} perturbs amplitude while preserving phase semantics to improve domain generalization. +Our method integrates boundary-centric high-frequency priors and spectral modulation as complementary mechanisms for robust BraTS segmentation across dataset editions. + +\subsection{Mixture-of-experts for dense prediction} +Mixture-of-experts (MoE) enables conditional computation by routing each input to a sparse subset of specialized experts, improving capacity without proportional cost~\cite{Shazeer2017MoE,Fedus2022Switch}. 
+In computer vision, MoE has been explored for multi-task learning and dense prediction with adaptive routing~\cite{Chen2023AdaMVMoE}. +Motivated by the heterogeneity of BraTS subregions and the nested (\ET $\subseteq$ \TC $\subseteq$ \WT) structure, we design a task-structured sparse MoE decoder that encourages expert specialization across regions while remaining efficient for 3D inference. + +\section{Method}\label{sec:method} + +\subsection{Problem formulation and notation} +Given a multi-modal MRI volume +$\mathbf{X}\in\R^{C\times H\times W\times D}$ with $C=4$ modalities, +our goal is to predict a voxel-wise label map +$\widehat{\mathbf{Y}}\in\{0,1,2,3\}^{H\times W\times D}$. +Equivalently, we predict three standard BraTS region masks +$\widehat{\mathbf{m}}_{r}\in[0,1]^{H\times W\times D}$ for $r\in\{\WT,\TC,\ET\}$ (using the canonical definitions \ET$\subseteq$\TC$\subseteq$\WT). +The method is \emph{fully automatic}: no user interactions at inference time. + +\subsection{Overview} +GliomaSAM3D-MoE consists of five key components: +\begin{enumerate}[leftmargin=*] + \item a parameter-free high-frequency direction injection module (HFDI-3D) to provide directional boundary priors, + \item a SAM-style 2D image encoder $E_{\mathrm{img}}(\cdot)$ applied to each slice, + \item a lightweight slice-as-sequence 3D adaptation module $T_{\mathrm{seq}}(\cdot)$ for inter-slice context, + \item a concept-prompt module with (i) a fixed discrete token vocabulary and (ii) an attribute predictor $h_{\mathrm{attr}}(\cdot)$ to infer tokens at test time, + \item a direction-aware dual-domain enhancement branch (MSDA-3D + FA + FCF + spectral modulation) and a task-structured sparse MoE decoder to output region logits. +\end{enumerate} + +\subsection{High-frequency direction injection (HFDI-3D)} +To strengthen boundary cues and tiny-fragment sensitivity, we inject a \emph{directional} high-frequency prior using parameter-free operators. 
+We first compute a modality-averaged volume: +\begin{equation} +\bar{\mathbf{X}}=\frac{1}{C}\sum_{c=1}^{C}\mathbf{X}^{(c)}\in\R^{H\times W\times D}. +\end{equation} +Using fixed finite-difference (or 3D Sobel) operators $\nabla_x,\nabla_y,\nabla_z$, we extract directional high-frequency maps: +\begin{equation} +\mathbf{G}_x=\nabla_x \bar{\mathbf{X}},\qquad \mathbf{G}_y=\nabla_y \bar{\mathbf{X}},\qquad \mathbf{G}_z=\nabla_z \bar{\mathbf{X}}. +\end{equation} +We form a normalized direction stack +\begin{equation} +\mathbf{H}=\mathrm{Norm}\!\Big(\Concat\big(|\mathbf{G}_x|,|\mathbf{G}_y|,|\mathbf{G}_z|\big)\Big)\in\R^{3\times H\times W\times D}, +\end{equation} +and concatenate it with the original modalities: +\begin{equation} +\mathbf{X}^{+}=\Concat(\mathbf{X},\mathbf{H})\in\R^{(C+3)\times H\times W\times D}. +\end{equation} +This augmentation is deterministic and parameter-free, serving as an explicit directional prior that is especially helpful for fragmented \ET boundaries. + +\subsection{Slice-as-sequence 3D adaptation} +We interpret the axial dimension as a sequence of ``frames'': $\{\mathbf{x}_t\}_{t=1}^{D}$, +where $\mathbf{x}_t\in\R^{C\times H\times W}$ denotes the $t$-th slice. +After HFDI-3D, the image encoder ingests the augmented slice $\mathbf{x}^{+}_t\in\R^{(C+3)\times H\times W}$. +The 2D image encoder produces per-slice token embeddings: +\begin{equation} +\mathbf{F}_t = E_{\mathrm{img}}(\mathbf{x}^{+}_t)\in\R^{N\times d}, +\end{equation} +where $N$ is the number of tokens and $d$ is the token dimension. + +To inject 3D context, we aggregate neighboring slice information within a short window +$\mathcal{W}_t=\{t-K,\ldots,t-1\}$ using memory-style cross-attention: +\begin{equation} +\widetilde{\mathbf{F}}_t = +\Attn\!\left( +\mathbf{Q}=\mathbf{F}_t,\, +\mathbf{K}=\Concat(\mathbf{F}_{t-K},\ldots,\mathbf{F}_{t-1}),\, +\mathbf{V}=\Concat(\mathbf{F}_{t-K},\ldots,\mathbf{F}_{t-1}) +\right). 
+\end{equation} +In practice, $K$ is small (e.g., 4--8) for efficiency. +During training we randomize traversal direction (forward/backward) to encourage bidirectional consistency without doubling inference cost. + +\subsection{Discrete concept tokens and prompt injection} +\paragraph{Fixed concept vocabulary.} +Instead of free-form natural language (which can be non-deterministic at inference), we define a fixed vocabulary $\mathcal{V}$ of discrete concept tokens: +\begin{equation} +\mathcal{V}=\{\texttt{WT},\texttt{TC},\texttt{ET},\texttt{ET\_PRESENT},\texttt{ET\_ABSENT},\texttt{FRAG\_BIN}_i,\texttt{SCALE\_BIN}_j,\ldots\}. +\end{equation} +During training, token supervision is derived from the ground truth masks (e.g., \ET presence, fragmentation bins, scale bins). +At inference, tokens are predicted by a lightweight attribute predictor, avoiding train/test mismatch. + +\paragraph{Attribute predictor and prompt embeddings.} +We compute a global volume descriptor by pooling over tokens and slices: +\begin{equation} +\mathbf{z} = \Pool(\{\widetilde{\mathbf{F}}_t\}_{t=1}^{D}). +\end{equation} +An attribute head $h_{\mathrm{attr}}(\cdot)$ outputs predicted concept labels $\widehat{\mathbf{c}}$ and an \ET presence probability $\pi_{\ET}\in[0,1]$: +\begin{equation} +(\widehat{\mathbf{c}},\, \pi_{\ET}) = h_{\mathrm{attr}}(\mathbf{z}). +\end{equation} +The selected tokens are embedded and injected via a prompt encoder $E_{\mathrm{prm}}(\cdot)$: +\begin{equation} +\mathbf{p} = E_{\mathrm{prm}}(\mathrm{Embed}(\widehat{\mathbf{c}})). +\end{equation} + +\paragraph{\ET presence gating.} +Let $\mathbf{l}_{\ET}\in\R^{H\times W\times D}$ denote the \ET logits from the decoder. +We convert logits to probabilities and gate with $\pi_{\ET}$ to suppress false positives in \ET-absent volumes: +\begin{equation} +\widehat{\mathbf{m}}_{\ET} = \sigma(\mathbf{l}_{\ET})\cdot \pi_{\ET}. 
+\end{equation} +This explicitly decouples existence and localization for \ET and stabilizes HD95 under small spurious detections. + +\subsection{Direction-aware dual-domain enhancement (MSDA-3D + FA + FCF)} +\paragraph{Learnable spectral modulation.} +For a training crop (or a full volume) $\mathbf{X}$, we compute a 3D Fourier transform per modality channel: +\begin{equation} +\FFT(\mathbf{X}) = \mathbf{A}\odot e^{\ii\mathbf{\Phi}}, +\end{equation} +where $\mathbf{A}$ and $\mathbf{\Phi}$ denote amplitude and phase, respectively. +We apply a learnable radial frequency gate $w_{\theta}(r)$ (parameterized by $\theta$ and indexed by radial frequency magnitude $r$): +\begin{equation} +\mathbf{A}' = \mathbf{A}\odot w_{\theta}(r),\qquad +\mathbf{X}_{\mathrm{spec}} = \IFFT\!\left(\mathbf{A}'\odot e^{\ii\mathbf{\Phi}}\right). +\end{equation} +We summarize directional frequency statistics (used later for routing and fusion) by partitioning the frequency domain into $Q$ directional sectors $\{\mathbf{B}_q\}_{q=1}^{Q}$ and computing +\begin{equation} +s_q=\frac{\langle \mathbf{A},\mathbf{B}_q\rangle}{\langle \mathbf{A},\mathbf{1}\rangle},\qquad \mathbf{s}=[s_1,\ldots,s_Q]. +\end{equation} +The spectral-enhanced volume $\mathbf{X}_{\mathrm{spec}}$ is fused with spatial features before decoding. + +\paragraph{Multi-scale direction-aware module (MSDA-3D).} +We further enhance directional perception across scales. +Let $\mathbf{U}$ denote a 3D feature tensor (obtained by reshaping token embeddings to a grid and aggregating slices). 
+For each scale $k\in\mathcal{K}$ and direction $d\in\{x,y,z\}$, we apply a directional depthwise convolution: +\begin{equation} +\mathbf{U}_{k,d}=\mathrm{DWConv}_{k}^{(d)}(\mathbf{U}), +\end{equation} +and combine them with learned attention weights $a_{k,d}$: +\begin{equation} +a_{k,d}=\mathrm{Softmax}_{k,d}\big(\mathrm{MLP}(\Pool(\mathbf{U}))\big),\qquad +\mathbf{U}_{\mathrm{msda}}=\sum_{k\in\mathcal{K}}\sum_{d\in\{x,y,z\}} a_{k,d}\odot \mathbf{U}_{k,d}. +\end{equation} +This module promotes multi-scale local relation modeling while retaining explicit direction selectivity. + +\paragraph{Feature aggregation (FA) and feature calibration fusion (FCF).} +To prevent tiny \ET targets from vanishing in high-level representations, we aggregate multi-level features into a lesion-preserving representation $\mathbf{U}_{\mathrm{fa}}=\mathrm{Agg}(\{\mathbf{U}^{(\ell)}\}_{\ell=1}^{L})$. +We then calibrate cross-source fusion (spatial vs.\ spectral, and multi-level vs.\ MSDA-enhanced) using a lightweight gate: +\begin{equation} +\boldsymbol{\eta}=\sigma\!\Big(\mathrm{MLP}\big(\Pool(\Concat(\mathbf{U}_{\mathrm{fa}},\mathbf{U}_{\mathrm{msda}}))\big)\Big),\qquad +\mathbf{U}_{\mathrm{fuse}}=\boldsymbol{\eta}\odot \mathbf{U}_{\mathrm{fa}}+(1-\boldsymbol{\eta})\odot \mathbf{U}_{\mathrm{msda}}. +\end{equation} +Finally, $\mathbf{U}_{\mathrm{fuse}}$ is fused with spectral features derived from $\mathbf{X}_{\mathrm{spec}}$ (e.g., via concatenation or cross-attention) and fed to the MoE decoder. 
+ +\paragraph{Fourier amplitude mixing augmentation.} +To improve robustness to acquisition/style variation, we randomly mix Fourier amplitudes across samples while preserving phase: +\begin{equation} +\mathbf{A}_{\mathrm{mix}} = \alpha \mathbf{A}^{(a)} + (1-\alpha)\mathbf{A}^{(b)},\qquad +\mathbf{X}_{\mathrm{mix}} = \IFFT\!\left(\mathbf{A}_{\mathrm{mix}}\odot e^{\ii\mathbf{\Phi}^{(a)}}\right), +\end{equation} +where $\alpha\sim\mathrm{Beta}(\beta,\beta)$ and $(a,b)$ denote two randomly paired training samples. + +\subsection{Task-structured sparse MoE decoder} +We design $M$ expert decoders $\{D_m\}_{m=1}^{M}$ specialized for different targets: +\texttt{\WT/edema}, \texttt{\TC/core}, \texttt{\ET/fine}, \texttt{boundary}, and \texttt{FP-suppress}. +A gating network $G(\cdot)$ produces routing weights conditioned on global visual context and prompt embeddings: +\begin{equation} +\boldsymbol{\gamma} = G\!\left(\mathbf{z},\,\mathbf{p},\,\mathbf{s}\right),\qquad +\sum_{m=1}^{M}\gamma_m = 1, +\end{equation} +where $\mathbf{s}$ denotes optional spectral statistics (e.g., band-energy ratios). +Each expert outputs a 3-channel logit tensor $\mathbf{L}^{(m)}\in\R^{3\times H\times W\times D}$ for $\{\WT,\TC,\ET\}$. +We apply sparse top-$k$ routing (e.g., $k=2$) to combine expert logits: +\begin{equation} +\mathbf{L} = +\sum_{m\in \TopK(\boldsymbol{\gamma})} +\gamma_m \cdot D_m(\{\widetilde{\mathbf{F}}_t\}_{t=1}^{D}, \mathbf{p}) \;=\; \sum_{m\in \TopK(\boldsymbol{\gamma})}\gamma_m \mathbf{L}^{(m)}. +\end{equation} +We denote the region-specific logits by $\{\mathbf{l}_{\WT},\mathbf{l}_{\TC},\mathbf{l}_{\ET}\}$ as the three channels of $\mathbf{L}$. +A load-balancing regularizer encourages diverse expert utilization. 
+ +\subsection{Training objectives} +The overall loss is: +\begin{equation} +\mathcal{L} = +\mathcal{L}_{\mathrm{seg}} + + \lambda_{\mathrm{pres}}\mathcal{L}_{\mathrm{pres}} + + \lambda_{\mathrm{attr}}\mathcal{L}_{\mathrm{attr}} + + \lambda_{\mathrm{moe}}\mathcal{L}_{\mathrm{moe}} + + \lambda_{\mathrm{hier}}\mathcal{L}_{\mathrm{hier}}. +\end{equation} + +\paragraph{Segmentation loss.} +We combine Dice and cross-entropy, with \ET-aware reweighting and focal emphasis: +\begin{equation} +\mathcal{L}_{\mathrm{seg}} = +\sum_{r\in\{\WT,\TC,\ET\}} +\lambda_r \cdot \mathcal{L}_{\mathrm{Dice}}^{(r)} + + \lambda_{\mathrm{CE}}\cdot \mathcal{L}_{\mathrm{CE}} + + \lambda_{\mathrm{Focal}}\cdot \mathcal{L}_{\mathrm{Focal}}^{(\ET)}. +\end{equation} + +\paragraph{Presence and attribute supervision.} +\ET presence uses binary cross-entropy: +\begin{equation} +\mathcal{L}_{\mathrm{pres}} = \BCE(\pi_{\ET}, y_{\ET}^{\mathrm{pres}}), +\end{equation} +where $y_{\ET}^{\mathrm{pres}}\in\{0,1\}$ is the ground-truth \ET presence indicator. +Other concept attributes (fragmentation/scale bins) use multi-class cross-entropy: +\begin{equation} +\mathcal{L}_{\mathrm{attr}} = \sum_{u\in\mathcal{U}} \CE(\widehat{c}_u, c_u). +\end{equation} +Here, $\mathcal{U}$ denotes the set of non-presence concept attributes (e.g., fragmentation bin, scale bin), with ground-truth label $c_u$ and prediction $\widehat{c}_u$ for each attribute $u$. + +\paragraph{Hierarchy consistency.} +We softly enforce logical constraints (e.g., \ET $\subseteq$ \TC $\subseteq$ \WT) by penalizing violations: +\begin{equation} +\mathcal{L}_{\mathrm{hier}} = +\left\|\max(\widehat{\mathbf{m}}_{\ET}-\widehat{\mathbf{m}}_{\TC},0)\right\|_1 + + \left\|\max(\widehat{\mathbf{m}}_{\TC}-\widehat{\mathbf{m}}_{\WT},0)\right\|_1. 
+\end{equation} + +\paragraph{MoE load balancing.} +To prevent routing collapse, we regularize the batch-average routing probabilities: +\begin{equation} +\mathcal{L}_{\mathrm{moe}} = +\sum_{m=1}^{M} \left(\overline{\gamma}_m - \frac{1}{M}\right)^2, +\end{equation} +where $\overline{\gamma}_m$ is the batch-average of $\gamma_m$. + +\subsection{Inference and post-processing} +At inference, concept tokens are predicted by $h_{\mathrm{attr}}(\cdot)$ (no external LLM calls). +\ET probabilities are gated by $\pi_{\ET}$. +Optionally, we apply light post-processing for \ET to remove tiny isolated components below a small voxel threshold, mitigating HD95 sensitivity. + +\begin{algorithm}[t] +\caption{GliomaSAM3D-MoE inference (fully automatic)} +\label{alg:infer} +\begin{algorithmic}[1] +\REQUIRE Volume $\mathbf{X}\in\R^{4\times H\times W\times D}$ +\STATE HFDI-3D: $\mathbf{X}^{+}\leftarrow \mathrm{HFDI}(\mathbf{X})$ +\STATE Slice encoding: $\mathbf{F}_t\leftarrow E_{\mathrm{img}}(\mathbf{x}^{+}_t)$ for $t=1..D$ +\STATE Inter-slice aggregation: $\widetilde{\mathbf{F}}_{1:D} \leftarrow T_{\mathrm{seq}}(\mathbf{F}_{1:D})$ +\STATE Concept prediction: $(\widehat{\mathbf{c}},\pi_{\ET}) \leftarrow h_{\mathrm{attr}}(\Pool(\widetilde{\mathbf{F}}_{1:D}))$ +\STATE Prompt embeddings: $\mathbf{p}\leftarrow E_{\mathrm{prm}}(\mathrm{Embed}(\widehat{\mathbf{c}}))$ +\STATE Direction-aware enhancement: MSDA-3D + FA + FCF + spectral modulation +\STATE Decode with sparse MoE: obtain logits $\{\mathbf{l}_{\WT},\mathbf{l}_{\TC},\mathbf{l}_{\ET}\}$ +\STATE Region probabilities: $\widehat{\mathbf{m}}_{\WT} \leftarrow \sigma(\mathbf{l}_{\WT})$, $\widehat{\mathbf{m}}_{\TC} \leftarrow \sigma(\mathbf{l}_{\TC})$ +\STATE \ET gating: $\widehat{\mathbf{m}}_{\ET} \leftarrow \sigma(\mathbf{l}_{\ET})\cdot \pi_{\ET}$ +\STATE (Optional) post-process \ET: remove tiny isolated components +\STATE \textbf{return} $\{\widehat{\mathbf{m}}_{\WT},\widehat{\mathbf{m}}_{\TC},\widehat{\mathbf{m}}_{\ET}\}$ 
+\end{algorithmic} +\end{algorithm} + + + +\section{Experiments}\label{sec:experiments} + +\subsection{Datasets} +We conduct experiments on the BraTS 2021 and BraTS 2023 adult glioma datasets~\cite{Baid2021BraTS,SynapseBraTS2023}. +Both releases provide co-registered, skull-stripped, and resampled mpMRI volumes, typically including T1, T1ce, T2, and FLAIR modalities, along with expert tumor annotations~\cite{Menze2015BraTS,Baid2021BraTS}. +Following the BraTS convention, we evaluate three derived regions: whole tumor (\WT), tumor core (\TC), and enhancing tumor (\ET)~\cite{Baid2021BraTS}. +In addition to in-domain evaluation (training and testing within the same BraTS edition), we consider \emph{cross-year generalization} (e.g., train on BraTS 2021 and evaluate on BraTS 2023) to quantify robustness to dataset shift. + +\subsection{Preprocessing and data sampling} +Although BraTS provides standardized preprocessing, we apply additional normalization and sampling steps for stable training: +(i) per-modality z-score normalization within the brain mask, +(ii) foreground-aware random cropping to extract 3D patches centered on tumor regions with a fixed probability, +and (iii) standard geometric augmentations (random flips, rotations) and intensity perturbations. +We will release exact hyperparameters (patch size, sampling ratios, and augmentation strengths) in the final version. + +\subsection{Evaluation metrics} +We report region-wise Dice similarity coefficient (Dice) and the 95th percentile Hausdorff distance (HD95) for \WT/\TC/\ET, consistent with BraTS evaluation practice~\cite{Baid2021BraTS}. +For \ET-absent volumes (i.e., empty \ET ground truth), we additionally report the false-positive \ET volume and the \ET-presence classification accuracy/AUROC of the proposed gate, since these directly reflect the intended behavior of existence-aware decoding. 
+ +\subsection{Implementation details} +All models are trained with the same data splits and preprocessing for fair comparison. +GliomaSAM3D-MoE uses a SAM-style 2D image encoder (initialized from SAM weights~\cite{Kirillov2023SAM}) applied slice-wise, followed by a 3D aggregation encoder and a task-structured sparse MoE decoder (Section~\ref{sec:method}). +We optimize the segmentation loss (Dice + cross-entropy) and the \ET-presence classification loss jointly; details of weighting and schedules will be included in the final version. +Unless otherwise stated, results are averaged over multiple runs (or folds) to reduce variance. + +\subsection{Compared methods} +We compare against representative volumetric CNN/Transformer baselines and SAM-inspired volumetric models: +\begin{itemize} + \item \textbf{3D U-Net}~\cite{Cicek2016} and \textbf{V-Net}~\cite{Milletari2016VNet} as classical volumetric encoder--decoders. + \item \textbf{nnU-Net}~\cite{Isensee2021nnUNet} as a strong self-configuring medical segmentation baseline. + \item \textbf{TransBTS}~\cite{Wang2021TransBTS}, \textbf{UNETR}~\cite{Hatamizadeh2022UNETR}, and \textbf{Swin UNETR}~\cite{Hatamizadeh2022SwinUNETR} as representative transformer-based volumetric models. + \item \textbf{MedNeXt}~\cite{Roy2023MedNeXt} as a modern ConvNeXt-style volumetric baseline. + \item \textbf{SAM3D}~\cite{Bui2023SAM3D} and \textbf{SAM-Med3D}~\cite{Wang2023SAMMed3D} as promptable volumetric SAM adaptations. For a \emph{fully automatic} setting, prompts are generated by a lightweight coarse segmentation network trained on the same data (details in the final version). +\end{itemize} + +\subsection{Main quantitative results} +Tables~\ref{tab:brats21} and~\ref{tab:brats23} report the main comparisons on BraTS 2021 and BraTS 2023, respectively. +(Placeholders are included here and should be filled once experiments are completed.) + +\begin{table}[t] +\centering +\caption{Quantitative comparison on \textbf{BraTS 2021}. 
Report Dice (\%) $\uparrow$ and HD95 (mm) $\downarrow$ for \WT/\TC/\ET.} +\label{tab:brats21} +\resizebox{\linewidth}{!}{ +\begin{tabular}{lcccccc} +\toprule +Method & \WT Dice $\uparrow$ & \TC Dice $\uparrow$ & \ET Dice $\uparrow$ & \WT HD95 $\downarrow$ & \TC HD95 $\downarrow$ & \ET HD95 $\downarrow$ \\ +\midrule +3D U-Net~\cite{Cicek2016} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +V-Net~\cite{Milletari2016VNet} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +nnU-Net~\cite{Isensee2021nnUNet} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +TransBTS~\cite{Wang2021TransBTS} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +UNETR~\cite{Hatamizadeh2022UNETR} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +Swin UNETR~\cite{Hatamizadeh2022SwinUNETR} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +MedNeXt~\cite{Roy2023MedNeXt} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +SAM3D~\cite{Bui2023SAM3D} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +SAM-Med3D~\cite{Wang2023SAMMed3D} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +\midrule +\textbf{GliomaSAM3D-MoE (ours)} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} \\ +\bottomrule +\end{tabular}} +\end{table} + +\begin{table}[t] +\centering +\caption{Quantitative comparison on \textbf{BraTS 2023}. 
Report Dice (\%) $\uparrow$ and HD95 (mm) $\downarrow$ for \WT/\TC/\ET.} +\label{tab:brats23} +\resizebox{\linewidth}{!}{ +\begin{tabular}{lcccccc} +\toprule +Method & \WT Dice $\uparrow$ & \TC Dice $\uparrow$ & \ET Dice $\uparrow$ & \WT HD95 $\downarrow$ & \TC HD95 $\downarrow$ & \ET HD95 $\downarrow$ \\ +\midrule +3D U-Net~\cite{Cicek2016} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +V-Net~\cite{Milletari2016VNet} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +nnU-Net~\cite{Isensee2021nnUNet} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +TransBTS~\cite{Wang2021TransBTS} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +UNETR~\cite{Hatamizadeh2022UNETR} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +Swin UNETR~\cite{Hatamizadeh2022SwinUNETR} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +MedNeXt~\cite{Roy2023MedNeXt} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +SAM3D~\cite{Bui2023SAM3D} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +SAM-Med3D~\cite{Wang2023SAMMed3D} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +\midrule +\textbf{GliomaSAM3D-MoE (ours)} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} \\ +\bottomrule +\end{tabular}} +\end{table} + +\subsection{Cross-year generalization} +To explicitly measure robustness to dataset shift, we evaluate cross-year transfer without re-training. +Table~\ref{tab:crossyear} summarizes the cross-year performance when training on one BraTS edition and evaluating on the other. 
+ +\begin{table}[t] +\centering +\caption{Cross-year generalization between BraTS 2021 and BraTS 2023. ``Mean'' denotes the average over \WT/\TC/\ET.} +\label{tab:crossyear} +\resizebox{\linewidth}{!}{ +\begin{tabular}{lcccc} +\toprule +Train $\rightarrow$ Test & Method & Mean Dice $\uparrow$ & Mean HD95 $\downarrow$ & \ET FP Vol. $\downarrow$ \\ +\midrule +\multirow{2}{*}{2021 $\rightarrow$ 2023} +& nnU-Net~\cite{Isensee2021nnUNet} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +& \textbf{GliomaSAM3D-MoE (ours)} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} \\ +\midrule +\multirow{2}{*}{2023 $\rightarrow$ 2021} +& nnU-Net~\cite{Isensee2021nnUNet} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +& \textbf{GliomaSAM3D-MoE (ours)} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} \\ +\bottomrule +\end{tabular}} +\end{table} + +\subsection{Ablation studies} +We perform ablations to isolate the impact of each proposed component: (i) concept prompting, (ii) \ET-presence gating, (iii) direction-aware dual-domain enhancement, and (iv) task-structured sparse MoE decoding. +Table~\ref{tab:ablation} provides a template for reporting these results. + +\begin{table}[t] +\centering +\caption{Ablation study on a validation split (e.g., BraTS 2021). ``Mean'' denotes the average over \WT/\TC/\ET.} +\label{tab:ablation} +\resizebox{\linewidth}{!}{ +\begin{tabular}{lcccc} +\toprule +Variant & Mean Dice $\uparrow$ & Mean HD95 $\downarrow$ & \ET Dice $\uparrow$ & \ET FP Vol. 
$\downarrow$ \\ +\midrule +w/o concept tokens & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +w/o \ET gate & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +w/o dual-domain enhancement & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +w/o MoE (single decoder) & \textit{TBD} & \textit{TBD} & \textit{TBD} & \textit{TBD} \\ +\midrule +\textbf{Full model (ours)} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} & \textbf{TBD} \\ +\bottomrule +\end{tabular}} +\end{table} + +\subsection{Visualization and qualitative analysis}\label{sec:vis} +In addition to quantitative metrics, we include qualitative comparisons to highlight boundary quality, \ET false-positive suppression, and expert specialization behavior. +The following subsections describe the planned visualizations; figures should be inserted once generated. + +\subsubsection{Qualitative comparison on representative cases} +We will visualize representative axial/coronal/sagittal slices with overlays of predicted \WT/\TC/\ET masks for competing methods. +A typical figure includes (i) the four modalities (T1, T1ce, T2, FLAIR), (ii) ground truth, and (iii) predictions from each baseline and our method. + +\begin{figure}[t] +\centering +\fbox{\parbox[c][0.22\textheight][c]{0.95\linewidth}{\centering \small Placeholder: qualitative comparison figure.}} +\caption{Qualitative comparison on representative BraTS cases. Each row corresponds to one subject; columns show modalities, ground truth, and predictions from baselines and GliomaSAM3D-MoE.} +\label{fig:qualitative} +\end{figure} + +\subsubsection{\ET-absent case study and false-positive analysis} +To directly evaluate existence-aware decoding, we will curate a subset of \ET-absent volumes and visualize: +(i) predicted \ET masks before/after applying the \ET gate, (ii) the predicted \ET-presence probability $\pi_{\ET}$, and (iii) the resulting reduction in false-positive \ET regions. 
+ +\begin{figure}[t] +\centering +\fbox{\parbox[c][0.18\textheight][c]{0.95\linewidth}{\centering \small Placeholder: \ET-absent gating case study.}} +\caption{Case study on \ET-absent volumes. The proposed \ET gate suppresses spurious \ET predictions while preserving \WT/\TC.} +\label{fig:et_gate} +\end{figure} + +\subsubsection{Boundary error maps and surface-distance visualization} +We will visualize boundary errors using signed distance transforms between prediction and ground truth, highlighting where improvements in HD95 arise. +In addition, 3D surface renderings can be used to show topological artifacts and boundary smoothness. + +\begin{figure}[t] +\centering +\fbox{\parbox[c][0.18\textheight][c]{0.95\linewidth}{\centering \small Placeholder: boundary error maps / surface-distance visualization.}} +\caption{Boundary error visualization via distance-transform maps. Warmer colors indicate larger surface discrepancies.} +\label{fig:boundary} +\end{figure} + +\subsubsection{Expert routing and concept token interpretability} +To interpret the MoE behavior, we will visualize the routing weights over experts for each region (\WT/\TC/\ET) and correlate routing patterns with tumor morphology (e.g., size/fragmentation). +For concept tokens, we will plot the predicted discrete concept indices and analyze their association with observable properties (e.g., \ET presence, boundary complexity). + +\begin{figure}[t] +\centering +\fbox{\parbox[c][0.18\textheight][c]{0.95\linewidth}{\centering \small Placeholder: MoE routing and concept token visualization.}} +\caption{Visualization of MoE routing. We show expert assignment histograms per region and per case, illustrating specialization patterns.} +\label{fig:moe} +\end{figure} + +\subsubsection{Frequency-domain analysis} +To motivate dual-domain enhancement, we will visualize amplitude spectra of input modalities and the effect of spectral modulation. 
+Additionally, we will include qualitative examples under synthetic intensity/style perturbations (e.g., amplitude mixing~\cite{Xu2023AmpMix}) to illustrate robustness. + +\begin{figure}[t] +\centering +\fbox{\parbox[c][0.18\textheight][c]{0.95\linewidth}{\centering \small Placeholder: frequency-domain analysis visualization.}} +\caption{Frequency-domain visualization. We illustrate amplitude spectra and the effect of spectral modulation/augmentation on segmentation robustness.} +\label{fig:freq} +\end{figure} + +\section{Conclusion} +We introduced GliomaSAM3D-MoE, a SAM-style fully automatic 3D glioma segmentation framework with concept prompting, \ET-aware existence gating, direction-aware dual-domain enhancement, and a task-structured sparse MoE decoder. +The final version will include complete quantitative results and visual analyses on BraTS 2021 and BraTS 2023. + + + +\begin{thebibliography}{99} + +\bibitem{Menze2015BraTS} +B.~H. Menze, A.~Jakab, S.~Bauer, J.~Kalpathy-Cramer, K.~Farahani, J.~Kirby, and et~al. +\newblock The multimodal brain tumor image segmentation benchmark ({BRATS}). +\newblock \emph{IEEE Transactions on Medical Imaging}, 34(10):1993--2024, 2015. + +\bibitem{Baid2021BraTS} +U.~Baid et~al. +\newblock The {RSNA}-{ASNR}-{MICCAI} {BraTS} 2021 benchmark on brain tumor segmentation and radiogenomic classification. +\newblock \emph{arXiv preprint arXiv:2107.02314}, 2021. + +\bibitem{SynapseBraTS2023} +BraTS 2023 Challenge (Synapse). +\newblock \url{https://www.synapse.org/brats2023}. Accessed: 2026-01-25. + +\bibitem{Kirillov2023SAM} +A.~Kirillov et~al. +\newblock Segment anything. +\newblock In \emph{Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, 2023. + +\bibitem{Ma2024MedSAM} +J.~Ma, Y.~He, F.~Li, L.~Han, C.~You, and B.~Wang. +\newblock Segment anything in medical images. +\newblock \emph{Nature Communications}, 15:654, 2024. + +\bibitem{Bui2023SAM3D} +N.-T. Bui, D.-H. Hoang, M.-T. 
Tran, G.~Doretto, D.~Adjeroh, B.~Patel, A.~Choudhary, and N.~Le. +\newblock {SAM3D}: Segment anything model in volumetric medical images. +\newblock \emph{arXiv preprint arXiv:2309.03493}, 2023. + +\bibitem{Wang2023SAMMed3D} +H.~Wang et~al. +\newblock {SAM-Med3D}: Towards general-purpose segmentation models for volumetric medical images. +\newblock \emph{arXiv preprint arXiv:2310.15161}, 2023. + +\bibitem{Cicek2016} +{\"O}.~{\c{C}}i{\c{c}}ek, A.~Abdulkadir, S.~S. Lienkamp, T.~Brox, and O.~Ronneberger. +\newblock {3D U-Net}: Learning dense volumetric segmentation from sparse annotation. +\newblock In \emph{MICCAI}, 2016. + +\bibitem{Milletari2016VNet} +F.~Milletari, N.~Navab, and S.-A. Ahmadi. +\newblock {V-Net}: Fully convolutional neural networks for volumetric medical image segmentation. +\newblock In \emph{Proceedings of 3DV}, 2016. + +\bibitem{Kamnitsas2017DeepMedic} +K.~Kamnitsas et~al. +\newblock Efficient multi-scale 3D {CNN} with fully connected {CRF} for accurate brain lesion segmentation. +\newblock \emph{Medical Image Analysis}, 36:61--78, 2017. + +\bibitem{Isensee2021nnUNet} +F.~Isensee, P.~F. Jaeger, S.~A.~A. Kohl, J.~Petersen, and K.~H. Maier-Hein. +\newblock nn{U}-{N}et: A self-configuring method for deep learning-based biomedical image segmentation. +\newblock \emph{Nature Methods}, 18:203--211, 2021. + +\bibitem{Wang2021TransBTS} +W.~Wang, C.~Chen, M.~Ding, J.~Li, H.~Yu, and S.~Zha. +\newblock {TransBTS}: Multimodal brain tumor segmentation using transformer. +\newblock In \emph{MICCAI}, 2021. + +\bibitem{Hatamizadeh2022UNETR} +A.~Hatamizadeh et~al. +\newblock {UNETR}: Transformers for 3D medical image segmentation. +\newblock In \emph{WACV}, 2022. + +\bibitem{Hatamizadeh2022SwinUNETR} +A.~Hatamizadeh, V.~Nath, Y.~Tang, D.~Yang, H.~R. Roth, and D.~Xu. +\newblock {Swin UNETR}: Swin transformers for semantic segmentation of brain tumors in {MRI} images. +\newblock \emph{arXiv preprint arXiv:2201.01266}, 2022. 
+ +\bibitem{Roy2023MedNeXt} +S.~Roy, G.~Koehler, C.~Ulrich, M.~Baumgartner, J.~Petersen, F.~Isensee, P.~F. Jaeger, and K.~Maier-Hein. +\newblock {MedNeXt}: Transformer-driven scaling of convnets for medical image segmentation. +\newblock \emph{arXiv preprint arXiv:2303.09975}, 2023. + +\bibitem{Kervadec2019BoundaryLoss} +H.~Kervadec et~al. +\newblock Boundary loss for highly unbalanced segmentation. +\newblock In \emph{Proceedings of MIDL (PMLR)}, 2019. + +\bibitem{Karimi2019HDLoss} +D.~Karimi and S.~E. Salcudean. +\newblock Reducing the Hausdorff distance in medical image segmentation with convolutional neural networks. +\newblock \emph{arXiv preprint arXiv:1904.10030}, 2019. + +\bibitem{Yang2020FDA} +Y.~Yang and S.~Soatto. +\newblock {FDA}: Fourier domain adaptation for semantic segmentation. +\newblock In \emph{CVPR}, 2020. + +\bibitem{Xu2023AmpMix} +Q.~Xu et~al. +\newblock Fourier-based augmentation with applications to domain generalization. +\newblock \emph{Pattern Recognition}, 139:109474, 2023. + +\bibitem{Shazeer2017MoE} +N.~Shazeer et~al. +\newblock Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. +\newblock \emph{arXiv preprint arXiv:1701.06538}, 2017. + +\bibitem{Fedus2022Switch} +W.~Fedus, B.~Zoph, and N.~Shazeer. +\newblock Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. +\newblock \emph{Journal of Machine Learning Research}, 23(120):1--39, 2022. + +\bibitem{Chen2023AdaMVMoE} +T.~Chen et~al. +\newblock {AdaMV-MoE}: Adaptive multi-task vision mixture-of-experts. +\newblock In \emph{ICCV}, 2023. 
+ +\end{thebibliography} + + +\end{document} + + diff --git a/source_code/gliomasam3_moe/README.md b/source_code/gliomasam3_moe/README.md new file mode 100644 index 0000000000000000000000000000000000000000..99179aeb8a6046beb8fd15fc48e922e63b607448 --- /dev/null +++ b/source_code/gliomasam3_moe/README.md @@ -0,0 +1,73 @@ +# GliomaSAM3-MoE (Minimal BraTS2023 3D Segmentation) + +This is a **minimal, fully runnable** BraTS2023 3D segmentation project with a complete +`GliomaSAM3_MoE` model, synthetic data support, training, inference, and tests. + +## Install + +```bash +pip install -r requirements.txt +``` + +## Run tests + +```bash +pytest -q +``` + +## Synthetic debug training (no real data required) + +```bash +python train.py --config configs/debug.yaml --synthetic true +``` + +## Real BraTS data layout (expected) + +Each case is a folder under `data.root_dir`, containing: + +``` +case_id/ + t1n.nii.gz + t1c.nii.gz + t2f.nii.gz + t2w.nii.gz + seg.nii.gz +``` + +Label values must be in `{0, 1, 2, 4}`. + +## SegMamba preprocessed data (npz) + +If you use the SegMamba preprocessing pipeline, place `*.npz` under: + +``` +./data/fullres/train +``` + +This project supports that format with `data.format: "segmamba_npz"` (already in configs). +It will read `*.npz` and cached `*.npy` / `*_seg.npy`, and automatically map label `3 -> 4`. 
+ +Recommended paths (aligned with SegMamba): +- checkpoints: `./logs/segmamba/model` +- predictions: `./prediction_results/segmamba` + +Example: +```bash +python train.py --config configs/train.yaml +python infer.py --config configs/train.yaml --input ./data/fullres/train --checkpoint ./logs/segmamba/model/ckpt_stepXXXX.pt --output ./prediction_results/segmamba +``` + +## Inference + +```bash +python infer.py --config configs/train.yaml --input /path/to/case_or_root --checkpoint /path/to/ckpt.pt --output ./outputs +``` + +Outputs: +- `*_regions_prob.nii.gz` : probability maps for [WT, TC, ET] +- `*_regions_bin.nii.gz` : thresholded binary maps +- `*_label.nii.gz` : final label map in `{0,1,2,4}` + +When `data.format: "segmamba_npz"`, `infer.py` also writes: +- `{case_id}.nii.gz` : 3-channel (TC/WT/ET) mask for SegMamba `5_compute_metrics.py` + diff --git a/source_code/gliomasam3_moe/configs/debug.yaml b/source_code/gliomasam3_moe/configs/debug.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e985cf0ec5c5dff0c6d3a61c0900da58830c0763 --- /dev/null +++ b/source_code/gliomasam3_moe/configs/debug.yaml @@ -0,0 +1,70 @@ +seed: 42 +device: "cuda" +amp: true +synthetic: true + +data: + format: "segmamba_npz" + root_dir: "./data/fullres/train" + modalities: ["t1n", "t1c", "t2f", "t2w"] + seg_name: "seg" + orientation: "RAS" + do_spacing: false + spacing: [1.0, 1.0, 1.0] + crop_size: [8, 64, 64] + num_samples: 1 + rand_scale_prob: 0.1 + rand_shift_prob: 0.1 + batch_size: 2 + synthetic_shape: [8, 64, 64] + synthetic_cases: 6 + +model: + patch_size: 16 + token_dim: 96 + depth: 2 + heads: 4 + mlp_ratio: 4.0 + slice_attn_k: 4 + slice_attn_random_dir: true + spectral_bins: 16 + spectral_q: 3 + msda_scales: [3, 5, 7] + moe_experts: 5 + moe_topk: 2 + decoder_hidden: 64 + prompt_mlp_hidden: 96 + use_sam3_backbone: false + sam3_ckpt_path: "/data/yty/sam3/sam3.pt" + sam3_freeze: true + sam3_in_chans: 7 + sam3_input_mean: [0.5, 0.5, 0.5] + sam3_input_std: [0.5, 
0.5, 0.5] + +loss: + dice_weight: 1.0 + bce_weight: 1.0 + et_focal_weight: 0.2 + focal_gamma: 2.0 + pres_weight: 0.1 + hier_weight: 0.1 + moe_weight: 0.01 + +train: + epochs: 1 + max_steps: 12 + lr: 0.0003 + weight_decay: 0.00001 + log_every: 2 + save_every: 6 + ckpt_dir: "./checkpoints" + fourier_mix_prob: 0.3 + num_workers: 0 + use_label_prompt: true + +infer: + roi_size: [8, 64, 64] + sw_batch_size: 1 + overlap: 0.5 + threshold: 0.5 + et_cc_min_size: 20 diff --git a/source_code/gliomasam3_moe/configs/train.yaml b/source_code/gliomasam3_moe/configs/train.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aad3bb8e069bca6a4f50b662598a12f28fc2d6f3 --- /dev/null +++ b/source_code/gliomasam3_moe/configs/train.yaml @@ -0,0 +1,76 @@ +seed: 42 +device: "cuda" +amp: true +synthetic: false + +data: + format: "segmamba_npz" + root_dir: "/data/yty/brats23_segmamba_processed" + modalities: ["t1n", "t1c", "t2f", "t2w"] + seg_name: "seg" + orientation: "RAS" + do_spacing: false + spacing: [1.0, 1.0, 1.0] + crop_size: [128, 128, 128] + num_samples: 1 + rand_scale_prob: 0.1 + rand_shift_prob: 0.1 + batch_size: 6 + synthetic_shape: [16, 128, 128] + synthetic_cases: 16 + train_rate: 0.7 + val_rate: 0.1 + test_rate: 0.2 + segmamba_unpack: true + +model: + patch_size: 14 + token_dim: 128 + depth: 3 + heads: 4 + mlp_ratio: 4.0 + slice_attn_k: 6 + slice_attn_random_dir: true + spectral_bins: 24 + spectral_q: 3 + msda_scales: [3, 5, 7] + moe_experts: 5 + moe_topk: 2 + decoder_hidden: 96 + prompt_mlp_hidden: 128 + use_sam3_backbone: true + sam3_ckpt_path: "/data/yty/sam3/sam3.pt" + sam3_freeze: true + sam3_in_chans: 7 + sam3_input_mean: [0.5, 0.5, 0.5] + sam3_input_std: [0.5, 0.5, 0.5] + +loss: + dice_weight: 1.0 + bce_weight: 1.0 + et_focal_weight: 0.5 + focal_gamma: 2.0 + pres_weight: 0.1 + hier_weight: 0.1 + moe_weight: 0.01 + +train: + epochs: 300 + max_steps: 10000 + lr: 0.0002 + weight_decay: 0.00001 + log_every: 20 + save_every: 200 + ckpt_dir: 
"./logs/segmamba/model" + fourier_mix_prob: 0.2 + num_workers: 4 + use_label_prompt: true + test_every_epochs: 5 + test_max_cases: 0 + +infer: + roi_size: [128, 128, 128] + sw_batch_size: 1 + overlap: 0.5 + threshold: 0.5 + et_cc_min_size: 50 diff --git a/source_code/gliomasam3_moe/eval_ablation.py b/source_code/gliomasam3_moe/eval_ablation.py new file mode 100644 index 0000000000000000000000000000000000000000..916dc41ffdaffac487672daecd93b7a611845aa7 --- /dev/null +++ b/source_code/gliomasam3_moe/eval_ablation.py @@ -0,0 +1,617 @@ +""" +Ablation Evaluation Script for GliomaSAM3-MoE + +Implements: +- Table 4: ET-absent subset evaluation +- Table 7: Boundary-band Dice (3-voxel band) + +Usage: + cd /root/githubs/gliomasam3_moe + PYTHONPATH=/root/githubs/sam3:$PYTHONPATH python eval_ablation.py \ + --config configs/train.yaml \ + --checkpoint logs/segmamba/model/ckpt_step3000.pt \ + --eval table7 # or table4, or both + +Author: GliomaSAM3-MoE Team +""" + +import argparse +import os +import sys +import json +from typing import Dict, List, Tuple, Optional +from collections import defaultdict + +import numpy as np +import torch +import yaml +from tqdm import tqdm + +# Add project paths +ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +SRC_DIR = os.path.join(ROOT_DIR, "src") +if SRC_DIR not in sys.path: + sys.path.insert(0, SRC_DIR) + +from scipy import ndimage as ndi +from scipy.ndimage import binary_dilation, binary_erosion +from sklearn.metrics import roc_auc_score, accuracy_score, roc_curve + +# ============================================================================ +# Configuration +# ============================================================================ +DEFAULT_CONFIG = { + "data_dir": "/data/yty/brats23_segmamba_processed", + "seed": 20251225, # Fixed seed as per spec + "train_rate": 0.7, + "val_rate": 0.1, + "test_rate": 0.2, + "threshold": 0.5, + "et_cc_min_size": 50, + "boundary_band_radius": 3, + "hd95_empty_value": 50.0, +} + + +# 
# ----------------------------------------------------------------------------
# Utility Functions
# ----------------------------------------------------------------------------
def load_yaml(path: str) -> Dict:
    """Load a YAML file and return it as a plain dict."""
    with open(path, "r") as f:
        return yaml.safe_load(f)


def split_npz_paths(data_dir: str, train_rate: float, val_rate: float,
                    test_rate: float, seed: int) -> Tuple[List[str], List[str], List[str]]:
    """Split the *.npz case paths under ``data_dir`` into train/val/test lists.

    Uses a local ``random.Random(seed)`` so the split is reproducible without
    mutating the global RNG state (the previous ``random.seed(seed)`` leaked
    into every later use of the ``random`` module; the shuffle sequence is
    identical, so existing splits are unchanged).

    Args:
        data_dir: Directory containing one ``.npz`` file per case.
        train_rate: Fraction of cases assigned to training.
        val_rate: Fraction of cases assigned to validation.
        test_rate: Remaining fraction (everything after train + val).
        seed: Shuffle seed.

    Returns:
        (train_paths, val_paths, test_paths) lists of paths.
    """
    import glob
    import random

    all_paths = sorted(glob.glob(os.path.join(data_dir, "*.npz")))
    random.Random(seed).shuffle(all_paths)

    n = len(all_paths)
    n_train = int(n * train_rate)
    n_val = int(n * val_rate)

    train_paths = all_paths[:n_train]
    val_paths = all_paths[n_train:n_train + n_val]
    test_paths = all_paths[n_train + n_val:]
    return train_paths, val_paths, test_paths


def load_case(npz_path: str) -> Dict:
    """Load a single case from npz/npy files.

    Prefers the unpacked sidecars (``<case>.npy`` / ``<case>_seg.npy``,
    memory-mapped); falls back to the arrays stored inside the ``.npz``.
    The archive is opened at most once even when both the image and the
    label need it (the original opened it twice).

    Returns:
        {"image": float32 [4, D, H, W],
         "label": int16 [D, H, W] or None,
         "case_id": basename without extension}
    """
    npy_path = npz_path[:-4] + ".npy"
    seg_path = npz_path[:-4] + "_seg.npy"
    npz_data = None  # opened lazily, shared by the image and label branches

    # --- image ---
    if os.path.isfile(npy_path):
        image = np.load(npy_path, mmap_mode="r")
    else:
        npz_data = np.load(npz_path)
        image = npz_data["data"]

    image = np.asarray(image, dtype=np.float32)
    if image.ndim == 5 and image.shape[0] == 1:
        image = image[0]
    # Channels-last volumes are moved to channels-first [4, D, H, W].
    if image.ndim == 4 and image.shape[0] != 4 and image.shape[-1] == 4:
        image = image.transpose(3, 0, 1, 2)

    # --- label ---
    if os.path.isfile(seg_path):
        label = np.load(seg_path, mmap_mode="r")
    else:
        if npz_data is None:
            npz_data = np.load(npz_path)
        label = npz_data["seg"] if "seg" in npz_data else None

    if label is not None:
        label = np.asarray(label, dtype=np.int16)
        if label.ndim == 4 and label.shape[0] == 1:
            label = label[0]
        # Map ET label 3 -> 4 if needed (BraTS convention).
        if label.max() == 3 and (label == 4).sum() == 0:
            label = label.copy()
            label[label == 3] = 4

    case_id = os.path.basename(npz_path)[:-4]
    return {"image": image, "label": label, "case_id": case_id}
def label_to_regions(label: np.ndarray) -> np.ndarray:
    """Convert a BraTS label map {0,1,2,4} to stacked [WT, TC, ET] masks.

    Args:
        label: Integer label volume [D, H, W].

    Returns:
        uint8 array [3, D, H, W]: WT = label>0, TC = label in {1,4}, ET = label==4.
    """
    label = np.asarray(label)
    wt = label > 0
    tc = (label == 1) | (label == 4)
    et = label == 4
    return np.stack([wt, tc, et], axis=0).astype(np.uint8)


def remove_small_components(mask: np.ndarray, min_size: int, connectivity: int = 3) -> np.ndarray:
    """Remove connected components smaller than ``min_size`` voxels.

    Args:
        mask: Binary mask [D, H, W].
        min_size: Minimum voxel count to keep.
        connectivity: 1 for 6-connectivity, 2 for 18, 3 for 26.

    Returns:
        uint8 mask with small components zeroed.
    """
    struct = ndi.generate_binary_structure(3, connectivity)
    mask_u8 = mask.astype(np.uint8)
    labeled, num = ndi.label(mask_u8, structure=struct)
    if num == 0:
        return mask_u8

    sizes = ndi.sum(mask_u8, labeled, index=np.arange(1, num + 1))
    # Vectorized keep: a single pass over the volume instead of one
    # ``labeled == i`` scan per component (the original was O(k * V)).
    keep_labels = np.flatnonzero(np.asarray(sizes) >= min_size) + 1
    return np.isin(labeled, keep_labels).astype(np.uint8)


def count_connected_components(mask: np.ndarray, connectivity: int = 3) -> int:
    """Count connected components of a binary mask (default 26-connectivity)."""
    struct = ndi.generate_binary_structure(3, connectivity)
    _, num = ndi.label(mask.astype(np.uint8), structure=struct)
    return num


# ============================================================================
# Model Inference
# ============================================================================
class ModelPredictor:
    """Wraps a GliomaSAM3-MoE checkpoint for single full-volume inference."""

    def __init__(self, config_path: str, checkpoint_path: str, device: str = "cuda"):
        """Build the model from ``config_path`` and load ``checkpoint_path``."""
        self.device = torch.device(device if torch.cuda.is_available() else "cpu")
        self.cfg = load_yaml(config_path)

        # Imported lazily so importing this script does not require the package.
        from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE

        self.model = GliomaSAM3_MoE(**self.cfg["model"]).to(self.device)
        ckpt = torch.load(checkpoint_path, map_location="cpu")
        # NOTE(review): freqs_cis buffers are filtered out before loading and
        # strict=False tolerates the missing keys — presumably the model
        # recreates them; confirm against the model definition.
        state_dict = {k: v for k, v in ckpt["model"].items() if "freqs_cis" not in k}
        self.model.load_state_dict(state_dict, strict=False)
        self.model.eval()
        print(f"Loaded checkpoint: {checkpoint_path}")

    def predict(self, image: np.ndarray) -> Dict:
        """Run inference and return predictions with aux outputs.

        Args:
            image: [4, D, H, W] float volume (whole volume, no sliding window).

        Returns:
            {
                "probs": np.ndarray [3, D, H, W] - raw probabilities before gating
                "probs_gated": np.ndarray [3, D, H, W] - probabilities after ET gating
                "pi_et": float - ET presence probability
                "regions_bin": np.ndarray [3, D, H, W] - gated + thresholded masks
            }
        """
        if image.ndim != 4:
            raise ValueError(f"Invalid image shape: {image.shape}")
        x = torch.from_numpy(image.copy()).float().unsqueeze(0).to(self.device)

        with torch.no_grad():
            logits, aux = self.model(x)
            probs = torch.sigmoid(logits)

        pi_et = aux["pi_et"]
        pi_et_value = float(pi_et.item())  # batch size is always 1 here

        probs_np = probs[0].cpu().numpy()

        # Gate the ET channel by the predicted ET presence probability.
        probs_gated = probs.clone()
        probs_gated[:, 2:3] = probs[:, 2:3] * pi_et.view(-1, 1, 1, 1, 1)
        probs_gated_np = probs_gated[0].cpu().numpy()

        threshold = self.cfg.get("infer", {}).get("threshold", 0.5)
        regions_bin = (probs_gated_np > threshold).astype(np.uint8)

        return {
            "probs": probs_np,
            "probs_gated": probs_gated_np,
            "pi_et": pi_et_value,
            "regions_bin": regions_bin,
        }


# ============================================================================
# Table 7: Boundary-band Dice
# ============================================================================
def compute_boundary_band(mask: np.ndarray, radius: int = 3) -> np.ndarray:
    """Compute a 3D boundary band via morphological dilation/erosion.

    Args:
        mask: Binary mask [D, H, W].
        radius: Dilation/erosion radius in voxels.

    Returns:
        uint8 mask of the band (dilated XOR eroded); zeros for an empty mask.
    """
    struct = ndi.generate_binary_structure(3, 3)  # 26-connectivity

    mask_bool = mask.astype(bool)
    if mask_bool.sum() == 0:
        return np.zeros_like(mask, dtype=np.uint8)

    dilated = binary_dilation(mask_bool, structure=struct, iterations=radius)
    eroded = binary_erosion(mask_bool, structure=struct, iterations=radius)
    return np.logical_xor(dilated, eroded).astype(np.uint8)


def compute_boundary_band_dice(pred: np.ndarray, gt: np.ndarray, radius: int = 3) -> float:
    """Compute the Dice score restricted to the GT boundary band.

    Args:
        pred: Binary prediction [D, H, W].
        gt: Binary ground truth [D, H, W].
        radius: Band radius in voxels.

    Returns:
        Dice on the band; 1.0 when both masks are empty, 0.0 when only the
        prediction is non-empty.
    """
    eps = 1e-7

    pred = pred.astype(bool)
    gt = gt.astype(bool)

    # Empty-GT conventions mirror standard BraTS scoring.
    if gt.sum() == 0 and pred.sum() == 0:
        return 1.0
    if gt.sum() == 0 and pred.sum() > 0:
        return 0.0

    band = compute_boundary_band(gt, radius=radius).astype(bool)
    pred_band = pred & band
    gt_band = gt & band

    intersection = (pred_band & gt_band).sum()
    return float(2 * intersection / (pred_band.sum() + gt_band.sum() + eps))


def eval_table7(predictor: ModelPredictor, val_paths: List[str],
                config: Dict, output_dir: str) -> Dict:
    """Evaluate Table 7: Boundary-band Dice (3-voxel band by default).

    Args:
        predictor: Loaded ModelPredictor.
        val_paths: .npz case paths to evaluate (cases without labels are skipped).
        config: Uses ``boundary_band_radius`` and ``et_cc_min_size``.
        output_dir: Directory for the JSON results file.

    Returns:
        Per-region mean/std stats plus overall mean (also saved to
        ``table7_boundary_dice.json``).
    """
    print("\n" + "=" * 60)
    print("Table 7: Boundary-band Dice Evaluation")
    print("=" * 60)

    radius = config.get("boundary_band_radius", 3)
    min_size = config.get("et_cc_min_size", 50)

    results = {
        "WT": [], "TC": [], "ET": [],
        "config": {"radius": radius, "min_size": min_size}
    }

    for npz_path in tqdm(val_paths, desc="Evaluating"):
        case = load_case(npz_path)
        if case["label"] is None:
            continue

        pred_out = predictor.predict(case["image"])
        pred_regions = pred_out["regions_bin"].copy()
        # Post-process ET: remove small components before scoring.
        pred_regions[2] = remove_small_components(pred_regions[2], min_size, connectivity=3)

        gt_regions = label_to_regions(case["label"])

        for i, region in enumerate(["WT", "TC", "ET"]):
            dice = compute_boundary_band_dice(pred_regions[i], gt_regions[i], radius=radius)
            results[region].append(dice)

    stats = {}
    for region in ["WT", "TC", "ET"]:
        scores = results[region]
        stats[region] = {
            "mean": float(np.mean(scores)),
            "std": float(np.std(scores)),
            "n": len(scores),
        }

    stats["Mean"] = {
        "mean": float(np.mean([stats[r]["mean"] for r in ["WT", "TC", "ET"]])),
    }

    print(f"\nBoundary-band Dice (radius={radius} voxels):")
    print("-" * 40)
    print(f"{'Region':<10} {'Mean':>10} {'Std':>10} {'N':>8}")
    print("-" * 40)
    for region in ["WT", "TC", "ET"]:
        s = stats[region]
        print(f"{region:<10} {s['mean']:>10.4f} {s['std']:>10.4f} {s['n']:>8}")
    print("-" * 40)
    print(f"{'Mean':<10} {stats['Mean']['mean']:>10.4f}")

    output_path = os.path.join(output_dir, "table7_boundary_dice.json")
    with open(output_path, "w") as f:
        json.dump({"stats": stats, "config": results["config"]}, f, indent=2)
    print(f"\nResults saved to: {output_path}")

    return stats
============================================================================ +# Table 4: ET-absent Subset Evaluation +# ============================================================================ +def eval_table4(predictor: ModelPredictor, val_paths: List[str], + config: Dict, output_dir: str) -> Dict: + """Evaluate Table 4: ET-absent subset evaluation. + + Metrics: + - ET-absent subset (n cases where GT ET voxels = 0): + - FP volume (mm³) + - FP components (count) + - Full validation set: + - ET presence classification: AUROC, Acc, Sens, Spec + """ + print("\n" + "=" * 60) + print("Table 4: ET-absent Subset Evaluation") + print("=" * 60) + + min_size = config.get("et_cc_min_size", 50) + threshold = config.get("threshold", 0.5) + + # Collect results + et_absent_results = [] # For ET-absent subset + classification_results = [] # For full validation set + + for npz_path in tqdm(val_paths, desc="Evaluating"): + case = load_case(npz_path) + if case["label"] is None: + continue + + # Get predictions + pred_out = predictor.predict(case["image"]) + + # Get GT regions + gt_regions = label_to_regions(case["label"]) + gt_et = gt_regions[2] + gt_et_voxels = int(gt_et.sum()) + + # Classification labels: y=1 if ET present, 0 otherwise + y_true = 1 if gt_et_voxels > 0 else 0 + + # Score for classification: pi_et + s_score = pred_out["pi_et"] + + # Binary ET prediction (after gating + threshold + post-process) + pred_et_prob = pred_out["probs_gated"][2] + pred_et_bin = (pred_et_prob > threshold).astype(np.uint8) + pred_et_bin = remove_small_components(pred_et_bin, min_size, connectivity=3) + + # Store classification data + classification_results.append({ + "case_id": case["case_id"], + "y_true": y_true, + "s_score": s_score, + "y_pred": 1 if s_score > 0.5 else 0, + }) + + # For ET-absent cases, compute FP metrics + if gt_et_voxels == 0: + fp_voxels = int(pred_et_bin.sum()) + fp_components = count_connected_components(pred_et_bin, connectivity=3) + + et_absent_results.append({ + 
"case_id": case["case_id"], + "fp_volume_mm3": fp_voxels, # spacing=1mm + "fp_components": fp_components, + }) + + # ------------------------- + # ET-absent subset metrics + # ------------------------- + n_et_absent = len(et_absent_results) + if n_et_absent > 0: + fp_volumes = [r["fp_volume_mm3"] for r in et_absent_results] + fp_components = [r["fp_components"] for r in et_absent_results] + + et_absent_stats = { + "n": n_et_absent, + "fp_volume_mm3": { + "mean": float(np.mean(fp_volumes)), + "std": float(np.std(fp_volumes)), + "min": float(np.min(fp_volumes)), + "max": float(np.max(fp_volumes)), + }, + "fp_components": { + "mean": float(np.mean(fp_components)), + "std": float(np.std(fp_components)), + "min": int(np.min(fp_components)), + "max": int(np.max(fp_components)), + }, + } + else: + et_absent_stats = {"n": 0, "fp_volume_mm3": None, "fp_components": None} + + # ------------------------- + # Classification metrics (full validation set) + # ------------------------- + y_true = np.array([r["y_true"] for r in classification_results]) + s_score = np.array([r["s_score"] for r in classification_results]) + + # AUROC and optimal threshold using Youden's J statistic + if len(np.unique(y_true)) > 1: + auroc = roc_auc_score(y_true, s_score) + + # Find optimal threshold using Youden's J = Sens + Spec - 1 + fpr, tpr, thresholds = roc_curve(y_true, s_score) + j_scores = tpr - fpr # Youden's J statistic + best_idx = np.argmax(j_scores) + optimal_threshold = thresholds[best_idx] + + # Use optimal threshold for predictions + y_pred_optimal = (s_score >= optimal_threshold).astype(int) + else: + auroc = float("nan") + optimal_threshold = 0.5 + y_pred_optimal = (s_score >= 0.5).astype(int) + + # Compute metrics at optimal threshold + tp = int(((y_true == 1) & (y_pred_optimal == 1)).sum()) + tn = int(((y_true == 0) & (y_pred_optimal == 0)).sum()) + fp = int(((y_true == 0) & (y_pred_optimal == 1)).sum()) + fn = int(((y_true == 1) & (y_pred_optimal == 0)).sum()) + + acc_optimal = 
(tp + tn) / len(y_true) if len(y_true) > 0 else float("nan") + sens_optimal = tp / (tp + fn) if (tp + fn) > 0 else float("nan") + spec_optimal = tn / (tn + fp) if (tn + fp) > 0 else float("nan") + + classification_stats = { + "n": len(classification_results), + "n_et_present": int(y_true.sum()), + "n_et_absent": int((1 - y_true).sum()), + "auroc": float(auroc), + "optimal_threshold": float(optimal_threshold), + "accuracy_optimal": float(acc_optimal), + "sensitivity_optimal": float(sens_optimal), + "specificity_optimal": float(spec_optimal), + } + + # Print results + print(f"\nET Presence Classification (n={classification_stats['n']}):") + print("-" * 50) + print(f"ET-present: {classification_stats['n_et_present']}, " + f"ET-absent: {classification_stats['n_et_absent']}") + print(f"AUROC: {classification_stats['auroc']:.4f}") + print(f"Optimal Threshold: {classification_stats['optimal_threshold']:.4f}") + print(f"Accuracy: {classification_stats['accuracy_optimal']:.4f}") + print(f"Sensitivity: {classification_stats['sensitivity_optimal']:.4f}") + print(f"Specificity: {classification_stats['specificity_optimal']:.4f}") + + # Save results + results = { + "et_absent_subset": et_absent_stats, + "et_absent_cases": et_absent_results, + "classification": classification_stats, + "config": {"min_size": min_size, "threshold": threshold}, + } + + output_path = os.path.join(output_dir, "table4_et_absent.json") + with open(output_path, "w") as f: + json.dump(results, f, indent=2) + print(f"\nResults saved to: {output_path}") + + return results + + +# ============================================================================ +# Main +# ============================================================================ +def main(): + parser = argparse.ArgumentParser(description="Ablation Evaluation for GliomaSAM3-MoE") + parser.add_argument("--config", type=str, default="configs/train.yaml", + help="Model config path") + parser.add_argument("--checkpoint", type=str, required=True, + 
help="Model checkpoint path") + parser.add_argument("--eval", type=str, default="both", + choices=["table4", "table7", "both"], + help="Which evaluation to run") + parser.add_argument("--seed", type=int, default=20251225, + help="Random seed for data split") + parser.add_argument("--output_dir", type=str, default="./eval_results", + help="Output directory for results") + parser.add_argument("--device", type=str, default="cuda", + help="Device to use") + parser.add_argument("--data_dir", type=str, default=None, + help="Override data directory") + parser.add_argument("--use_all", action="store_true", + help="Use all data instead of validation split only") + args = parser.parse_args() + + # Setup + os.makedirs(args.output_dir, exist_ok=True) + + # Load model config + model_cfg = load_yaml(args.config) + + # Setup evaluation config + config = DEFAULT_CONFIG.copy() + config["seed"] = args.seed + if args.data_dir: + config["data_dir"] = args.data_dir + else: + config["data_dir"] = model_cfg.get("data", {}).get("root_dir", config["data_dir"]) + + print("=" * 60) + print("GliomaSAM3-MoE Ablation Evaluation") + print("=" * 60) + print(f"Config: {args.config}") + print(f"Checkpoint: {args.checkpoint}") + print(f"Data dir: {config['data_dir']}") + print(f"Seed: {config['seed']}") + print(f"Evaluation: {args.eval}") + print(f"Use all data: {args.use_all}") + + # Get data paths + import glob + all_paths = sorted(glob.glob(os.path.join(config["data_dir"], "*.npz"))) + + if args.use_all: + # Use all data for evaluation + val_paths = all_paths + print(f"\nUsing all data: {len(val_paths)} cases") + else: + # Split data + print("\nSplitting data...") + train_paths, val_paths, test_paths = split_npz_paths( + config["data_dir"], + train_rate=config["train_rate"], + val_rate=config["val_rate"], + test_rate=config["test_rate"], + seed=config["seed"], + ) + print(f"Train: {len(train_paths)}, Val: {len(val_paths)}, Test: {len(test_paths)}") + + # Initialize predictor + print("\nLoading 
model...") + predictor = ModelPredictor(args.config, args.checkpoint, args.device) + + # Run evaluations + results = {} + + if args.eval in ["table7", "both"]: + results["table7"] = eval_table7(predictor, val_paths, config, args.output_dir) + + if args.eval in ["table4", "both"]: + results["table4"] = eval_table4(predictor, val_paths, config, args.output_dir) + + print("\n" + "=" * 60) + print("Evaluation Complete!") + print("=" * 60) + + return results + + +if __name__ == "__main__": + main() diff --git a/source_code/gliomasam3_moe/eval_train_subset.py b/source_code/gliomasam3_moe/eval_train_subset.py new file mode 100644 index 0000000000000000000000000000000000000000..0b68b523e8f4c0e5fb251750fd2978f9b8760410 --- /dev/null +++ b/source_code/gliomasam3_moe/eval_train_subset.py @@ -0,0 +1,110 @@ +import argparse +import glob +import os +import re +import sys +from typing import List, Sequence, Tuple + +import torch +from torch.utils.data import DataLoader + +sys.path.append(os.path.join(os.path.dirname(__file__), "src")) + +from gliomasam3_moe.data.brats_dataset import SegMambaNPZDataset, split_npz_paths +from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE +from train import evaluate_test, load_config + + +def _find_latest_ckpt(ckpt_dir: str) -> str: + pattern = os.path.join(ckpt_dir, "ckpt_step*.pt") + matches = [] + for path in glob.glob(pattern): + m = re.search(r"ckpt_step(\d+)\.pt$", path) + if m: + matches.append((int(m.group(1)), path)) + if not matches: + raise FileNotFoundError(f"No checkpoints found under {ckpt_dir}.") + matches.sort(key=lambda x: x[0]) + return matches[-1][1] + + +def _select_train_subset( + data_dir: str, + train_rate: float, + val_rate: float, + test_rate: float, + seed: int, +) -> Tuple[Sequence[str], int, int]: + train_paths, _, test_paths = split_npz_paths( + data_dir, train_rate=train_rate, val_rate=val_rate, test_rate=test_rate, seed=seed + ) + test_n = len(test_paths) + if test_n == 0: + raise ValueError("Test split 
size is 0; cannot match train subset size.") + subset_n = min(len(train_paths), test_n) + rng = torch.Generator().manual_seed(seed) + perm = torch.randperm(len(train_paths), generator=rng).tolist() + subset_paths = [train_paths[i] for i in perm[:subset_n]] + return subset_paths, test_n, len(train_paths) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--config", type=str, default="configs/train.yaml") + parser.add_argument("--checkpoint", type=str, default=None, help="Path to ckpt_step*.pt (default: latest in ckpt_dir).") + parser.add_argument("--max_cases", type=int, default=0, help="Optional cap on subset size.") + args = parser.parse_args() + + cfg = load_config(args.config) + data_dir = cfg.data.root_dir + if not os.path.isdir(data_dir): + raise FileNotFoundError(f"data.root_dir does not exist: {data_dir}") + + if getattr(cfg.data, "format", "nifti") != "segmamba_npz": + raise ValueError("Only segmamba_npz format is supported for this evaluation script.") + + ckpt_path = args.checkpoint or _find_latest_ckpt(cfg.train.ckpt_dir) + if not os.path.isfile(ckpt_path): + raise FileNotFoundError(f"Checkpoint not found: {ckpt_path}") + + subset_paths, test_n, train_n = _select_train_subset( + data_dir, + train_rate=getattr(cfg.data, "train_rate", 0.7), + val_rate=getattr(cfg.data, "val_rate", 0.1), + test_rate=getattr(cfg.data, "test_rate", 0.2), + seed=cfg.seed, + ) + if args.max_cases and args.max_cases > 0: + subset_paths = subset_paths[: min(len(subset_paths), args.max_cases)] + + device = torch.device(cfg.device if torch.cuda.is_available() else "cpu") + model = GliomaSAM3_MoE(**cfg.model.__dict__).to(device) + ckpt = torch.load(ckpt_path, map_location=device) + model.load_state_dict(ckpt["model"], strict=True) + + ensure_npy = bool(getattr(cfg.data, "segmamba_unpack", True)) + dataset = SegMambaNPZDataset( + data_dir=data_dir, + npz_paths=subset_paths, + test=False, + ensure_npy=ensure_npy, + map_et_to_4=True, + ) + loader = DataLoader( 
+ dataset, + batch_size=1, + shuffle=False, + num_workers=max(0, int(cfg.train.num_workers)), + ) + + metrics = evaluate_test(model, loader, cfg, device) + print(f"[TRAIN-SUBSET] ckpt={ckpt_path}") + print(f"[TRAIN-SUBSET] total_train={train_n} test_count={test_n} subset={len(subset_paths)}") + print( + f"[TRAIN-SUBSET] dice[WT,TC,ET]={metrics['dice']} " + f"hd95[WT,TC,ET]={metrics['hd95']}" + ) + + +if __name__ == "__main__": + main() diff --git a/source_code/gliomasam3_moe/infer.py b/source_code/gliomasam3_moe/infer.py new file mode 100644 index 0000000000000000000000000000000000000000..bcf4bb8acfa3cd8e302edcc48f4d18465b396177 --- /dev/null +++ b/source_code/gliomasam3_moe/infer.py @@ -0,0 +1,169 @@ +import argparse +import os +import sys +from types import SimpleNamespace +from typing import Any + +import yaml +import numpy as np +import torch +import nibabel as nib +from torch.utils.data import DataLoader + +# Avoid heavy MONAI import side effects. +os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1") +from monai.inferers import sliding_window_inference + +sys.path.append(os.path.join(os.path.dirname(__file__), "src")) + +from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE +from gliomasam3_moe.data.brats_dataset import BraTSDataset, SegMambaNPZDataset +from gliomasam3_moe.data.transforms_segmamba_like import get_infer_transforms +from gliomasam3_moe.utils.brats_regions import regions_to_label +from gliomasam3_moe.utils.postprocess import remove_small_components + + +def _to_namespace(obj: Any): + if isinstance(obj, dict): + return SimpleNamespace(**{k: _to_namespace(v) for k, v in obj.items()}) + return obj + + +def load_config(path: str) -> SimpleNamespace: + with open(path, "r") as f: + cfg = yaml.safe_load(f) + return _to_namespace(cfg) + + +def _get_affine(meta_dict): + if meta_dict is None: + return np.eye(4) + affine = meta_dict.get("affine", None) + if isinstance(affine, torch.Tensor): + affine = affine.detach().cpu().numpy() + if 
isinstance(affine, np.ndarray) and affine.ndim == 3: + affine = affine[0] + if affine is None: + affine = np.eye(4) + return affine + + +def save_nifti(path: str, arr: np.ndarray, affine: np.ndarray): + img = nib.Nifti1Image(arr, affine) + nib.save(img, path) + + +def save_segmamba_3c(path: str, arr_3c: np.ndarray, affine: np.ndarray | None = None): + """Save 3-channel mask for SegMamba metrics. + + Expected input: [3, D, H, W], saved as 4D NIfTI (D,H,W,3). + """ + if affine is None: + affine = np.eye(4) + if arr_3c.ndim != 4 or arr_3c.shape[0] != 3: + raise ValueError(f"expected (3,D,H,W), got {arr_3c.shape}") + arr = arr_3c.transpose(1, 2, 3, 0) # (D,H,W,3) + save_nifti(path, arr.astype(np.uint8), affine) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--config", type=str, default="configs/train.yaml") + parser.add_argument("--input", type=str, required=True, help="Case folder or root folder.") + parser.add_argument("--output", type=str, default="./prediction_results/segmamba") + parser.add_argument("--checkpoint", type=str, required=True) + args = parser.parse_args() + + cfg = load_config(args.config) + device = torch.device(cfg.device if torch.cuda.is_available() else "cpu") + + model = GliomaSAM3_MoE(**cfg.model.__dict__).to(device) + ckpt = torch.load(args.checkpoint, map_location="cpu") + model.load_state_dict(ckpt["model"], strict=True) + model.eval() + data_format = getattr(cfg.data, "format", "nifti") + + input_path = args.input + if data_format == "segmamba_npz": + if not os.path.isdir(input_path): + raise ValueError("Input must be a directory containing *.npz files.") + ensure_npy = bool(getattr(cfg.data, "segmamba_unpack", True)) + dataset = SegMambaNPZDataset( + data_dir=input_path, + test=True, + ensure_npy=ensure_npy, + map_et_to_4=True, + ) + loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0) + else: + if os.path.isdir(input_path): + has_modalities = any( + os.path.isfile(os.path.join(input_path, m 
+ ".nii.gz")) or os.path.isfile(os.path.join(input_path, m + ".nii")) + for m in cfg.data.modalities + ) + if has_modalities: + root_dir = os.path.dirname(input_path) + case_ids = [os.path.basename(input_path)] + else: + root_dir = input_path + case_ids = None + else: + raise ValueError("Input must be a directory.") + + image_keys = [f"image{i}" for i in range(len(cfg.data.modalities))] + transforms = get_infer_transforms(cfg, image_keys=image_keys) + dataset = BraTSDataset( + root_dir=root_dir, + modalities=cfg.data.modalities, + seg_name=cfg.data.seg_name, + transforms=transforms, + include_label=False, + case_ids=case_ids, + image_keys=image_keys, + ) + loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0) + + os.makedirs(args.output, exist_ok=True) + with torch.no_grad(): + for batch in loader: + image = batch["image"].to(device) + case_id = batch["case_id"][0] if isinstance(batch["case_id"], (list, tuple)) else batch["case_id"] + + # Sliding window for logits only (aux is computed from full pass). 
+ logits = sliding_window_inference( + inputs=image, + roi_size=tuple(cfg.infer.roi_size), + sw_batch_size=cfg.infer.sw_batch_size, + predictor=lambda x: model(x)[0], + overlap=cfg.infer.overlap, + ) + _, aux = model(image) + probs = torch.sigmoid(logits) + pi_et = aux["pi_et"].view(probs.shape[0], 1, 1, 1, 1) + probs[:, 2:3] = probs[:, 2:3] * pi_et + regions_bin = (probs > cfg.infer.threshold).float() + + # ET postprocess (remove small components) + et_pp = remove_small_components(regions_bin[:, 2], cfg.infer.et_cc_min_size) + regions_bin[:, 2] = et_pp + + label_map = regions_to_label(regions_bin) + + meta = batch.get("image_meta_dict", None) + affine = _get_affine(meta) + prob_np = probs[0].detach().cpu().numpy().transpose(1, 2, 3, 0) # (D,H,W,3) + bin_np = regions_bin[0].detach().cpu().numpy().transpose(1, 2, 3, 0) + lbl_np = label_map[0, 0].detach().cpu().numpy().astype(np.int16) + + save_nifti(os.path.join(args.output, f"{case_id}_regions_prob.nii.gz"), prob_np, affine) + save_nifti(os.path.join(args.output, f"{case_id}_regions_bin.nii.gz"), bin_np, affine) + save_nifti(os.path.join(args.output, f"{case_id}_label.nii.gz"), lbl_np, affine) + + if data_format == "segmamba_npz": + seg_path = os.path.join(args.output, f"{case_id}.nii.gz") + seg_arr = regions_bin[0].detach().cpu().numpy().astype(np.uint8) + save_segmamba_3c(seg_path, seg_arr, affine) + + +if __name__ == "__main__": + main() diff --git a/source_code/gliomasam3_moe/requirements.txt b/source_code/gliomasam3_moe/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..43aae62201254d08ad16fe9fa0517b81abcd14ee --- /dev/null +++ b/source_code/gliomasam3_moe/requirements.txt @@ -0,0 +1,8 @@ +torch +monai +numpy +nibabel +einops +pyyaml +tqdm +scipy diff --git a/source_code/gliomasam3_moe/src/gliomasam3_moe/__init__.py b/source_code/gliomasam3_moe/src/gliomasam3_moe/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..a9cc9a07d7d16afc30f6381aaaa750c2404ab961 --- /dev/null +++ b/source_code/gliomasam3_moe/src/gliomasam3_moe/__init__.py @@ -0,0 +1,4 @@ +from .models.gliomasam3_moe import GliomaSAM3_MoE +from .losses.brats_losses import LossComputer + +__all__ = ["GliomaSAM3_MoE", "LossComputer"] diff --git a/source_code/gliomasam3_moe/src/gliomasam3_moe/data/__init__.py b/source_code/gliomasam3_moe/src/gliomasam3_moe/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5b755ba7a535e5ac0d6a8e2ce7ffd38ee36d70ba --- /dev/null +++ b/source_code/gliomasam3_moe/src/gliomasam3_moe/data/__init__.py @@ -0,0 +1,18 @@ +from .brats_dataset import BraTSDataset, SyntheticBraTSDataset, SegMambaNPZDataset, split_npz_paths +from .transforms_segmamba_like import ( + get_train_transforms, + get_val_transforms, + get_infer_transforms, + get_synthetic_transforms, +) + +__all__ = [ + "BraTSDataset", + "SyntheticBraTSDataset", + "SegMambaNPZDataset", + "split_npz_paths", + "get_train_transforms", + "get_val_transforms", + "get_infer_transforms", + "get_synthetic_transforms", +] diff --git a/source_code/gliomasam3_moe/src/gliomasam3_moe/data/brats_dataset.py b/source_code/gliomasam3_moe/src/gliomasam3_moe/data/brats_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..08869f1dea15dcca1a02c52e84a82310453f9475 --- /dev/null +++ b/source_code/gliomasam3_moe/src/gliomasam3_moe/data/brats_dataset.py @@ -0,0 +1,262 @@ +import os +import glob +import pickle +import random +from typing import List, Optional, Sequence, Tuple + +import numpy as np +import torch +from torch.utils.data import Dataset + + +def _resolve_nii(case_dir: str, name: str) -> str: + for ext in [".nii.gz", ".nii"]: + path = os.path.join(case_dir, name + ext) + if os.path.isfile(path): + return path + raise FileNotFoundError(f"Missing NIfTI: {case_dir}/{name}.nii(.gz)") + + +class BraTSDataset(Dataset): + """BraTS dataset loader with 
MONAI-style dict outputs. + + Output sample (before transforms): + { + "image0": ".../t1n.nii.gz", + "image1": ".../t1c.nii.gz", + "image2": ".../t2f.nii.gz", + "image3": ".../t2w.nii.gz", + "label": ".../seg.nii.gz", + "case_id": "BraTS-GLI-XXXX" + } + """ + + def __init__( + self, + root_dir: str, + modalities: List[str], + seg_name: str = "seg", + transforms=None, + include_label: bool = True, + case_ids: Optional[List[str]] = None, + image_keys: Optional[List[str]] = None, + ): + self.root_dir = root_dir + self.modalities = modalities + self.seg_name = seg_name + self.transforms = transforms + self.include_label = include_label + self.case_ids = case_ids or sorted( + [d for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d))] + ) + self.image_keys = image_keys or [f"image{i}" for i in range(len(modalities))] + if len(self.image_keys) != len(self.modalities): + raise ValueError("image_keys length must match modalities length.") + + self.samples = [] + for case_id in self.case_ids: + case_dir = os.path.join(root_dir, case_id) + sample = {"case_id": case_id} + for key, mod in zip(self.image_keys, self.modalities): + sample[key] = _resolve_nii(case_dir, mod) + if include_label: + sample["label"] = _resolve_nii(case_dir, self.seg_name) + self.samples.append(sample) + + def __len__(self) -> int: + return len(self.samples) + + def __getitem__(self, idx: int): + sample = dict(self.samples[idx]) + if self.transforms is not None: + sample = self.transforms(sample) + return sample + + +class SyntheticBraTSDataset(Dataset): + """Synthetic dataset for sanity checks. 
+ + Output: + image: [4, D, H, W] float32 + label: [1, D, H, W] int64 in {0,1,2,4} + """ + + def __init__( + self, + num_cases: int, + shape: List[int], + transforms=None, + seed: int = 123, + et_prob: float = 0.6, + ): + self.num_cases = num_cases + self.shape = shape + self.transforms = transforms + self.rng = np.random.RandomState(seed) + self.et_prob = et_prob + + def __len__(self) -> int: + return self.num_cases + + def _sphere(self, center, radius, shape): + z, y, x = np.ogrid[:shape[0], :shape[1], :shape[2]] + dist = (z - center[0]) ** 2 + (y - center[1]) ** 2 + (x - center[2]) ** 2 + return dist <= radius ** 2 + + def _make_label(self) -> np.ndarray: + d, h, w = self.shape + label = np.zeros((d, h, w), dtype=np.int64) + center = np.array([self.rng.randint(d // 4, d * 3 // 4), + self.rng.randint(h // 4, h * 3 // 4), + self.rng.randint(w // 4, w * 3 // 4)]) + r_wt = self.rng.randint(max(2, min(self.shape) // 8), max(3, min(self.shape) // 4)) + r_tc = max(2, r_wt // 2) + r_et = max(1, r_tc // 2) + + wt = self._sphere(center, r_wt, self.shape) + tc = self._sphere(center, r_tc, self.shape) + label[wt] = 2 + label[tc] = 1 + if self.rng.rand() < self.et_prob: + et = self._sphere(center, r_et, self.shape) + label[et] = 4 + return label + + def __getitem__(self, idx: int): + d, h, w = self.shape + image = self.rng.randn(4, d, h, w).astype(np.float32) + label = self._make_label().astype(np.int64)[None] # [1,D,H,W] + sample = {"image": image, "label": label, "case_id": f"synthetic_{idx:04d}"} + if self.transforms is not None: + sample = self.transforms(sample) + return sample + + +def _maybe_unpack_npz(npz_path: str, unpack_seg: bool = True) -> None: + """Ensure .npy and _seg.npy exist for a given .npz.""" + npy_path = npz_path[:-3] + "npy" + seg_path = npz_path[:-4] + "_seg.npy" + if os.path.isfile(npy_path) and (not unpack_seg or os.path.isfile(seg_path)): + return + data = np.load(npz_path) + if not os.path.isfile(npy_path): + np.save(npy_path, data["data"]) + if 
unpack_seg and "seg" in data and not os.path.isfile(seg_path): + np.save(seg_path, data["seg"]) + + +def split_npz_paths(data_dir: str, train_rate: float = 0.7, val_rate: float = 0.1, test_rate: float = 0.2, seed: int = 42): + paths = sorted(glob.glob(os.path.join(data_dir, "*.npz"))) + random.seed(seed) + random.shuffle(paths) + train_n = int(len(paths) * train_rate) + val_n = int(len(paths) * val_rate) + test_n = int(len(paths) * test_rate) + train_paths = paths[:train_n] + val_paths = paths[train_n : train_n + val_n] + test_paths = paths[-test_n:] if test_n > 0 else [] + return train_paths, val_paths, test_paths + + +def split_npz_paths( + data_dir: str, train_rate: float = 0.7, val_rate: float = 0.1, test_rate: float = 0.2, seed: int = 42 +) -> Tuple[Sequence[str], Sequence[str], Sequence[str]]: + paths = sorted(glob.glob(os.path.join(data_dir, "*.npz"))) + random.seed(seed) + random.shuffle(paths) + train_n = int(len(paths) * train_rate) + val_n = int(len(paths) * val_rate) + test_n = int(len(paths) * test_rate) + train_paths = paths[:train_n] + val_paths = paths[train_n : train_n + val_n] + test_paths = paths[-test_n:] if test_n > 0 else [] + return train_paths, val_paths, test_paths + + +class SegMambaNPZDataset(Dataset): + """SegMamba preprocessed dataset loader. + + Each case is a .npz file with keys: data (image) and seg (label). + The loader prefers cached .npy and _seg.npy produced by unpacking. 
+ + Output: + image: float32 [4, D, H, W] + label: int64 [1, D, H, W] (values in {0,1,2,4}) + """ + + def __init__( + self, + data_dir: str, + npz_paths: Optional[Sequence[str]] = None, + test: bool = False, + ensure_npy: bool = True, + map_et_to_4: bool = True, + include_properties: bool = False, + transforms=None, + ): + self.data_dir = data_dir + self.paths = list(npz_paths) if npz_paths is not None else sorted(glob.glob(os.path.join(data_dir, "*.npz"))) + self.test = test + self.ensure_npy = ensure_npy + self.map_et_to_4 = map_et_to_4 + self.include_properties = include_properties + self.transforms = transforms + + def __len__(self) -> int: + return len(self.paths) + + def _load_properties(self, npz_path: str): + pkl_path = npz_path[:-4] + ".pkl" + if os.path.isfile(pkl_path): + try: + with open(pkl_path, "rb") as f: + return pickle.load(f) + except Exception: + return None + return None + + def __getitem__(self, idx: int): + npz_path = self.paths[idx] + if self.ensure_npy: + _maybe_unpack_npz(npz_path, unpack_seg=not self.test) + + npy_path = npz_path[:-3] + "npy" + seg_path = npz_path[:-4] + "_seg.npy" + if os.path.isfile(npy_path): + image = np.load(npy_path, mmap_mode="r") + else: + image = np.load(npz_path)["data"] + + label = None + if not self.test: + if os.path.isfile(seg_path): + label = np.load(seg_path, mmap_mode="r") + else: + label = np.load(npz_path)["seg"] + + image = np.asarray(image, dtype=np.float32).copy() + if image.ndim == 5 and image.shape[0] == 1: + image = image[0] + if image.ndim == 4 and image.shape[0] != 4 and image.shape[-1] == 4: + image = image.transpose(3, 0, 1, 2) + + props = self._load_properties(npz_path) if self.include_properties else None + case_id = ( + props.get("name") if isinstance(props, dict) and "name" in props else os.path.splitext(os.path.basename(npz_path))[0] + ) + + sample = {"image": torch.from_numpy(image).float(), "case_id": case_id} + if self.include_properties: + sample["properties"] = props if props is not 
None else {} + if label is not None: + label = np.asarray(label).copy() + if label.ndim == 3: + label = label[None] + if self.map_et_to_4 and label.max() == 3 and (label == 4).sum() == 0: + label = label.copy() + label[label == 3] = 4 + sample["label"] = torch.from_numpy(label).long() + + if self.transforms is not None: + sample = self.transforms(sample) + return sample diff --git a/source_code/gliomasam3_moe/src/gliomasam3_moe/data/transforms_segmamba_like.py b/source_code/gliomasam3_moe/src/gliomasam3_moe/data/transforms_segmamba_like.py new file mode 100644 index 0000000000000000000000000000000000000000..df29d93e365ae74b34cc9434ad90a03131067234 --- /dev/null +++ b/source_code/gliomasam3_moe/src/gliomasam3_moe/data/transforms_segmamba_like.py @@ -0,0 +1,339 @@ +from typing import List + +import os +import random + +import torch +import torch.nn.functional as F + + +def _lazy_monai_transforms(): + os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1") + from monai.transforms import ( + Compose, + LoadImaged, + EnsureChannelFirstd, + Orientationd, + Spacingd, + NormalizeIntensityd, + RandCropByPosNegLabeld, + RandSpatialCropd, + RandFlipd, + RandScaleIntensityd, + RandShiftIntensityd, + EnsureTyped, + ConcatItemsd, + DeleteItemsd, + ) + return { + "Compose": Compose, + "LoadImaged": LoadImaged, + "EnsureChannelFirstd": EnsureChannelFirstd, + "Orientationd": Orientationd, + "Spacingd": Spacingd, + "NormalizeIntensityd": NormalizeIntensityd, + "RandCropByPosNegLabeld": RandCropByPosNegLabeld, + "RandSpatialCropd": RandSpatialCropd, + "RandFlipd": RandFlipd, + "RandScaleIntensityd": RandScaleIntensityd, + "RandShiftIntensityd": RandShiftIntensityd, + "EnsureTyped": EnsureTyped, + "ConcatItemsd": ConcatItemsd, + "DeleteItemsd": DeleteItemsd, + } + + +class SelectFirstd: + """If a transform returns a list of samples, keep the first.""" + + def __call__(self, data): + if isinstance(data, list): + return data[0] if len(data) > 0 else {} + return data + + +class 
ComposeSimple: + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, data): + for t in self.transforms: + data = t(data) + return data + + +def _to_channel_first(x: torch.Tensor) -> torch.Tensor: + if x.ndim == 3: + return x.unsqueeze(0) + return x + + +class RandomCropByPosNegLabeldSimple: + def __init__(self, keys, label_key, spatial_size, pos=1, neg=1): + self.keys = keys + self.label_key = label_key + self.spatial_size = spatial_size + self.pos = pos + self.neg = neg + + def __call__(self, data): + label = data[self.label_key] + label = _to_channel_first(label) + d, h, w = label.shape[-3:] + cd, ch, cw = self.spatial_size + use_pos = random.random() < (self.pos / max(self.pos + self.neg, 1)) + if use_pos: + fg = (label > 0).nonzero(as_tuple=False) + if fg.numel() > 0: + idx = fg[random.randrange(fg.shape[0])] + z, y, x = idx[-3:].tolist() + else: + z = random.randrange(d) + y = random.randrange(h) + x = random.randrange(w) + else: + z = random.randrange(d) + y = random.randrange(h) + x = random.randrange(w) + + sd = max(0, min(z - cd // 2, max(d - cd, 0))) + sh = max(0, min(y - ch // 2, max(h - ch, 0))) + sw = max(0, min(x - cw // 2, max(w - cw, 0))) + + def crop_pad(arr): + arr = _to_channel_first(arr) + cropped = arr[..., sd : sd + cd, sh : sh + ch, sw : sw + cw] + pad_d = cd - cropped.shape[-3] + pad_h = ch - cropped.shape[-2] + pad_w = cw - cropped.shape[-1] + if pad_d > 0 or pad_h > 0 or pad_w > 0: + cropped = F.pad(cropped, (0, pad_w, 0, pad_h, 0, pad_d)) + return cropped + + for k in self.keys: + data[k] = crop_pad(data[k]) + return data + + +class RandomFlipdSimple: + def __init__(self, keys, prob=0.5, spatial_axis=0): + self.keys = keys + self.prob = prob + self.spatial_axis = spatial_axis + + def __call__(self, data): + if random.random() >= self.prob: + return data + axis = self.spatial_axis + 1 # channel-first + for k in self.keys: + data[k] = torch.flip(_to_channel_first(data[k]), dims=(axis,)) + return data + + 
+class RandScaleIntensitydSimple: + def __init__(self, keys, factors=0.1, prob=0.1): + self.keys = keys + self.factors = factors + self.prob = prob + + def __call__(self, data): + if random.random() >= self.prob: + return data + scale = 1.0 + random.uniform(-self.factors, self.factors) + for k in self.keys: + data[k] = data[k] * scale + return data + + +class RandShiftIntensitydSimple: + def __init__(self, keys, offsets=0.1, prob=0.1): + self.keys = keys + self.offsets = offsets + self.prob = prob + + def __call__(self, data): + if random.random() >= self.prob: + return data + shift = random.uniform(-self.offsets, self.offsets) + for k in self.keys: + data[k] = data[k] + shift + return data + + +class EnsureTypedSimple: + def __init__(self, keys, dtype): + self.keys = keys + self.dtype = dtype + + def __call__(self, data): + for k, dt in zip(self.keys, self.dtype): + data[k] = data[k].to(dtype=dt) + return data + +def get_train_transforms(cfg, image_keys: List[str], label_key: str = "label"): + """Train transforms (SegMamba-like). 
def get_train_transforms(cfg, image_keys: List[str], label_key: str = "label"):
    """Train transforms (SegMamba-like), MONAI-backed.

    Output shapes after transform:
        image: [4, D, H, W]
        label: [1, D, H, W]
    """
    mt = _lazy_monai_transforms()
    keys = list(image_keys) + [label_key]
    pipeline = [
        mt["LoadImaged"](keys=keys),
        mt["EnsureChannelFirstd"](keys=keys),
        mt["Orientationd"](keys=keys, axcodes=cfg.data.orientation),
    ]
    if cfg.data.do_spacing:
        pipeline.append(mt["Spacingd"](keys=keys, pixdim=cfg.data.spacing, mode=("bilinear", "nearest")))
    pipeline += [
        mt["ConcatItemsd"](keys=image_keys, name="image"),
        mt["DeleteItemsd"](keys=image_keys),
        mt["NormalizeIntensityd"](keys="image", nonzero=True, channel_wise=True),
    ]
    if cfg.data.crop_size is not None:
        pipeline.append(
            mt["RandCropByPosNegLabeld"](
                keys=["image", label_key],
                label_key=label_key,
                spatial_size=cfg.data.crop_size,
                pos=1,
                neg=1,
                num_samples=cfg.data.num_samples,
                image_key="image",
            )
        )
    for axis in (0, 1, 2):
        pipeline.append(mt["RandFlipd"](keys=["image", label_key], prob=0.5, spatial_axis=axis))
    pipeline += [
        mt["RandScaleIntensityd"](keys="image", factors=0.1, prob=cfg.data.rand_scale_prob),
        mt["RandShiftIntensityd"](keys="image", offsets=0.1, prob=cfg.data.rand_shift_prob),
        mt["EnsureTyped"](keys=["image", label_key], dtype=[torch.float32, torch.int64]),
    ]
    return mt["Compose"](pipeline)


def get_npz_train_transforms(cfg, label_key: str = "label"):
    """Train transforms for SegMamba NPZ tensors (no MONAI dependency).

    Output shapes after transform:
        image: [4, D, H, W]
        label: [1, D, H, W]
    """
    keys = ["image", label_key]
    pipeline = []
    if cfg.data.crop_size is not None:
        pipeline.append(
            RandomCropByPosNegLabeldSimple(
                keys=keys,
                label_key=label_key,
                spatial_size=cfg.data.crop_size,
                pos=1,
                neg=1,
            )
        )
    for axis in (0, 1, 2):
        pipeline.append(RandomFlipdSimple(keys=keys, prob=0.5, spatial_axis=axis))
    pipeline += [
        RandScaleIntensitydSimple(keys=["image"], factors=0.1, prob=cfg.data.rand_scale_prob),
        RandShiftIntensitydSimple(keys=["image"], offsets=0.1, prob=cfg.data.rand_shift_prob),
        EnsureTypedSimple(keys=keys, dtype=[torch.float32, torch.int64]),
    ]
    return ComposeSimple(pipeline)


def get_val_transforms(cfg, image_keys: List[str], label_key: str = "label"):
    """Validation transforms: load, orient, (optionally) respace, normalize."""
    mt = _lazy_monai_transforms()
    keys = list(image_keys) + [label_key]
    pipeline = [
        mt["LoadImaged"](keys=keys),
        mt["EnsureChannelFirstd"](keys=keys),
        mt["Orientationd"](keys=keys, axcodes=cfg.data.orientation),
    ]
    if cfg.data.do_spacing:
        pipeline.append(mt["Spacingd"](keys=keys, pixdim=cfg.data.spacing, mode=("bilinear", "nearest")))
    pipeline += [
        mt["ConcatItemsd"](keys=image_keys, name="image"),
        mt["DeleteItemsd"](keys=image_keys),
        mt["NormalizeIntensityd"](keys="image", nonzero=True, channel_wise=True),
        mt["EnsureTyped"](keys=["image", label_key], dtype=[torch.float32, torch.int64]),
    ]
    return mt["Compose"](pipeline)


def get_infer_transforms(cfg, image_keys: List[str]):
    """Inference transforms: like validation but without a label key."""
    mt = _lazy_monai_transforms()
    keys = list(image_keys)
    pipeline = [
        mt["LoadImaged"](keys=keys),
        mt["EnsureChannelFirstd"](keys=keys),
        mt["Orientationd"](keys=keys, axcodes=cfg.data.orientation),
    ]
    if cfg.data.do_spacing:
        pipeline.append(mt["Spacingd"](keys=keys, pixdim=cfg.data.spacing, mode="bilinear"))
    pipeline += [
        mt["ConcatItemsd"](keys=image_keys, name="image"),
        mt["DeleteItemsd"](keys=image_keys),
        mt["NormalizeIntensityd"](keys="image", nonzero=True, channel_wise=True),
        mt["EnsureTyped"](keys=["image"], dtype=[torch.float32]),
    ]
    return mt["Compose"](pipeline)


def get_synthetic_transforms(cfg):
    """Transforms for the synthetic dataset (tensors already in memory)."""
    pipeline = []
    if cfg.data.crop_size is not None:
        pipeline.append(
            RandomCropByPosNegLabeldSimple(
                keys=["image", "label"],
                label_key="label",
                spatial_size=cfg.data.crop_size,
                pos=1,
                neg=1,
            )
        )
    for axis in (0, 1, 2):
        pipeline.append(RandomFlipdSimple(keys=["image", "label"], prob=0.5, spatial_axis=axis))
    pipeline += [
        RandScaleIntensitydSimple(keys=["image"], factors=0.1, prob=cfg.data.rand_scale_prob),
        RandShiftIntensitydSimple(keys=["image"], offsets=0.1, prob=cfg.data.rand_shift_prob),
        EnsureTypedSimple(keys=["image", "label"], dtype=[torch.float32, torch.int64]),
    ]
    return ComposeSimple(pipeline)
a/source_code/gliomasam3_moe/src/gliomasam3_moe/losses/brats_losses.py b/source_code/gliomasam3_moe/src/gliomasam3_moe/losses/brats_losses.py new file mode 100644 index 0000000000000000000000000000000000000000..0639566e2d265a03f8d0730264c3573dc191acd8 --- /dev/null +++ b/source_code/gliomasam3_moe/src/gliomasam3_moe/losses/brats_losses.py @@ -0,0 +1,101 @@ +from typing import Dict, Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from gliomasam3_moe.utils.brats_regions import label_to_regions, et_present + + +def dice_loss(logits: torch.Tensor, targets: torch.Tensor, eps: float = 1e-5) -> torch.Tensor: + """Dice loss for multi-label (WT/TC/ET). + + logits: [B, 3, D, H, W] + targets: [B, 3, D, H, W] + """ + probs = torch.sigmoid(logits) + dims = (0, 2, 3, 4) + intersection = (probs * targets).sum(dims) + union = probs.sum(dims) + targets.sum(dims) + dice = (2.0 * intersection + eps) / (union + eps) + return 1.0 - dice.mean() + + +class LossComputer(nn.Module): + """Loss for BraTS regions. + + Returns total loss and logs dict. 
+ """ + + def __init__( + self, + dice_weight: float = 1.0, + bce_weight: float = 1.0, + et_focal_weight: float = 0.0, + focal_gamma: float = 2.0, + pres_weight: float = 0.1, + hier_weight: float = 0.1, + moe_weight: float = 0.01, + ): + super().__init__() + self.dice_weight = dice_weight + self.bce_weight = bce_weight + self.et_focal_weight = et_focal_weight + self.focal_gamma = focal_gamma + self.pres_weight = pres_weight + self.hier_weight = hier_weight + self.moe_weight = moe_weight + + def forward( + self, + logits_regions: torch.Tensor, + aux: Dict[str, torch.Tensor], + label: torch.Tensor, + ) -> Tuple[torch.Tensor, Dict[str, float]]: + regions_gt = label_to_regions(label) + bce = F.binary_cross_entropy_with_logits(logits_regions, regions_gt) + dice = dice_loss(logits_regions, regions_gt) + + focal = torch.tensor(0.0, device=logits_regions.device) + if self.et_focal_weight > 0: + et_logits = logits_regions[:, 2] + et_gt = regions_gt[:, 2] + bce_et = F.binary_cross_entropy_with_logits(et_logits, et_gt, reduction="none") + pt = torch.exp(-bce_et) + focal = ((1.0 - pt) ** self.focal_gamma * bce_et).mean() + + pi_et = aux.get("pi_et", None) + pres = torch.tensor(0.0, device=logits_regions.device) + if pi_et is not None: + y_et = et_present(label) + # BCE is unsafe under autocast; compute in fp32 outside autocast. 
class HFDI3D(nn.Module):
    """HFDI-3D high-frequency detail injection (parameter-free).

    Input:
        x: [B, 4, D, H, W] float32
    Output:
        x_plus: [B, 7, D, H, W] — the original 4 channels plus 3 normalized
                gradient channels
        h_norm: [B, 3, D, H, W] — per-sample normalized |Gz|, |Gy|, |Gx|
    Axis convention: D = depth (z), H = height (y), W = width (x).
    """

    def __init__(self, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        # Central-difference kernel, oriented along each spatial axis.
        base = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float32)
        self.register_buffer("k_d", base.view(1, 1, 3, 1, 1))
        self.register_buffer("k_h", base.view(1, 1, 1, 3, 1))
        self.register_buffer("k_w", base.view(1, 1, 1, 1, 3))

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        # Average over modalities, then differentiate along each axis.
        avg = x.mean(dim=1, keepdim=True)  # [B,1,D,H,W]
        grads = [
            F.conv3d(avg, self.k_d, padding=(1, 0, 0)).abs(),
            F.conv3d(avg, self.k_h, padding=(0, 1, 0)).abs(),
            F.conv3d(avg, self.k_w, padding=(0, 0, 1)).abs(),
        ]
        h = torch.cat(grads, dim=1)  # [B,3,D,H,W]

        # Normalize per sample, over all gradient channels jointly.
        batch = h.shape[0]
        flat = h.reshape(batch, -1)
        mu = flat.mean(dim=1).view(batch, 1, 1, 1, 1)
        sigma = flat.std(dim=1).view(batch, 1, 1, 1, 1)
        h_norm = (h - mu) / (sigma + self.eps)

        return torch.cat([x, h_norm], dim=1), h_norm


class TransformerBlock(nn.Module):
    """Pre-norm Transformer encoder block (self-attention + MLP).

    Input/Output:
        x: [B, N, d]
    """

    def __init__(self, dim: int, heads: int, mlp_ratio: float = 4.0, dropout: float = 0.0):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True, dropout=dropout)
        self.norm2 = nn.LayerNorm(dim)
        hidden = int(dim * mlp_ratio)
        self.mlp = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.GELU(),
            nn.Linear(hidden, dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        normed = self.norm1(x)
        attn_out, _ = self.attn(normed, normed, normed)
        x = x + attn_out
        return x + self.mlp(self.norm2(x))


class ImageEncoder2D(nn.Module):
    """SAM-style lightweight 2D image encoder (slice-wise).

    Input:
        x2d: [B*D, 7, H, W]
    Output:
        tokens: [B*D, N, d]
        grid_hw: (h, w) where N = h*w
    """

    def __init__(
        self,
        in_chans: int = 7,
        patch_size: int = 16,
        embed_dim: int = 128,
        depth: int = 2,
        heads: int = 4,
        mlp_ratio: float = 4.0,
        dropout: float = 0.0,
    ):
        super().__init__()
        self.patch_size = patch_size
        self.patch_embed = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.blocks = nn.ModuleList(
            [TransformerBlock(embed_dim, heads, mlp_ratio=mlp_ratio, dropout=dropout) for _ in range(depth)]
        )
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, x2d: torch.Tensor) -> Tuple[torch.Tensor, Tuple[int, int]]:
        patches = self.patch_embed(x2d)  # [B*D, d, h, w]
        grid_h, grid_w = patches.shape[-2], patches.shape[-1]
        tokens = patches.flatten(2).transpose(1, 2)  # [B*D, N, d]
        for block in self.blocks:
            tokens = block(tokens)
        return self.norm(tokens), (grid_h, grid_w)
+ + Input: + x2d: [B*D, 7, H, W] + Output: + tokens: [B*D, N, d] + grid_hw: (h, w) where N = h*w + """ + + def __init__( + self, + out_dim: int, + in_chans: int = 7, + checkpoint_path: Optional[str] = None, + freeze: bool = True, + input_mean: Optional[List[float]] = None, + input_std: Optional[List[float]] = None, + ): + super().__init__() + self.freeze = freeze + self.in_proj = nn.Conv2d(in_chans, 3, kernel_size=1) + self._rope_cache = {} + + if input_mean is not None and input_std is not None: + mean = torch.tensor(input_mean, dtype=torch.float32).view(1, 3, 1, 1) + std = torch.tensor(input_std, dtype=torch.float32).view(1, 3, 1, 1) + self.register_buffer("input_mean", mean) + self.register_buffer("input_std", std) + else: + self.input_mean = None + self.input_std = None + + try: + from sam3.model_builder import build_sam3_image_model + except Exception: + sam3_repo = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../../sam3")) + if os.path.isdir(sam3_repo) and sam3_repo not in sys.path: + sys.path.append(sam3_repo) + try: + from sam3.model_builder import build_sam3_image_model + except Exception as exc: + raise ImportError( + "SAM3 backbone requested but the sam3 package is not available. " + "Ensure /root/githubs/sam3 is on PYTHONPATH or install sam3." 
+ ) from exc + + load_from_hf = True + if checkpoint_path is None: + default_path = os.environ.get("SAM3_CKPT", "/data/yty/sam3/sam3.pt") + if os.path.isfile(default_path): + checkpoint_path = default_path + load_from_hf = False + else: + load_from_hf = False + + sam3_model = build_sam3_image_model( + checkpoint_path=checkpoint_path, + load_from_HF=load_from_hf, + enable_segmentation=False, + eval_mode=True, + device="cpu", + ) + self.sam3_trunk = sam3_model.backbone.vision_backbone.trunk + + kernel = self.sam3_trunk.patch_embed.proj.kernel_size + self.patch_size = int(kernel[0]) + self.in_dim = int(self.sam3_trunk.channel_list[-1]) + self.proj = nn.Identity() if self.in_dim == out_dim else nn.Linear(self.in_dim, out_dim) + + if freeze: + for p in self.sam3_trunk.parameters(): + p.requires_grad = False + self.sam3_trunk.eval() + + def _update_global_rope_freqs(self, grid_h: int, grid_w: int, device: torch.device) -> None: + key = (grid_h, grid_w, device.type, device.index) + cached = self._rope_cache.get(key) + if cached is None: + freqs_cis = None + for blk in self.sam3_trunk.blocks: + if getattr(blk, "window_size", 0) != 0: + continue + attn = blk.attn + if not getattr(attn, "use_rope", False): + continue + scale_pos = 1.0 + if getattr(attn, "rope_interp", False): + rope_pt = attn.rope_pt_size or (grid_h, grid_w) + scale_pos = rope_pt[0] / max(grid_h, 1) + freqs_cis = attn.compute_cis(end_x=grid_h, end_y=grid_w, scale_pos=scale_pos) + break + if freqs_cis is None: + return + cached = freqs_cis.to(device) + self._rope_cache[key] = cached + for blk in self.sam3_trunk.blocks: + if getattr(blk, "window_size", 0) != 0: + continue + attn = blk.attn + if getattr(attn, "use_rope", False): + attn.freqs_cis = cached + + def forward(self, x2d: torch.Tensor) -> Tuple[torch.Tensor, Tuple[int, int]]: + x = self.in_proj(x2d) + if self.input_mean is not None and self.input_std is not None: + x = (x - self.input_mean) / self.input_std + grid_h = x.shape[-2] // self.patch_size + 
class SliceSeqAdapter(nn.Module):
    """Slice-as-Sequence 3D adaptation with cross-attention memory.

    Each slice's tokens attend to the tokens of up to ``k`` neighbouring
    slices: the preceding slices for ``direction="forward"``, the following
    slices otherwise. Context always comes from the *input* tokens, never
    from partially updated output, so the per-slice updates are independent.

    Input:
        tokens: [B, D, N, d]
    Output:
        tokens_out: [B, D, N, d]
    """

    def __init__(self, dim: int, heads: int, k: int = 4, mlp_ratio: float = 2.0):
        super().__init__()
        self.k = k
        self.norm_q = nn.LayerNorm(dim)
        self.norm_kv = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        hidden = int(dim * mlp_ratio)
        self.norm_mlp = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.GELU(),
            nn.Linear(hidden, dim),
        )

    def forward(self, tokens: torch.Tensor, direction: str = "forward") -> torch.Tensor:
        # Fix: early-out BEFORE materializing the D-length output list — the
        # previous revision built the list of slice views and then discarded it.
        if self.k <= 0:
            return tokens

        b, d, n, c = tokens.shape
        out: List[torch.Tensor] = [tokens[:, i] for i in range(d)]

        order = range(d) if direction == "forward" else range(d - 1, -1, -1)
        for t in order:
            q = tokens[:, t]  # [B, N, C]
            if direction == "forward":
                ctx_idx = list(range(max(0, t - self.k), t))
            else:
                ctx_idx = list(range(t + 1, min(d, t + self.k + 1)))
            if not ctx_idx:
                # Boundary slice: no memory available, pass through unchanged.
                out[t] = q
                continue
            kv = tokens[:, ctx_idx].reshape(b, -1, c)  # [B, K*N, C]
            qn = self.norm_q(q)
            kvn = self.norm_kv(kv)
            attn_out, _ = self.attn(qn, kvn, kvn)
            q = q + attn_out
            q = q + self.mlp(self.norm_mlp(q))
            out[t] = q

        return torch.stack(out, dim=1)


class AttributeHead(nn.Module):
    """Predicts ET (enhancing tumor) presence from a global token.

    Input:
        z: [B, d]
    Output:
        dict with:
            pi_et: [B] probabilities in (0, 1)
    """

    def __init__(self, dim: int):
        super().__init__()
        self.fc = nn.Linear(dim, 1)

    def forward(self, z: torch.Tensor) -> Dict[str, torch.Tensor]:
        pi_et = torch.sigmoid(self.fc(z)).squeeze(-1)
        return {"pi_et": pi_et}


class PromptEncoder(nn.Module):
    """Concept token embedding pooled through a prompt MLP.

    Input:
        token_ids: [B, L] integer concept-token ids
    Output:
        prompt: [B, d]
    """

    def __init__(self, num_tokens: int, dim: int, hidden: int):
        super().__init__()
        self.emb = nn.Embedding(num_tokens, dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.GELU(),
            nn.Linear(hidden, dim),
        )

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        x = self.emb(token_ids)  # [B, L, d]
        x = x.mean(dim=1)        # mean-pool over the token axis -> [B, d]
        return self.mlp(x)


class PromptFiLM(nn.Module):
    """FiLM (feature-wise linear modulation) of slice tokens by a prompt.

    Input:
        tokens: [B, D, N, d]
        prompt: [B, d]
    Output:
        modulated tokens: [B, D, N, d] = tokens * (1 + scale) + bias
    """

    def __init__(self, dim: int):
        super().__init__()
        self.to_scale_bias = nn.Linear(dim, dim * 2)

    def forward(self, tokens: torch.Tensor, prompt: torch.Tensor) -> torch.Tensor:
        sb = self.to_scale_bias(prompt)       # [B, 2d]
        scale, bias = sb.chunk(2, dim=-1)     # each [B, d]
        scale = scale.view(scale.shape[0], 1, 1, -1)
        bias = bias.view(bias.shape[0], 1, 1, -1)
        return tokens * (1.0 + scale) + bias
class SpectralModulation3D(nn.Module):
    """Learnable spectral modulation via 3D FFT amplitude gating.

    The FFT amplitude is re-weighted by a learnable per-radial-bin gate
    (linearly interpolated between bins), the phase is kept, and simple
    frequency-energy statistics are extracted from the modulated amplitude.

    Input:
        x: [B, C, D, H, W]
    Output:
        x_spec: [B, C, D, H, W]
        stats: [B, Q]
    """

    def __init__(self, num_bins: int = 16, q: int = 3, eps: float = 1e-6):
        super().__init__()
        self.num_bins = num_bins
        self.q = q
        self.eps = eps
        # One learnable gain per radial-frequency bin (identity at init).
        self.gate = nn.Parameter(torch.ones(num_bins))

    def _radial_weight(self, d: int, h: int, w: int, device, dtype) -> torch.Tensor:
        # Normalized radial frequency in [0, 1] for every FFT coefficient.
        fd = torch.fft.fftfreq(d, device=device, dtype=dtype).abs().view(d, 1, 1)
        fh = torch.fft.fftfreq(h, device=device, dtype=dtype).abs().view(1, h, 1)
        fw = torch.fft.fftfreq(w, device=device, dtype=dtype).abs().view(1, 1, w)
        radius = torch.sqrt(fd ** 2 + fh ** 2 + fw ** 2)
        radius = radius / (radius.max() + self.eps)  # [D,H,W] in [0,1]
        # Linear interpolation between the two nearest gate bins.
        pos = radius * (self.num_bins - 1)
        lo = pos.floor().long().clamp(min=0, max=self.num_bins - 1)
        hi = (lo + 1).clamp(min=0, max=self.num_bins - 1)
        frac = pos - lo.float()
        blended = self.gate[lo] * (1.0 - frac) + self.gate[hi] * frac
        return blended.unsqueeze(0).unsqueeze(0)  # [1,1,D,H,W]

    def _spectral_stats(self, amp: torch.Tensor) -> torch.Tensor:
        # amp: [B, C, D, H, W]; reduce channels into one power spectrum.
        b, _, d, h, w = amp.shape
        power = (amp ** 2).sum(dim=1)  # [B,D,H,W]
        total = power.sum(dim=(1, 2, 3)) + self.eps
        fd = torch.fft.fftfreq(d, device=amp.device, dtype=amp.dtype).abs().view(d, 1, 1)
        fh = torch.fft.fftfreq(h, device=amp.device, dtype=amp.dtype).abs().view(1, h, 1)
        fw = torch.fft.fftfreq(w, device=amp.device, dtype=amp.dtype).abs().view(1, 1, w)
        # Frequency-weighted mean energy along each spatial axis.
        e_d = (power * fd).sum(dim=(1, 2, 3)) / total
        e_h = (power * fh).sum(dim=(1, 2, 3)) / total
        e_w = (power * fw).sum(dim=(1, 2, 3)) / total
        stats = torch.stack([e_d, e_h, e_w], dim=-1)  # [B,3]
        if self.q > 3:
            # Zero-pad to the configured statistics dimensionality.
            pad = torch.zeros((b, self.q - 3), device=amp.device, dtype=amp.dtype)
            stats = torch.cat([stats, pad], dim=-1)
        return stats

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        _, _, d, h, w = x.shape
        spectrum = torch.fft.fftn(x, dim=(2, 3, 4))
        amp = torch.abs(spectrum)
        phase = torch.angle(spectrum)
        amp_mod = amp * self._radial_weight(d, h, w, x.device, x.dtype)
        # Recombine gated amplitude with the original phase.
        recombined = amp_mod * torch.exp(1j * phase)
        x_spec = torch.fft.ifftn(recombined, dim=(2, 3, 4)).real
        return x_spec, self._spectral_stats(amp_mod)


class MSDA3D(nn.Module):
    """Multi-scale directional depthwise conv.

    One depthwise branch per axis (D, H, W) at every scale; a global
    pooling + MLP gate softmax-weights the branches per sample.

    Input:
        u: [B, C, D, h, w]
    Output:
        u_msda: [B, C, D, h, w]
    """

    def __init__(self, channels: int, scales: List[int]):
        super().__init__()
        self.channels = channels
        self.scales = scales
        branches = []
        for k in scales:
            # Keep branch creation order (D, H, W) stable per scale so
            # parameter initialization is reproducible under a fixed seed.
            for kernel, pad in (
                ((k, 1, 1), (k // 2, 0, 0)),
                ((1, k, 1), (0, k // 2, 0)),
                ((1, 1, k), (0, 0, k // 2)),
            ):
                branches.append(
                    nn.Conv3d(channels, channels, kernel_size=kernel,
                              padding=pad, groups=channels)
                )
        self.convs = nn.ModuleList(branches)
        self.weight_mlp = nn.Sequential(
            nn.Linear(channels, channels),
            nn.GELU(),
            nn.Linear(channels, len(branches)),
        )

    def forward(self, u: torch.Tensor) -> torch.Tensor:
        b = u.shape[0]
        # Global average pooling drives a softmax gate over all branches.
        gate = torch.softmax(self.weight_mlp(u.mean(dim=(2, 3, 4))), dim=-1)  # [B, K*3]
        mixed = torch.zeros_like(u)
        for i, branch in enumerate(self.convs):
            mixed = mixed + gate[:, i].view(b, 1, 1, 1, 1) * branch(u)
        return mixed
class DualDomainEnhancer(nn.Module):
    """Dual-domain enhancement: MSDA + FA/FCF + spectral fusion.

    Inputs:
        u: [B, C, D, h, w] (tokens reshaped to 3D grid)
        x_spec: [B, 4, D, H, W] (spectral-modulated image)
    Output:
        u_out: [B, C, D, h, w]
    """

    def __init__(self, channels: int, patch_size: int, spectral_in: int = 4, scales: List[int] = None):
        super().__init__()
        if scales is None:
            scales = [3, 5, 7]
        # NOTE: submodule creation order is kept stable so parameter
        # initialization is reproducible under a fixed seed.
        self.msda = MSDA3D(channels, scales=scales)
        self.fa_level = nn.Conv3d(channels, channels, kernel_size=3, padding=1)
        self.fa_fuse = nn.Conv3d(channels * 2, channels, kernel_size=1)
        self.fcf_mlp = nn.Sequential(
            nn.Linear(channels * 2, channels),
            nn.GELU(),
            nn.Linear(channels, 1),
        )
        # Patchify the spectral image down to the token-grid resolution.
        self.spec_stem = nn.Conv3d(
            spectral_in, channels,
            kernel_size=(1, patch_size, patch_size),
            stride=(1, patch_size, patch_size),
        )
        self.fuse_conv = nn.Conv3d(channels * 2, channels, kernel_size=1)

    def forward(self, u: torch.Tensor, x_spec: torch.Tensor) -> torch.Tensor:
        directional = self.msda(u)
        # Feature-aggregation branch: fuse u with a 3x3x3 refinement of itself.
        refined = self.fa_level(u)
        aggregated = self.fa_fuse(torch.cat([u, refined], dim=1))
        # Content-dependent scalar gate blends the two branches per sample.
        descriptor = torch.cat([aggregated, directional], dim=1).mean(dim=(2, 3, 4))
        eta = torch.sigmoid(self.fcf_mlp(descriptor)).view(u.shape[0], 1, 1, 1, 1)
        blended = eta * aggregated + (1.0 - eta) * directional
        # Inject the spectral-domain signal and fuse channelwise.
        spectral_tokens = self.spec_stem(x_spec)
        return self.fuse_conv(torch.cat([blended, spectral_tokens], dim=1))
+ + Input: + u: [B, C, D, H, W] + Output: + logits: [B, 3, D, H, W] + """ + + def __init__(self, in_channels: int, hidden: int = 64): + super().__init__() + self.net = nn.Sequential( + nn.Conv3d(in_channels, hidden, kernel_size=3, padding=1), + nn.GELU(), + nn.Conv3d(hidden, 3, kernel_size=1), + ) + + def forward(self, u: torch.Tensor) -> torch.Tensor: + return self.net(u) + + +def _topk_sparse(gamma: torch.Tensor, k: int) -> torch.Tensor: + if k >= gamma.shape[1]: + return gamma + _, topk_idx = torch.topk(gamma, k, dim=-1) + mask = torch.zeros_like(gamma).scatter_(1, topk_idx, 1.0) + gamma = gamma * mask + gamma = gamma / (gamma.sum(dim=-1, keepdim=True) + 1e-8) + return gamma + + +class MoEDecoder(nn.Module): + """Sparse Mixture-of-Experts decoder. + + Inputs: + u: [B, C, D, h, w] + z: [B, d] + prompt: [B, d] + spectral_stats: [B, Q] + Outputs: + logits: [B, 3, D, H, W] + gamma: [B, M] + """ + + def __init__( + self, + in_channels: int, + token_dim: int, + spectral_q: int, + num_experts: int = 5, + topk: int = 2, + hidden: int = 64, + ): + super().__init__() + self.num_experts = num_experts + self.topk = topk + gate_in = token_dim + token_dim + spectral_q + self.gate = nn.Sequential( + nn.Linear(gate_in, token_dim), + nn.GELU(), + nn.Linear(token_dim, num_experts), + ) + self.experts = nn.ModuleList([ExpertHead(in_channels, hidden=hidden) for _ in range(num_experts)]) + + def forward( + self, + u: torch.Tensor, + z: torch.Tensor, + prompt: torch.Tensor, + spectral_stats: torch.Tensor, + target_size: Tuple[int, int, int], + ) -> Tuple[torch.Tensor, torch.Tensor]: + b = u.shape[0] + gate_in = torch.cat([z, prompt, spectral_stats], dim=-1) + gamma = torch.softmax(self.gate(gate_in), dim=-1) + gamma = _topk_sparse(gamma, self.topk) + + u_up = F.interpolate(u, size=target_size, mode="trilinear", align_corners=False) + logits_all = [] + for exp in self.experts: + logits_all.append(exp(u_up)) + logits_all = torch.stack(logits_all, dim=1) # [B, M, 3, D, H, W] + 
gamma_view = gamma.view(b, self.num_experts, 1, 1, 1, 1) + logits = (logits_all * gamma_view).sum(dim=1) + return logits, gamma + + +class GliomaSAM3_MoE(nn.Module): + """GliomaSAM3-MoE full model. + + Forward: + image: [B, 4, D, H, W] + returns: + logits_regions: [B, 3, D, H, W] (WT, TC, ET) + aux: dict with pi_et, moe_gamma, spectral_stats, et_prob_gated + """ + + def __init__( + self, + patch_size: int = 16, + token_dim: int = 128, + depth: int = 2, + heads: int = 4, + mlp_ratio: float = 4.0, + slice_attn_k: int = 4, + slice_attn_random_dir: bool = True, + spectral_bins: int = 16, + spectral_q: int = 3, + msda_scales: List[int] = None, + moe_experts: int = 5, + moe_topk: int = 2, + decoder_hidden: int = 64, + prompt_mlp_hidden: int = 128, + use_sam3_backbone: bool = False, + sam3_ckpt_path: Optional[str] = None, + sam3_freeze: bool = True, + sam3_in_chans: int = 7, + sam3_input_mean: Optional[List[float]] = None, + sam3_input_std: Optional[List[float]] = None, + ): + super().__init__() + self.patch_size = patch_size + self.slice_attn_random_dir = slice_attn_random_dir + + self.hfdi = HFDI3D() + self.spectral = SpectralModulation3D(num_bins=spectral_bins, q=spectral_q) + if use_sam3_backbone: + self.encoder2d = Sam3ImageEncoder2D( + out_dim=token_dim, + in_chans=sam3_in_chans, + checkpoint_path=sam3_ckpt_path, + freeze=sam3_freeze, + input_mean=sam3_input_mean, + input_std=sam3_input_std, + ) + self.patch_size = self.encoder2d.patch_size + else: + self.encoder2d = ImageEncoder2D( + in_chans=7, + patch_size=patch_size, + embed_dim=token_dim, + depth=depth, + heads=heads, + mlp_ratio=mlp_ratio, + ) + self.slice_adapter = SliceSeqAdapter(token_dim, heads=heads, k=slice_attn_k) + self.attr_head = AttributeHead(token_dim) + + self.concept_vocab = ["WT", "TC", "ET", "ET_PRESENT", "ET_ABSENT"] + self.concept_to_id = {k: i for i, k in enumerate(self.concept_vocab)} + self.prompt_encoder = PromptEncoder(num_tokens=len(self.concept_vocab), dim=token_dim, 
class GliomaSAM3_MoE(nn.Module):
    """GliomaSAM3-MoE full model.

    Pipeline: HFDI frequency decomposition -> slice-wise 2D encoding ->
    slice-as-sequence adaptation -> prompt-conditioned FiLM ->
    dual-domain enhancement -> sparse-MoE decoding.

    Forward:
        image: [B, 4, D, H, W]
    returns:
        logits_regions: [B, 3, D, H, W] (WT, TC, ET)
        aux: dict with pi_et, moe_gamma, spectral_stats, et_prob_gated
    """

    def __init__(
        self,
        patch_size: int = 16,
        token_dim: int = 128,
        depth: int = 2,
        heads: int = 4,
        mlp_ratio: float = 4.0,
        slice_attn_k: int = 4,
        slice_attn_random_dir: bool = True,
        spectral_bins: int = 16,
        spectral_q: int = 3,
        msda_scales: Optional[List[int]] = None,
        moe_experts: int = 5,
        moe_topk: int = 2,
        decoder_hidden: int = 64,
        prompt_mlp_hidden: int = 128,
        use_sam3_backbone: bool = False,
        sam3_ckpt_path: Optional[str] = None,
        sam3_freeze: bool = True,
        sam3_in_chans: int = 7,
        sam3_input_mean: Optional[List[float]] = None,
        sam3_input_std: Optional[List[float]] = None,
    ):
        super().__init__()
        self.patch_size = patch_size
        self.slice_attn_random_dir = slice_attn_random_dir

        self.hfdi = HFDI3D()
        self.spectral = SpectralModulation3D(num_bins=spectral_bins, q=spectral_q)
        if use_sam3_backbone:
            # SAM3 trunk replaces the scratch ViT; it dictates the patch size.
            self.encoder2d = Sam3ImageEncoder2D(
                out_dim=token_dim,
                in_chans=sam3_in_chans,
                checkpoint_path=sam3_ckpt_path,
                freeze=sam3_freeze,
                input_mean=sam3_input_mean,
                input_std=sam3_input_std,
            )
            self.patch_size = self.encoder2d.patch_size
        else:
            self.encoder2d = ImageEncoder2D(
                in_chans=7,
                patch_size=patch_size,
                embed_dim=token_dim,
                depth=depth,
                heads=heads,
                mlp_ratio=mlp_ratio,
            )
        self.slice_adapter = SliceSeqAdapter(token_dim, heads=heads, k=slice_attn_k)
        self.attr_head = AttributeHead(token_dim)

        # Fixed concept vocabulary; the fourth prompt token flips between
        # ET_PRESENT / ET_ABSENT depending on the (predicted or given) label.
        self.concept_vocab = ["WT", "TC", "ET", "ET_PRESENT", "ET_ABSENT"]
        self.concept_to_id = {k: i for i, k in enumerate(self.concept_vocab)}
        self.prompt_encoder = PromptEncoder(num_tokens=len(self.concept_vocab), dim=token_dim,
                                            hidden=prompt_mlp_hidden)
        self.prompt_film = PromptFiLM(token_dim)

        self.dual_enhance = DualDomainEnhancer(
            channels=token_dim,
            patch_size=self.patch_size,
            spectral_in=4,
            scales=msda_scales,
        )
        self.moe_decoder = MoEDecoder(
            in_channels=token_dim,
            token_dim=token_dim,
            spectral_q=spectral_q,
            num_experts=moe_experts,
            topk=moe_topk,
            hidden=decoder_hidden,
        )

    def _select_concept_tokens(self, pi_et: torch.Tensor, label: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Build per-sample prompt token ids [WT, TC, ET, ET_PRESENT|ET_ABSENT].

        With a ground-truth ``label`` (teacher forcing during training) ET
        presence is read from the label map (value 4); otherwise the
        predicted probability ``pi_et`` is thresholded at 0.5.
        """
        b = pi_et.shape[0]
        if label is not None:
            et_flag = (label == 4).view(b, -1).any(dim=1)
        else:
            et_flag = pi_et > 0.5
        wt_id = self.concept_to_id["WT"]
        tc_id = self.concept_to_id["TC"]
        et_id = self.concept_to_id["ET"]
        present_id = self.concept_to_id["ET_PRESENT"]
        absent_id = self.concept_to_id["ET_ABSENT"]
        base = torch.tensor([wt_id, tc_id, et_id], device=pi_et.device).view(1, 3).repeat(b, 1)
        et_token = torch.where(et_flag, torch.tensor(present_id, device=pi_et.device),
                               torch.tensor(absent_id, device=pi_et.device)).view(b, 1)
        return torch.cat([base, et_token], dim=1)  # [B, 4]

    def forward(self, image: torch.Tensor, label: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        b, c, d, h, w = image.shape
        orig_h, orig_w = h, w
        # Symmetric pad H/W up to a multiple of the patch size; depth is untouched.
        pad_h = (self.patch_size - (h % self.patch_size)) % self.patch_size
        pad_w = (self.patch_size - (w % self.patch_size)) % self.patch_size
        ph0 = pad_h // 2
        ph1 = pad_h - ph0
        pw0 = pad_w // 2
        pw1 = pad_w - pw0
        if pad_h > 0 or pad_w > 0:
            image = F.pad(image, (pw0, pw1, ph0, ph1, 0, 0))
            h, w = image.shape[-2:]

        # x_plus: 7-channel frequency-decomposed input (HFDI); x_spec keeps 4 channels.
        x_plus, _ = self.hfdi(image)
        x_spec, spectral_stats = self.spectral(image)

        # Slice-wise 2D encoding
        x2d = x_plus.permute(0, 2, 1, 3, 4).reshape(b * d, 7, h, w)  # [B*D,7,H,W]
        tokens, (gh, gw) = self.encoder2d(x2d)  # [B*D, N, C]
        n = gh * gw
        tokens = tokens.view(b, d, n, -1)  # [B,D,N,C]

        # Slice-as-sequence adaptation; direction is randomized only in training.
        if self.training and self.slice_attn_random_dir:
            direction = "backward" if torch.rand(1).item() < 0.5 else "forward"
        else:
            direction = "forward"
        tokens = self.slice_adapter(tokens, direction=direction)

        # Global token and prompt
        z = tokens.mean(dim=(1, 2))  # [B,C]
        attr = self.attr_head(z)
        pi_et = attr["pi_et"]
        token_ids = self._select_concept_tokens(pi_et, label=label)
        prompt = self.prompt_encoder(token_ids)
        tokens = self.prompt_film(tokens, prompt)

        # Tokens -> 3D grid
        u = tokens.view(b, d, gh, gw, -1).permute(0, 4, 1, 2, 3)  # [B,C,D,h,w]
        u = self.dual_enhance(u, x_spec)

        # Decode at the padded resolution, then crop back to the original H/W.
        logits, gamma = self.moe_decoder(u, z, prompt, spectral_stats, target_size=(d, h, w))
        if pad_h > 0 or pad_w > 0:
            logits = logits[:, :, :, ph0 : ph0 + orig_h, pw0 : pw0 + orig_w]
        # Gate the ET probability map by the global ET-presence prior.
        et_prob_gated = torch.sigmoid(logits[:, 2:3]) * pi_et.view(b, 1, 1, 1, 1)

        aux = {
            "pi_et": pi_et,
            "moe_gamma": gamma,
            "spectral_stats": spectral_stats,
            "et_prob_gated": et_prob_gated,
        }
        return logits, aux
import torch


def label_to_regions(label: torch.Tensor) -> torch.Tensor:
    """Convert label map to region channels.

    Input:
        label: [B, 1, D, H, W] or [B, D, H, W] with values in {0,1,2,4}
    Output:
        regions: [B, 3, D, H, W] float32 in order [WT, TC, ET]
    """
    if label.dim() == 4:
        label = label.unsqueeze(1)
    if label.dim() != 5:
        raise ValueError("label must be [B,1,D,H,W] or [B,D,H,W].")
    lab = label.long()
    # WT = any tumor label; TC = necrotic core (1) + enhancing (4); ET = enhancing only.
    channels = [
        (lab > 0).float(),
        ((lab == 1) | (lab == 4)).float(),
        (lab == 4).float(),
    ]
    return torch.cat(channels, dim=1)


def regions_to_label(regions: torch.Tensor) -> torch.Tensor:
    """Convert binary region channels to label map.

    Input:
        regions: [B, 3, D, H, W] (WT, TC, ET), values in {0,1} or probabilities
    Output:
        label: [B, 1, D, H, W] with values in {0,1,2,4}
    """
    if regions.dim() != 5:
        raise ValueError("regions must be [B,3,D,H,W].")
    wt, tc, et = (regions[:, i] > 0.5 for i in range(3))
    label = torch.zeros_like(wt, dtype=torch.long)
    # Write from broadest to most specific region so the most specific
    # label wins wherever the (nested) regions overlap.
    label[wt] = 2
    label[tc] = 1
    label[et] = 4
    return label.unsqueeze(1)
def et_present(label: torch.Tensor) -> torch.Tensor:
    """Check ET presence.

    Input:
        label: [B, 1, D, H, W] or [B, D, H, W]
    Output:
        present: [B] float32 (1 if ET exists else 0)
    """
    if label.dim() == 4:
        label = label.unsqueeze(1)
    # A sample "has ET" if any voxel carries the enhancing-tumor label (4).
    et = (label == 4).view(label.shape[0], -1).any(dim=1)
    return et.float()


# --- postprocess.py ---
from typing import Union
import numpy as np

try:
    from scipy import ndimage as ndi
    _HAS_SCIPY = True
except Exception:
    _HAS_SCIPY = False


def remove_small_components(mask: Union[np.ndarray, "torch.Tensor"], min_size: int) -> Union[np.ndarray, "torch.Tensor"]:
    """Remove small connected components from a binary 3D mask.

    Input:
        mask: [D, H, W] or [B, D, H, W] binary
        min_size: minimum voxel size to keep
    Output:
        filtered mask with same type/shape (numpy in, numpy out; torch in,
        torch out on the same device). Note the filtered output is uint8.
    """
    # String-based type test keeps torch an optional dependency here.
    is_torch = False
    if "torch" in str(type(mask)):
        import torch
        is_torch = True
        mask_np = mask.detach().cpu().numpy()
    else:
        mask_np = mask

    if not _HAS_SCIPY:
        # Graceful degrade: no connected component filtering.
        # FIX: was `return mask if not is_torch else mask` — both branches
        # returned the same object, so the conditional was meaningless.
        return mask

    def _filter_single(m: np.ndarray) -> np.ndarray:
        # Label 3D connected components, then keep only those >= min_size.
        labeled, num = ndi.label(m.astype(np.uint8))
        if num == 0:
            return m.astype(np.uint8)
        sizes = ndi.sum(m.astype(np.uint8), labeled, index=np.arange(1, num + 1))
        keep = np.zeros_like(m, dtype=np.uint8)
        for comp_id, size in enumerate(sizes, start=1):
            if size >= min_size:
                keep[labeled == comp_id] = 1
        return keep

    if mask_np.ndim == 3:
        out = _filter_single(mask_np)
    elif mask_np.ndim == 4:
        # Filter each batch element independently.
        out = np.stack([_filter_single(mask_np[i]) for i in range(mask_np.shape[0])], axis=0)
    else:
        raise ValueError("mask must be 3D or 4D.")

    if is_torch:
        import torch
        return torch.from_numpy(out).to(mask.device)
    return out


# --- seed.py ---
import random
import numpy as np
import torch


def set_seed(seed: int) -> None:
    """Set random seeds for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Deterministic cuDNN kernels; disables autotuning for reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
import os
import sys

import torch

sys.path.append(os.path.join(os.path.dirname(__file__), "..", "src"))

from gliomasam3_moe.losses.brats_losses import LossComputer


def test_loss_backward():
    """Loss evaluates on random region logits/labels and supports backward()."""
    region_logits = torch.randn(2, 3, 8, 32, 32, requires_grad=True)
    label_map = torch.randint(0, 4, (2, 1, 8, 32, 32)).clone()
    # map {0,1,2,3} -> {0,1,2,4}
    label_map[label_map == 3] = 4

    aux_outputs = {
        "pi_et": torch.sigmoid(torch.randn(2)),
        "moe_gamma": torch.softmax(torch.randn(2, 5), dim=-1),
    }

    criterion = LossComputer()
    total_loss, _ = criterion(region_logits, aux_outputs, label_map)
    total_loss.backward()


# --- tests/test_model_shapes.py ---
import os
import sys

import torch

sys.path.append(os.path.join(os.path.dirname(__file__), "..", "src"))

from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE


def test_model_shapes():
    """Forward pass yields region logits at input resolution plus aux outputs."""
    net = GliomaSAM3_MoE(
        patch_size=16,
        token_dim=64,
        depth=2,
        heads=4,
        slice_attn_k=4,
        spectral_bins=8,
        spectral_q=3,
        moe_experts=5,
        moe_topk=2,
        decoder_hidden=32,
        prompt_mlp_hidden=64,
    )
    volume = torch.randn(2, 4, 16, 128, 128)
    logits, aux = net(volume)
    assert logits.shape == (2, 3, 16, 128, 128)
    for key in ("pi_et", "moe_gamma", "spectral_stats", "et_prob_gated"):
        assert key in aux
import GliomaSAM3_MoE +from gliomasam3_moe.losses.brats_losses import LossComputer +from gliomasam3_moe.data.brats_dataset import BraTSDataset, SyntheticBraTSDataset, SegMambaNPZDataset, split_npz_paths +from gliomasam3_moe.data.transforms_segmamba_like import ( + get_train_transforms, + get_synthetic_transforms, + get_npz_train_transforms, +) +from gliomasam3_moe.utils.brats_regions import label_to_regions +from gliomasam3_moe.utils.postprocess import remove_small_components +from gliomasam3_moe.utils.seed import set_seed + + +def _to_namespace(obj: Any): + if isinstance(obj, dict): + return SimpleNamespace(**{k: _to_namespace(v) for k, v in obj.items()}) + return obj + + +def _to_dict(obj: Any): + if isinstance(obj, SimpleNamespace): + return {k: _to_dict(v) for k, v in obj.__dict__.items()} + return obj + + +def load_config(path: str) -> SimpleNamespace: + with open(path, "r") as f: + cfg = yaml.safe_load(f) + return _to_namespace(cfg) + + +def str2bool(v: str) -> bool: + return str(v).lower() in {"1", "true", "yes", "y"} + + +def fourier_amplitude_mix(x: torch.Tensor, p: float = 0.0) -> torch.Tensor: + """Fourier amplitude mixing (keep phase, mix amplitude within batch).""" + if p <= 0 or x.shape[0] < 2: + return x + b = x.shape[0] + device = x.device + mask = torch.rand(b, device=device) < p + if mask.sum() == 0: + return x + perm = torch.randperm(b, device=device) + x_fft = torch.fft.fftn(x, dim=(2, 3, 4)) + x_fft_perm = x_fft[perm] + amp = torch.abs(x_fft) + amp_perm = torch.abs(x_fft_perm) + phase = torch.angle(x_fft) + lam = torch.rand(b, device=device).view(b, 1, 1, 1, 1) + amp_mix = amp * (1.0 - lam) + amp_perm * lam + x_fft_mix = amp_mix * torch.exp(1j * phase) + x_mix = torch.fft.ifftn(x_fft_mix, dim=(2, 3, 4)).real + x_out = x.clone() + x_out[mask] = x_mix[mask] + return x_out + + +def compute_dice(pred: torch.Tensor, gt: torch.Tensor, eps: float = 1e-5) -> torch.Tensor: + """Compute Dice per-channel for [WT,TC,ET].""" + dims = (0, 2, 3, 4) + inter = 
(pred * gt).sum(dims) + union = pred.sum(dims) + gt.sum(dims) + dice = (2.0 * inter + eps) / (union + eps) + return dice + + +def _gaussian_weight(roi_size: Sequence[int], device: torch.device) -> torch.Tensor: + sigmas = [s * 0.125 for s in roi_size] + grids = [] + for size, sigma in zip(roi_size, sigmas): + center = (size - 1) / 2.0 + x = torch.arange(size, device=device, dtype=torch.float32) + grids.append(torch.exp(-0.5 * ((x - center) / max(sigma, 1e-6)) ** 2)) + w = grids[0][:, None, None] * grids[1][None, :, None] * grids[2][None, None, :] + w = w / w.max() + return w + + +def sliding_window_inference_3d( + inputs: torch.Tensor, + roi_size: Sequence[int], + overlap: float, + predictor, +) -> torch.Tensor: + """Simple 3D sliding window inference with gaussian weighting. + + Returns blended probabilities (after sigmoid). If the predictor returns + (logits, aux) and aux contains "pi_et", ET probabilities are gated. + """ + b, c, d, h, w = inputs.shape + if b != 1: + raise ValueError("sliding_window_inference_3d expects batch size 1.") + rz, ry, rx = [int(x) for x in roi_size] + pad_d = max(0, rz - d) + pad_h = max(0, ry - h) + pad_w = max(0, rx - w) + if pad_d > 0 or pad_h > 0 or pad_w > 0: + pd0, pd1 = pad_d // 2, pad_d - pad_d // 2 + ph0, ph1 = pad_h // 2, pad_h - pad_h // 2 + pw0, pw1 = pad_w // 2, pad_w - pad_w // 2 + inputs = F.pad(inputs, (pw0, pw1, ph0, ph1, pd0, pd1)) + d, h, w = inputs.shape[-3:] + sz = max(1, int(rz * (1.0 - overlap))) + sy = max(1, int(ry * (1.0 - overlap))) + sx = max(1, int(rx * (1.0 - overlap))) + + def _starts(dim, roi, step): + if dim <= roi: + return [0] + starts = list(range(0, dim - roi + 1, step)) + if starts[-1] != dim - roi: + starts.append(dim - roi) + return starts + + zs = _starts(d, rz, sz) + ys = _starts(h, ry, sy) + xs = _starts(w, rx, sx) + + weight = _gaussian_weight((rz, ry, rx), inputs.device)[None, None] + out = torch.zeros((1, 3, d, h, w), device=inputs.device, dtype=torch.float32) + count = torch.zeros((1, 1, 
d, h, w), device=inputs.device, dtype=torch.float32) + + for z in zs: + for y in ys: + for x in xs: + patch = inputs[:, :, z : z + rz, y : y + ry, x : x + rx] + pred = predictor(patch) + aux = None + if isinstance(pred, (tuple, list)): + logits = pred[0] + if len(pred) > 1: + aux = pred[1] + else: + logits = pred + probs = torch.sigmoid(logits) + if isinstance(aux, dict) and "pi_et" in aux: + pi_et = aux["pi_et"].view(probs.shape[0], 1, 1, 1, 1) + probs[:, 2:3] = probs[:, 2:3] * pi_et + out[:, :, z : z + rz, y : y + ry, x : x + rx] += probs * weight + count[:, :, z : z + rz, y : y + ry, x : x + rx] += weight + + out = out / count.clamp(min=1e-6) + if pad_d > 0 or pad_h > 0 or pad_w > 0: + out = out[:, :, pd0 : pd0 + (d - pad_d), ph0 : ph0 + (h - pad_h), pw0 : pw0 + (w - pad_w)] + return out + + +def _compute_hd95(pred: torch.Tensor, gt: torch.Tensor, spacing: Sequence[float]) -> List[float]: + try: + from medpy import metric + except Exception: + return [float("nan")] * 3 + + pred_np = pred.detach().cpu().numpy() + gt_np = gt.detach().cpu().numpy() + out = [] + for c in range(3): + p = pred_np[c] > 0 + g = gt_np[c] > 0 + if p.sum() > 0 and g.sum() > 0: + out.append(metric.binary.hd95(p, g, voxelspacing=spacing)) + else: + out.append(50.0) + return out + + +@torch.no_grad() +def evaluate_test( + model: torch.nn.Module, + loader: DataLoader, + cfg, + device: torch.device, +) -> Dict[str, List[float]]: + model.eval() + roi = getattr(cfg.infer, "roi_size", cfg.data.crop_size) + overlap = float(getattr(cfg.infer, "overlap", 0.5)) + threshold = float(getattr(cfg.infer, "threshold", 0.5)) + et_min = int(getattr(cfg.infer, "et_cc_min_size", 0)) + spacing = getattr(cfg.data, "spacing", [1.0, 1.0, 1.0]) + + dice_all: List[List[float]] = [] + hd95_all: List[List[float]] = [] + + max_cases = int(getattr(cfg.train, "test_max_cases", 0)) + for idx, batch in enumerate(loader): + if max_cases and idx >= max_cases: + break + image = batch["image"].to(device) + label = 
@torch.no_grad()
def evaluate_test(
    model: torch.nn.Module,
    loader: DataLoader,
    cfg,
    device: torch.device,
) -> Dict[str, List[float]]:
    """Sliding-window evaluation on the test loader.

    Returns mean Dice and HD95 per region ([WT, TC, ET]).
    """
    model.eval()
    roi = getattr(cfg.infer, "roi_size", cfg.data.crop_size)
    overlap = float(getattr(cfg.infer, "overlap", 0.5))
    threshold = float(getattr(cfg.infer, "threshold", 0.5))
    et_min = int(getattr(cfg.infer, "et_cc_min_size", 0))
    spacing = getattr(cfg.data, "spacing", [1.0, 1.0, 1.0])

    dice_all: List[List[float]] = []
    hd95_all: List[List[float]] = []

    max_cases = int(getattr(cfg.train, "test_max_cases", 0))
    for idx, batch in enumerate(loader):
        if max_cases and idx >= max_cases:
            break
        image = batch["image"].to(device)
        label = batch["label"].to(device)

        probs = sliding_window_inference_3d(
            inputs=image,
            roi_size=roi,
            overlap=overlap,
            predictor=lambda x: model(x),
        )
        pred = (probs > threshold).float()
        if et_min > 0:
            # Standard BraTS post-processing: drop tiny ET components.
            pred[:, 2] = remove_small_components(pred[:, 2], et_min)

        gt = label_to_regions(label)
        dice = compute_dice(pred, gt).detach().cpu().tolist()
        hd95 = _compute_hd95(pred[0], gt[0], spacing)
        dice_all.append(dice)
        hd95_all.append(hd95)

    mean_dice = torch.tensor(dice_all).mean(dim=0).tolist()
    mean_hd95 = torch.tensor(hd95_all).mean(dim=0).tolist()
    return {"dice": mean_dice, "hd95": mean_hd95}


def save_checkpoint(path: str, model: torch.nn.Module, optimizer: torch.optim.Optimizer, cfg: SimpleNamespace, step: int):
    """Serialize model/optimizer state plus the resolved config and step."""
    # `or "."`: a bare filename has an empty dirname, which makedirs rejects.
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    torch.save(
        {
            "model": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "config": _to_dict(cfg),
            "step": step,
        },
        path,
    )


def main():
    """Training entry point: config, data, model, AMP training loop."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str, default="configs/debug.yaml")
    parser.add_argument("--synthetic", type=str, default=None, help="Override synthetic flag.")
    args = parser.parse_args()

    cfg = load_config(args.config)
    if args.synthetic is not None:
        cfg.synthetic = str2bool(args.synthetic)

    # Clean up noisy warnings in training logs.
    warnings.filterwarnings("ignore", category=FutureWarning, message=".*GradScaler.*")
    warnings.filterwarnings("ignore", category=FutureWarning, message=".*autocast.*")
    warnings.filterwarnings("ignore", category=UserWarning, message="The given NumPy array is not writable.*")

    set_seed(cfg.seed)
    device = torch.device(cfg.device if torch.cuda.is_available() else "cpu")

    if cfg.synthetic:
        transforms = get_synthetic_transforms(cfg)
        dataset = SyntheticBraTSDataset(
            num_cases=cfg.data.synthetic_cases,
            shape=cfg.data.synthetic_shape,
            transforms=transforms,
            seed=cfg.seed,
        )
    else:
        data_format = getattr(cfg.data, "format", "nifti")
        if data_format == "segmamba_npz":
            train_rate = getattr(cfg.data, "train_rate", 0.7)
            val_rate = getattr(cfg.data, "val_rate", 0.1)
            test_rate = getattr(cfg.data, "test_rate", 0.2)
            train_paths, _, _ = split_npz_paths(
                cfg.data.root_dir, train_rate=train_rate, val_rate=val_rate, test_rate=test_rate, seed=cfg.seed
            )
            ensure_npy = bool(getattr(cfg.data, "segmamba_unpack", True))
            transforms = get_npz_train_transforms(cfg)
            dataset = SegMambaNPZDataset(
                data_dir=cfg.data.root_dir,
                npz_paths=train_paths,
                test=False,
                ensure_npy=ensure_npy,
                map_et_to_4=True,
                transforms=transforms,
            )
        else:
            # FIX: previously any other format fell through with `dataset`
            # unbound, so the DataLoader below crashed with a NameError.
            raise ValueError(
                f"Unsupported data format: {data_format!r}; only 'segmamba_npz' is supported."
            )
    loader = DataLoader(
        dataset,
        batch_size=cfg.data.batch_size,
        shuffle=True,
        num_workers=cfg.train.num_workers,
    )

    test_loader = None
    if not cfg.synthetic and getattr(cfg.train, "test_every_epochs", 0):
        data_format = getattr(cfg.data, "format", "nifti")
        if data_format == "segmamba_npz":
            train_rate = getattr(cfg.data, "train_rate", 0.7)
            val_rate = getattr(cfg.data, "val_rate", 0.1)
            test_rate = getattr(cfg.data, "test_rate", 0.2)
            _, _, test_paths = split_npz_paths(
                cfg.data.root_dir, train_rate=train_rate, val_rate=val_rate, test_rate=test_rate, seed=cfg.seed
            )
            ensure_npy = bool(getattr(cfg.data, "segmamba_unpack", True))
            test_ds = SegMambaNPZDataset(
                data_dir=cfg.data.root_dir,
                npz_paths=test_paths,
                test=False,
                ensure_npy=ensure_npy,
                map_et_to_4=True,
            )
            test_loader = DataLoader(
                test_ds,
                batch_size=1,
                shuffle=False,
                num_workers=max(0, int(cfg.train.num_workers)),
            )
        else:
            print("[WARN] test_every_epochs is set but only segmamba_npz is supported; skipping test.")

    model = GliomaSAM3_MoE(**cfg.model.__dict__).to(device)
    loss_fn = LossComputer(**cfg.loss.__dict__).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.train.lr, weight_decay=cfg.train.weight_decay)
    # AMP only helps (and only works reliably) on CUDA devices.
    scaler = GradScaler(enabled=bool(cfg.amp and device.type == "cuda"))

    model.train()
    step = 0
    for epoch in range(cfg.train.epochs):
        pbar = tqdm(
            loader,
            desc=f"Epoch {epoch + 1}/{cfg.train.epochs}",
            leave=False,
            dynamic_ncols=True,
        )
        for batch in pbar:
            step += 1
            image = batch["image"].to(device)
            label = batch["label"].to(device)
            image = fourier_amplitude_mix(image, p=cfg.train.fourier_mix_prob)

            optimizer.zero_grad(set_to_none=True)
            with autocast(enabled=bool(cfg.amp and device.type == "cuda")):
                logits, aux = model(image, label=label if cfg.train.use_label_prompt else None)
                loss, logs = loss_fn(logits, aux, label)
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            if step % cfg.train.log_every == 0:
                with torch.no_grad():
                    gt = label_to_regions(label)
                    pred = (torch.sigmoid(logits) > 0.5).float()
                    dice = compute_dice(pred, gt).detach().cpu().numpy().tolist()
                    pbar.set_postfix(
                        {
                            "step": step,
                            "loss": f"{logs['loss_total']:.4f}",
                            "dice": [f"{d:.3f}" for d in dice],
                        }
                    )

            if step % cfg.train.save_every == 0:
                ckpt_path = os.path.join(cfg.train.ckpt_dir, f"ckpt_step{step}.pt")
                save_checkpoint(ckpt_path, model, optimizer, cfg, step)

            if step >= cfg.train.max_steps:
                break
        # Periodic held-out evaluation (segmamba_npz only).
        test_every = int(getattr(cfg.train, "test_every_epochs", 0))
        if test_loader is not None and test_every > 0 and (epoch + 1) % test_every == 0:
            metrics = evaluate_test(model, test_loader, cfg, device)
            print(
                f"[TEST] epoch={epoch + 1} dice[WT,TC,ET]={metrics['dice']} "
                f"hd95[WT,TC,ET]={metrics['hd95']}"
            )
            model.train()

        if step >= cfg.train.max_steps:
            break


if __name__ == "__main__":
    main()
diff --git a/source_code/gliomasam3_moe/visualizations/find_best_cases.py b/source_code/gliomasam3_moe/visualizations/find_best_cases.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c079bda833320f16cf19259931932a04d9ee1d4
--- /dev/null
+++ b/source_code/gliomasam3_moe/visualizations/find_best_cases.py
@@ -0,0 +1,108 @@
+"""
+Script to find best cases for different visualizations.
+
+Scans processed BraTS cases, runs the model once per case, and ranks
+candidates for the "ET-absent" and "MoE routing" figures.  Paths that
+used to be hard-coded are now CLI flags with the old values as defaults.
+"""
+import argparse
+import glob
+import os
+import sys
+
+import torch
+
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "src"))
+
+
+def _parse_args():
+    """CLI flags; defaults reproduce the previous hard-coded behavior."""
+    parser = argparse.ArgumentParser(description="Rank cases for visualization figures.")
+    parser.add_argument("--checkpoint", default="/root/githubs/gliomasam3_moe/logs/segmamba/model/ckpt_step600.pt")
+    parser.add_argument("--data-dir", default="/data/yty/brats23_segmamba_processed")
+    parser.add_argument("--vis-config", default="visualizations/vis_config.yaml")
+    parser.add_argument("--train-config", default="configs/train.yaml")
+    parser.add_argument("--max-cases", type=int, default=50, help="Scan at most this many cases.")
+    parser.add_argument("--device", default="cuda")
+    return parser.parse_args()
+
+
+def main():
+    args = _parse_args()
+
+    # Enumerate cases from the processed data directory.
+    npz_files = sorted(glob.glob(os.path.join(args.data_dir, "*.npz")))
+    case_ids = [os.path.basename(f).replace(".npz", "") for f in npz_files]
+    print(f"Found {len(case_ids)} cases")
+
+    # Use ModelRunner from vis_publication (imported late so the sys.path
+    # edits above take effect first).
+    from vis_publication import ModelRunner
+    import yaml
+
+    with open(args.vis_config, "r") as f:
+        vis_cfg = yaml.safe_load(f)
+
+    runner = ModelRunner(vis_cfg, args.train_config, args.checkpoint, args.device)
+    print("Model loaded")
+
+    et_absent_candidates = []
+    moe_routing_candidates = []
+
+    for case_id in case_ids[: args.max_cases]:
+        try:
+            img_t, _ = runner.load_case_tensor(case_id)
+            with torch.no_grad():
+                out = runner.forward_intermediate(img_t)
+
+            pi_et = float(out["pi_et"].cpu().numpy().reshape(-1)[0])
+            et_pre = out["et_pre"].cpu().numpy()[0, 0]
+            et_post = out["et_post"].cpu().numpy()[0, 0]
+            moe_gamma = out["moe_gamma"].cpu().numpy()[0]  # [3, n_experts]
+
+            # ET-absent score: low pi_et AND a big pre/post-gating difference.
+            pre_sum = (et_pre > 0.5).sum()
+            post_sum = (et_post > 0.5).sum()
+            diff_ratio = abs(pre_sum - post_sum) / max(pre_sum, 1)
+            et_absent_candidates.append({
+                "case_id": case_id,
+                "pi_et": pi_et,
+                "pre_sum": int(pre_sum),
+                "post_sum": int(post_sum),
+                "diff_ratio": diff_ratio,
+                "score": (1 - pi_et) * diff_ratio,  # high = low pi_et + big diff
+            })
+
+            # MoE routing score: many non-trivial experts with diverse weights.
+            gamma_mean = moe_gamma.mean(axis=0)  # average over the 3 regions
+            nonzero_count = (gamma_mean > 0.05).sum()
+            gamma_std = gamma_mean.std()
+            moe_routing_candidates.append({
+                "case_id": case_id,
+                "gamma_mean": gamma_mean.tolist(),
+                "nonzero_count": int(nonzero_count),
+                "gamma_std": float(gamma_std),
+                "score": nonzero_count * gamma_std,  # high = diverse weights
+            })
+
+            print(f"{case_id}: pi_et={pi_et:.3f}, pre={pre_sum}, post={post_sum}, diff={diff_ratio:.2f}, moe_nonzero={nonzero_count}")
+        except Exception as e:
+            # Best-effort scan: report the failing case and keep going.
+            print(f"Error on {case_id}: {e}")
+            continue
+
+    print("\n" + "=" * 60)
+    print("Best ET-absent candidates (high score = low pi_et + big diff):")
+    et_absent_candidates.sort(key=lambda x: x["score"], reverse=True)
+    for c in et_absent_candidates[:10]:
+        print(f"  {c['case_id']}: pi_et={c['pi_et']:.3f}, pre={c['pre_sum']}, post={c['post_sum']}, score={c['score']:.3f}")
+
+    print("\n" + "=" * 60)
+    print("Best MoE routing candidates (high score = diverse weights):")
+    moe_routing_candidates.sort(key=lambda x: x["score"], reverse=True)
+    for c in moe_routing_candidates[:10]:
+        print(f"  {c['case_id']}: nonzero={c['nonzero_count']}, std={c['gamma_std']:.3f}, weights={[f'{w:.2f}' for w in c['gamma_mean']]}")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/source_code/gliomasam3_moe/visualizations/run_inference.py b/source_code/gliomasam3_moe/visualizations/run_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..e18fde80aec88f74180e63954e5b729199c35522
--- /dev/null
+++ 
b/source_code/gliomasam3_moe/visualizations/run_inference.py @@ -0,0 +1,138 @@ +"""Standalone inference script for visualization (does not modify original code).""" +import argparse +import os +import sys +from types import SimpleNamespace +from typing import Any + +import yaml +import numpy as np +import torch +import nibabel as nib +from torch.utils.data import DataLoader + +os.environ.setdefault("MONAI_SKIP_SUBMODULES", "1") +from monai.inferers import sliding_window_inference + +ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +SRC_DIR = os.path.join(ROOT_DIR, "src") +if SRC_DIR not in sys.path: + sys.path.insert(0, SRC_DIR) + +from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE +from gliomasam3_moe.data.brats_dataset import SegMambaNPZDataset +from gliomasam3_moe.utils.brats_regions import regions_to_label +from gliomasam3_moe.utils.postprocess import remove_small_components + + +def _to_namespace(obj: Any): + if isinstance(obj, dict): + return SimpleNamespace(**{k: _to_namespace(v) for k, v in obj.items()}) + return obj + + +def load_config(path: str) -> SimpleNamespace: + with open(path, "r") as f: + cfg = yaml.safe_load(f) + return _to_namespace(cfg) + + +def save_nifti(path: str, arr: np.ndarray, affine: np.ndarray): + img = nib.Nifti1Image(arr, affine) + nib.save(img, path) + + +def save_segmamba_3c(path: str, arr_3c: np.ndarray, affine: np.ndarray | None = None): + if affine is None: + affine = np.eye(4) + if arr_3c.ndim != 4 or arr_3c.shape[0] != 3: + raise ValueError(f"expected (3,D,H,W), got {arr_3c.shape}") + arr = arr_3c.transpose(1, 2, 3, 0) + save_nifti(path, arr.astype(np.uint8), affine) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--config", type=str, default=os.path.join(ROOT_DIR, "configs/train.yaml")) + parser.add_argument("--input", type=str, required=True) + parser.add_argument("--output", type=str, required=True) + parser.add_argument("--checkpoint", type=str, 
required=True) + parser.add_argument("--cases", type=str, default="", help="Comma-separated case IDs (optional)") + args = parser.parse_args() + + cfg = load_config(args.config) + device = torch.device(cfg.device if torch.cuda.is_available() else "cpu") + + model = GliomaSAM3_MoE(**cfg.model.__dict__).to(device) + ckpt = torch.load(args.checkpoint, map_location="cpu") + # Filter out freqs_cis which is dynamically computed and may have shape mismatch + state_dict = {k: v for k, v in ckpt["model"].items() if "freqs_cis" not in k} + missing, unexpected = model.load_state_dict(state_dict, strict=False) + if missing: + non_freqs = [k for k in missing if "freqs_cis" not in k] + if non_freqs: + print(f"Missing keys (non-freqs_cis): {non_freqs}") + model.eval() + + input_path = args.input + if not os.path.isdir(input_path): + raise ValueError("Input must be a directory containing *.npz files.") + + if args.cases: + case_ids = [c.strip() for c in args.cases.split(",")] + npz_paths = [os.path.join(input_path, f"{c}.npz") for c in case_ids] + npz_paths = [p for p in npz_paths if os.path.isfile(p)] + else: + npz_paths = None + + dataset = SegMambaNPZDataset( + data_dir=input_path, + npz_paths=npz_paths, + test=True, + ensure_npy=True, + map_et_to_4=True, + ) + loader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0) + + os.makedirs(args.output, exist_ok=True) + with torch.no_grad(): + for batch in loader: + image = batch["image"].to(device) + case_id = batch["case_id"][0] if isinstance(batch["case_id"], (list, tuple)) else batch["case_id"] + print(f"Processing {case_id}...") + + logits = sliding_window_inference( + inputs=image, + roi_size=tuple(cfg.infer.roi_size), + sw_batch_size=cfg.infer.sw_batch_size, + predictor=lambda x: model(x)[0], + overlap=cfg.infer.overlap, + ) + _, aux = model(image) + probs = torch.sigmoid(logits) + pi_et = aux["pi_et"].view(probs.shape[0], 1, 1, 1, 1) + probs[:, 2:3] = probs[:, 2:3] * pi_et + regions_bin = (probs > 
cfg.infer.threshold).float() + + et_pp = remove_small_components(regions_bin[:, 2], cfg.infer.et_cc_min_size) + regions_bin[:, 2] = et_pp + + label_map = regions_to_label(regions_bin) + + affine = np.eye(4) + prob_np = probs[0].detach().cpu().numpy().transpose(1, 2, 3, 0) + bin_np = regions_bin[0].detach().cpu().numpy().transpose(1, 2, 3, 0) + lbl_np = label_map[0, 0].detach().cpu().numpy().astype(np.int16) + + save_nifti(os.path.join(args.output, f"{case_id}_regions_prob.nii.gz"), prob_np, affine) + save_nifti(os.path.join(args.output, f"{case_id}_regions_bin.nii.gz"), bin_np, affine) + save_nifti(os.path.join(args.output, f"{case_id}_label.nii.gz"), lbl_np, affine) + + seg_arr = regions_bin[0].detach().cpu().numpy().astype(np.uint8) + save_segmamba_3c(os.path.join(args.output, f"{case_id}.nii.gz"), seg_arr, affine) + + print(f" Saved: {case_id}") + + +if __name__ == "__main__": + main() diff --git a/source_code/gliomasam3_moe/visualizations/vis_config.yaml b/source_code/gliomasam3_moe/visualizations/vis_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d1e06455ae411c4ecb8f2d07422940bb2ff953ed --- /dev/null +++ b/source_code/gliomasam3_moe/visualizations/vis_config.yaml @@ -0,0 +1,90 @@ +# Visualization config for gliomasam3_moe +# Output: vis_res/ (each visualization type in a separate subfolder) + +data: + format: "segmamba_npz" + root_dir: "/data/yty/brats23_segmamba_processed" + npz_dir: "/data/yty/brats23_segmamba_processed" + modalities: ["t1n", "t1c", "t2f", "t2w"] + seg_name: "seg" + +predictions: + ours: + name: "GliomaSAM3-MoE" + dir: "/root/githubs/gliomasam3_moe/vis_res/predictions_ours" + type: "label" + baselines: + - name: "SegMamba" + dir: "/root/githubs/SegMamba/prediction_results/segmamba_brats23_ep799" + type: "segmamba_3c" + aux_dir: "/root/githubs/gliomasam3_moe/vis_res/aux_cache" + +visualization: + output_dir: "/root/githubs/gliomasam3_moe/vis_res" + overlay_modality: "t1c" + alpha: 0.45 + colors: + WT: [1.0, 
0.85, 0.0] + TC: [0.0, 1.0, 0.25] + ET: [1.0, 0.0, 0.0] + boundary_region: "ET" + frag_bins: [1, 3, 5] + scale_bins: [50, 200, 500] + +# Case selection +cases: + # 1) Main qualitative comparison (Ours vs SegMamba) + qualitative: + - "BraTS-GLI-00005-000" + - "BraTS-GLI-00006-000" + - "BraTS-GLI-00017-000" + + # 2) ET-absent case study (best candidate with highest pre/post diff) + et_absent: + - "BraTS-GLI-00012-000" + + # 3) Boundary-focused visualization + boundary: + - "BraTS-GLI-00005-000" + - "BraTS-GLI-00017-000" + + # 4) Tiny/fragmented ET cases + tiny_et: + - "BraTS-GLI-00005-000" + - "BraTS-GLI-00006-000" + + # 5) Cross-year robustness (placeholder - needs cross-year data) + cross_year: {} + + # 6) MoE routing interpretability + moe: + - "BraTS-GLI-00018-000" + - "BraTS-GLI-00012-000" + + # 7) Concept tokens interpretability + concept_tokens: + - "BraTS-GLI-00005-000" + - "BraTS-GLI-00006-000" + + # 8) Dual-domain enhancement effect + dual_domain: + - "BraTS-GLI-00005-000" + - "BraTS-GLI-00017-000" + + # 9) Augmentation robustness (AmpMix) + ampmix: + - base: "BraTS-GLI-00005-000" + mix: "BraTS-GLI-00006-000" + lam: 0.5 + + # 10) Failure cases / limitations + failure: + - "BraTS-GLI-00020-000" + failure_notes: + "BraTS-GLI-00020-000": "potential boundary ambiguity" + +# 11) Efficiency / inference visualization +efficiency: + case_id: "BraTS-GLI-00005-000" + roi_size: [128, 128, 128] + overlap: 0.5 diff --git a/source_code/gliomasam3_moe/visualizations/vis_config_example.yaml b/source_code/gliomasam3_moe/visualizations/vis_config_example.yaml new file mode 100644 index 0000000000000000000000000000000000000000..435394c7de014e837d73659d2ab3d1ba67fe4440 --- /dev/null +++ b/source_code/gliomasam3_moe/visualizations/vis_config_example.yaml @@ -0,0 +1,70 @@ +data: + format: "segmamba_npz" # or "nifti" + root_dir: "/data/your/brats/fullres/train" + npz_dir: "/data/your/brats/fullres/train" + modalities: ["t1n", "t1c", "t2f", "t2w"] + seg_name: "seg" + +predictions: 
+ ours: + name: "GliomaSAM3-MoE" + dir: "/path/to/ours/predictions" + type: "label" # label | regions_prob | regions_bin | segmamba_3c | auto + baselines: + - name: "Baseline-A" + dir: "/path/to/baseline_a/pred" + type: "label" + - name: "Baseline-B" + dir: "/path/to/baseline_b/pred" + type: "label" + - name: "Baseline-C" + dir: "/path/to/baseline_c/pred" + type: "label" + aux_dir: "/path/to/aux_cache" + +visualization: + output_dir: "/path/to/vis_outputs" + overlay_modality: "t1c" + alpha: 0.45 + colors: + WT: [1.0, 0.85, 0.0] + TC: [0.0, 1.0, 0.25] + ET: [1.0, 0.0, 0.0] + boundary_region: "ET" + frag_bins: [1, 3, 5] + scale_bins: [50, 200, 500] + +cases: + qualitative: ["BraTS-GLI-00001", "BraTS-GLI-00002"] + et_absent: ["BraTS-GLI-00010"] + boundary: ["BraTS-GLI-00003"] + tiny_et: ["BraTS-GLI-00004", "BraTS-GLI-00005"] + moe: ["BraTS-GLI-00001", "BraTS-GLI-00002"] + concept_tokens: ["BraTS-GLI-00001"] + dual_domain: ["BraTS-GLI-00001"] + ampmix: + - base: "BraTS-GLI-00001" + mix: "BraTS-GLI-00002" + lam: 0.5 + cross_year: + 2021_to_2023: + method: + name: "Ours-2021" + dir: "/path/to/ours_2021/pred" + type: "label" + cases: ["BraTS-GLI-2023-00001"] + 2023_to_2021: + method: + name: "Ours-2023" + dir: "/path/to/ours_2023/pred" + type: "label" + cases: ["BraTS-GLI-2021-00001"] + failure: ["BraTS-GLI-00006", "BraTS-GLI-00007"] + failure_notes: + "BraTS-GLI-00006": "low contrast" + "BraTS-GLI-00007": "motion artifacts / missing modality" + +efficiency: + case_id: "BraTS-GLI-00001" + roi_size: [128, 128, 128] + overlap: 0.5 diff --git a/source_code/gliomasam3_moe/visualizations/vis_method_comparison.py b/source_code/gliomasam3_moe/visualizations/vis_method_comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..ae30aaf43ac46a88efa658567249615479404622 --- /dev/null +++ b/source_code/gliomasam3_moe/visualizations/vis_method_comparison.py @@ -0,0 +1,676 @@ +""" +Method comparison visualization for GliomaSAM3-MoE vs SegMamba. 
+Generates separate images for: +- Original input (4 modalities) +- Ground truth +- Predictions from different checkpoints + +Usage: + cd /root/githubs/gliomasam3_moe + PYTHONPATH=/root/githubs/sam3:$PYTHONPATH python visualizations/vis_method_comparison.py +""" + +import argparse +import os +import sys +from typing import Dict, List, Optional, Tuple + +import numpy as np +import torch +import yaml + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +from matplotlib.colors import ListedColormap +from scipy.ndimage import zoom + +# ============================================================================ +# Global Style Configuration +# ============================================================================ +STYLE = { + "dpi": 300, + "font_size": 12, + "color_WT": "#00BBD4", # cyan + "color_TC": "#D81B60", # magenta + "color_ET": "#FBC02D", # yellow + "alpha_mask": 0.45, +} + +def hex_to_rgb(hex_color: str) -> Tuple[float, float, float]: + h = hex_color.lstrip("#") + return tuple(int(h[i:i+2], 16) / 255.0 for i in (0, 2, 4)) + +COLORS = { + "WT": hex_to_rgb(STYLE["color_WT"]), + "TC": hex_to_rgb(STYLE["color_TC"]), + "ET": hex_to_rgb(STYLE["color_ET"]), +} + +# ============================================================================ +# Add project paths +# ============================================================================ +ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +SRC_DIR = os.path.join(ROOT_DIR, "src") +SEGMAMBA_DIR = "/root/githubs/SegMamba" + +if SRC_DIR not in sys.path: + sys.path.insert(0, SRC_DIR) +if SEGMAMBA_DIR not in sys.path: + sys.path.insert(0, SEGMAMBA_DIR) + +from scipy import ndimage as ndi + +# ============================================================================ +# Configuration +# ============================================================================ +CONFIG = { + # Data + "data_dir": "/data/yty/brats23_segmamba_processed", + "modalities": ["t1n", "t1c", 
"t2f", "t2w"], + "modality_names": ["T1", "T1ce", "FLAIR", "T2"], + + # Selected cases (6 cases) + "cases": [ + "BraTS-GLI-00005-000", + "BraTS-GLI-00006-000", + "BraTS-GLI-00012-000", + "BraTS-GLI-00017-000", + "BraTS-GLI-00018-000", + "BraTS-GLI-00020-000", + ], + + # GliomaSAM3-MoE checkpoints (3 paths) + "gliomasam_ckpts": [ + "/root/githubs/gliomasam3_moe/logs/segmamba/model/ckpt_step2000.pt", + "/root/githubs/gliomasam3_moe/logs/segmamba/model/ckpt_step2600.pt", + "/root/githubs/gliomasam3_moe/logs/segmamba/model/ckpt_step3000.pt", + ], + "gliomasam_names": ["step2000", "step2600", "step3000"], + + # SegMamba pre-generated predictions (use existing prediction directories) + # Since SegMamba has CUDA compatibility issues, we use pre-generated results + "segmamba_pred_dirs": [ + "/root/githubs/SegMamba/prediction_results/segmamba_brats23", + "/root/githubs/SegMamba/prediction_results/segmamba_brats23_ep799", + ], + "segmamba_names": ["segmamba_default", "segmamba_ep799"], + + # Original SegMamba checkpoints (for reference, currently unused due to CUDA issues) + "segmamba_ckpts": [ + "/root/githubs/SegMamba/logs/segmamba_brats23/model/tmp_model_ep299_0.8274.pt", + "/root/githubs/SegMamba/logs/segmamba_brats23/model/tmp_model_ep599_0.8295.pt", + "/root/githubs/SegMamba/logs/segmamba_brats23/model/tmp_model_ep799_0.8498.pt", + ], + + # Model configs + "gliomasam_config": "/root/githubs/gliomasam3_moe/configs/train.yaml", + + # Output + "output_dir": "/root/githubs/gliomasam3_moe/vis_res/method_comparison", +} + +# ============================================================================ +# Utility Functions +# ============================================================================ +def ensure_dir(path: str) -> None: + os.makedirs(path, exist_ok=True) + +def load_yaml(path: str) -> Dict: + with open(path, "r") as f: + return yaml.safe_load(f) + +def normalize_volume(vol: np.ndarray, eps: float = 1e-6) -> np.ndarray: + """Normalize volume to [0, 1] using 
percentile clipping.""" + x = np.asarray(vol, dtype=np.float32) + x = np.nan_to_num(x, nan=0.0, posinf=0.0, neginf=0.0) + flat = x.reshape(-1) + if flat.size == 0: + return np.zeros_like(x, dtype=np.float32) + lo, hi = np.percentile(flat, [1, 99]) + if hi - lo < eps: + return np.zeros_like(x, dtype=np.float32) + x = np.clip(x, lo, hi) + x = (x - lo) / (hi - lo + eps) + return x + +def label_to_regions(label: np.ndarray) -> np.ndarray: + """Convert BraTS label to [WT, TC, ET] regions.""" + label = np.asarray(label) + wt = label > 0 + tc = (label == 1) | (label == 4) + et = label == 4 + return np.stack([wt, tc, et], axis=0).astype(np.uint8) + +def regions_to_label(regions: np.ndarray) -> np.ndarray: + """Convert [WT, TC, ET] regions back to BraTS label.""" + if regions.ndim == 4 and regions.shape[0] == 3: + wt = regions[0] > 0.5 + tc = regions[1] > 0.5 + et = regions[2] > 0.5 + elif regions.ndim == 3: + # Assume it's already a single-channel label + return regions.astype(np.int16) + else: + raise ValueError(f"Invalid regions shape: {regions.shape}") + + label = np.zeros_like(wt, dtype=np.int16) + label[wt] = 2 # Whole tumor - edema + label[tc] = 1 # Tumor core - necrotic + label[et] = 4 # Enhanced tumor + return label + +def extract_slice(vol: np.ndarray, plane: str, idx: int) -> np.ndarray: + """Extract 2D slice from 3D volume.""" + if plane == "axial": + img = vol[idx, :, :] + elif plane == "coronal": + img = vol[:, idx, :] + elif plane == "sagittal": + img = vol[:, :, idx] + else: + raise ValueError(f"Unknown plane: {plane}") + return np.rot90(img) + +def select_best_slice(mask: np.ndarray) -> Dict[str, int]: + """Select slice with maximum tumor content.""" + if mask is None or mask.sum() == 0: + return {"axial": mask.shape[0] // 2 if mask is not None else 64} + m = mask.astype(np.uint8) + axial = int(np.argmax(m.sum(axis=(1, 2)))) + return {"axial": axial} + +def mask_boundary(mask2d: np.ndarray, iterations: int = 1) -> np.ndarray: + """Extract boundary of a 
binary mask.""" + if mask2d.sum() == 0: + return mask2d.astype(bool) + eroded = ndi.binary_erosion(mask2d.astype(bool), iterations=iterations) + return np.logical_xor(mask2d.astype(bool), eroded) + +def overlay_masks_publication( + base2d: np.ndarray, + masks: Dict[str, np.ndarray], + alpha: float = STYLE["alpha_mask"], + draw_boundary: bool = True, + boundary_width: int = 2, +) -> np.ndarray: + """Overlay masks with publication-quality colors and boundaries.""" + base = np.clip(base2d, 0.0, 1.0) + rgb = np.stack([base, base, base], axis=-1).astype(np.float32) + + # Draw order: WT -> TC -> ET (ET on top) + order = ["WT", "TC", "ET"] + for key in order: + if key not in masks: + continue + m = masks[key].astype(bool) + if m.shape != base.shape: + zoom_factors = (base.shape[0] / m.shape[0], base.shape[1] / m.shape[1]) + m = zoom(m.astype(float), zoom_factors, order=0) > 0.5 + if m.sum() == 0: + continue + color = np.array(COLORS.get(key, (1.0, 0.0, 0.0)), dtype=np.float32) + rgb[m] = (1.0 - alpha) * rgb[m] + alpha * color + + if draw_boundary: + b = mask_boundary(m, iterations=boundary_width) + rgb[b] = color + + return np.clip(rgb, 0, 1) + +# ============================================================================ +# Data Loading +# ============================================================================ +def load_case(data_dir: str, case_id: str) -> Dict: + """Load a single case from the segmamba processed data.""" + npz_path = os.path.join(data_dir, case_id + ".npz") + npy_path = os.path.join(data_dir, case_id + ".npy") + seg_path = os.path.join(data_dir, case_id + "_seg.npy") + + # Load image + if os.path.isfile(npy_path): + image = np.load(npy_path, mmap_mode="r") + else: + data = np.load(npz_path) + image = data["data"] + + image = np.asarray(image, dtype=np.float32) + if image.ndim == 5 and image.shape[0] == 1: + image = image[0] + if image.ndim == 4 and image.shape[0] != 4 and image.shape[-1] == 4: + image = image.transpose(3, 0, 1, 2) + + # Load label 
+    if os.path.isfile(seg_path):
+        label = np.load(seg_path, mmap_mode="r")
+    else:
+        data = np.load(npz_path)
+        label = data["seg"] if "seg" in data else None
+
+    if label is not None:
+        label = np.asarray(label, dtype=np.int16)
+        if label.ndim == 4 and label.shape[0] == 1:
+            label = label[0]
+        # Map ET label 3 -> 4 if needed (some exports use {0,1,2,3}).
+        if label.max() == 3 and (label == 4).sum() == 0:
+            label = label.copy()
+            label[label == 3] = 4
+
+    return {"image": image, "label": label}
+
+# ============================================================================
+# Model Inference
+# ============================================================================
+class GliomaSAMPredictor:
+    """Predictor for the GliomaSAM3-MoE model.
+
+    Builds the network lazily on first checkpoint load and caches the
+    currently loaded checkpoint path so repeated loads are no-ops.
+    """
+
+    def __init__(self, config_path: str, device: str = "cuda"):
+        self.device = torch.device(device if torch.cuda.is_available() else "cpu")
+        self.cfg = load_yaml(config_path)
+        self.model = None
+        self.current_ckpt = None
+
+    def load_checkpoint(self, ckpt_path: str):
+        """Load model weights, skipping re-loads of the same checkpoint."""
+        if self.current_ckpt == ckpt_path:
+            return
+
+        from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE
+
+        if self.model is None:
+            self.model = GliomaSAM3_MoE(**self.cfg["model"]).to(self.device)
+
+        ckpt = torch.load(ckpt_path, map_location="cpu")
+        # Accept both {"model": state_dict} and a raw state_dict, matching
+        # the format handling in SegMambaPredictor.load_checkpoint below.
+        raw = ckpt["model"] if isinstance(ckpt, dict) and "model" in ckpt else ckpt
+        # freqs_cis is recomputed dynamically and may have a shape mismatch.
+        state_dict = {k: v for k, v in raw.items() if "freqs_cis" not in k}
+        self.model.load_state_dict(state_dict, strict=False)
+        self.model.eval()
+        self.current_ckpt = ckpt_path
+        print(f"  Loaded GliomaSAM checkpoint: {os.path.basename(ckpt_path)}")
+
+    def predict(self, image: np.ndarray) -> np.ndarray:
+        """Run inference on one case; returns binary regions (3, D, H, W)."""
+        if image.ndim != 4:
+            raise ValueError(f"Invalid image shape: {image.shape}")
+        x = torch.from_numpy(image).float().unsqueeze(0).to(self.device)  # (1, C, D, H, W)
+
+        with torch.no_grad():
+            logits, aux = self.model(x)
+            probs = torch.sigmoid(logits)
+
+            # Gate the ET channel by the predicted ET-presence probability.
+            pi_et = aux["pi_et"].view(probs.shape[0], 1, 1, 1, 1)
+            probs[:, 2:3] = probs[:, 2:3] * pi_et
+
+            # Binary prediction
+            regions_bin = (probs > 0.5).float()
+
+        return regions_bin[0].cpu().numpy()  # (3, D, H, W)
+
+
+class SegMambaPredictor:
+    """Predictor for the SegMamba baseline model."""
+
+    def __init__(self, device: str = "cuda"):
+        self.device = torch.device(device if torch.cuda.is_available() else "cpu")
+        self.model = None
+        self.current_ckpt = None
+
+    def load_checkpoint(self, ckpt_path: str):
+        """Load model weights, skipping re-loads of the same checkpoint."""
+        if self.current_ckpt == ckpt_path:
+            return
+
+        from model_segmamba.segmamba import SegMamba
+
+        if self.model is None:
+            self.model = SegMamba(
+                in_chans=4,
+                out_chans=4,
+                depths=[2, 2, 2, 2],
+                feat_size=[48, 96, 192, 384],
+            ).to(self.device)
+
+        ckpt = torch.load(ckpt_path, map_location="cpu")
+        # Handle different checkpoint formats.
+        if "model" in ckpt:
+            state_dict = ckpt["model"]
+        elif "state_dict" in ckpt:
+            state_dict = ckpt["state_dict"]
+        else:
+            state_dict = ckpt
+
+        self.model.load_state_dict(state_dict, strict=True)
+        self.model.eval()
+        self.current_ckpt = ckpt_path
+        print(f"  Loaded SegMamba checkpoint: {os.path.basename(ckpt_path)}")
+
+    def predict(self, image: np.ndarray) -> np.ndarray:
+        """Run inference on one case; returns binary regions (3, D, H, W)."""
+        if image.ndim != 4:
+            raise ValueError(f"Invalid image shape: {image.shape}")
+        x = torch.from_numpy(image).float().unsqueeze(0).to(self.device)  # (1, C, D, H, W)
+
+        with torch.no_grad():
+            logits = self.model(x)  # (1, 4, D, H, W)
+            pred_lbl = logits.argmax(dim=1)  # (1, D, H, W)
+
+        # Convert argmax labels to regions stacked as [WT, TC, ET].
+        # (Fixed: the previous comment claimed [TC, WT, ET], which did not
+        # match what the code actually builds.)
+        # SegMamba labels: 0=background, 1=NCR/NET, 2=ED, 3=ET
+        labels = pred_lbl[0].cpu().numpy()
+        tc = (labels == 1) | (labels == 3)  # NCR + ET
+        wt = (labels == 1) | (labels == 2) | (labels == 3)  # NCR + ED + ET
+        et = labels == 3
+
+        return np.stack([wt, tc, et], axis=0).astype(np.uint8)  # (3, D, H, W)
+
+
+# 
============================================================================ +# Visualization Functions +# ============================================================================ +def save_single_image( + arr2d: np.ndarray, + out_path: str, + cmap: str = "gray", + title: str = None, + is_overlay: bool = False, +): + """Save a single 2D image.""" + fig, ax = plt.subplots(figsize=(5, 5)) + + if is_overlay: + ax.imshow(arr2d, aspect="equal") + else: + ax.imshow(arr2d, cmap=cmap, aspect="equal") + + ax.axis("off") + if title: + ax.set_title(title, fontsize=STYLE["font_size"], fontweight="bold") + + fig.tight_layout(pad=0.1) + fig.savefig(out_path, dpi=STYLE["dpi"], bbox_inches="tight", facecolor="white") + plt.close(fig) + +def visualize_case( + case_id: str, + case_data: Dict, + gliomasam_predictor: GliomaSAMPredictor, + segmamba_predictor: SegMambaPredictor, + output_dir: str, +): + """Generate all visualizations for a single case.""" + print(f"\nProcessing case: {case_id}") + + image = case_data["image"] + label = case_data["label"] + + # Find best slice + if label is not None: + gt_regions = label_to_regions(label) + slice_info = select_best_slice(gt_regions[2]) # Use ET for slice selection + else: + slice_info = {"axial": image.shape[1] // 2} + + slice_idx = slice_info["axial"] + plane = "axial" + + case_dir = os.path.join(output_dir, case_id) + ensure_dir(case_dir) + + # -------------------------- + # 1. Save original modalities + # -------------------------- + print(" Saving original modalities...") + for i, (mod, mod_name) in enumerate(zip(CONFIG["modalities"], CONFIG["modality_names"])): + vol = normalize_volume(image[i]) + slice_2d = extract_slice(vol, plane, slice_idx) + out_path = os.path.join(case_dir, f"input_{mod_name}.png") + save_single_image(slice_2d, out_path, cmap="gray", title=mod_name) + + # -------------------------- + # 2. 
Save ground truth + # -------------------------- + print(" Saving ground truth...") + base_vol = normalize_volume(image[1]) # Use T1ce as base + base_2d = extract_slice(base_vol, plane, slice_idx) + + if label is not None: + gt_regions = label_to_regions(label) + gt_masks = { + "WT": extract_slice(gt_regions[0], plane, slice_idx) > 0, + "TC": extract_slice(gt_regions[1], plane, slice_idx) > 0, + "ET": extract_slice(gt_regions[2], plane, slice_idx) > 0, + } + gt_overlay = overlay_masks_publication(base_2d, gt_masks) + out_path = os.path.join(case_dir, "gt_overlay.png") + save_single_image(gt_overlay, out_path, is_overlay=True, title="Ground Truth") + + # Save individual GT regions + for region_name in ["WT", "TC", "ET"]: + region_overlay = overlay_masks_publication(base_2d, {region_name: gt_masks[region_name]}) + out_path = os.path.join(case_dir, f"gt_{region_name}.png") + save_single_image(region_overlay, out_path, is_overlay=True, title=f"GT {region_name}") + + # -------------------------- + # 3. 
GliomaSAM3-MoE predictions + # -------------------------- + print(" Running GliomaSAM3-MoE predictions...") + for ckpt_path, ckpt_name in zip(CONFIG["gliomasam_ckpts"], CONFIG["gliomasam_names"]): + if not os.path.exists(ckpt_path): + print(f" Checkpoint not found: {ckpt_path}") + continue + + try: + gliomasam_predictor.load_checkpoint(ckpt_path) + pred_regions = gliomasam_predictor.predict(image) + + # Create overlay + pred_masks = { + "WT": extract_slice(pred_regions[0], plane, slice_idx) > 0, + "TC": extract_slice(pred_regions[1], plane, slice_idx) > 0, + "ET": extract_slice(pred_regions[2], plane, slice_idx) > 0, + } + pred_overlay = overlay_masks_publication(base_2d, pred_masks) + out_path = os.path.join(case_dir, f"pred_gliomasam_{ckpt_name}_overlay.png") + save_single_image(pred_overlay, out_path, is_overlay=True, title=f"GliomaSAM3-MoE ({ckpt_name})") + + # Save individual regions + for region_name in ["WT", "TC", "ET"]: + region_overlay = overlay_masks_publication(base_2d, {region_name: pred_masks[region_name]}) + out_path = os.path.join(case_dir, f"pred_gliomasam_{ckpt_name}_{region_name}.png") + save_single_image(region_overlay, out_path, is_overlay=True, title=f"GliomaSAM {ckpt_name} {region_name}") + except Exception as e: + print(f" Error with GliomaSAM {ckpt_name}: {e}") + + # -------------------------- + # 4. 
SegMamba predictions (from pre-generated files) + # -------------------------- + print(" Loading SegMamba predictions from files...") + import nibabel as nib + for pred_dir, pred_name in zip(CONFIG["segmamba_pred_dirs"], CONFIG["segmamba_names"]): + if not os.path.exists(pred_dir): + print(f" Prediction dir not found: {pred_dir}") + continue + + try: + pred_path = os.path.join(pred_dir, f"{case_id}.nii.gz") + if not os.path.exists(pred_path): + print(f" Prediction file not found: {pred_path}") + continue + + pred_nii = nib.load(pred_path) + pred_arr = np.asarray(pred_nii.get_fdata()) + + # Handle SegMamba format: (D, H, W, 3) where channels are [TC, WT, ET] + if pred_arr.ndim == 4 and pred_arr.shape[-1] == 3: + pred_regions = pred_arr.transpose(3, 0, 1, 2) + elif pred_arr.ndim == 4 and pred_arr.shape[0] == 3: + pred_regions = pred_arr + else: + print(f" Unexpected prediction shape: {pred_arr.shape}") + continue + + # SegMamba order is [TC, WT, ET], reorder to [WT, TC, ET] + pred_regions_reordered = np.stack([ + pred_regions[1], # WT + pred_regions[0], # TC + pred_regions[2], # ET + ], axis=0) + + # Create overlay + pred_masks = { + "WT": extract_slice(pred_regions_reordered[0], plane, slice_idx) > 0, + "TC": extract_slice(pred_regions_reordered[1], plane, slice_idx) > 0, + "ET": extract_slice(pred_regions_reordered[2], plane, slice_idx) > 0, + } + pred_overlay = overlay_masks_publication(base_2d, pred_masks) + out_path = os.path.join(case_dir, f"pred_segmamba_{pred_name}_overlay.png") + save_single_image(pred_overlay, out_path, is_overlay=True, title=f"SegMamba ({pred_name})") + + # Save individual regions + for region_name in ["WT", "TC", "ET"]: + region_overlay = overlay_masks_publication(base_2d, {region_name: pred_masks[region_name]}) + out_path = os.path.join(case_dir, f"pred_segmamba_{pred_name}_{region_name}.png") + save_single_image(region_overlay, out_path, is_overlay=True, title=f"SegMamba {pred_name} {region_name}") + print(f" Loaded: {pred_name}") + 
except Exception as e: + print(f" Error with SegMamba {pred_name}: {e}") + + print(f" Saved to: {case_dir}") + + +def create_comparison_grid(output_dir: str, cases: List[str]): + """Create a summary comparison grid for all cases.""" + print("\nCreating comparison summary grid...") + + # Check how many checkpoints were actually run + first_case_dir = os.path.join(output_dir, cases[0]) + if not os.path.exists(first_case_dir): + print(" No case directories found, skipping grid generation.") + return + + # Create grid: rows = cases, cols = GT + GliomaSAM ckpts + SegMamba ckpts + n_cases = len(cases) + n_gliomasam = len(CONFIG["gliomasam_names"]) + n_segmamba = len(CONFIG["segmamba_names"]) + n_cols = 1 + n_gliomasam + n_segmamba # GT + methods + + fig, axes = plt.subplots(n_cases, n_cols, figsize=(3 * n_cols, 3 * n_cases)) + if n_cases == 1: + axes = axes.reshape(1, -1) + + col_titles = ["Ground Truth"] + col_titles += [f"GliomaSAM3-MoE\n({n})" for n in CONFIG["gliomasam_names"]] + col_titles += [f"SegMamba\n({n})" for n in CONFIG["segmamba_names"]] + + for row_idx, case_id in enumerate(cases): + case_dir = os.path.join(output_dir, case_id) + + # GT + ax = axes[row_idx, 0] + gt_path = os.path.join(case_dir, "gt_overlay.png") + if os.path.exists(gt_path): + img = plt.imread(gt_path) + ax.imshow(img) + ax.axis("off") + if row_idx == 0: + ax.set_title(col_titles[0], fontsize=10, fontweight="bold") + ax.set_ylabel(case_id.split("-")[-1], fontsize=10, rotation=0, ha="right", va="center") + + # GliomaSAM predictions + col = 1 + for ckpt_name in CONFIG["gliomasam_names"]: + ax = axes[row_idx, col] + pred_path = os.path.join(case_dir, f"pred_gliomasam_{ckpt_name}_overlay.png") + if os.path.exists(pred_path): + img = plt.imread(pred_path) + ax.imshow(img) + ax.axis("off") + if row_idx == 0: + ax.set_title(col_titles[col], fontsize=10, fontweight="bold") + col += 1 + + # SegMamba predictions + for ckpt_name in CONFIG["segmamba_names"]: + ax = axes[row_idx, col] + pred_path = 
os.path.join(case_dir, f"pred_segmamba_{ckpt_name}_overlay.png") + if os.path.exists(pred_path): + img = plt.imread(pred_path) + ax.imshow(img) + ax.axis("off") + if row_idx == 0: + ax.set_title(col_titles[col], fontsize=10, fontweight="bold") + col += 1 + + fig.suptitle("Method Comparison: GliomaSAM3-MoE vs SegMamba\n(Different Checkpoints)", + fontsize=14, fontweight="bold", y=0.98) + fig.tight_layout(rect=[0, 0, 1, 0.95]) + + grid_path = os.path.join(output_dir, "comparison_grid.png") + fig.savefig(grid_path, dpi=200, bbox_inches="tight", facecolor="white") + plt.close(fig) + print(f" Saved: {grid_path}") + + +# ============================================================================ +# Main +# ============================================================================ +def main(): + parser = argparse.ArgumentParser(description="Method comparison visualization") + parser.add_argument("--device", default="cuda", help="Device to use") + parser.add_argument("--cases", nargs="+", default=None, help="Override case IDs") + args = parser.parse_args() + + output_dir = CONFIG["output_dir"] + ensure_dir(output_dir) + + cases = args.cases if args.cases else CONFIG["cases"] + + print("=" * 60) + print("Method Comparison Visualization") + print("=" * 60) + print(f"Cases: {len(cases)}") + print(f"GliomaSAM3-MoE checkpoints: {len(CONFIG['gliomasam_ckpts'])}") + print(f"SegMamba checkpoints: {len(CONFIG['segmamba_ckpts'])}") + print(f"Output directory: {output_dir}") + + # Initialize predictors + print("\nInitializing predictors...") + gliomasam_predictor = GliomaSAMPredictor(CONFIG["gliomasam_config"], args.device) + segmamba_predictor = SegMambaPredictor(args.device) + + # Process each case + for case_id in cases: + try: + case_data = load_case(CONFIG["data_dir"], case_id) + visualize_case( + case_id, + case_data, + gliomasam_predictor, + segmamba_predictor, + output_dir, + ) + except Exception as e: + print(f" Error processing {case_id}: {e}") + import traceback + 
traceback.print_exc() + + # Create summary grid + create_comparison_grid(output_dir, cases) + + print("\n" + "=" * 60) + print(f"All visualizations saved to: {output_dir}") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/source_code/gliomasam3_moe/visualizations/vis_publication.py b/source_code/gliomasam3_moe/visualizations/vis_publication.py new file mode 100644 index 0000000000000000000000000000000000000000..680edc4afea3caca410849106dbc7511dc64f940 --- /dev/null +++ b/source_code/gliomasam3_moe/visualizations/vis_publication.py @@ -0,0 +1,1587 @@ +""" +Publication-quality visualization suite for GliomaSAM3-MoE. +Follows detailed spec for figures. +""" +import argparse +import os +import sys +from typing import Dict, List, Optional, Tuple, Any + +import numpy as np +import yaml + +import matplotlib +matplotlib.use("Agg") +import matplotlib.pyplot as plt +from matplotlib.patches import Rectangle +from matplotlib.colors import LinearSegmentedColormap +import matplotlib.font_manager as fm + +# ============================================================================ +# Global Style Configuration (Spec A) +# ============================================================================ +STYLE = { + # Canvas + "aspect_main": (16, 9), + "aspect_panel": (4, 3), + "dpi": 300, + "bg_color": "white", + + # Typography + "font_family": "sans-serif", + "font_title": 18, + "font_subtitle": 12, + "font_label": 10, + + # Lines + "linewidth_contour": 1.5, + "linewidth_boundary": 2.0, + + # Colors (fixed palette) + "color_WT": "#00BBD4", # cyan + "color_TC": "#D81B60", # magenta + "color_ET": "#FBC02D", # yellow + "alpha_mask": 0.40, + + # Error colormap: blue -> white -> red + "cmap_error": "RdBu_r", +} + +def hex_to_rgb(hex_color: str) -> Tuple[float, float, float]: + h = hex_color.lstrip("#") + return tuple(int(h[i:i+2], 16) / 255.0 for i in (0, 2, 4)) + +COLORS = { + "WT": hex_to_rgb(STYLE["color_WT"]), + "TC": hex_to_rgb(STYLE["color_TC"]), + "ET": 
hex_to_rgb(STYLE["color_ET"]), +} + +# ============================================================================ +# Setup matplotlib defaults +# ============================================================================ +def setup_mpl_style(): + plt.rcParams.update({ + "font.family": STYLE["font_family"], + "font.size": STYLE["font_label"], + "axes.titlesize": STYLE["font_subtitle"], + "axes.labelsize": STYLE["font_label"], + "figure.facecolor": STYLE["bg_color"], + "axes.facecolor": STYLE["bg_color"], + "savefig.facecolor": STYLE["bg_color"], + "savefig.dpi": STYLE["dpi"], + "figure.dpi": 100, + }) + +setup_mpl_style() + +# ============================================================================ +# Imports from project +# ============================================================================ +ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) +SRC_DIR = os.path.join(ROOT_DIR, "src") +if SRC_DIR not in sys.path: + sys.path.append(SRC_DIR) + +from scipy import ndimage as ndi +from scipy.ndimage import zoom + +from gliomasam3_moe.data.brats_dataset import SegMambaNPZDataset +from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE + +# ============================================================================ +# Utility Functions +# ============================================================================ +def ensure_dir(path: str) -> None: + os.makedirs(path, exist_ok=True) + +def load_config(path: str) -> Dict: + with open(path, "r") as f: + return yaml.safe_load(f) + +def normalize_volume(vol: np.ndarray, eps: float = 1e-6) -> np.ndarray: + x = np.asarray(vol, dtype=np.float32) + x = np.nan_to_num(x, nan=0.0, posinf=0.0, neginf=0.0) + flat = x.reshape(-1) + if flat.size == 0: + return np.zeros_like(x, dtype=np.float32) + lo, hi = np.percentile(flat, [1, 99]) + if hi - lo < eps: + return np.zeros_like(x, dtype=np.float32) + x = np.clip(x, lo, hi) + x = (x - lo) / (hi - lo + eps) + return x + +def 
label_to_regions(label: np.ndarray) -> np.ndarray: + label = np.asarray(label) + wt = label > 0 + tc = (label == 1) | (label == 4) + et = label == 4 + return np.stack([wt, tc, et], axis=0).astype(np.uint8) + +def regions_to_label(regions: np.ndarray) -> np.ndarray: + if regions.ndim != 4 or regions.shape[0] != 3: + raise ValueError("regions must be [3, D, H, W]") + wt = regions[0] > 0.5 + tc = regions[1] > 0.5 + et = regions[2] > 0.5 + label = np.zeros_like(wt, dtype=np.int16) + label[wt] = 2 + label[tc] = 1 + label[et] = 4 + return label + +def extract_slice(vol: np.ndarray, plane: str, idx: int) -> np.ndarray: + if plane == "axial": + img = vol[idx, :, :] + elif plane == "coronal": + img = vol[:, idx, :] + elif plane == "sagittal": + img = vol[:, :, idx] + else: + raise ValueError(f"Unknown plane: {plane}") + return np.rot90(img) + +def select_slices_from_mask(mask: Optional[np.ndarray]) -> Dict[str, int]: + if mask is None or mask.sum() == 0: + return {"axial": None, "coronal": None, "sagittal": None} + m = mask.astype(np.uint8) + axial = int(np.argmax(m.sum(axis=(1, 2)))) + coronal = int(np.argmax(m.sum(axis=(0, 2)))) + sagittal = int(np.argmax(m.sum(axis=(0, 1)))) + return {"axial": axial, "coronal": coronal, "sagittal": sagittal} + +def fallback_slices(shape: Tuple[int, int, int]) -> Dict[str, int]: + d, h, w = shape + return {"axial": d // 2, "coronal": h // 2, "sagittal": w // 2} + +def get_slices(mask_ref: Optional[np.ndarray], vol_shape: Tuple[int, int, int]) -> Dict[str, int]: + idx = select_slices_from_mask(mask_ref) + if any(v is None for v in idx.values()): + idx = fallback_slices(vol_shape) + return idx + +def mask_boundary(mask2d: np.ndarray, iterations: int = 1) -> np.ndarray: + if mask2d.sum() == 0: + return mask2d.astype(bool) + eroded = ndi.binary_erosion(mask2d.astype(bool), iterations=iterations) + return np.logical_xor(mask2d.astype(bool), eroded) + +def signed_distance(mask: np.ndarray) -> np.ndarray: + mask = mask.astype(bool) + if 
mask.sum() == 0: + return np.zeros_like(mask, dtype=np.float32) + outside = ndi.distance_transform_edt(~mask) + inside = ndi.distance_transform_edt(mask) + return (inside - outside).astype(np.float32) + +def boundary_error_map(pred: np.ndarray, gt: np.ndarray) -> np.ndarray: + pred = pred.astype(bool) + gt = gt.astype(bool) + dist_gt = signed_distance(gt) + err = np.zeros_like(dist_gt, dtype=np.float32) + # False positive: predicted but not GT + fp = pred & ~gt + # False negative: GT but not predicted + fn = ~pred & gt + err[fp] = np.abs(dist_gt[fp]) + err[fn] = -np.abs(dist_gt[fn]) + return err + +def connected_components(mask: np.ndarray) -> Tuple[np.ndarray, int]: + labeled, num = ndi.label(mask.astype(np.uint8)) + return labeled, int(num) + +def fft_amplitude_slice(vol: np.ndarray, plane: str = "axial") -> np.ndarray: + fft = np.fft.fftn(vol) + amp = np.abs(fft) + amp = np.fft.fftshift(amp) + d, h, w = amp.shape + if plane == "axial": + sl = amp[d // 2, :, :] + elif plane == "coronal": + sl = amp[:, h // 2, :] + else: + sl = amp[:, :, w // 2] + sl = np.log1p(sl) + return normalize_volume(sl) + +def fourier_amplitude_mix(a: np.ndarray, b: np.ndarray, lam: float) -> np.ndarray: + if a.shape != b.shape: + b_resized = np.zeros_like(a) + for c in range(min(a.shape[0], b.shape[0])): + zoom_factors = tuple(a.shape[i+1] / b.shape[i+1] for i in range(3)) + b_resized[c] = zoom(b[c], zoom_factors, order=1) + b = b_resized + fft_a = np.fft.fftn(a, axes=(1, 2, 3)) + fft_b = np.fft.fftn(b, axes=(1, 2, 3)) + amp_a = np.abs(fft_a) + amp_b = np.abs(fft_b) + phase = np.exp(1j * np.angle(fft_a)) + amp_mix = (1.0 - lam) * amp_a + lam * amp_b + mixed = np.fft.ifftn(amp_mix * phase, axes=(1, 2, 3)).real + return mixed.astype(np.float32) + +# ============================================================================ +# Overlay Functions (Spec compliant) +# ============================================================================ +def overlay_masks_publication( + base2d: 
np.ndarray, + masks: Dict[str, np.ndarray], + alpha: float = STYLE["alpha_mask"], + draw_boundary: bool = True, + boundary_width: int = 2, +) -> np.ndarray: + """Overlay masks with publication-quality colors and boundaries.""" + base = np.clip(base2d, 0.0, 1.0) + rgb = np.stack([base, base, base], axis=-1).astype(np.float32) + + # Draw order: WT -> TC -> ET (ET on top) + order = ["WT", "TC", "ET"] + for key in order: + if key not in masks: + continue + m = masks[key].astype(bool) + # Handle shape mismatch + if m.shape != base.shape: + zoom_factors = (base.shape[0] / m.shape[0], base.shape[1] / m.shape[1]) + m = zoom(m.astype(float), zoom_factors, order=0) > 0.5 + if m.sum() == 0: + continue + color = np.array(COLORS.get(key, (1.0, 0.0, 0.0)), dtype=np.float32) + rgb[m] = (1.0 - alpha) * rgb[m] + alpha * color + + if draw_boundary: + b = mask_boundary(m, iterations=boundary_width) + rgb[b] = color + + return np.clip(rgb, 0, 1) + +def draw_contour(ax, mask2d: np.ndarray, color: str, linewidth: float = STYLE["linewidth_contour"]): + """Draw contour line on axis.""" + if mask2d.sum() == 0: + return + ax.contour(mask2d.astype(float), levels=[0.5], colors=[color], linewidths=[linewidth]) + +# ============================================================================ +# Data Loading +# ============================================================================ +class CaseLoader: + def __init__(self, cfg: Dict): + self.data_cfg = cfg.get("data", {}) + self.cache: Dict[Tuple[str, bool], Dict] = {} + + def get_case(self, case_id: str, include_label: bool = True) -> Dict: + key = (case_id, include_label) + if key in self.cache: + return self.cache[key] + + data_format = self.data_cfg.get("format", "nifti") + if data_format == "segmamba_npz": + npz_dir = self.data_cfg.get("npz_dir") or self.data_cfg.get("root_dir", "") + if case_id.endswith(".npz"): + npz_path = case_id + else: + npz_path = os.path.join(npz_dir, case_id + ".npz") + + npy_path = npz_path[:-3] + "npy" + 
seg_path = npz_path[:-4] + "_seg.npy" + + if os.path.isfile(npy_path): + image = np.load(npy_path, mmap_mode="r") + else: + data = np.load(npz_path) + image = data["data"] + + image = np.asarray(image, dtype=np.float32) + if image.ndim == 5 and image.shape[0] == 1: + image = image[0] + if image.ndim == 4 and image.shape[0] != 4 and image.shape[-1] == 4: + image = image.transpose(3, 0, 1, 2) + + label = None + if include_label: + if os.path.isfile(seg_path): + label = np.load(seg_path, mmap_mode="r") + else: + data = np.load(npz_path) + label = data["seg"] if "seg" in data else None + if label is not None: + label = np.asarray(label, dtype=np.int16) + if label.ndim == 4 and label.shape[0] == 1: + label = label[0] + # Map ET label 3 -> 4 if needed + if label.max() == 3 and (label == 4).sum() == 0: + label = label.copy() + label[label == 3] = 4 + + # Create images dict with modality names + modalities = self.data_cfg.get("modalities", ["t1n", "t1c", "t2f", "t2w"]) + images = {} + for i, mod in enumerate(modalities): + if i < image.shape[0]: + images[mod] = normalize_volume(image[i]) + + out = {"images": images, "label": label, "affine": np.eye(4)} + self.cache[key] = out + return out + + raise NotImplementedError("Only segmamba_npz format is currently supported") + + +class PredictionLoader: + def __init__(self, cfg: Dict): + pred_cfg = cfg.get("predictions", {}) + self.ours = pred_cfg.get("ours", {}) + self.baselines = pred_cfg.get("baselines", []) + + def get_all_methods(self) -> List[Dict]: + methods = [] + if self.ours: + methods.append(self.ours) + methods.extend(self.baselines) + return methods + + def load_method(self, method_cfg: Dict, case_id: str) -> Dict: + import nibabel as nib + + pred_dir = method_cfg.get("dir", "") + pred_type = method_cfg.get("type", "auto") + + def _find(base: str) -> Optional[str]: + for ext in [".nii.gz", ".nii"]: + path = os.path.join(pred_dir, base + ext) + if os.path.isfile(path): + return path + return None + + paths = { + 
"regions_prob": _find(f"{case_id}_regions_prob"), + "regions_bin": _find(f"{case_id}_regions_bin"), + "label": _find(f"{case_id}_label"), + "segmamba_3c": _find(f"{case_id}"), + } + + if pred_type == "auto": + for key in ["regions_prob", "regions_bin", "label", "segmamba_3c"]: + if paths[key] is not None: + pred_type = key + break + + path = paths.get(pred_type) + if path is None: + raise FileNotFoundError(f"No prediction found for {case_id} in {pred_dir}") + + img = nib.load(path) + arr = np.asarray(img.get_fdata()) + + out: Dict[str, Optional[np.ndarray]] = {"label": None, "regions": None, "prob": None} + + if pred_type in {"regions_prob", "regions_bin"}: + if arr.ndim == 4 and arr.shape[-1] == 3: + regions = arr.transpose(3, 0, 1, 2) + else: + regions = arr + out["prob"] = regions.astype(np.float32) if pred_type == "regions_prob" else None + out["regions"] = (regions > 0.5).astype(np.uint8) if pred_type == "regions_prob" else regions.astype(np.uint8) + out["label"] = regions_to_label(out["regions"]) + elif pred_type == "segmamba_3c": + if arr.ndim == 4 and arr.shape[-1] == 3: + regions = arr.transpose(3, 0, 1, 2).astype(np.uint8) + else: + regions = arr.astype(np.uint8) + out["regions"] = regions + out["label"] = regions_to_label(regions) + else: + label = arr.astype(np.int16) + out["label"] = label + out["regions"] = label_to_regions(label) + + return out + + +class AuxCache: + def __init__(self, aux_dir: Optional[str]): + self.aux_dir = aux_dir + + def path(self, case_id: str) -> Optional[str]: + if not self.aux_dir: + return None + return os.path.join(self.aux_dir, f"{case_id}_aux.npz") + + def load(self, case_id: str) -> Optional[Dict]: + path = self.path(case_id) + if path and os.path.isfile(path): + data = np.load(path) + return {k: data[k] for k in data.files} + return None + + def save(self, case_id: str, data: Dict) -> None: + if not self.aux_dir: + return + ensure_dir(self.aux_dir) + path = self.path(case_id) + np.savez_compressed(path, **data) + + 
+class ModelRunner: + def __init__(self, vis_cfg: Dict, model_cfg_path: str, ckpt_path: str, device: str): + import torch + import torch.nn.functional as F + + self.torch = torch + self.F = F + self.vis_cfg = vis_cfg + self.cfg = load_config(model_cfg_path) + self.device = torch.device(device if torch.cuda.is_available() else "cpu") + self.model = GliomaSAM3_MoE(**self.cfg["model"]).to(self.device) + ckpt = torch.load(ckpt_path, map_location="cpu") + state_dict = {k: v for k, v in ckpt["model"].items() if "freqs_cis" not in k} + self.model.load_state_dict(state_dict, strict=False) + self.model.eval() + + def load_case_tensor(self, case_id: str): + data_cfg = self.vis_cfg.get("data", {}) + data_dir = data_cfg.get("npz_dir") or data_cfg.get("root_dir", "") + if case_id.endswith(".npz"): + npz_path = case_id + else: + npz_path = os.path.join(data_dir, case_id + ".npz") + dataset = SegMambaNPZDataset(data_dir=data_dir, npz_paths=[npz_path], test=True, ensure_npy=True) + sample = dataset[0] + image = sample["image"].unsqueeze(0) + case = sample["case_id"] + return image, case + + def forward_intermediate(self, image): + torch = self.torch + F = self.F + model = self.model + with torch.no_grad(): + b, c, d, h, w = image.shape + orig_h, orig_w = h, w + pad_h = (model.patch_size - (h % model.patch_size)) % model.patch_size + pad_w = (model.patch_size - (w % model.patch_size)) % model.patch_size + ph0, ph1 = pad_h // 2, pad_h - pad_h // 2 + pw0, pw1 = pad_w // 2, pad_w - pad_w // 2 + if pad_h > 0 or pad_w > 0: + image = F.pad(image, (pw0, pw1, ph0, ph1, 0, 0)) + h, w = image.shape[-2:] + + image = image.to(self.device) + x_plus, _ = model.hfdi(image) + x_spec, spectral_stats = model.spectral(image) + + x2d = x_plus.permute(0, 2, 1, 3, 4).reshape(b * d, 7, h, w) + tokens, (gh, gw) = model.encoder2d(x2d) + n = gh * gw + tokens = tokens.view(b, d, n, -1) + tokens = model.slice_adapter(tokens, direction="forward") + + z = tokens.mean(dim=(1, 2)) + pi_et = 
model.attr_head(z)["pi_et"] + token_ids = model._select_concept_tokens(pi_et, label=None) + prompt = model.prompt_encoder(token_ids) + tokens = model.prompt_film(tokens, prompt) + + u = tokens.view(b, d, gh, gw, -1).permute(0, 4, 1, 2, 3) + u_msda = model.dual_enhance.msda(u) + u_lv1 = model.dual_enhance.fa_level(u) + u_fa = model.dual_enhance.fa_fuse(torch.cat([u, u_lv1], dim=1)) + pool = torch.cat([u_fa, u_msda], dim=1).mean(dim=(2, 3, 4)) + eta = torch.sigmoid(model.dual_enhance.fcf_mlp(pool)).view(b, 1, 1, 1, 1) + u_fuse = eta * u_fa + (1.0 - eta) * u_msda + u_spec = model.dual_enhance.spec_stem(x_spec) + u_out = model.dual_enhance.fuse_conv(torch.cat([u_fuse, u_spec], dim=1)) + + logits, gamma = model.moe_decoder(u_out, z, prompt, spectral_stats, target_size=(d, h, w)) + if pad_h > 0 or pad_w > 0: + logits = logits[:, :, :, ph0 : ph0 + orig_h, pw0 : pw0 + orig_w] + + et_pre = torch.sigmoid(logits[:, 2:3]) + et_post = et_pre * pi_et.view(b, 1, 1, 1, 1) + + u_up = F.interpolate(u_out, size=(d, h, w), mode="trilinear", align_corners=False) + logits_all = torch.stack([exp(u_up) for exp in model.moe_decoder.experts], dim=1) + prob_all = torch.sigmoid(logits_all) + mean_prob = prob_all.mean(dim=(3, 4, 5)) + contrib = gamma.view(b, -1, 1) * mean_prob + + return { + "pi_et": pi_et, + "moe_gamma": gamma, + "spectral_stats": spectral_stats, + "et_pre": et_pre, + "et_post": et_post, + "expert_contrib": contrib, + "x_spec": x_spec, + "u_fuse": u_fuse, + "u_spec": u_spec, + "logits": logits, + } + + +# ============================================================================ +# Figure Saving (Spec L) +# ============================================================================ +def save_figure(fig, out_dir: str, name: str, close: bool = True): + """Save figure as PNG and PDF at 300 dpi.""" + ensure_dir(out_dir) + png_path = os.path.join(out_dir, f"{name}.png") + pdf_path = os.path.join(out_dir, f"{name}.pdf") + # Use pad_inches to prevent axis/image overlap + 
fig.savefig(png_path, dpi=STYLE["dpi"], bbox_inches="tight", pad_inches=0.1, facecolor=STYLE["bg_color"]) + fig.savefig(pdf_path, dpi=STYLE["dpi"], bbox_inches="tight", pad_inches=0.1, facecolor=STYLE["bg_color"]) + if close: + plt.close(fig) + print(f" Saved: {png_path}") + + +def finalize_figure(fig, title: str = None): + """Finalize figure layout to prevent axis overlap.""" + if title: + fig.suptitle(title, fontsize=STYLE["font_title"], fontweight="bold", y=0.98) + # Use constrained layout or manual adjustment + try: + fig.tight_layout(rect=[0, 0.02, 1, 0.95] if title else [0, 0, 1, 1]) + except Exception: + pass + fig.subplots_adjust(wspace=0.3, hspace=0.3) + + +# ============================================================================ +# B) Main Qualitative Comparison (Spec B) +# ============================================================================ +def make_qualitative(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None: + """ + Main qualitative comparison figure. + Layout: Each row = one case + Columns: [T1n | T1ce | T2f | FLAIR | GT | Ours | Baseline...] 
+ Small inset for coronal/sagittal view + """ + cases = cfg.get("cases", {}).get("qualitative", []) + if not cases: + return + + methods = pred_loader.get_all_methods() + modalities = cfg.get("data", {}).get("modalities", ["t1n", "t1c", "t2f", "t2w"]) + mod_labels = {"t1n": "T1", "t1c": "T1ce", "t2f": "FLAIR", "t2w": "T2"} + + n_cols = len(modalities) + 1 + len(methods) # modalities + GT + methods + n_rows = len(cases) + + fig_width = 2.0 * n_cols + fig_height = 2.0 * n_rows + fig, axes = plt.subplots(n_rows, n_cols, figsize=(fig_width, fig_height)) + if n_rows == 1: + axes = axes.reshape(1, -1) + + for row_idx, case_id in enumerate(cases): + case = case_loader.get_case(case_id, include_label=True) + images = case["images"] + label = case["label"] + + # Use T1ce as reference for slice selection + ref_mod = "t1c" if "t1c" in images else list(images.keys())[0] + base = images[ref_mod] + + # Find best slice based on tumor + mask_ref = label_to_regions(label)[2] if label is not None else None + idx = get_slices(mask_ref, base.shape) + plane = "axial" + slice_idx = idx[plane] + + col_idx = 0 + + # Plot modalities + for mod in modalities: + ax = axes[row_idx, col_idx] + if mod in images: + img2d = extract_slice(images[mod], plane, slice_idx) + ax.imshow(img2d, cmap="gray", aspect="equal") + ax.axis("off") + if row_idx == 0: + ax.set_title(mod_labels.get(mod, mod.upper()), fontsize=STYLE["font_subtitle"], fontweight="bold") + col_idx += 1 + + # Plot GT + ax = axes[row_idx, col_idx] + base2d = extract_slice(base, plane, slice_idx) + if label is not None: + gt_regions = label_to_regions(label) + masks = { + "WT": extract_slice(gt_regions[0], plane, slice_idx) > 0, + "TC": extract_slice(gt_regions[1], plane, slice_idx) > 0, + "ET": extract_slice(gt_regions[2], plane, slice_idx) > 0, + } + overlay = overlay_masks_publication(base2d, masks) + ax.imshow(overlay, aspect="equal") + else: + ax.imshow(base2d, cmap="gray", aspect="equal") + ax.axis("off") + if row_idx == 0: + 
ax.set_title("GT", fontsize=STYLE["font_subtitle"], fontweight="bold") + col_idx += 1 + + # Plot methods + for method in methods: + ax = axes[row_idx, col_idx] + try: + pred = pred_loader.load_method(method, case_id) + pred_regions = pred["regions"] + masks = { + "WT": extract_slice(pred_regions[0], plane, slice_idx) > 0, + "TC": extract_slice(pred_regions[1], plane, slice_idx) > 0, + "ET": extract_slice(pred_regions[2], plane, slice_idx) > 0, + } + overlay = overlay_masks_publication(base2d, masks) + ax.imshow(overlay, aspect="equal") + except Exception as e: + ax.imshow(base2d, cmap="gray", aspect="equal") + ax.text(0.5, 0.5, "N/A", transform=ax.transAxes, ha="center", va="center", fontsize=STYLE["font_label"]) + ax.axis("off") + if row_idx == 0: + ax.set_title(method.get("name", "Method"), fontsize=STYLE["font_subtitle"], fontweight="bold") + + # Add small inset for coronal view (top-right corner) + inset_ax = ax.inset_axes([0.65, 0.65, 0.33, 0.33]) + try: + cor_idx = idx["coronal"] + base_cor = extract_slice(base, "coronal", cor_idx) + if "pred_regions" in dir(): + masks_cor = { + "WT": extract_slice(pred_regions[0], "coronal", cor_idx) > 0, + "TC": extract_slice(pred_regions[1], "coronal", cor_idx) > 0, + "ET": extract_slice(pred_regions[2], "coronal", cor_idx) > 0, + } + overlay_cor = overlay_masks_publication(base_cor, masks_cor) + inset_ax.imshow(overlay_cor, aspect="equal") + else: + inset_ax.imshow(base_cor, cmap="gray", aspect="equal") + except: + pass + inset_ax.axis("off") + inset_ax.patch.set_edgecolor("white") + inset_ax.patch.set_linewidth(1) + + col_idx += 1 + + # Add case ID on the left + axes[row_idx, 0].text(-0.15, 0.5, case_id.split("-")[-1], transform=axes[row_idx, 0].transAxes, + rotation=90, va="center", ha="right", fontsize=STYLE["font_label"]) + + finalize_figure(fig, "Main Qualitative Comparison") + save_figure(fig, out_dir, "Fig1_qualitative_comparison") + + +# ============================================================================ 
+# C) ET-absent Case Study (Spec C) +# ============================================================================ +def make_et_absent(cfg: Dict, case_loader: CaseLoader, aux: AuxCache, runner: Optional[ModelRunner], out_dir: str) -> None: + """ + ET-absent case study. + Three columns: Before gate | After gate | π_ET value with colorbar + """ + cases = cfg.get("cases", {}).get("et_absent", []) + if not cases: + return + + for case_idx, case_id in enumerate(cases): + case = case_loader.get_case(case_id, include_label=False) + images = case["images"] + ref_mod = "t1c" if "t1c" in images else list(images.keys())[0] + base = images[ref_mod] + + # Load or compute aux data + aux_data = aux.load(case_id) + needed_keys = ["pi_et", "et_pre", "et_post"] + if aux_data is None or not all(k in aux_data for k in needed_keys): + if runner is None: + continue + image, _ = runner.load_case_tensor(case_id) + out = runner.forward_intermediate(image) + new_data = { + "pi_et": out["pi_et"].detach().cpu().numpy(), + "et_pre": out["et_pre"].detach().cpu().numpy(), + "et_post": out["et_post"].detach().cpu().numpy(), + } + if aux_data is not None: + aux_data.update(new_data) + else: + aux_data = new_data + aux.save(case_id, aux_data) + + if aux_data is None: + continue + + et_pre = aux_data["et_pre"][0, 0] + et_post = aux_data["et_post"][0, 0] + pi_et = float(np.asarray(aux_data["pi_et"]).reshape(-1)[0]) + + idx = get_slices(et_pre > 0.3, base.shape) + plane = "axial" + slice_idx = idx[plane] + + fig = plt.figure(figsize=(14, 5)) + gs = fig.add_gridspec(1, 4, width_ratios=[1, 1, 0.6, 0.05], wspace=0.25) + + base2d = extract_slice(base, plane, slice_idx) + pre2d = extract_slice(et_pre, plane, slice_idx) + post2d = extract_slice(et_post, plane, slice_idx) + + # Before gate + ax0 = fig.add_subplot(gs[0]) + ax0.imshow(base2d, cmap="gray", aspect="equal") + im = ax0.imshow(pre2d, cmap="YlOrRd", alpha=0.6, vmin=0, vmax=1, aspect="equal") + draw_contour(ax0, pre2d > 0.5, STYLE["color_ET"], 
linewidth=STYLE["linewidth_boundary"]) + ax0.set_title("ET Before Gate", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax0.axis("off") + + # After gate + ax1 = fig.add_subplot(gs[1]) + ax1.imshow(base2d, cmap="gray", aspect="equal") + ax1.imshow(post2d, cmap="YlOrRd", alpha=0.6, vmin=0, vmax=1, aspect="equal") + draw_contour(ax1, post2d > 0.5, STYLE["color_ET"], linewidth=STYLE["linewidth_boundary"]) + ax1.set_title("ET After Gate", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax1.axis("off") + + # π_ET value with bar and stats + ax2 = fig.add_subplot(gs[2]) + ax2.barh(0.5, pi_et, height=0.25, color=STYLE["color_ET"], edgecolor="black", linewidth=1.5) + ax2.axvline(0.5, color="gray", linestyle="--", linewidth=1.5, label="Threshold") + ax2.set_xlim(0, 1) + ax2.set_ylim(0, 1) + ax2.set_xlabel("π_ET value", fontsize=STYLE["font_label"]) + ax2.set_title(f"π_ET = {pi_et:.3f}", fontsize=STYLE["font_subtitle"], fontweight="bold", + color="green" if pi_et > 0.5 else "red") + ax2.set_yticks([]) + ax2.spines["top"].set_visible(False) + ax2.spines["right"].set_visible(False) + ax2.spines["left"].set_visible(False) + + # Stats text + pre_vox = int((pre2d > 0.5).sum()) + post_vox = int((post2d > 0.5).sum()) + diff_pct = (pre_vox - post_vox) / max(pre_vox, 1) * 100 + ax2.text(0.5, 0.2, f"Before: {pre_vox}\nAfter: {post_vox}\nΔ: {diff_pct:+.1f}%", + transform=ax2.transAxes, fontsize=STYLE["font_label"], ha="center", va="bottom", + bbox=dict(boxstyle="round,pad=0.3", facecolor="lightyellow", edgecolor="gray", alpha=0.9)) + + # Colorbar + ax_cbar = fig.add_subplot(gs[3]) + cbar = fig.colorbar(im, cax=ax_cbar) + cbar.set_label("ET Prob", fontsize=STYLE["font_label"]) + + fig.suptitle(f"ET Gate Study: {case_id}", fontsize=STYLE["font_title"], fontweight="bold", y=0.98) + fig.tight_layout(rect=[0, 0, 1, 0.93]) + save_figure(fig, out_dir, f"Fig2_{chr(ord('a')+case_idx)}_et_absent_{case_id}") + + +# 
============================================================================ +# D) Boundary Error Visualization (Spec D) +# ============================================================================ +def make_boundary(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None: + """ + Boundary error visualization. + Shows GT boundary (white) + Pred boundary (black) + signed error heatmap + """ + cases = cfg.get("cases", {}).get("boundary", []) + if not cases: + return + + region_name = cfg.get("visualization", {}).get("boundary_region", "ET") + region_idx = {"WT": 0, "TC": 1, "ET": 2}[region_name] + + for case_idx, case_id in enumerate(cases): + case = case_loader.get_case(case_id, include_label=True) + if case["label"] is None: + continue + + images = case["images"] + ref_mod = "t1c" if "t1c" in images else list(images.keys())[0] + base = images[ref_mod] + + gt_regions = label_to_regions(case["label"]) + pred = pred_loader.load_method(pred_loader.ours, case_id) + pred_regions = pred["regions"] + + mask_ref = gt_regions[region_idx] + idx = get_slices(mask_ref, base.shape) + plane = "axial" + slice_idx = idx[plane] + + fig, axes = plt.subplots(1, 3, figsize=(12, 4)) + + base2d = extract_slice(base, plane, slice_idx) + gt2d = extract_slice(gt_regions[region_idx], plane, slice_idx) > 0 + pred2d = extract_slice(pred_regions[region_idx], plane, slice_idx) > 0 + + # Resize pred2d if needed + if pred2d.shape != gt2d.shape: + zoom_factors = (gt2d.shape[0] / pred2d.shape[0], gt2d.shape[1] / pred2d.shape[1]) + pred2d = zoom(pred2d.astype(float), zoom_factors, order=0) > 0.5 + + err2d = boundary_error_map(pred2d, gt2d) + + # Panel 1: Base image with boundaries + ax = axes[0] + ax.imshow(base2d, cmap="gray", aspect="equal") + draw_contour(ax, gt2d, "white", linewidth=STYLE["linewidth_boundary"]) + draw_contour(ax, pred2d, "black", linewidth=STYLE["linewidth_contour"]) + ax.set_title("Boundaries: GT (white) vs Pred (black)", 
fontsize=STYLE["font_subtitle"]) + ax.axis("off") + + # Panel 2: Overlay comparison + ax = axes[1] + overlay = overlay_masks_publication(base2d, {region_name: gt2d}, alpha=0.3) + ax.imshow(overlay, aspect="equal") + draw_contour(ax, pred2d, "black", linewidth=STYLE["linewidth_contour"]) + ax.set_title(f"GT ({region_name}) + Pred Boundary", fontsize=STYLE["font_subtitle"]) + ax.axis("off") + + # Panel 3: Signed error heatmap + ax = axes[2] + ax.imshow(base2d, cmap="gray", aspect="equal") + max_err = max(np.abs(err2d).max(), 1.0) + im = ax.imshow(err2d, cmap=STYLE["cmap_error"], alpha=0.7, vmin=-max_err, vmax=max_err, aspect="equal") + ax.set_title("Signed Boundary Error", fontsize=STYLE["font_subtitle"]) + ax.axis("off") + + # Colorbar + cbar = fig.colorbar(im, ax=ax, orientation="vertical", fraction=0.046, pad=0.04) + cbar.set_label("Error (blue=FN, red=FP)", fontsize=STYLE["font_label"]) + + finalize_figure(fig, f"Boundary Analysis: {case_id} ({region_name})") + save_figure(fig, out_dir, f"Fig3_{chr(ord('a')+case_idx)}_boundary_{case_id}") + + +# ============================================================================ +# E) Tiny/Fragmented ET Cases (Spec E) +# ============================================================================ +def make_tiny_et(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None: + """ + Tiny/fragmented ET visualization with ROI zoom. 
+ """ + cases = cfg.get("cases", {}).get("tiny_et", []) + if not cases: + return + + methods = pred_loader.get_all_methods() + + for case_idx, case_id in enumerate(cases): + case = case_loader.get_case(case_id, include_label=True) + images = case["images"] + ref_mod = "t1c" if "t1c" in images else list(images.keys())[0] + base = images[ref_mod] + + gt_regions = label_to_regions(case["label"]) if case["label"] is not None else None + + # Find ET centroid for ROI + if gt_regions is not None: + et_mask = gt_regions[2] + if et_mask.sum() > 0: + coords = np.where(et_mask) + centroid = [int(np.mean(c)) for c in coords] + else: + centroid = [s // 2 for s in base.shape] + else: + centroid = [s // 2 for s in base.shape] + + plane = "axial" + slice_idx = centroid[0] + + # Create ROI around ET (64x64 region) + roi_size = 64 + cy, cx = centroid[1], centroid[2] + y_start = max(0, cy - roi_size // 2) + y_end = min(base.shape[1], cy + roi_size // 2) + x_start = max(0, cx - roi_size // 2) + x_end = min(base.shape[2], cx + roi_size // 2) + + n_cols = 1 + len(methods) # GT + methods + fig, axes = plt.subplots(1, n_cols, figsize=(3 * n_cols, 3)) + if n_cols == 1: + axes = [axes] + + base2d = extract_slice(base, plane, slice_idx) + base_roi = base2d[y_start:y_end, x_start:x_end] + + col_idx = 0 + + # GT + ax = axes[col_idx] + if gt_regions is not None: + et2d = extract_slice(gt_regions[2], plane, slice_idx) > 0 + et_roi = et2d[y_start:y_end, x_start:x_end] + overlay = overlay_masks_publication(base_roi, {"ET": et_roi}, alpha=0.5) + ax.imshow(overlay, aspect="equal") + else: + ax.imshow(base_roi, cmap="gray", aspect="equal") + ax.set_title("GT", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax.axis("off") + col_idx += 1 + + # Methods + for method in methods: + ax = axes[col_idx] + try: + pred = pred_loader.load_method(method, case_id) + pred_et = pred["regions"][2] + et2d = extract_slice(pred_et, plane, slice_idx) > 0 + # Resize if needed + if et2d.shape != base2d.shape: + 
zoom_factors = (base2d.shape[0] / et2d.shape[0], base2d.shape[1] / et2d.shape[1]) + et2d = zoom(et2d.astype(float), zoom_factors, order=0) > 0.5 + et_roi = et2d[y_start:y_end, x_start:x_end] + overlay = overlay_masks_publication(base_roi, {"ET": et_roi}, alpha=0.5) + ax.imshow(overlay, aspect="equal") + except: + ax.imshow(base_roi, cmap="gray", aspect="equal") + ax.set_title(method.get("name", "Method"), fontsize=STYLE["font_subtitle"], fontweight="bold") + ax.axis("off") + col_idx += 1 + + finalize_figure(fig, f"Tiny ET ROI: {case_id}") + save_figure(fig, out_dir, f"Fig4_{chr(ord('a')+case_idx)}_tiny_et_{case_id}") + + +# ============================================================================ +# G) MoE Routing Interpretability (Spec G) +# ============================================================================ +def make_moe_routing(cfg: Dict, case_loader: CaseLoader, aux: AuxCache, runner: Optional[ModelRunner], out_dir: str) -> None: + """ + MoE routing visualization with grouped bar chart. + Shows expert contributions for WT/TC/ET. 
+ """ + cases = cfg.get("cases", {}).get("moe", []) + if not cases: + return + + for case_idx, case_id in enumerate(cases): + aux_data = aux.load(case_id) + needed_keys = ["moe_gamma", "expert_contrib"] + if aux_data is None or not all(k in aux_data for k in needed_keys): + if runner is None: + continue + image, _ = runner.load_case_tensor(case_id) + out = runner.forward_intermediate(image) + new_data = { + "moe_gamma": out["moe_gamma"].detach().cpu().numpy(), + "expert_contrib": out["expert_contrib"].detach().cpu().numpy(), + } + if aux_data is not None: + aux_data.update(new_data) + else: + aux_data = new_data + aux.save(case_id, aux_data) + + if aux_data is None: + continue + + gamma = np.asarray(aux_data["moe_gamma"])[0] + contrib = np.asarray(aux_data["expert_contrib"])[0] + + n_experts = contrib.shape[0] + x = np.arange(n_experts) + width = 0.25 + + # Find active experts (top-k, usually k=2) + active_experts = np.where(gamma > 0.01)[0] + top_k = len(active_experts) + + fig, ax = plt.subplots(figsize=(10, 5)) + + # Grouped bars for WT/TC/ET with highlight for active experts + for i in range(n_experts): + alpha = 1.0 if i in active_experts else 0.3 + edge_width = 2 if i in active_experts else 0.5 + ax.bar(x[i] - width, contrib[i, 0], width, color=STYLE["color_WT"], + edgecolor="black", linewidth=edge_width, alpha=alpha) + ax.bar(x[i], contrib[i, 1], width, color=STYLE["color_TC"], + edgecolor="black", linewidth=edge_width, alpha=alpha) + ax.bar(x[i] + width, contrib[i, 2], width, color=STYLE["color_ET"], + edgecolor="black", linewidth=edge_width, alpha=alpha) + + # Legend for regions (place at upper left) + from matplotlib.patches import Patch + legend_elements = [ + Patch(facecolor=STYLE["color_WT"], edgecolor="black", label="WT"), + Patch(facecolor=STYLE["color_TC"], edgecolor="black", label="TC"), + Patch(facecolor=STYLE["color_ET"], edgecolor="black", label="ET"), + ] + + # Routing weights as line with markers + ax.plot(x, gamma, "ko-", linewidth=2, 
markersize=8, label="Routing γ", zorder=10) + + # Annotate active experts with their gamma values + for i in active_experts: + ax.annotate(f"γ={gamma[i]:.2f}", xy=(x[i], gamma[i]), + xytext=(0, 10), textcoords="offset points", + fontsize=STYLE["font_label"], fontweight="bold", ha="center", + bbox=dict(boxstyle="round,pad=0.2", facecolor="yellow", alpha=0.8)) + + ax.set_xlabel("Expert Index", fontsize=STYLE["font_subtitle"]) + ax.set_ylabel("Contribution", fontsize=STYLE["font_subtitle"]) + ax.set_xticks(x) + ax.set_xticklabels([f"E{i}\n{'(active)' if i in active_experts else ''}" for i in range(n_experts)], + fontsize=STYLE["font_label"]) + # Legend at upper right + ax.legend(handles=legend_elements, loc="upper right", fontsize=STYLE["font_label"], + bbox_to_anchor=(0.99, 0.99)) + ax.spines["top"].set_visible(False) + ax.spines["right"].set_visible(False) + ax.set_ylim(0, None) + + # Add explanation text (upper right, below legend) + ax.text(0.98, 0.72, f"Top-k = {top_k} (sparse gating)\nActive: {', '.join([f'E{i}' for i in active_experts])}", + transform=ax.transAxes, fontsize=STYLE["font_label"], va="top", ha="right", + bbox=dict(boxstyle="round,pad=0.3", facecolor="lightyellow", edgecolor="gray", alpha=0.9)) + + finalize_figure(fig, f"MoE Expert Routing: {case_id}") + save_figure(fig, out_dir, f"Fig5_{chr(ord('a')+case_idx)}_moe_routing_{case_id}") + + +# ============================================================================ +# H) Concept Token Interpretability (Spec H) +# ============================================================================ +def make_concept_tokens(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None: + """ + Concept token visualization - split into 3 clear figures per case: + 1. ET Overview: T1ce + ET mask + 2. Fragmentation Analysis: Components → FRAG_BIN + 3. 
Scale Analysis: Size → SCALE_BIN + """ + cases = cfg.get("cases", {}).get("concept_tokens", []) + if not cases: + return + + frag_bins = cfg.get("visualization", {}).get("frag_bins", [1, 3, 5]) + scale_bins = cfg.get("visualization", {}).get("scale_bins", [50, 200, 500]) + frag_labels = ["None", "Low", "Medium", "High"] + scale_labels = ["Tiny", "Small", "Medium", "Large"] + + def bin_value(value, thresholds): + for i, t in enumerate(thresholds): + if value <= t: + return i + return len(thresholds) + + def analyze_et_morphology(et_mask): + """Analyze ET morphology: size, fragmentation, component sizes.""" + et = et_mask > 0 + et_count = int(et.sum()) + labeled, n_comp = connected_components(et) + + comp_sizes = [] + for i in range(1, n_comp + 1): + comp_sizes.append(int((labeled == i).sum())) + comp_sizes = sorted(comp_sizes, reverse=True) + + frag_bin = bin_value(n_comp, frag_bins) + scale_bin = bin_value(et_count, scale_bins) + + return { + "total_voxels": et_count, + "n_components": n_comp, + "comp_sizes": comp_sizes, + "frag_bin": frag_bin, + "frag_label": frag_labels[min(frag_bin, len(frag_labels)-1)], + "scale_bin": scale_bin, + "scale_label": scale_labels[min(scale_bin, len(scale_labels)-1)], + "labeled": labeled, + } + + for case_idx, case_id in enumerate(cases): + case = case_loader.get_case(case_id, include_label=True) + pred = pred_loader.load_method(pred_loader.ours, case_id) + + images = case["images"] + ref_mod = "t1c" if "t1c" in images else list(images.keys())[0] + base = images[ref_mod] + + # Analyze morphology + pred_morph = analyze_et_morphology(pred["regions"][2]) + gt_morph = analyze_et_morphology(label_to_regions(case["label"])[2]) if case["label"] is not None else None + + # Find best slice + mask_ref = pred["regions"][2] if pred_morph["total_voxels"] > 0 else None + idx = get_slices(mask_ref, base.shape) + plane = "axial" + slice_idx = idx[plane] + base2d = extract_slice(base, plane, slice_idx) + + n_comp = pred_morph["n_components"] + 
letter = chr(ord('a') + case_idx) + + # ===================================================================== + # Figure 1: ET Overview (T1ce + ET mask) + # ===================================================================== + fig1, axes1 = plt.subplots(1, 2, figsize=(10, 5)) + + # Left: T1ce + axes1[0].imshow(base2d, cmap="gray", aspect="equal") + axes1[0].set_title("T1ce Input", fontsize=STYLE["font_subtitle"], fontweight="bold") + axes1[0].axis("off") + + # Right: ET overlay + et2d = extract_slice(pred["regions"][2], plane, slice_idx) + if et2d.shape != base2d.shape: + zoom_factors = (base2d.shape[0] / et2d.shape[0], base2d.shape[1] / et2d.shape[1]) + et2d = zoom(et2d.astype(float), zoom_factors, order=0) + overlay = overlay_masks_publication(base2d, {"ET": et2d > 0}, alpha=0.5) + axes1[1].imshow(overlay, aspect="equal") + axes1[1].set_title(f"Predicted ET\n({pred_morph['total_voxels']} voxels total)", + fontsize=STYLE["font_subtitle"], fontweight="bold") + axes1[1].axis("off") + + fig1.suptitle(f"ET Prediction Overview: {case_id}", fontsize=STYLE["font_title"], fontweight="bold") + fig1.tight_layout(rect=[0, 0, 1, 0.92]) + save_figure(fig1, out_dir, f"Fig6_{letter}1_et_overview_{case_id}") + + # ===================================================================== + # Figure 2: Fragmentation Analysis (Components → FRAG_BIN) + # ===================================================================== + fig2, axes2 = plt.subplots(1, 3, figsize=(12, 4.5), gridspec_kw={"width_ratios": [1, 1, 0.8]}) + + # Left: Connected components visualization + labeled2d = extract_slice(pred_morph["labeled"], plane, slice_idx) + if labeled2d.shape != base2d.shape: + zoom_factors = (base2d.shape[0] / labeled2d.shape[0], base2d.shape[1] / labeled2d.shape[1]) + labeled2d = zoom(labeled2d.astype(float), zoom_factors, order=0) + + comp_rgb = np.zeros((*base2d.shape, 3), dtype=np.float32) + comp_rgb[:] = base2d[:, :, np.newaxis] * 0.3 + + if n_comp > 0: + colors = 
plt.cm.Set1(np.linspace(0, 1, max(n_comp, 3)))[:n_comp] + for i in range(1, n_comp + 1): + mask = labeled2d == i + if mask.sum() > 0: + comp_rgb[mask] = colors[i-1][:3] + + axes2[0].imshow(comp_rgb, aspect="equal") + axes2[0].set_title(f"Connected Components\n(n = {n_comp})", + fontsize=STYLE["font_subtitle"], fontweight="bold") + axes2[0].axis("off") + + # Middle: Component sizes bar chart + if pred_morph["comp_sizes"]: + comp_sizes = pred_morph["comp_sizes"][:8] + colors = plt.cm.Set1(np.linspace(0, 1, max(len(comp_sizes), 3)))[:len(comp_sizes)] + bars = axes2[1].barh(range(len(comp_sizes)), comp_sizes, color=colors, edgecolor="black", linewidth=0.5) + axes2[1].set_yticks(range(len(comp_sizes))) + axes2[1].set_yticklabels([f"C{i+1}" for i in range(len(comp_sizes))]) + axes2[1].set_xlabel("Voxels", fontsize=STYLE["font_label"]) + axes2[1].invert_yaxis() + axes2[1].spines["top"].set_visible(False) + axes2[1].spines["right"].set_visible(False) + else: + axes2[1].text(0.5, 0.5, "No ET", ha="center", va="center", + transform=axes2[1].transAxes, fontsize=STYLE["font_subtitle"]) + axes2[1].axis("off") + axes2[1].set_title("Component Sizes", fontsize=STYLE["font_subtitle"], fontweight="bold") + + # Right: FRAG_BIN selection + frag_colors = ["#CCCCCC", "#90EE90", "#FFD700", "#FF6347"] + frag_idx = pred_morph["frag_bin"] + bar_colors = [frag_colors[i] if i != frag_idx else "#FF0000" for i in range(4)] + bars = axes2[2].bar(range(4), [1]*4, color=bar_colors, edgecolor="black", linewidth=1.5) + axes2[2].set_xticks(range(4)) + axes2[2].set_xticklabels(frag_labels, fontsize=STYLE["font_label"], rotation=45, ha="right") + axes2[2].set_ylim(0, 1.3) + axes2[2].set_yticks([]) + axes2[2].spines["top"].set_visible(False) + axes2[2].spines["right"].set_visible(False) + axes2[2].spines["left"].set_visible(False) + + # Arrow pointing to selected bin + axes2[2].annotate(f"n={n_comp}", xy=(frag_idx, 1.05), xytext=(frag_idx, 1.2), + fontsize=STYLE["font_label"], fontweight="bold", 
ha="center", + arrowprops=dict(arrowstyle="->", color="red", lw=2)) + axes2[2].set_title(f"FRAG_BIN = {pred_morph['frag_label']}", + fontsize=STYLE["font_subtitle"], fontweight="bold", color="red") + + # Add mapping explanation + fig2.text(0.5, 0.02, + f"Mapping: {n_comp} components → FRAG_BIN = {pred_morph['frag_label']} " + f"(thresholds: ≤{frag_bins[0]}=None, ≤{frag_bins[1]}=Low, ≤{frag_bins[2]}=Med, >{frag_bins[2]}=High)", + ha="center", fontsize=STYLE["font_label"], style="italic") + + fig2.suptitle(f"Fragmentation Analysis: {case_id}", fontsize=STYLE["font_title"], fontweight="bold") + fig2.tight_layout(rect=[0, 0.06, 1, 0.92]) + save_figure(fig2, out_dir, f"Fig6_{letter}2_fragmentation_{case_id}") + + # ===================================================================== + # Figure 3: Scale Analysis (Size → SCALE_BIN) + # ===================================================================== + fig3, axes3 = plt.subplots(1, 2, figsize=(10, 5)) + + # Left: Size visualization (ET with size info) + axes3[0].imshow(overlay, aspect="equal") + total_voxels = pred_morph["total_voxels"] + axes3[0].set_title(f"ET Region\nTotal: {total_voxels} voxels", + fontsize=STYLE["font_subtitle"], fontweight="bold") + axes3[0].axis("off") + + # Right: SCALE_BIN selection with size ruler + ax_scale = axes3[1] + scale_colors = ["#E0E0E0", "#87CEEB", "#4169E1", "#1E3A8A"] + scale_idx = pred_morph["scale_bin"] + + # Create size ranges for visualization + scale_ranges = [f"≤{scale_bins[0]}", f"{scale_bins[0]+1}-{scale_bins[1]}", + f"{scale_bins[1]+1}-{scale_bins[2]}", f">{scale_bins[2]}"] + + bar_colors = [scale_colors[i] if i != scale_idx else "#FF0000" for i in range(4)] + y_pos = np.arange(4) + bars = ax_scale.barh(y_pos, [scale_bins[0], scale_bins[1]-scale_bins[0], + scale_bins[2]-scale_bins[1], scale_bins[2]], + color=scale_colors, edgecolor="black", linewidth=1.5, left=[0, scale_bins[0], scale_bins[1], scale_bins[2]]) + + # Highlight selected bin + ax_scale.barh(scale_idx, 
bars[scale_idx].get_width(), + left=bars[scale_idx].get_x(), color="#FF0000", edgecolor="black", linewidth=2) + + ax_scale.set_yticks(y_pos) + ax_scale.set_yticklabels([f"{scale_labels[i]}\n({scale_ranges[i]})" for i in range(4)], + fontsize=STYLE["font_label"]) + ax_scale.set_xlabel("Voxels", fontsize=STYLE["font_label"]) + ax_scale.spines["top"].set_visible(False) + ax_scale.spines["right"].set_visible(False) + + # Mark current value + ax_scale.axvline(x=total_voxels, color="red", linestyle="--", linewidth=2, label=f"Current: {total_voxels}") + ax_scale.legend(loc="upper right", fontsize=STYLE["font_label"]) + ax_scale.set_title(f"SCALE_BIN = {pred_morph['scale_label']}", + fontsize=STYLE["font_subtitle"], fontweight="bold", color="red") + + # Add mapping explanation + fig3.text(0.5, 0.02, + f"Mapping: {total_voxels} voxels → SCALE_BIN = {pred_morph['scale_label']} " + f"(thresholds: ≤{scale_bins[0]}=Tiny, ≤{scale_bins[1]}=Small, ≤{scale_bins[2]}=Med, >{scale_bins[2]}=Large)", + ha="center", fontsize=STYLE["font_label"], style="italic") + + fig3.suptitle(f"Scale Analysis: {case_id}", fontsize=STYLE["font_title"], fontweight="bold") + fig3.tight_layout(rect=[0, 0.06, 1, 0.92]) + save_figure(fig3, out_dir, f"Fig6_{letter}3_scale_{case_id}") + + +# ============================================================================ +# I) Dual-domain Enhancement (Spec I) +# ============================================================================ +def make_dual_domain(cfg: Dict, case_loader: CaseLoader, aux: AuxCache, runner: Optional[ModelRunner], out_dir: str) -> None: + """ + Dual-domain enhancement visualization. 
+ Left: Original amplitude spectrum, Right: Enhanced spectrum (same scale) + """ + cases = cfg.get("cases", {}).get("dual_domain", []) + if not cases: + return + + for case_idx, case_id in enumerate(cases): + case = case_loader.get_case(case_id, include_label=False) + images = case["images"] + ref_mod = "t1c" if "t1c" in images else list(images.keys())[0] + base = images[ref_mod] + + aux_data = aux.load(case_id) + needed_keys = ["x_spec"] + if aux_data is None or not all(k in aux_data for k in needed_keys): + if runner is None: + continue + image, _ = runner.load_case_tensor(case_id) + out = runner.forward_intermediate(image) + new_data = { + "x_spec": out["x_spec"].detach().cpu().numpy(), + } + if aux_data is not None: + aux_data.update(new_data) + else: + aux_data = new_data + aux.save(case_id, aux_data) + + if aux_data is None: + continue + + x_spec = aux_data["x_spec"][0] + + # Compute amplitude spectra + amp_orig = fft_amplitude_slice(base, plane="axial") + amp_spec = fft_amplitude_slice(x_spec[0] if x_spec.ndim == 4 else x_spec, plane="axial") + + # Use same scale for both + vmin = min(amp_orig.min(), amp_spec.min()) + vmax = max(amp_orig.max(), amp_spec.max()) + + fig = plt.figure(figsize=(12, 5)) + gs = fig.add_gridspec(1, 3, width_ratios=[1, 1, 0.05], wspace=0.15) + + ax0 = fig.add_subplot(gs[0]) + im = ax0.imshow(amp_orig, cmap="inferno", vmin=vmin, vmax=vmax, aspect="equal") + ax0.set_title("Original Amplitude Spectrum", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax0.axis("off") + + ax1 = fig.add_subplot(gs[1]) + ax1.imshow(amp_spec, cmap="inferno", vmin=vmin, vmax=vmax, aspect="equal") + ax1.set_title("Enhanced Amplitude Spectrum", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax1.axis("off") + + # Colorbar in dedicated subplot + ax_cbar = fig.add_subplot(gs[2]) + cbar = fig.colorbar(im, cax=ax_cbar) + cbar.set_label("Log Amplitude", fontsize=STYLE["font_label"]) + + fig.suptitle(f"Dual-domain Enhancement: {case_id}", 
fontsize=STYLE["font_title"], fontweight="bold", y=0.98) + fig.tight_layout(rect=[0, 0, 1, 0.93]) + save_figure(fig, out_dir, f"Fig7_{chr(ord('a')+case_idx)}_dual_domain_{case_id}") + + +# ============================================================================ +# J) AmpMix Augmentation Robustness (Spec J) +# ============================================================================ +def make_ampmix(cfg: Dict, case_loader: CaseLoader, runner: Optional[ModelRunner], out_dir: str) -> None: + """ + AmpMix augmentation robustness visualization. + Three columns: Original | AmpMix | Prediction comparison + """ + pairs = cfg.get("cases", {}).get("ampmix", []) + if not pairs or runner is None: + return + + for pair_idx, pair in enumerate(pairs): + case_a = pair.get("base") + case_b = pair.get("mix") + lam = float(pair.get("lam", 0.5)) + + if not case_a or not case_b: + continue + + img_a, _ = runner.load_case_tensor(case_a) + img_b, _ = runner.load_case_tensor(case_b) + + mixed = fourier_amplitude_mix(img_a[0].cpu().numpy(), img_b[0].cpu().numpy(), lam) + mixed_t = runner.torch.from_numpy(mixed).unsqueeze(0).to(runner.device) + + # Get predictions + with runner.torch.no_grad(): + logits_a, _ = runner.model(img_a.to(runner.device)) + logits_m, _ = runner.model(mixed_t) + + pred_a = (logits_a.sigmoid() > 0.5).detach().cpu().numpy()[0] + pred_m = (logits_m.sigmoid() > 0.5).detach().cpu().numpy()[0] + + case = case_loader.get_case(case_a, include_label=False) + images = case["images"] + ref_mod = "t1c" if "t1c" in images else list(images.keys())[0] + base = images[ref_mod] + + idx = get_slices(pred_a[2] > 0, base.shape) + plane = "axial" + slice_idx = idx[plane] + + fig, axes = plt.subplots(1, 3, figsize=(12, 4)) + + base2d = extract_slice(base, plane, slice_idx) + mix2d = extract_slice(normalize_volume(mixed[0]), plane, slice_idx) + + # Original + ax = axes[0] + masks_a = { + "WT": extract_slice(pred_a[0], plane, slice_idx) > 0, + "TC": extract_slice(pred_a[1], plane, 
slice_idx) > 0, + "ET": extract_slice(pred_a[2], plane, slice_idx) > 0, + } + overlay_a = overlay_masks_publication(base2d, masks_a) + ax.imshow(overlay_a, aspect="equal") + ax.set_title("Original + Prediction", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax.axis("off") + + # AmpMix image + ax = axes[1] + ax.imshow(mix2d, cmap="gray", aspect="equal") + ax.set_title(f"AmpMix (λ={lam})", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax.axis("off") + + # AmpMix prediction + ax = axes[2] + masks_m = { + "WT": extract_slice(pred_m[0], plane, slice_idx) > 0, + "TC": extract_slice(pred_m[1], plane, slice_idx) > 0, + "ET": extract_slice(pred_m[2], plane, slice_idx) > 0, + } + overlay_m = overlay_masks_publication(mix2d, masks_m) + ax.imshow(overlay_m, aspect="equal") + ax.set_title("AmpMix + Prediction", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax.axis("off") + + finalize_figure(fig, f"Augmentation Robustness: {case_a}") + save_figure(fig, out_dir, f"Fig8_{chr(ord('a')+pair_idx)}_ampmix_{case_a}") + + +# ============================================================================ +# K) Failure Cases (Spec K) +# ============================================================================ +def make_failure(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None: + """ + Failure case visualization with red boxes highlighting problem areas. 
+ """ + cases = cfg.get("cases", {}).get("failure", []) + notes = cfg.get("cases", {}).get("failure_notes", {}) + if not cases: + return + + for case_idx, case_id in enumerate(cases): + case = case_loader.get_case(case_id, include_label=True) + images = case["images"] + ref_mod = "t1c" if "t1c" in images else list(images.keys())[0] + base = images[ref_mod] + + gt_regions = label_to_regions(case["label"]) if case["label"] is not None else None + pred = pred_loader.load_method(pred_loader.ours, case_id) + pred_regions = pred["regions"] + + mask_ref = gt_regions[2] if gt_regions is not None else pred_regions[2] + idx = get_slices(mask_ref, base.shape) + plane = "axial" + slice_idx = idx[plane] + + fig, axes = plt.subplots(1, 3 if gt_regions is not None else 2, figsize=(10, 4)) + + base2d = extract_slice(base, plane, slice_idx) + + col = 0 + # Base image + ax = axes[col] + ax.imshow(base2d, cmap="gray", aspect="equal") + ax.set_title("Input", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax.axis("off") + col += 1 + + # GT + if gt_regions is not None: + ax = axes[col] + gt_masks = { + "WT": extract_slice(gt_regions[0], plane, slice_idx) > 0, + "TC": extract_slice(gt_regions[1], plane, slice_idx) > 0, + "ET": extract_slice(gt_regions[2], plane, slice_idx) > 0, + } + overlay_gt = overlay_masks_publication(base2d, gt_masks) + ax.imshow(overlay_gt, aspect="equal") + ax.set_title("Ground Truth", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax.axis("off") + col += 1 + + # Prediction with failure region highlighted + ax = axes[col] + pred_masks = { + "WT": extract_slice(pred_regions[0], plane, slice_idx) > 0, + "TC": extract_slice(pred_regions[1], plane, slice_idx) > 0, + "ET": extract_slice(pred_regions[2], plane, slice_idx) > 0, + } + # Resize if needed + for key in pred_masks: + if pred_masks[key].shape != base2d.shape: + zoom_factors = (base2d.shape[0] / pred_masks[key].shape[0], base2d.shape[1] / pred_masks[key].shape[1]) + pred_masks[key] = 
zoom(pred_masks[key].astype(float), zoom_factors, order=0) > 0.5 + + overlay_pred = overlay_masks_publication(base2d, pred_masks) + ax.imshow(overlay_pred, aspect="equal") + + # Find error region and add red box + if gt_regions is not None: + gt_et = extract_slice(gt_regions[2], plane, slice_idx) > 0 + pred_et = pred_masks["ET"] + error_region = np.logical_xor(gt_et, pred_et) + if error_region.sum() > 0: + coords = np.where(error_region) + y_min, y_max = coords[0].min(), coords[0].max() + x_min, x_max = coords[1].min(), coords[1].max() + margin = 10 + rect = Rectangle((x_min - margin, y_min - margin), + x_max - x_min + 2*margin, y_max - y_min + 2*margin, + linewidth=2, edgecolor='red', facecolor='none') + ax.add_patch(rect) + + ax.set_title("Prediction", fontsize=STYLE["font_subtitle"], fontweight="bold") + ax.axis("off") + + # Add note + note = notes.get(case_id, "") + title = f"Failure Case: {case_id}" + (f"\n{note}" if note else "") + finalize_figure(fig, title) + save_figure(fig, out_dir, f"Fig9_{chr(ord('a')+case_idx)}_failure_{case_id}") + + +# ============================================================================ +# Main +# ============================================================================ +def main(): + parser = argparse.ArgumentParser(description="Publication-quality visualization suite") + parser.add_argument("--config", required=True, help="Visualization config yaml") + parser.add_argument("--model-config", default=os.path.join(ROOT_DIR, "configs/train.yaml"), help="Model config yaml") + parser.add_argument("--checkpoint", default="", help="Model checkpoint for model-based visualizations") + parser.add_argument("--device", default="cuda") + parser.add_argument("--run", default="all", help="Comma-separated list or 'all'") + args = parser.parse_args() + + cfg = load_config(args.config) + out_dir = cfg.get("visualization", {}).get("output_dir", os.path.join(ROOT_DIR, "vis_res")) + ensure_dir(out_dir) + + case_loader = CaseLoader(cfg) + 
pred_loader = PredictionLoader(cfg) + aux_cache = AuxCache(cfg.get("predictions", {}).get("aux_dir")) + runner = ModelRunner(cfg, args.model_config, args.checkpoint, args.device) if args.checkpoint else None + + run_set = set([s.strip() for s in args.run.split(",")]) if args.run != "all" else None + + def should_run(name: str) -> bool: + return run_set is None or name in run_set + + print("=" * 60) + print("Publication-Quality Visualization Suite") + print("=" * 60) + + if should_run("qualitative"): + print("\n[B] Generating qualitative comparison...") + make_qualitative(cfg, case_loader, pred_loader, out_dir) + + if should_run("et_absent"): + print("\n[C] Generating ET-absent case study...") + make_et_absent(cfg, case_loader, aux_cache, runner, out_dir) + + if should_run("boundary"): + print("\n[D] Generating boundary error visualization...") + make_boundary(cfg, case_loader, pred_loader, out_dir) + + if should_run("tiny_et"): + print("\n[E] Generating tiny/fragmented ET visualization...") + make_tiny_et(cfg, case_loader, pred_loader, out_dir) + + if should_run("moe"): + print("\n[G] Generating MoE routing visualization...") + make_moe_routing(cfg, case_loader, aux_cache, runner, out_dir) + + if should_run("concept_tokens"): + print("\n[H] Generating concept token visualization...") + make_concept_tokens(cfg, case_loader, pred_loader, out_dir) + + if should_run("dual_domain"): + print("\n[I] Generating dual-domain enhancement visualization...") + make_dual_domain(cfg, case_loader, aux_cache, runner, out_dir) + + if should_run("ampmix"): + print("\n[J] Generating AmpMix robustness visualization...") + make_ampmix(cfg, case_loader, runner, out_dir) + + if should_run("failure"): + print("\n[K] Generating failure case visualization...") + make_failure(cfg, case_loader, pred_loader, out_dir) + + print("\n" + "=" * 60) + print(f"All visualizations saved to: {out_dir}") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git 
"""Publication-quality visualization suite for GliomaSAM3-MoE.

Loads BraTS cases and method predictions, optionally runs the model to
recover intermediate signals (ET gate, MoE routing, spectral features),
and renders figure panels to disk via a headless matplotlib backend.
"""
import argparse
import os
import sys
from typing import Dict, List, Optional, Tuple

import numpy as np
import yaml

import matplotlib
matplotlib.use("Agg")  # headless: figures are only ever written to files
import matplotlib.pyplot as plt

# Make the project src/ importable when this script is run directly.
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
SRC_DIR = os.path.join(ROOT_DIR, "src")
if SRC_DIR not in sys.path:
    sys.path.append(SRC_DIR)

from gliomasam3_moe.data.brats_dataset import BraTSDataset, SegMambaNPZDataset
from gliomasam3_moe.data.transforms_segmamba_like import get_infer_transforms
from gliomasam3_moe.models.gliomasam3_moe import GliomaSAM3_MoE

from vis_utils import (
    ensure_dir,
    load_case,
    load_prediction,
    normalize_volume,
    label_to_regions,
    regions_to_label,
    select_slices_from_mask,
    fallback_slices,
    extract_slice,
    overlay_masks,
    boundary_error_map,
    mask_boundary,
    connected_components,
    bin_by_threshold,
    fft_amplitude_slice,
    fourier_amplitude_mix,
)


def load_config(path: str) -> Dict:
    """Read a YAML config file and return it as a dict."""
    with open(path, "r") as f:
        return yaml.safe_load(f)


def get_default_colors() -> Dict[str, Tuple[float, float, float]]:
    """Default RGB overlay colors for the three BraTS regions (WT/TC/ET)."""
    return {
        "WT": (1.0, 0.85, 0.0),
        "TC": (0.0, 1.0, 0.25),
        "ET": (1.0, 0.0, 0.0),
    }


class CaseLoader:
    """Loads and caches normalized case volumes keyed by (case_id, include_label)."""

    def __init__(self, cfg: Dict):
        self.data_cfg = cfg.get("data", {})
        # Cache avoids re-reading the same case for multiple figure types.
        self.cache: Dict[Tuple[str, bool], Dict] = {}

    def _rename_modalities(self, images: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        """Map generic channel keys ("ch0".."chN") to configured modality names.

        Only applied when every key is channel-style and the count matches the
        configured modality list; otherwise the input mapping is returned as-is.
        """
        modalities = self.data_cfg.get("modalities", [])
        if modalities and all(k.startswith("ch") for k in images.keys()):
            if len(modalities) == len(images):
                return {m: images[f"ch{i}"] for i, m in enumerate(modalities)}
        return images

    def get_case(self, case_id: str, include_label: bool = True) -> Dict:
        """Return {"images", "label", "affine"} for a case, intensity-normalized.

        Results are cached; repeated calls for the same (case, include_label)
        pair return the same dict object.
        """
        key = (case_id, include_label)
        if key in self.cache:
            return self.cache[key]
        images, label, affine = load_case(self.data_cfg, case_id, include_label=include_label)
        images = self._rename_modalities(images)
        images = {k: normalize_volume(v) for k, v in images.items()}
        out = {"images": images, "label": label, "affine": affine}
        self.cache[key] = out
        return out


class PredictionLoader:
    """Resolves prediction directories for "ours", baselines, and extra methods."""

    def __init__(self, cfg: Dict):
        pred_cfg = cfg.get("predictions", {})
        self.ours = pred_cfg.get("ours", {})
        self.baselines = pred_cfg.get("baselines", [])
        self.extra = pred_cfg.get("extra_methods", [])
        self.cross_year = pred_cfg.get("cross_year", {})

    def get_all_methods(self) -> List[Dict]:
        """All method configs in display order: ours first, then baselines/extras."""
        methods = []
        if self.ours:
            methods.append(self.ours)
        methods.extend(self.baselines)
        methods.extend(self.extra)
        return methods

    def load_method(self, method_cfg: Dict, case_id: str) -> Dict:
        """Load one method's prediction for a case (delegates to vis_utils)."""
        pred_dir = method_cfg.get("dir", "")
        pred_type = method_cfg.get("type", "auto")
        return load_prediction(pred_dir, case_id, pred_type=pred_type)


class AuxCache:
    """Disk cache for model auxiliary outputs, stored as {case_id}_aux.npz."""

    def __init__(self, aux_dir: Optional[str]):
        self.aux_dir = aux_dir  # None disables caching entirely

    def path(self, case_id: str) -> Optional[str]:
        """Cache file path for a case, or None when caching is disabled."""
        if not self.aux_dir:
            return None
        return os.path.join(self.aux_dir, f"{case_id}_aux.npz")

    def load(self, case_id: str) -> Optional[Dict]:
        """Load cached arrays for a case; None if disabled or not yet cached."""
        path = self.path(case_id)
        if path and os.path.isfile(path):
            data = np.load(path)
            return {k: data[k] for k in data.files}
        return None

    def save(self, case_id: str, data: Dict) -> None:
        """Persist arrays for a case (no-op when caching is disabled)."""
        if not self.aux_dir:
            return
        ensure_dir(self.aux_dir)
        path = self.path(case_id)
        np.savez_compressed(path, **data)


class ModelRunner:
    """Wraps a GliomaSAM3_MoE checkpoint for on-demand inference.

    torch is imported lazily in __init__ so that prediction-only figure
    types can run without a torch installation.
    """

    def __init__(self, vis_cfg: Dict, model_cfg_path: str, ckpt_path: str, device: str):
        import torch
        import torch.nn.functional as F

        self.torch = torch
        self.F = F
        self.vis_cfg = vis_cfg
        self.cfg = load_config(model_cfg_path)
        # Fall back to CPU when CUDA is unavailable regardless of request.
        self.device = torch.device(device if torch.cuda.is_available() else "cpu")
        self.model = GliomaSAM3_MoE(**self.cfg["model"]).to(self.device)
        ckpt = torch.load(ckpt_path, map_location="cpu")
        # Filter out freqs_cis which is dynamically computed and may have shape mismatch
        state_dict = {k: v for k, v in ckpt["model"].items() if "freqs_cis" not in k}
        self.model.load_state_dict(state_dict, strict=False)
        self.model.eval()

    def load_case_tensor(self, case_id: str) -> Tuple["torch.Tensor", str]:
        """Load a case as a [1, C, D, H, W] tensor plus its resolved case id.

        Data paths come from the visualization config; preprocessing settings
        come from the model config.
        """
        # Use vis_cfg for data paths, model cfg for other settings
        data_cfg = self.vis_cfg.get("data", {})
        data_format = data_cfg.get("format", "nifti")
        if data_format == "segmamba_npz":
            data_dir = data_cfg.get("npz_dir") or data_cfg.get("root_dir", "")
            if case_id.endswith(".npz"):
                npz_path = case_id  # already a full path
            else:
                npz_path = os.path.join(data_dir, case_id + ".npz")
            dataset = SegMambaNPZDataset(data_dir=data_dir, npz_paths=[npz_path], test=True, ensure_npy=True)
            sample = dataset[0]
            image = sample["image"].unsqueeze(0)
            case = sample["case_id"]
        else:
            root_dir = data_cfg.get("root_dir", "")
            modalities = data_cfg.get("modalities", ["t1n", "t1c", "t2f", "t2w"])
            image_keys = [f"image{i}" for i in range(len(modalities))]
            transforms = get_infer_transforms(self.cfg, image_keys=image_keys)
            dataset = BraTSDataset(
                root_dir=root_dir,
                modalities=modalities,
                seg_name=data_cfg.get("seg_name", "seg"),
                transforms=transforms,
                include_label=False,
                case_ids=[case_id],
                image_keys=image_keys,
            )
            sample = dataset[0]
            image = sample["image"].unsqueeze(0)
            case = sample["case_id"]
        return image, case

    def infer_basic(self, image: "torch.Tensor") -> Dict:
        """Single forward pass; returns logits plus gate/routing aux tensors.

        et_pre is the raw ET probability (channel 2); et_post is the
        presence-gated ET probability (from aux when available, otherwise
        et_pre scaled by pi_et).
        """
        torch = self.torch
        with torch.no_grad():
            logits, aux = self.model(image.to(self.device))
            probs = torch.sigmoid(logits)
            pi_et = aux["pi_et"].view(probs.shape[0], 1, 1, 1, 1)
            et_pre = probs[:, 2:3]
            et_post = aux.get("et_prob_gated", et_pre * pi_et)
            out = {
                "logits": logits,
                "pi_et": aux["pi_et"],
                "moe_gamma": aux.get("moe_gamma"),
                "spectral_stats": aux.get("spectral_stats"),
                "et_pre": et_pre,
                "et_post": et_post,
            }
        return out

    def forward_intermediate(self, image: "torch.Tensor") -> Dict:
        """Manually replay the model's forward pass to expose intermediates.

        NOTE(review): this re-implements GliomaSAM3_MoE.forward step by step
        (HFDI input, spectral branch, 2D encoder, slice adapter, prompt FiLM,
        dual-domain enhancement, MoE decoder) to capture tensors the normal
        forward does not return. It must be kept in sync with the model code.
        """
        torch = self.torch
        F = self.F
        model = self.model
        with torch.no_grad():
            b, c, d, h, w = image.shape
            orig_h, orig_w = h, w
            # Symmetrically pad H/W up to a multiple of the encoder patch size.
            pad_h = (model.patch_size - (h % model.patch_size)) % model.patch_size
            pad_w = (model.patch_size - (w % model.patch_size)) % model.patch_size
            ph0 = pad_h // 2
            ph1 = pad_h - ph0
            pw0 = pad_w // 2
            pw1 = pad_w - pw0
            if pad_h > 0 or pad_w > 0:
                image = F.pad(image, (pw0, pw1, ph0, ph1, 0, 0))
                h, w = image.shape[-2:]

            image = image.to(self.device)
            x_plus, _ = model.hfdi(image)
            x_spec, spectral_stats = model.spectral(image)

            # Fold depth into the batch so slices go through the 2D encoder.
            # NOTE(review): the channel count 7 here is hard-coded to match
            # the HFDI output — confirm against the model definition.
            x2d = x_plus.permute(0, 2, 1, 3, 4).reshape(b * d, 7, h, w)
            tokens, (gh, gw) = model.encoder2d(x2d)
            n = gh * gw
            tokens = tokens.view(b, d, n, -1)
            tokens = model.slice_adapter(tokens, direction="forward")

            # Global pooled embedding drives the ET-presence head and prompts.
            z = tokens.mean(dim=(1, 2))
            pi_et = model.attr_head(z)["pi_et"]
            token_ids = model._select_concept_tokens(pi_et, label=None)
            prompt = model.prompt_encoder(token_ids)
            tokens = model.prompt_film(tokens, prompt)

            # Dual-domain enhancement: spatial (msda/fa) fused with spectral.
            u = tokens.view(b, d, gh, gw, -1).permute(0, 4, 1, 2, 3)
            u_msda = model.dual_enhance.msda(u)
            u_lv1 = model.dual_enhance.fa_level(u)
            u_fa = model.dual_enhance.fa_fuse(torch.cat([u, u_lv1], dim=1))
            pool = torch.cat([u_fa, u_msda], dim=1).mean(dim=(2, 3, 4))
            eta = torch.sigmoid(model.dual_enhance.fcf_mlp(pool)).view(b, 1, 1, 1, 1)
            u_fuse = eta * u_fa + (1.0 - eta) * u_msda
            u_spec = model.dual_enhance.spec_stem(x_spec)
            u_out = model.dual_enhance.fuse_conv(torch.cat([u_fuse, u_spec], dim=1))

            logits, gamma = model.moe_decoder(u_out, z, prompt, spectral_stats, target_size=(d, h, w))
            # Undo the symmetric padding so logits match the input geometry.
            if pad_h > 0 or pad_w > 0:
                logits = logits[:, :, :, ph0 : ph0 + orig_h, pw0 : pw0 + orig_w]

            et_pre = torch.sigmoid(logits[:, 2:3])
            et_post = et_pre * pi_et.view(b, 1, 1, 1, 1)

            # Per-expert mean probability weighted by the gate -> routing plot.
            u_up = F.interpolate(u_out, size=(d, h, w), mode="trilinear", align_corners=False)
            logits_all = torch.stack([exp(u_up) for exp in model.moe_decoder.experts], dim=1)
            prob_all = torch.sigmoid(logits_all)
            mean_prob = prob_all.mean(dim=(3, 4, 5))
            contrib = gamma.view(b, -1, 1) * mean_prob

            return {
                "pi_et": pi_et,
                "moe_gamma": gamma,
                "spectral_stats": spectral_stats,
                "et_pre": et_pre,
                "et_post": et_post,
                "expert_contrib": contrib,
                "x_spec": x_spec,
                "u_fuse": u_fuse,
                "u_spec": u_spec,
                "logits": logits,
            }
def choose_overlay_modality(cfg: Dict, images: Dict[str, np.ndarray]) -> str:
    """Pick the background modality: configured preference, then t1c/t2w/t2f/t1n,
    else the first available key."""
    pref = cfg.get("visualization", {}).get("overlay_modality")
    if pref and pref in images:
        return pref
    for cand in ["t1c", "t2w", "t2f", "t1n"]:
        if cand in images:
            return cand
    return list(images.keys())[0]


def get_slices(mask_ref: Optional[np.ndarray], vol_shape: Tuple[int, int, int]) -> Dict[str, int]:
    """Slice indices per plane centred on the reference mask; mid-volume fallback
    when the mask is empty or missing."""
    idx = select_slices_from_mask(mask_ref)
    if any(v is None for v in idx.values()):
        idx = fallback_slices(vol_shape)
    return idx


def save_fig(path: str) -> None:
    """Save the current matplotlib figure to path (creating dirs) and close it."""
    ensure_dir(os.path.dirname(path))
    plt.tight_layout()
    plt.savefig(path, dpi=200)
    plt.close()


def make_qualitative(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """[B] Grid figure per case: one row per modality, then GT, then each method,
    across axial/coronal/sagittal planes with WT/TC/ET overlays."""
    cases = cfg.get("cases", {}).get("qualitative", [])
    if not cases:
        return
    colors = cfg.get("visualization", {}).get("colors", get_default_colors())
    alpha = cfg.get("visualization", {}).get("alpha", 0.45)
    methods = pred_loader.get_all_methods()
    for case_id in cases:
        case = case_loader.get_case(case_id, include_label=True)
        images = case["images"]
        label = case["label"]
        overlay_mod = choose_overlay_modality(cfg, images)
        base = images[overlay_mod]
        # Slice selection is driven by the ET region (GT if available,
        # otherwise our own prediction; best-effort).
        mask_ref = None
        if label is not None:
            mask_ref = label_to_regions(label)[2]
        else:
            try:
                ours_pred = pred_loader.load_method(pred_loader.ours, case_id)
                mask_ref = ours_pred["regions"][2]
            except Exception:
                mask_ref = None
        idx = get_slices(mask_ref, base.shape)
        planes = ["axial", "coronal", "sagittal"]
        rows = []
        row_labels = []
        # Plain modality rows first.
        for mod in images.keys():
            rows.append([extract_slice(images[mod], p, idx[p]) for p in planes])
            row_labels.append(mod.upper())

        # Ground-truth overlay row.
        if label is not None:
            regions = label_to_regions(label)
            row = []
            for p in planes:
                base2d = extract_slice(base, p, idx[p])
                masks = {
                    "WT": extract_slice(regions[0], p, idx[p]) > 0,
                    "TC": extract_slice(regions[1], p, idx[p]) > 0,
                    "ET": extract_slice(regions[2], p, idx[p]) > 0,
                }
                row.append(overlay_masks(base2d, masks, colors, alpha=alpha))
            rows.append(row)
            row_labels.append("GT")

        # One overlay row per method.
        for method in methods:
            pred = pred_loader.load_method(method, case_id)
            regions = pred["regions"]
            row = []
            for p in planes:
                base2d = extract_slice(base, p, idx[p])
                masks = {
                    "WT": extract_slice(regions[0], p, idx[p]) > 0,
                    "TC": extract_slice(regions[1], p, idx[p]) > 0,
                    "ET": extract_slice(regions[2], p, idx[p]) > 0,
                }
                row.append(overlay_masks(base2d, masks, colors, alpha=alpha))
            rows.append(row)
            row_labels.append(method.get("name", "Method"))

        fig, axes = plt.subplots(len(rows), len(planes), figsize=(4 * len(planes), 3 * len(rows)))
        for r, row in enumerate(rows):
            for c, img in enumerate(row):
                ax = axes[r, c] if len(rows) > 1 else axes[c]
                if img.ndim == 2:
                    ax.imshow(img, cmap="gray")
                else:
                    ax.imshow(img)
                ax.axis("off")
                if r == 0:
                    ax.set_title(planes[c], fontsize=10)
            ax0 = axes[r, 0] if len(rows) > 1 else axes[0]
            # NOTE(review): set_ylabel after axis("off") likely does not render
            # in matplotlib — verify row labels actually appear in the output.
            ax0.set_ylabel(row_labels[r], rotation=0, labelpad=40, fontsize=9, va="center")
        save_fig(os.path.join(out_dir, "qualitative", f"{case_id}.png"))


def make_et_absent(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, aux: AuxCache, runner: Optional[ModelRunner], out_dir: str) -> None:
    """[C] ET-absent case study: ET probability maps before vs after the
    pi_ET presence gate, per plane, with the gate value in the title.

    Gate/probability tensors come from the aux cache when present; otherwise
    the model is run (skipped when no runner/checkpoint is available).
    NOTE(review): pred_loader is currently unused here.
    """
    cases = cfg.get("cases", {}).get("et_absent", [])
    if not cases:
        return
    colors = cfg.get("visualization", {}).get("colors", get_default_colors())
    alpha = cfg.get("visualization", {}).get("alpha", 0.5)
    for case_id in cases:
        case = case_loader.get_case(case_id, include_label=False)
        images = case["images"]
        overlay_mod = choose_overlay_modality(cfg, images)
        base = images[overlay_mod]

        aux_data = aux.load(case_id)
        needed_keys = ["pi_et", "et_pre", "et_post"]
        if aux_data is None or not all(k in aux_data for k in needed_keys):
            if runner is None:
                continue  # cannot compute without a checkpoint
            image, _ = runner.load_case_tensor(case_id)
            out = runner.forward_intermediate(image)
            new_data = {
                "pi_et": out["pi_et"].detach().cpu().numpy(),
                "et_pre": out["et_pre"].detach().cpu().numpy(),
                "et_post": out["et_post"].detach().cpu().numpy(),
            }
            # Merge into any partial cache entry rather than replacing it.
            if aux_data is not None:
                aux_data.update(new_data)
            else:
                aux_data = new_data
            aux.save(case_id, aux_data)
        if aux_data is None:
            continue

        et_pre = aux_data["et_pre"][0, 0]
        et_post = aux_data["et_post"][0, 0]
        pi_et = float(np.asarray(aux_data["pi_et"]).reshape(-1)[0])

        idx = get_slices(et_pre > 0.5, base.shape)
        planes = ["axial", "coronal", "sagittal"]
        fig, axes = plt.subplots(2, len(planes), figsize=(4 * len(planes), 6))
        for c, p in enumerate(planes):
            base2d = extract_slice(base, p, idx[p])
            pre2d = extract_slice(et_pre, p, idx[p])
            post2d = extract_slice(et_post, p, idx[p])
            for r, (prob, title) in enumerate([(pre2d, "ET before gate"), (post2d, "ET after gate")]):
                ax = axes[r, c]
                ax.imshow(base2d, cmap="gray")
                im = ax.imshow(prob, cmap="magma", alpha=0.6)
                mask = prob > 0.5
                overlay = overlay_masks(base2d, {"ET": mask}, colors, alpha=alpha)
                ax.imshow(overlay, alpha=0.4)
                ax.axis("off")
                if c == 0:
                    ax.set_ylabel(title, rotation=0, labelpad=40, fontsize=9, va="center")
                if r == 0:
                    ax.set_title(p, fontsize=10)
            fig.colorbar(im, ax=axes[:, c], fraction=0.02, pad=0.01)
        fig.suptitle(f"{case_id} | pi_ET={pi_et:.3f}", fontsize=11)
        save_fig(os.path.join(out_dir, "et_absent", f"{case_id}.png"))
def make_boundary(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """[D] Boundary-error figure: base image, GT-vs-pred boundary contours,
    and a signed boundary-distance error map for one region (default ET).

    Cases without a ground-truth label are skipped.
    """
    cases = cfg.get("cases", {}).get("boundary", [])
    if not cases:
        return
    # NOTE(review): colors is loaded but not used in this function.
    colors = cfg.get("visualization", {}).get("colors", get_default_colors())
    for case_id in cases:
        case = case_loader.get_case(case_id, include_label=True)
        if case["label"] is None:
            continue
        images = case["images"]
        overlay_mod = choose_overlay_modality(cfg, images)
        base = images[overlay_mod]
        gt_regions = label_to_regions(case["label"])
        pred = pred_loader.load_method(pred_loader.ours, case_id)
        pred_regions = pred["regions"]
        # Region to visualize is configurable; defaults to ET.
        region_idx = {"WT": 0, "TC": 1, "ET": 2}[cfg.get("visualization", {}).get("boundary_region", "ET")]
        mask_ref = gt_regions[region_idx]
        idx = get_slices(mask_ref, base.shape)
        planes = ["axial", "coronal", "sagittal"]
        fig, axes = plt.subplots(3, len(planes), figsize=(4 * len(planes), 9))
        for c, p in enumerate(planes):
            base2d = extract_slice(base, p, idx[p])
            gt2d = extract_slice(gt_regions[region_idx], p, idx[p]) > 0
            pred2d = extract_slice(pred_regions[region_idx], p, idx[p]) > 0
            err2d = extract_slice(boundary_error_map(pred_regions[region_idx], gt_regions[region_idx]), p, idx[p])

            # Row 0: plain background image.
            ax0 = axes[0, c]
            ax0.imshow(base2d, cmap="gray")
            ax0.axis("off")
            ax0.set_title(p, fontsize=10)

            # Row 1: GT boundary in red channel, prediction boundary in blue.
            ax1 = axes[1, c]
            ax1.imshow(base2d, cmap="gray")
            gt_b = mask_boundary(gt2d)
            pr_b = mask_boundary(pred2d)
            ax1.imshow(np.dstack([gt_b, np.zeros_like(gt_b), pr_b]).astype(float), alpha=0.8)
            ax1.axis("off")

            # Row 2: signed error with a symmetric color scale around zero.
            ax2 = axes[2, c]
            ax2.imshow(base2d, cmap="gray")
            max_err = float(np.max(np.abs(err2d)))
            if max_err <= 0:
                max_err = 1.0  # avoid a degenerate vmin==vmax color scale
            im = ax2.imshow(err2d, cmap="coolwarm", alpha=0.7, vmin=-max_err, vmax=max_err)
            ax2.axis("off")
            fig.colorbar(im, ax=ax2, fraction=0.03, pad=0.01)
        # NOTE(review): ylabels after axis("off") may not render — verify.
        axes[0, 0].set_ylabel("Base", rotation=0, labelpad=35, va="center", fontsize=9)
        axes[1, 0].set_ylabel("GT vs Pred\nBoundary", rotation=0, labelpad=35, va="center", fontsize=9)
        axes[2, 0].set_ylabel("Signed Error", rotation=0, labelpad=35, va="center", fontsize=9)
        save_fig(os.path.join(out_dir, "boundary", f"{case_id}.png"))


def make_tiny_et(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """[E] Tiny/fragmented-ET figure: ET-only overlays for GT and every method,
    across the three anatomical planes."""
    cases = cfg.get("cases", {}).get("tiny_et", [])
    if not cases:
        return
    colors = cfg.get("visualization", {}).get("colors", get_default_colors())
    alpha = cfg.get("visualization", {}).get("alpha", 0.5)
    methods = pred_loader.get_all_methods()
    for case_id in cases:
        case = case_loader.get_case(case_id, include_label=True)
        images = case["images"]
        overlay_mod = choose_overlay_modality(cfg, images)
        base = images[overlay_mod]
        gt_regions = label_to_regions(case["label"]) if case["label"] is not None else None
        mask_ref = gt_regions[2] if gt_regions is not None else None
        idx = get_slices(mask_ref, base.shape)
        planes = ["axial", "coronal", "sagittal"]
        rows = []
        row_labels = []
        if gt_regions is not None:
            row = []
            for p in planes:
                base2d = extract_slice(base, p, idx[p])
                et2d = extract_slice(gt_regions[2], p, idx[p]) > 0
                row.append(overlay_masks(base2d, {"ET": et2d}, colors, alpha=alpha))
            rows.append(row)
            row_labels.append("GT")
        for method in methods:
            pred = pred_loader.load_method(method, case_id)
            regions = pred["regions"]
            row = []
            for p in planes:
                base2d = extract_slice(base, p, idx[p])
                et2d = extract_slice(regions[2], p, idx[p]) > 0
                row.append(overlay_masks(base2d, {"ET": et2d}, colors, alpha=alpha))
            rows.append(row)
            row_labels.append(method.get("name", "Method"))

        fig, axes = plt.subplots(len(rows), len(planes), figsize=(4 * len(planes), 3 * len(rows)))
        for r, row in enumerate(rows):
            for c, img in enumerate(row):
                ax = axes[r, c] if len(rows) > 1 else axes[c]
                ax.imshow(img)
                ax.axis("off")
                if r == 0:
                    ax.set_title(planes[c], fontsize=10)
            ax0 = axes[r, 0] if len(rows) > 1 else axes[0]
            ax0.set_ylabel(row_labels[r], rotation=0, labelpad=35, va="center", fontsize=9)
        save_fig(os.path.join(out_dir, "tiny_et", f"{case_id}.png"))
def make_cross_year(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """[F] Cross-year generalization: image / (optional GT) / prediction rows,
    one figure per case, grouped under a per-direction output folder."""
    cross_cfg = cfg.get("cases", {}).get("cross_year", {})
    if not cross_cfg:
        return
    colors = cfg.get("visualization", {}).get("colors", get_default_colors())
    alpha = cfg.get("visualization", {}).get("alpha", 0.45)
    # Each entry maps a direction name (e.g. "2021_to_2023") to cases+method.
    for direction, entry in cross_cfg.items():
        cases = entry.get("cases", [])
        method = entry.get("method", pred_loader.ours)
        if not cases or not method:
            continue
        for case_id in cases:
            case = case_loader.get_case(case_id, include_label=True)
            images = case["images"]
            overlay_mod = choose_overlay_modality(cfg, images)
            base = images[overlay_mod]
            gt_regions = label_to_regions(case["label"]) if case["label"] is not None else None
            pred = pred_loader.load_method(method, case_id)
            pred_regions = pred["regions"]
            # Center slices on GT ET when available, else the predicted ET.
            mask_ref = gt_regions[2] if gt_regions is not None else pred_regions[2]
            idx = get_slices(mask_ref, base.shape)
            planes = ["axial", "coronal", "sagittal"]

            # Two rows (image/pred) or three when GT exists.
            fig, axes = plt.subplots(3 if gt_regions is not None else 2, len(planes), figsize=(4 * len(planes), 8))
            for c, p in enumerate(planes):
                base2d = extract_slice(base, p, idx[p])
                ax0 = axes[0, c]
                ax0.imshow(base2d, cmap="gray")
                ax0.axis("off")
                ax0.set_title(p, fontsize=10)
                if gt_regions is not None:
                    gt2d = {
                        "WT": extract_slice(gt_regions[0], p, idx[p]) > 0,
                        "TC": extract_slice(gt_regions[1], p, idx[p]) > 0,
                        "ET": extract_slice(gt_regions[2], p, idx[p]) > 0,
                    }
                    axes[1, c].imshow(overlay_masks(base2d, gt2d, colors, alpha=alpha))
                    axes[1, c].axis("off")
                    pred_row = 2
                else:
                    pred_row = 1
                pred2d = {
                    "WT": extract_slice(pred_regions[0], p, idx[p]) > 0,
                    "TC": extract_slice(pred_regions[1], p, idx[p]) > 0,
                    "ET": extract_slice(pred_regions[2], p, idx[p]) > 0,
                }
                axes[pred_row, c].imshow(overlay_masks(base2d, pred2d, colors, alpha=alpha))
                axes[pred_row, c].axis("off")
            axes[0, 0].set_ylabel("Image", rotation=0, labelpad=35, va="center", fontsize=9)
            if gt_regions is not None:
                axes[1, 0].set_ylabel("GT", rotation=0, labelpad=35, va="center", fontsize=9)
                axes[2, 0].set_ylabel(method.get("name", "Method"), rotation=0, labelpad=35, va="center", fontsize=9)
            else:
                axes[1, 0].set_ylabel(method.get("name", "Method"), rotation=0, labelpad=35, va="center", fontsize=9)
            save_fig(os.path.join(out_dir, "cross_year", direction, f"{case_id}.png"))


def make_moe_routing(cfg: Dict, case_loader: CaseLoader, aux: AuxCache, runner: Optional[ModelRunner], out_dir: str) -> None:
    """[G] MoE routing bar chart: per-expert WT/TC/ET contribution bars plus
    the gate weights (gamma) as a dashed line.

    Uses cached moe_gamma/expert_contrib arrays, recomputing via the model
    runner when missing. NOTE(review): case_loader is currently unused here.
    """
    cases = cfg.get("cases", {}).get("moe", [])
    if not cases:
        return
    for case_id in cases:
        aux_data = aux.load(case_id)
        needed_keys = ["moe_gamma", "expert_contrib"]
        if aux_data is None or not all(k in aux_data for k in needed_keys):
            if runner is None:
                continue
            image, _ = runner.load_case_tensor(case_id)
            out = runner.forward_intermediate(image)
            new_data = {
                "moe_gamma": out["moe_gamma"].detach().cpu().numpy(),
                "expert_contrib": out["expert_contrib"].detach().cpu().numpy(),
            }
            # Merge with existing data
            if aux_data is not None:
                aux_data.update(new_data)
            else:
                aux_data = new_data
            aux.save(case_id, aux_data)
        if aux_data is None:
            continue
        gamma = np.asarray(aux_data["moe_gamma"])[0]
        contrib = np.asarray(aux_data["expert_contrib"])[0]  # [experts, 3 regions]
        m = contrib.shape[0]
        x = np.arange(m)
        fig, ax = plt.subplots(figsize=(6, 3))
        width = 0.25
        ax.bar(x - width, contrib[:, 0], width, label="WT")
        ax.bar(x, contrib[:, 1], width, label="TC")
        ax.bar(x + width, contrib[:, 2], width, label="ET")
        ax.plot(x, gamma, "k--", label="gamma")
        ax.set_xlabel("Expert")
        ax.set_ylabel("Contribution")
        ax.set_title(case_id)
        ax.legend(fontsize=8)
        save_fig(os.path.join(out_dir, "moe_routing", f"{case_id}.png"))
def make_concept_tokens(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """[H] Concept-token card: derive discrete presence/fragmentation/scale
    tokens from predicted (and GT, when available) region masks and render
    them as a small monospace text figure per case."""
    cases = cfg.get("cases", {}).get("concept_tokens", [])
    if not cases:
        return
    # Thresholds that bin ET component count and ET voxel count into tokens.
    frag_bins = cfg.get("visualization", {}).get("frag_bins", [1, 3, 5])
    scale_bins = cfg.get("visualization", {}).get("scale_bins", [50, 200, 500])
    for case_id in cases:
        case = case_loader.get_case(case_id, include_label=True)
        pred = pred_loader.load_method(pred_loader.ours, case_id)
        pred_regions = pred["regions"]
        gt_regions = label_to_regions(case["label"]) if case["label"] is not None else None

        def tokens_from_regions(regions: np.ndarray) -> Dict[str, int]:
            # Presence flags per region plus binned ET fragmentation/scale.
            et = regions[2] > 0
            et_count = int(et.sum())
            _, comp = connected_components(et)
            frag_bin = bin_by_threshold(comp, frag_bins)
            scale_bin = bin_by_threshold(et_count, scale_bins)
            return {
                "WT": int(regions[0].sum() > 0),
                "TC": int(regions[1].sum() > 0),
                "ET": int(et_count > 0),
                "FRAG_BIN": frag_bin,
                "SCALE_BIN": scale_bin,
            }

        pred_tokens = tokens_from_regions(pred_regions)
        gt_tokens = tokens_from_regions(gt_regions) if gt_regions is not None else None

        fig, ax = plt.subplots(figsize=(6, 2))
        ax.axis("off")
        lines = [
            f"Pred: WT={pred_tokens['WT']} TC={pred_tokens['TC']} ET={pred_tokens['ET']} "
            f"FRAG={pred_tokens['FRAG_BIN']} SCALE={pred_tokens['SCALE_BIN']}"
        ]
        if gt_tokens is not None:
            lines.append(
                f"GT:   WT={gt_tokens['WT']} TC={gt_tokens['TC']} ET={gt_tokens['ET']} "
                f"FRAG={gt_tokens['FRAG_BIN']} SCALE={gt_tokens['SCALE_BIN']}"
            )
        ax.text(0.01, 0.6, "\n".join(lines), fontsize=10, family="monospace")
        ax.set_title(case_id)
        save_fig(os.path.join(out_dir, "concept_tokens", f"{case_id}.png"))


def make_dual_domain(cfg: Dict, case_loader: CaseLoader, aux: AuxCache, runner: Optional[ModelRunner], out_dir: str) -> None:
    """[I] Dual-domain figure: FFT amplitude of original vs enhanced input,
    alongside channel-averaged spatial-fused and spectral feature maps."""
    cases = cfg.get("cases", {}).get("dual_domain", [])
    if not cases:
        return
    for case_id in cases:
        case = case_loader.get_case(case_id, include_label=False)
        images = case["images"]
        overlay_mod = choose_overlay_modality(cfg, images)
        base = images[overlay_mod]
        aux_data = aux.load(case_id)
        needed_keys = ["x_spec", "u_fuse", "u_spec"]
        if aux_data is None or not all(k in aux_data for k in needed_keys):
            if runner is None:
                continue
            image, _ = runner.load_case_tensor(case_id)
            out = runner.forward_intermediate(image)
            new_data = {
                "x_spec": out["x_spec"].detach().cpu().numpy(),
                "u_fuse": out["u_fuse"].detach().cpu().numpy(),
                "u_spec": out["u_spec"].detach().cpu().numpy(),
            }
            if aux_data is not None:
                aux_data.update(new_data)
            else:
                aux_data = new_data
            aux.save(case_id, aux_data)
        x_spec = aux_data["x_spec"][0]
        # Collapse the channel axis to a single 3D map for display.
        u_fuse = aux_data["u_fuse"][0].mean(axis=0)
        u_spec = aux_data["u_spec"][0].mean(axis=0)

        amp_orig = fft_amplitude_slice(base, plane="axial")
        amp_spec = fft_amplitude_slice(x_spec[0], plane="axial")

        mid = base.shape[0] // 2  # assumes axis 0 is depth — TODO confirm
        u_fuse2d = extract_slice(normalize_volume(u_fuse), "axial", mid)
        u_spec2d = extract_slice(normalize_volume(u_spec), "axial", mid)

        fig, axes = plt.subplots(2, 2, figsize=(6, 6))
        axes[0, 0].imshow(amp_orig, cmap="inferno")
        axes[0, 0].set_title("Amplitude (orig)")
        axes[0, 1].imshow(amp_spec, cmap="inferno")
        axes[0, 1].set_title("Amplitude (enhanced)")
        axes[1, 0].imshow(u_fuse2d, cmap="viridis")
        axes[1, 0].set_title("Spatial-fused features")
        axes[1, 1].imshow(u_spec2d, cmap="viridis")
        axes[1, 1].set_title("Spectral features")
        for ax in axes.flat:
            ax.axis("off")
        save_fig(os.path.join(out_dir, "dual_domain", f"{case_id}.png"))


def make_ampmix(cfg: Dict, case_loader: CaseLoader, runner: Optional[ModelRunner], out_dir: str) -> None:
    """[J] AmpMix robustness: mix Fourier amplitudes of two cases, run the
    model on original and mixed inputs, and compare the predictions.

    Requires a model runner; pairs are skipped without one.
    """
    pairs = cfg.get("cases", {}).get("ampmix", [])
    if not pairs:
        return
    colors = cfg.get("visualization", {}).get("colors", get_default_colors())
    alpha = cfg.get("visualization", {}).get("alpha", 0.45)
    for pair in pairs:
        case_a = pair.get("base")
        case_b = pair.get("mix")
        lam = float(pair.get("lam", 0.5))
        if not case_a or not case_b:
            continue
        if runner is None:
            continue

        img_a, _ = runner.load_case_tensor(case_a)
        img_b, _ = runner.load_case_tensor(case_b)
        # Amplitude mixing happens in numpy; rewrap for inference.
        mixed = fourier_amplitude_mix(img_a[0].cpu().numpy(), img_b[0].cpu().numpy(), lam)
        mixed_t = runner.torch.from_numpy(mixed).unsqueeze(0).to(runner.device)

        out_a = runner.infer_basic(img_a)
        out_m = runner.infer_basic(mixed_t)
        pred_a = (out_a["logits"].sigmoid() > 0.5).detach().cpu().numpy()[0]
        pred_m = (out_m["logits"].sigmoid() > 0.5).detach().cpu().numpy()[0]

        case = case_loader.get_case(case_a, include_label=False)
        images = case["images"]
        overlay_mod = choose_overlay_modality(cfg, images)
        base = images[overlay_mod]
        idx = get_slices(pred_a[2] > 0, base.shape)
        plane = "axial"
        base2d = extract_slice(base, plane, idx[plane])
        mix2d = extract_slice(normalize_volume(mixed[0]), plane, idx[plane])

        fig, axes = plt.subplots(2, 2, figsize=(6, 6))
        axes[0, 0].imshow(base2d, cmap="gray")
        axes[0, 0].set_title("Original")
        axes[0, 1].imshow(mix2d, cmap="gray")
        axes[0, 1].set_title("AmpMix")

        axes[1, 0].imshow(overlay_masks(base2d, {
            "WT": extract_slice(pred_a[0], plane, idx[plane]) > 0,
            "TC": extract_slice(pred_a[1], plane, idx[plane]) > 0,
            "ET": extract_slice(pred_a[2], plane, idx[plane]) > 0,
        }, colors, alpha=alpha))
        axes[1, 0].set_title("Pred (orig)")

        axes[1, 1].imshow(overlay_masks(mix2d, {
            "WT": extract_slice(pred_m[0], plane, idx[plane]) > 0,
            "TC": extract_slice(pred_m[1], plane, idx[plane]) > 0,
            "ET": extract_slice(pred_m[2], plane, idx[plane]) > 0,
        }, colors, alpha=alpha))
        axes[1, 1].set_title("Pred (AmpMix)")
        for ax in axes.flat:
            ax.axis("off")
        save_fig(os.path.join(out_dir, "ampmix", f"{case_a}_mix_{case_b}.png"))
def make_failure_cases(cfg: Dict, case_loader: CaseLoader, pred_loader: PredictionLoader, out_dir: str) -> None:
    """[K] Failure-case panels: single axial slice with image / (optional GT) /
    our prediction, annotated with a per-case note from the config."""
    cases = cfg.get("cases", {}).get("failure", [])
    if not cases:
        return
    notes = cfg.get("cases", {}).get("failure_notes", {})  # case_id -> caption
    colors = cfg.get("visualization", {}).get("colors", get_default_colors())
    alpha = cfg.get("visualization", {}).get("alpha", 0.45)
    for case_id in cases:
        case = case_loader.get_case(case_id, include_label=True)
        images = case["images"]
        overlay_mod = choose_overlay_modality(cfg, images)
        base = images[overlay_mod]
        gt_regions = label_to_regions(case["label"]) if case["label"] is not None else None
        pred = pred_loader.load_method(pred_loader.ours, case_id)
        pred_regions = pred["regions"]
        mask_ref = gt_regions[2] if gt_regions is not None else pred_regions[2]
        idx = get_slices(mask_ref, base.shape)
        plane = "axial"
        base2d = extract_slice(base, plane, idx[plane])
        fig, axes = plt.subplots(1, 3 if gt_regions is not None else 2, figsize=(9, 3))
        axes[0].imshow(base2d, cmap="gray")
        axes[0].set_title("Image")
        axes[0].axis("off")
        col = 1
        if gt_regions is not None:
            axes[1].imshow(overlay_masks(base2d, {
                "WT": extract_slice(gt_regions[0], plane, idx[plane]) > 0,
                "TC": extract_slice(gt_regions[1], plane, idx[plane]) > 0,
                "ET": extract_slice(gt_regions[2], plane, idx[plane]) > 0,
            }, colors, alpha=alpha))
            axes[1].set_title("GT")
            axes[1].axis("off")
            col = 2
        axes[col].imshow(overlay_masks(base2d, {
            "WT": extract_slice(pred_regions[0], plane, idx[plane]) > 0,
            "TC": extract_slice(pred_regions[1], plane, idx[plane]) > 0,
            "ET": extract_slice(pred_regions[2], plane, idx[plane]) > 0,
        }, colors, alpha=alpha))
        axes[col].set_title(pred_loader.ours.get("name", "Ours"))
        axes[col].axis("off")
        fig.suptitle(notes.get(case_id, ""), fontsize=9)
        save_fig(os.path.join(out_dir, "failure", f"{case_id}.png"))


def make_efficiency(cfg: Dict, case_loader: CaseLoader, out_dir: str) -> None:
    """[L] Sliding-window illustration: scatter the centers of inference ROI
    windows that intersect the mid-axial slice over the base image."""
    info = cfg.get("efficiency", {})
    case_id = info.get("case_id")
    if not case_id:
        return
    roi = info.get("roi_size", [128, 128, 128])
    overlap = float(info.get("overlap", 0.5))
    case = case_loader.get_case(case_id, include_label=False)
    images = case["images"]
    overlay_mod = choose_overlay_modality(cfg, images)
    base = images[overlay_mod]
    d, h, w = base.shape
    rz, ry, rx = roi
    # Stride per axis from the configured fractional overlap.
    stride = [max(1, int(r * (1.0 - overlap))) for r in roi]
    centers = []
    for z in range(0, max(1, d - rz + 1), stride[0]):
        for y in range(0, max(1, h - ry + 1), stride[1]):
            for x in range(0, max(1, w - rx + 1), stride[2]):
                centers.append((z + rz // 2, y + ry // 2, x + rx // 2))
    mid = d // 2
    base2d = extract_slice(base, "axial", mid)
    fig, ax = plt.subplots(figsize=(5, 5))
    ax.imshow(base2d, cmap="gray")
    for z, y, x in centers:
        if abs(z - mid) <= rz // 2:  # window intersects the shown slice
            yy = y
            xx = x
            # NOTE(review): the vertical flip (shape[0] - yy) assumes a
            # particular extract_slice orientation — verify visually.
            ax.scatter(xx, base2d.shape[0] - yy, s=2, c="yellow", alpha=0.6)
    ax.set_title("Sliding-window centers")
    ax.axis("off")
    save_fig(os.path.join(out_dir, "efficiency", f"{case_id}.png"))
def main() -> None:
    """CLI entry point: load configs, build loaders (and a ModelRunner only
    when a checkpoint is given), then run the selected figure generators."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", required=True, help="Visualization config yaml.")
    parser.add_argument("--model-config", default=os.path.join(ROOT_DIR, "configs/train.yaml"), help="Model config yaml.")
    parser.add_argument("--checkpoint", default="", help="Checkpoint for model-based visualizations.")
    parser.add_argument("--device", default="cuda")
    parser.add_argument("--run", default="all", help="Comma list or 'all'.")
    args = parser.parse_args()

    cfg = load_config(args.config)
    out_dir = cfg.get("visualization", {}).get("output_dir", os.path.join(ROOT_DIR, "visualizations", "outputs"))
    ensure_dir(out_dir)

    case_loader = CaseLoader(cfg)
    pred_loader = PredictionLoader(cfg)
    aux_cache = AuxCache(cfg.get("predictions", {}).get("aux_dir"))
    # Model-based figures degrade gracefully: without a checkpoint, runner is
    # None and those generators skip cases whose aux outputs are not cached.
    runner = ModelRunner(cfg, args.model_config, args.checkpoint, args.device) if args.checkpoint else None

    run_set = set([s.strip() for s in args.run.split(",")]) if args.run != "all" else None

    def should_run(name: str) -> bool:
        # "all" (run_set None) enables everything; otherwise exact-name match.
        return run_set is None or name in run_set

    if should_run("qualitative"):
        make_qualitative(cfg, case_loader, pred_loader, out_dir)
    if should_run("et_absent"):
        make_et_absent(cfg, case_loader, pred_loader, aux_cache, runner, out_dir)
    if should_run("boundary"):
        make_boundary(cfg, case_loader, pred_loader, out_dir)
    if should_run("tiny_et"):
        make_tiny_et(cfg, case_loader, pred_loader, out_dir)
    if should_run("cross_year"):
        make_cross_year(cfg, case_loader, pred_loader, out_dir)
    if should_run("moe"):
        make_moe_routing(cfg, case_loader, aux_cache, runner, out_dir)
    if should_run("concept_tokens"):
        make_concept_tokens(cfg, case_loader, pred_loader, out_dir)
    if should_run("dual_domain"):
        make_dual_domain(cfg, case_loader, aux_cache, runner, out_dir)
    if should_run("ampmix"):
        make_ampmix(cfg, case_loader, runner, out_dir)
    if should_run("failure"):
        make_failure_cases(cfg, case_loader, pred_loader, out_dir)
    if should_run("efficiency"):
        make_efficiency(cfg, case_loader, out_dir)


if __name__ == "__main__":
    main()
# ============================================================================
# (patch boundary) The following content belongs to a second new file in the
# same patch: gliomasam3_moe/visualizations/vis_utils.py
# ============================================================================
"""Shared I/O and image-processing helpers for the visualization suite."""
import os
from typing import Dict, Iterable, List, Optional, Tuple

import numpy as np
import nibabel as nib
from scipy import ndimage as ndi


def ensure_dir(path: str) -> None:
    """Create a directory (and parents) if it does not already exist."""
    os.makedirs(path, exist_ok=True)


def _resolve_nii(case_dir: str, name: str) -> str:
    """Return the existing '<case_dir>/<name>.nii.gz' or '.nii' path.

    Raises FileNotFoundError when neither variant exists.
    """
    for ext in [".nii.gz", ".nii"]:
        path = os.path.join(case_dir, name + ext)
        if os.path.isfile(path):
            return path
    raise FileNotFoundError(f"Missing NIfTI: {case_dir}/{name}.nii(.gz)")


def load_nifti(path: str) -> Tuple[np.ndarray, np.ndarray]:
    """Load a NIfTI file; return (voxel data as ndarray, 4x4 affine)."""
    img = nib.load(path)
    data = img.get_fdata()
    return np.asarray(data), img.affine
def normalize_volume(vol: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    """Robustly rescale a volume to [0, 1] using its 1st/99th percentiles.

    NaN/inf are zeroed first; a (near-)constant volume maps to all zeros.
    """
    x = np.asarray(vol, dtype=np.float32)
    x = np.nan_to_num(x, nan=0.0, posinf=0.0, neginf=0.0)
    flat = x.reshape(-1)
    if flat.size == 0:
        return np.zeros_like(x, dtype=np.float32)
    lo, hi = np.percentile(flat, [1, 99])
    if hi - lo < eps:
        return np.zeros_like(x, dtype=np.float32)
    x = np.clip(x, lo, hi)
    x = (x - lo) / (hi - lo + eps)
    return x


def label_to_regions(label: np.ndarray) -> np.ndarray:
    """Convert a BraTS label map to stacked region masks [WT, TC, ET].

    Uses the labels-1/2/4 convention: WT = any tumor (>0), TC = labels 1 or 4,
    ET = label 4. Returns a uint8 array of shape [3, D, H, W].
    """
    label = np.asarray(label)
    wt = label > 0
    tc = (label == 1) | (label == 4)
    et = label == 4
    return np.stack([wt, tc, et], axis=0).astype(np.uint8)


def regions_to_label(regions: np.ndarray) -> np.ndarray:
    """Inverse of label_to_regions: [3, D, H, W] region masks -> label map.

    Writes WT as 2, then overwrites TC voxels with 1 and ET voxels with 4,
    relying on the nesting ET ⊆ TC ⊆ WT.
    """
    if regions.ndim != 4 or regions.shape[0] != 3:
        raise ValueError("regions must be [3, D, H, W]")
    wt = regions[0] > 0.5
    tc = regions[1] > 0.5
    et = regions[2] > 0.5
    label = np.zeros_like(wt, dtype=np.int16)
    label[wt] = 2
    label[tc] = 1
    label[et] = 4
    return label


def load_case_nifti(
    root_dir: str,
    case_id: str,
    modalities: List[str],
    seg_name: str = "seg",
    include_label: bool = True,
) -> Tuple[Dict[str, np.ndarray], Optional[np.ndarray], np.ndarray]:
    """Load modality volumes (and optionally the segmentation) for one case.

    Returns (images keyed by modality name, label or None, affine). The
    affine of the last-loaded modality is returned; identity if none loaded.
    """
    case_dir = os.path.join(root_dir, case_id)
    images: Dict[str, np.ndarray] = {}
    affine = None
    for mod in modalities:
        path = _resolve_nii(case_dir, mod)
        arr, affine = load_nifti(path)
        images[mod] = np.asarray(arr, dtype=np.float32)
    label = None
    if include_label:
        seg_path = _resolve_nii(case_dir, seg_name)
        label, _ = load_nifti(seg_path)
        label = np.asarray(label, dtype=np.int16)
    if affine is None:
        affine = np.eye(4)
    return images, label, affine


def _load_npz_arrays(npz_path: str, include_label: bool) -> Tuple[np.ndarray, Optional[np.ndarray]]:
    """Read image (key 'data') and optional segmentation (key 'seg') from an npz."""
    data = np.load(npz_path)
    image = data["data"]
    label = data["seg"] if include_label and "seg" in data else None
    return image, label


def load_case_npz(
    npz_dir: str,
    case_id: str,
    include_label: bool = True,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
    """Load a SegMamba-style npz case as (image [C, D, H, W], label or None).

    Prefers sidecar '.npy' / '_seg.npy' files (memory-mapped) when present,
    falling back to the compressed npz. Squeezes a leading singleton batch
    axis and moves a trailing 4-channel axis to the front if needed.
    """
    if case_id.endswith(".npz"):
        npz_path = case_id  # already a full path
    else:
        npz_path = os.path.join(npz_dir, case_id + ".npz")
    if not os.path.isfile(npz_path):
        raise FileNotFoundError(f"Missing npz: {npz_path}")

    # "xxx.npz" -> "xxx.npy" and "xxx_seg.npy" sidecars.
    npy_path = npz_path[:-3] + "npy"
    seg_path = npz_path[:-4] + "_seg.npy"
    if os.path.isfile(npy_path):
        image = np.load(npy_path, mmap_mode="r")
    else:
        image, _ = _load_npz_arrays(npz_path, include_label=False)

    label = None
    if include_label:
        if os.path.isfile(seg_path):
            label = np.load(seg_path, mmap_mode="r")
        else:
            _, label = _load_npz_arrays(npz_path, include_label=True)

    image = np.asarray(image, dtype=np.float32)
    if image.ndim == 5 and image.shape[0] == 1:
        image = image[0]
    if image.ndim == 4 and image.shape[0] != 4 and image.shape[-1] == 4:
        image = image.transpose(3, 0, 1, 2)  # channels-last -> channels-first

    if label is not None:
        label = np.asarray(label, dtype=np.int16)
        if label.ndim == 4 and label.shape[0] == 1:
            label = label[0]
    return image, label


def load_case(
    data_cfg: Dict,
    case_id: str,
    include_label: bool = True,
) -> Tuple[Dict[str, np.ndarray], Optional[np.ndarray], np.ndarray]:
    """Format-dispatching case loader.

    For 'segmamba_npz' the channels are exposed under generic keys
    'ch0'..'chN' with an identity affine; otherwise NIfTI loading with real
    modality names and the on-disk affine.
    """
    data_format = data_cfg.get("format", "nifti")
    if data_format == "segmamba_npz":
        npz_dir = data_cfg.get("npz_dir") or data_cfg.get("root_dir", "")
        image, label = load_case_npz(npz_dir, case_id, include_label=include_label)
        images = {f"ch{i}": image[i] for i in range(image.shape[0])}
        affine = np.eye(4)
        return images, label, affine
    root_dir = data_cfg.get("root_dir", "")
    modalities = data_cfg.get("modalities", ["t1n", "t1c", "t2f", "t2w"])
    seg_name = data_cfg.get("seg_name", "seg")
    return load_case_nifti(root_dir, case_id, modalities, seg_name=seg_name, include_label=include_label)
Optional[str]: + for ext in [".nii.gz", ".nii"]: + path = os.path.join(pred_dir, base + ext) + if os.path.isfile(path): + return path + return None + + pred_type = pred_type.lower() + paths = { + "regions_prob": _find(f"{case_id}_regions_prob"), + "regions_bin": _find(f"{case_id}_regions_bin"), + "label": _find(f"{case_id}_label"), + "segmamba_3c": _find(f"{case_id}"), + } + if pred_type == "auto": + for key in ["regions_prob", "regions_bin", "label", "segmamba_3c"]: + if paths[key] is not None: + pred_type = key + break + path = paths.get(pred_type) + if path is None: + raise FileNotFoundError(f"No prediction found for {case_id} in {pred_dir}") + + arr, _ = load_nifti(path) + arr = np.asarray(arr) + out: Dict[str, Optional[np.ndarray]] = {"label": None, "regions": None, "prob": None} + if pred_type in {"regions_prob", "regions_bin"}: + if arr.ndim != 4 or arr.shape[-1] != 3: + raise ValueError(f"Expected (D,H,W,3) for regions, got {arr.shape}") + regions = arr.transpose(3, 0, 1, 2) + out["prob"] = regions.astype(np.float32) if pred_type == "regions_prob" else None + out["regions"] = (regions > 0.5).astype(np.uint8) if pred_type == "regions_prob" else regions.astype(np.uint8) + out["label"] = regions_to_label(out["regions"]) + elif pred_type == "segmamba_3c": + if arr.ndim != 4 or arr.shape[-1] != 3: + raise ValueError(f"Expected (D,H,W,3) for SegMamba 3c, got {arr.shape}") + regions = arr.transpose(3, 0, 1, 2).astype(np.uint8) + out["regions"] = regions + out["label"] = regions_to_label(regions) + else: + label = arr.astype(np.int16) + out["label"] = label + out["regions"] = label_to_regions(label) + return out + + +def select_slices_from_mask(mask: Optional[np.ndarray]) -> Dict[str, int]: + if mask is None or mask.sum() == 0: + return {"axial": None, "coronal": None, "sagittal": None} + m = mask.astype(np.uint8) + axial = int(np.argmax(m.sum(axis=(1, 2)))) + coronal = int(np.argmax(m.sum(axis=(0, 2)))) + sagittal = int(np.argmax(m.sum(axis=(0, 1)))) + return 
{"axial": axial, "coronal": coronal, "sagittal": sagittal} + + +def fallback_slices(shape: Tuple[int, int, int]) -> Dict[str, int]: + d, h, w = shape + return {"axial": d // 2, "coronal": h // 2, "sagittal": w // 2} + + +def extract_slice(vol: np.ndarray, plane: str, idx: int) -> np.ndarray: + if plane == "axial": + img = vol[idx, :, :] + elif plane == "coronal": + img = vol[:, idx, :] + elif plane == "sagittal": + img = vol[:, :, idx] + else: + raise ValueError(f"Unknown plane: {plane}") + return np.rot90(img) + + +def mask_boundary(mask2d: np.ndarray, iterations: int = 1) -> np.ndarray: + if mask2d.sum() == 0: + return mask2d.astype(bool) + eroded = ndi.binary_erosion(mask2d.astype(bool), iterations=iterations) + return np.logical_xor(mask2d.astype(bool), eroded) + + +def overlay_masks( + base2d: np.ndarray, + masks: Dict[str, np.ndarray], + colors: Dict[str, Tuple[float, float, float]], + alpha: float = 0.5, + draw_boundary: bool = True, + boundary_width: int = 1, +) -> np.ndarray: + base = np.clip(base2d, 0.0, 1.0) + rgb = np.stack([base, base, base], axis=-1) + order = ["WT", "TC", "ET"] + for key in order: + if key not in masks: + continue + m = masks[key].astype(bool) + # Handle shape mismatch by resizing mask to match base + if m.shape != base.shape: + from scipy.ndimage import zoom + zoom_factors = (base.shape[0] / m.shape[0], base.shape[1] / m.shape[1]) + m = zoom(m.astype(float), zoom_factors, order=0) > 0.5 + if m.sum() == 0: + continue + color = np.array(colors.get(key, (1.0, 0.0, 0.0)), dtype=np.float32) + rgb[m] = (1.0 - alpha) * rgb[m] + alpha * color + if draw_boundary: + b = mask_boundary(m, iterations=boundary_width) + rgb[b] = color + return rgb + + +def signed_distance(mask: np.ndarray) -> np.ndarray: + mask = mask.astype(bool) + if mask.sum() == 0: + return np.zeros_like(mask, dtype=np.float32) + outside = ndi.distance_transform_edt(~mask) + inside = ndi.distance_transform_edt(mask) + return (inside - outside).astype(np.float32) + + +def 
boundary_error_map(pred: np.ndarray, gt: np.ndarray) -> np.ndarray: + pred = pred.astype(bool) + gt = gt.astype(bool) + dist = np.abs(signed_distance(gt)) + err = np.zeros_like(dist, dtype=np.float32) + err[pred & ~gt] = dist[pred & ~gt] + err[~pred & gt] = -dist[~pred & gt] + return err + + +def connected_components(mask: np.ndarray) -> Tuple[np.ndarray, int]: + labeled, num = ndi.label(mask.astype(np.uint8)) + return labeled, int(num) + + +def bin_by_threshold(value: float, thresholds: Iterable[float]) -> int: + for i, t in enumerate(thresholds): + if value <= t: + return i + return len(list(thresholds)) + + +def fft_amplitude_slice(vol: np.ndarray, plane: str = "axial") -> np.ndarray: + fft = np.fft.fftn(vol) + amp = np.abs(fft) + amp = np.fft.fftshift(amp) + d, h, w = amp.shape + if plane == "axial": + sl = amp[d // 2, :, :] + elif plane == "coronal": + sl = amp[:, h // 2, :] + else: + sl = amp[:, :, w // 2] + sl = np.log1p(sl) + return normalize_volume(sl) + + +def fourier_amplitude_mix(a: np.ndarray, b: np.ndarray, lam: float) -> np.ndarray: + # If shapes differ, crop/pad b to match a + if a.shape != b.shape: + from scipy.ndimage import zoom + # Resize b to match a shape + b_resized = np.zeros_like(a) + for c in range(min(a.shape[0], b.shape[0])): + zoom_factors = tuple(a.shape[i+1] / b.shape[i+1] for i in range(3)) + b_resized[c] = zoom(b[c], zoom_factors, order=1) + b = b_resized + fft_a = np.fft.fftn(a, axes=(1, 2, 3)) + fft_b = np.fft.fftn(b, axes=(1, 2, 3)) + amp_a = np.abs(fft_a) + amp_b = np.abs(fft_b) + phase = np.exp(1j * np.angle(fft_a)) + amp_mix = (1.0 - lam) * amp_a + lam * amp_b + mixed = np.fft.ifftn(amp_mix * phase, axes=(1, 2, 3)).real + return mixed.astype(np.float32) diff --git a/source_code/sam3/.gitignore b/source_code/sam3/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..fcda494a8d3bbf84810b8937101d85bfe68fb38d --- /dev/null +++ b/source_code/sam3/.gitignore @@ -0,0 +1,153 @@ +# Byte-compiled / optimized / 
DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints +*-Copy*.ipynb + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# PyCharm +.idea/ + +# VS Code +.vscode/ +*.code-workspace + +# Model weights and checkpoints +*.pth +*.pt +*.bin +*.ckpt +*.safetensors +weights/ +checkpoints/ +sam3_logs/ + +# Data files +*.h5 +*.hdf5 +*.pkl +*.pickle +*.npy +*.npz + +# Logs +logs/ +runs/ +tensorboard/ + +# OS specific +.DS_Store +Thumbs.db + +# BPE vocabulary files +*.bpe +*.vocab diff --git a/source_code/sam3/CODE_OF_CONDUCT.md b/source_code/sam3/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..3232ed665566ec047ce55a929db1581dbda266a1 --- /dev/null +++ 
b/source_code/sam3/CODE_OF_CONDUCT.md @@ -0,0 +1,80 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. 
+ +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when there is a +reasonable belief that an individual's behavior may have a negative impact on +the project or its community. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at . All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. 
+ +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/source_code/sam3/CONTRIBUTING.md b/source_code/sam3/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..8d0d9290ad3ce04efa27839264f7892e65924dfc --- /dev/null +++ b/source_code/sam3/CONTRIBUTING.md @@ -0,0 +1,30 @@ +# Contributing to sam3 +We want to make contributing to this project as easy and transparent as +possible. + +## Pull Requests +We actively welcome your pull requests. + +1. Fork the repo and create your branch from `main`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Make sure your code lints. +5. If you haven't already, complete the Contributor License Agreement ("CLA"). + +## Contributor License Agreement ("CLA") +In order to accept your pull request, we need you to submit a CLA. You only need +to do this once to work on any of Facebook's open source projects. + +Complete your CLA here: + +## Issues +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe +disclosure of security bugs. In those cases, please go through the process +outlined on that page and do not file a public issue. + +## License +By contributing to sam3, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. 
diff --git a/source_code/sam3/LICENSE b/source_code/sam3/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..00030caa37d1d1714b2eb0d7f55c50c5805ed4ce --- /dev/null +++ b/source_code/sam3/LICENSE @@ -0,0 +1,61 @@ +SAM License +Last Updated: November 19, 2025 + +“Agreement” means the terms and conditions for use, reproduction, distribution and modification of the SAM Materials set forth herein. + + +“SAM Materials” means, collectively, Documentation and the models, software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code, and other elements of the foregoing distributed by Meta and made available under this Agreement. + +“Documentation” means the specifications, manuals and documentation accompanying +SAM Materials distributed by Meta. + + +“Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. + + +“Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) or Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). + + +“Sanctions” means any economic or trade sanctions or restrictions administered or enforced by the United States (including the Office of Foreign Assets Control of the U.S. Department of the Treasury (“OFAC”), the U.S. Department of State and the U.S. Department of Commerce), the United Nations, the European Union, or the United Kingdom. + + +“Trade Controls” means any of the following: Sanctions and applicable export and import controls. 
+ +By using or distributing any portion or element of the SAM Materials, you agree to be bound by this Agreement. + + +1. License Rights and Redistribution. + + +a. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta’s intellectual property or other rights owned by Meta embodied in the SAM Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the SAM Materials. + +b. Redistribution and Use. +i. Distribution of SAM Materials, and any derivative works thereof, are subject to the terms of this Agreement. If you distribute or make the SAM Materials, or any derivative works thereof, available to a third party, you may only do so under the terms of this Agreement and you shall provide a copy of this Agreement with any such SAM Materials. + + +ii. If you submit for publication the results of research you perform on, using, or otherwise in connection with SAM Materials, you must acknowledge the use of SAM Materials in your publication. + + +iii. Your use of the SAM Materials must comply with applicable laws and regulations, including Trade Control Laws and applicable privacy and data protection laws. +iv. Your use of the SAM Materials will not involve or encourage others to reverse engineer, decompile or discover the underlying components of the SAM Materials. +v. You are not the target of Trade Controls and your use of SAM Materials must comply with Trade Controls. You agree not to use, or permit others to use, SAM Materials for any activities subject to the International Traffic in Arms Regulations (ITAR) or end uses prohibited by Trade Controls, including those related to military or warfare purposes, nuclear industries or applications, espionage, or the development or use of guns or illegal weapons. +2. User Support. 
Your use of the SAM Materials is done at your own discretion; Meta does not process any information nor provide any service in relation to such use. Meta is under no obligation to provide any support services for the SAM Materials. Any support provided is “as is”, “with all faults”, and without warranty of any kind. + + +3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SAM MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SAM MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SAM MATERIALS AND ANY OUTPUT AND RESULTS. + +4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT OR INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. + +5. Intellectual Property. + + +a. Subject to Meta’s ownership of SAM Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the SAM Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. + +b. 
If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the SAM Materials, outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the SAM Materials. + +6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the SAM Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the SAM Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. + +7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. + + +8. Modifications and Amendments. Meta may modify this Agreement from time to time; provided that they are similar in spirit to the current version of the Agreement, but may differ in detail to address new problems or concerns. All such changes will be effective immediately. Your continued use of the SAM Materials after any modification to this Agreement constitutes your agreement to such modification. 
Except as provided in this Agreement, no modification or addition to any provision of this Agreement will be binding unless it is in writing and signed by an authorized representative of both you and Meta. diff --git a/source_code/sam3/MANIFEST.in b/source_code/sam3/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..6daf92e44ba1bc4ab5ce60fbf4bf79089df17473 --- /dev/null +++ b/source_code/sam3/MANIFEST.in @@ -0,0 +1,6 @@ +include LICENSE +include README.md +recursive-include examples *.py +recursive-include examples *.ipynb +recursive-include examples *.md +recursive-include tests *.py diff --git a/source_code/sam3/README.md b/source_code/sam3/README.md new file mode 100644 index 0000000000000000000000000000000000000000..669242df448121be280f204c7ae3d81fce0bf441 --- /dev/null +++ b/source_code/sam3/README.md @@ -0,0 +1,395 @@ +# SAM 3: Segment Anything with Concepts + +Meta Superintelligence Labs + +[Nicolas Carion](https://www.nicolascarion.com/)\*, +[Laura Gustafson](https://scholar.google.com/citations?user=c8IpF9gAAAAJ&hl=en)\*, +[Yuan-Ting Hu](https://scholar.google.com/citations?user=E8DVVYQAAAAJ&hl=en)\*, +[Shoubhik Debnath](https://scholar.google.com/citations?user=fb6FOfsAAAAJ&hl=en)\*, +[Ronghang Hu](https://ronghanghu.com/)\*, +[Didac Suris](https://www.didacsuris.com/)\*, +[Chaitanya Ryali](https://scholar.google.com/citations?user=4LWx24UAAAAJ&hl=en)\*, +[Kalyan Vasudev Alwala](https://scholar.google.co.in/citations?user=m34oaWEAAAAJ&hl=en)\*, +[Haitham Khedr](https://hkhedr.com/)\*, Andrew Huang, +[Jie Lei](https://jayleicn.github.io/), +[Tengyu Ma](https://scholar.google.com/citations?user=VeTSl0wAAAAJ&hl=en), +[Baishan Guo](https://scholar.google.com/citations?user=BC5wDu8AAAAJ&hl=en), +Arpit Kalla, [Markus Marks](https://damaggu.github.io/), +[Joseph Greer](https://scholar.google.com/citations?user=guL96CkAAAAJ&hl=en), +Meng Wang, [Peize Sun](https://peizesun.github.io/), +[Roman 
Rädle](https://scholar.google.com/citations?user=Tpt57v0AAAAJ&hl=en), +[Triantafyllos Afouras](https://www.robots.ox.ac.uk/~afourast/), +[Effrosyni Mavroudi](https://scholar.google.com/citations?user=vYRzGGEAAAAJ&hl=en), +[Katherine Xu](https://k8xu.github.io/)°, +[Tsung-Han Wu](https://patrickthwu.com/)°, +[Yu Zhou](https://yu-bryan-zhou.github.io/)°, +[Liliane Momeni](https://scholar.google.com/citations?user=Lb-KgVYAAAAJ&hl=en)°, +[Rishi Hazra](https://rishihazra.github.io/)°, +[Shuangrui Ding](https://mark12ding.github.io/)°, +[Sagar Vaze](https://sgvaze.github.io/)°, +[Francois Porcher](https://scholar.google.com/citations?user=LgHZ8hUAAAAJ&hl=en)°, +[Feng Li](https://fengli-ust.github.io/)°, +[Siyuan Li](https://siyuanliii.github.io/)°, +[Aishwarya Kamath](https://ashkamath.github.io/)°, +[Ho Kei Cheng](https://hkchengrex.com/)°, +[Piotr Dollar](https://pdollar.github.io/)†, +[Nikhila Ravi](https://nikhilaravi.com/)†, +[Kate Saenko](https://ai.bu.edu/ksaenko.html)†, +[Pengchuan Zhang](https://pzzhang.github.io/pzzhang/)†, +[Christoph Feichtenhofer](https://feichtenhofer.github.io/)† + +\* core contributor, ° intern, † project lead, order is random within groups + +[[`Paper`](https://ai.meta.com/research/publications/sam-3-segment-anything-with-concepts/)] +[[`Project`](https://ai.meta.com/sam3)] +[[`Demo`](https://segment-anything.com/)] +[[`Blog`](https://ai.meta.com/blog/segment-anything-model-3/)] +[[`BibTeX`](#citing-sam-3)] + +![SAM 3 architecture](assets/model_diagram.png?raw=true) SAM 3 is a unified foundation model for promptable segmentation in images and videos. It can detect, segment, and track objects using text or visual prompts such as points, boxes, and masks. Compared to its predecessor [SAM 2](https://github.com/facebookresearch/sam2), SAM 3 introduces the ability to exhaustively segment all instances of an open-vocabulary concept specified by a short text phrase or exemplars. 
Unlike prior work, SAM 3 can handle a vastly larger set of open-vocabulary prompts. It achieves 75-80% of human performance on our new [SA-CO benchmark](https://github.com/facebookresearch/sam3?tab=readme-ov-file#sa-co-dataset) which contains 270K unique concepts, over 50 times more than existing benchmarks. + +This breakthrough is driven by an innovative data engine that has automatically annotated over 4 million unique concepts, creating the largest high-quality open-vocabulary segmentation dataset to date. In addition, SAM 3 introduces a new model architecture featuring a presence token that improves discrimination between closely related text prompts (e.g., “a player in white” vs. “a player in red”), as well as a decoupled detector–tracker design that minimizes task interference and scales efficiently with data. + +

+ + +

+ +## Installation + +### Prerequisites + +- Python 3.12 or higher +- PyTorch 2.7 or higher +- CUDA-compatible GPU with CUDA 12.6 or higher + +1. **Create a new Conda environment:** + +```bash +conda create -n sam3 python=3.12 +conda deactivate +conda activate sam3 +``` + +2. **Install PyTorch with CUDA support:** + +```bash +pip install torch==2.7.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126 +``` + +3. **Clone the repository and install the package:** + +```bash +git clone https://github.com/facebookresearch/sam3.git +cd sam3 +pip install -e . +``` + +4. **Install additional dependencies for example notebooks or development:** + +```bash +# For running example notebooks +pip install -e ".[notebooks]" + +# For development +pip install -e ".[train,dev]" +``` + +## Getting Started + +⚠️ Before using SAM 3, please request access to the checkpoints on the SAM 3 +Hugging Face [repo](https://huggingface.co/facebook/sam3). Once accepted, you +need to be authenticated to download the checkpoints. You can do this by running +the following [steps](https://huggingface.co/docs/huggingface_hub/en/quick-start#authentication) +(e.g. `hf auth login` after generating an access token.) 
+ +### Basic Usage + +```python +import torch +#################################### For Image #################################### +from PIL import Image +from sam3.model_builder import build_sam3_image_model +from sam3.model.sam3_image_processor import Sam3Processor +# Load the model +model = build_sam3_image_model() +processor = Sam3Processor(model) +# Load an image +image = Image.open("") +inference_state = processor.set_image(image) +# Prompt the model with text +output = processor.set_text_prompt(state=inference_state, prompt="") + +# Get the masks, bounding boxes, and scores +masks, boxes, scores = output["masks"], output["boxes"], output["scores"] + +#################################### For Video #################################### + +from sam3.model_builder import build_sam3_video_predictor + +video_predictor = build_sam3_video_predictor() +video_path = "" # a JPEG folder or an MP4 video file +# Start a session +response = video_predictor.handle_request( + request=dict( + type="start_session", + resource_path=video_path, + ) +) +response = video_predictor.handle_request( + request=dict( + type="add_prompt", + session_id=response["session_id"], + frame_index=0, # Arbitrary frame index + text="", + ) +) +output = response["outputs"] +``` + +## Examples + +The `examples` directory contains notebooks demonstrating how to use SAM3 with +various types of prompts: + +- [`sam3_image_predictor_example.ipynb`](examples/sam3_image_predictor_example.ipynb) + : Demonstrates how to prompt SAM 3 with text and visual box prompts on images. +- [`sam3_video_predictor_example.ipynb`](examples/sam3_video_predictor_example.ipynb) + : Demonstrates how to prompt SAM 3 with text prompts on videos, and doing + further interactive refinements with points. +- [`sam3_image_batched_inference.ipynb`](examples/sam3_image_batched_inference.ipynb) + : Demonstrates how to run batched inference with SAM 3 on images. 
+- [`sam3_agent.ipynb`](examples/sam3_agent.ipynb): Demonstrates the use of SAM + 3 Agent to segment complex text prompts on images. +- [`saco_gold_silver_vis_example.ipynb`](examples/saco_gold_silver_vis_example.ipynb) + : Shows a few examples from SA-Co image evaluation set. +- [`saco_veval_vis_example.ipynb`](examples/saco_veval_vis_example.ipynb) : + Shows a few examples from SA-Co video evaluation set. + +There are additional notebooks in the examples directory that demonstrate how to +use SAM 3 for interactive instance segmentation in images and videos (SAM 1/2 +tasks), or as a tool for an MLLM, and how to run evaluations on the SA-Co +dataset. + +To run the Jupyter notebook examples: + +```bash +# Make sure you have the notebooks dependencies installed +pip install -e ".[notebooks]" + +# Start Jupyter notebook +jupyter notebook examples/sam3_image_predictor_example.ipynb +``` + +## Model + +SAM 3 consists of a detector and a tracker that share a vision encoder. It has 848M parameters. The +detector is a DETR-based model conditioned on text, geometry, and image +exemplars. The tracker inherits the SAM 2 transformer encoder-decoder +architecture, supporting video segmentation and interactive refinement. + +## Image Results + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ModelInstance SegmentationBox Detection
LVISSA-Co/GoldLVISCOCOSA-Co/Gold
cgF1APcgF1cgF1APAPAPo +cgF1
Human--72.8----74.0
OWLv2*29.343.424.630.245.546.123.924.5
DINO-X-38.521.3-52.456.0-22.5
Gemini 2.513.4-13.016.1---14.4
SAM 337.248.554.140.653.656.455.755.7
+ +

* Partially trained on LVIS, APo refers to COCO-O accuracy

+ +
+ +## Video Results + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ModelSA-V testYT-Temporal-1B testSmartGlasses testLVVIS testBURST test
cgF1pHOTAcgF1pHOTAcgF1pHOTAmAPHOTA
Human53.170.571.278.458.572.3--
SAM 330.358.050.869.936.463.636.344.5
+
+ +## SA-Co Dataset + +We release 2 image benchmarks, [SA-Co/Gold](scripts/eval/gold/README.md) and +[SA-Co/Silver](scripts/eval/silver/README.md), and a video benchmark +[SA-Co/VEval](scripts/eval/veval/README.md). The datasets contain images (or videos) with annotated noun phrases. Each image/video and noun phrase pair is annotated with instance masks and unique IDs of each object matching the phrase. Phrases that have no matching objects (negative prompts) have no masks, shown in red font in the figure. See the linked READMEs for more details on how to download and run evaluations on the datasets. + +* HuggingFace host: [SA-Co/Gold](https://huggingface.co/datasets/facebook/SACo-Gold), [SA-Co/Silver](https://huggingface.co/datasets/facebook/SACo-Silver) and [SA-Co/VEval](https://huggingface.co/datasets/facebook/SACo-VEval) +* Roboflow host: [SA-Co/Gold](https://universe.roboflow.com/sa-co-gold), [SA-Co/Silver](https://universe.roboflow.com/sa-co-silver) and [SA-Co/VEval](https://universe.roboflow.com/sa-co-veval) + +![SA-Co dataset](assets/sa_co_dataset.jpg?raw=true) + +## Development + +To set up the development environment: + +```bash +pip install -e ".[dev,train]" +``` + +To format the code: + +```bash +ufmt format . +``` + +## Contributing + +See [contributing](CONTRIBUTING.md) and the +[code of conduct](CODE_OF_CONDUCT.md). + +## License + +This project is licensed under the SAM License - see the [LICENSE](LICENSE) file +for details. 
+ +## Acknowledgements + +We would like to thank the following people for their contributions to the SAM 3 project: Alex He, Alexander Kirillov, +Alyssa Newcomb, Ana Paula Kirschner Mofarrej, Andrea Madotto, Andrew Westbury, Ashley Gabriel, Azita Shokpour, +Ben Samples, Bernie Huang, Carleigh Wood, Ching-Feng Yeh, Christian Puhrsch, Claudette Ward, Daniel Bolya, +Daniel Li, Facundo Figueroa, Fazila Vhora, George Orlin, Hanzi Mao, Helen Klein, Hu Xu, Ida Cheng, Jake Kinney, +Jiale Zhi, Jo Sampaio, Joel Schlosser, Justin Johnson, Kai Brown, Karen Bergan, Karla Martucci, Kenny Lehmann, +Maddie Mintz, Mallika Malhotra, Matt Ward, Michelle Chan, Michelle Restrepo, Miranda Hartley, Muhammad Maaz, +Nisha Deo, Peter Park, Phillip Thomas, Raghu Nayani, Rene Martinez Doehner, Robbie Adkins, Ross Girshick, Sasha +Mitts, Shashank Jain, Spencer Whitehead, Ty Toledano, Valentin Gabeur, Vincent Cho, Vivian Lee, William Ngan, +Xuehai He, Yael Yungster, Ziqi Pang, Ziyi Dou, Zoe Quake. + +## Citing SAM 3 + +If you use SAM 3 or the SA-Co dataset in your research, please use the following BibTeX entry. 
+ +```bibtex +@misc{carion2025sam3segmentconcepts, + title={SAM 3: Segment Anything with Concepts}, + author={Nicolas Carion and Laura Gustafson and Yuan-Ting Hu and Shoubhik Debnath and Ronghang Hu and Didac Suris and Chaitanya Ryali and Kalyan Vasudev Alwala and Haitham Khedr and Andrew Huang and Jie Lei and Tengyu Ma and Baishan Guo and Arpit Kalla and Markus Marks and Joseph Greer and Meng Wang and Peize Sun and Roman Rädle and Triantafyllos Afouras and Effrosyni Mavroudi and Katherine Xu and Tsung-Han Wu and Yu Zhou and Liliane Momeni and Rishi Hazra and Shuangrui Ding and Sagar Vaze and Francois Porcher and Feng Li and Siyuan Li and Aishwarya Kamath and Ho Kei Cheng and Piotr Dollár and Nikhila Ravi and Kate Saenko and Pengchuan Zhang and Christoph Feichtenhofer}, + year={2025}, + eprint={2511.16719}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, + url={https://arxiv.org/abs/2511.16719}, +} +``` diff --git a/source_code/sam3/README_TRAIN.md b/source_code/sam3/README_TRAIN.md new file mode 100644 index 0000000000000000000000000000000000000000..01904c27418b9c8765b933a9c08699a7d67b13a1 --- /dev/null +++ b/source_code/sam3/README_TRAIN.md @@ -0,0 +1,190 @@ +# Training + +This repository supports finetuning SAM3 models on custom datasets in multi-node setup or local execution. The training script is located at `sam3/train.py` and uses Hydra configuration management to handle complex training setups. + + +## Installation + +```bash +cd sam3 +pip install -e ".[train]" +``` + +### Training Script Usage + +The main training script is located at `sam3/train.py`. It uses Hydra configuration management to handle complex training setups. 
+ +#### Basic Usage + +```bash +# Example: Train on Roboflow dataset +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml +# Example: Train on ODinW13 dataset +python sam3/train/train.py -c configs/odinw13/odinw_text_only_train.yaml +``` +Follow [`Roboflow 100-VL`](https://github.com/roboflow/rf100-vl/) to download the roboflow 100-vl datasets. Follow [`GLIP`](https://github.com/microsoft/GLIP) to download the ODinW datasets. The data folder should be organized as follows, and put your roboflow_vl_100_root and odinw_data_root in the job configs. +``` +roboflow_vl_100_root: + 13-lkc01 + train + valid + test + 2024-frc + actions + ... +odinw_data_root: + AerialMaritimeDrone + large + train + valid + test + Aquarium + ... +``` + +#### Command Line Arguments + +The training script supports several command line arguments: + +```bash +python sam3/train/train.py \ + -c CONFIG_NAME \ + [--use-cluster 0|1] \ + [--partition PARTITION_NAME] \ + [--account ACCOUNT_NAME] \ + [--qos QOS_NAME] \ + [--num-gpus NUM_GPUS] \ + [--num-nodes NUM_NODES] +``` + +**Arguments:** +- `-c, --config`: **Required.** Path to the configuration file (e.g., `sam3/train/configs/roboflow_v100_full_ft_100_images.yaml`) +- `--use-cluster`: Whether to launch on a cluster (0: local, 1: cluster). Default: uses config setting +- `--partition`: SLURM partition name for cluster execution +- `--account`: SLURM account name for cluster execution +- `--qos`: SLURM QOS (Quality of Service) setting +- `--num-gpus`: Number of GPUs per node. Default: uses config setting +- `--num-nodes`: Number of nodes for distributed training. 
Default: uses config setting + +#### Local Training Examples + +```bash +# Single GPU training +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml --use-cluster 0 --num-gpus 1 + +# Multi-GPU training on a single node +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml --use-cluster 0 --num-gpus 4 + +# Force local execution even if config specifies GPUs +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml --use-cluster 0 +``` + +#### Cluster Training Examples + +```bash +# Basic cluster training with default settings from config +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml --use-cluster 1 + +# Cluster training with specific SLURM settings +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml \ + --use-cluster 1 \ + --partition gpu_partition \ + --account my_account \ + --qos high_priority \ + --num-gpus 8 \ + --num-nodes 2 +``` + +### Configuration Files + +Training configurations are stored in `sam3/train/configs/`. 
The configuration files use Hydra's YAML format and support: + +- **Dataset Configuration**: Data paths, transforms, and loading parameters +- **Model Configuration**: Architecture settings, checkpoint paths, and model parameters +- **Training Configuration**: Batch sizes, learning rates, optimization settings +- **Launcher Configuration**: Distributed training and cluster settings +- **Logging Configuration**: TensorBoard, experiment tracking, and output directories + +#### Key Configuration Sections + +```yaml +# Paths to datasets and checkpoints +paths: + bpe_path: /path/to/bpe/file + dataset_root: /path/to/dataset + experiment_log_dir: /path/to/logs + +# Launcher settings for local/cluster execution +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: ${paths.experiment_log_dir} + +# Cluster execution settings +submitit: + use_cluster: True + timeout_hour: 72 + cpus_per_task: 10 + partition: null + account: null +``` + +### Monitoring Training + +The training script automatically sets up logging and saves outputs to the experiment directory: + +```bash +# Logs are saved to the experiment_log_dir specified in config +experiment_log_dir/ +├── config.yaml # Original configuration +├── config_resolved.yaml # Resolved configuration with all variables expanded +├── checkpoints/ # Model checkpoints (if skip_checkpointing=False) +├── tensorboard/ # TensorBoard logs +├── logs/ # Text logs +└── submitit_logs/ # Cluster job logs (if using cluster) +``` + +You can monitor training progress using TensorBoard: + +```bash +tensorboard --logdir /path/to/experiment_log_dir/tensorboard +``` + +### Job Arrays for Dataset Sweeps + +The Roboflow and ODinW configuration supports job arrays for training multiple models on different datasets: + +This feature is specifically enabled via, +```yaml +submitit: + job_array: + num_tasks: 100 + task_index: 0 +``` + +The configuration includes a complete list of 100 Roboflow supercategories, and the 
`submitit.job_array.task_index` automatically selects which dataset to use based on the array job index. + +```bash +# Submit job array to train on different Roboflow datasets +# The job array index selects which dataset from all_roboflow_supercategories +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml \ + --use-cluster 1 +``` + +### Reproduce ODinW13 10-shot results +Running the following job will give the results on the ODinW13 seed 300, see `odinw_train.train_file: fewshot_train_shot10_seed300` in the config file. +```bash +# Example: Train on ODinW13 dataset +python sam3/train/train.py -c configs/odinw13/odinw_text_only_train.yaml +``` +Change `odinw_train.train_file` to `fewshot_train_shot10_seed30` and `fewshot_train_shot10_seed3` to get the results for the other two seeds. Final results are aggregated from the three seeds. Notice that a small number of jobs may diverge during training, in which case we just use the last checkpoint's result before it diverges. + + +### Eval Script Usage +With a similar setup as the training config, the training script `sam3/train.py` can also be used for evaluation when setting `trainer.mode = val` in the job config. Running the following jobs will give the zero-shot results on the RF100-VL and ODinW13 datasets. 
+```bash +# Example: Evaluate on Roboflow dataset +python sam3/train/train.py -c configs/roboflow_v100/roboflow_v100_eval.yaml +# Example: Evaluate on ODinW13 dataset +python sam3/train/train.py -c configs/odinw13/odinw_text_only.yaml +``` diff --git a/source_code/sam3/examples/sam3_video_predictor_example.ipynb b/source_code/sam3/examples/sam3_video_predictor_example.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..2848b674adb630761afe3e63e9bbaec5b33933ed --- /dev/null +++ b/source_code/sam3/examples/sam3_video_predictor_example.ipynb @@ -0,0 +1,1603 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "License Information", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496606600, + "executionStopTime": 1762496607864, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Copyright Notice", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950741027, + "executionStopTime": 1761950741468, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "49a54601-c2fe-47f2-a436-9a1f91503520", + "outputsInitialized": true, + "requestMsgId": "da31cd52-f746-4a69-bfae-b2037e84d00c", + "serverExecutionDuration": 2.3454379988834, + "showInput": true + }, + "originalKey": "913d6f63-449f-4836-ae81-7d55a42ccf8c", + "output": { + "id": 863592039347424, + "loadingStatus": "loaded" + }, + "outputsInitialized": false, + "requestMsgId": "913d6f63-449f-4836-ae81-7d55a42ccf8c", + "serverExecutionDuration": 2.4641010004416 + }, + "outputs": [], + "source": [ + "# Copyright (c) Meta Platforms, Inc. and affiliates." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "attachments": [], + "bentoAICellStatus": "none", + "isCommentPanelOpen": false, + "language": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "customInput": null, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": "50280cd0-12eb-4f23-a78d-c37f5bda1fe6", + "outputsInitialized": false, + "showInput": false + }, + "originalKey": "9e88cae8-b006-498d-9a02-c1c369a95f57", + "outputsInitialized": false, + "showInput": true + }, + "source": [ + "## Video segmentation and tracking with SAM 3\n", + "\n", + "This notebook demonstrates how to use SAM 3 for interactive video segmentation and dense tracking. It covers the following capabilities:\n", + "\n", + "- **Text prompts**: Using natural language descriptions to segment objects (e.g., \"person\", \"shoe\")\n", + "- **Point prompts**: Adding positive/negative clicks to segment and refine objects\n", + "\n", + "We use the terms _segment_ or _mask_ to refer to the model prediction for an object on a single frame, and _masklet_ to refer to the spatio-temporal masks across the entire video. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# \n", + "# \"Open\n", + "# " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "using_colab = False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if using_colab:\n", + " import torch\n", + " import torchvision\n", + " print(\"PyTorch version:\", torch.__version__)\n", + " print(\"Torchvision version:\", torchvision.__version__)\n", + " print(\"CUDA is available:\", torch.cuda.is_available())\n", + " import sys\n", + " !{sys.executable} -m pip install opencv-python matplotlib scikit-learn\n", + " !{sys.executable} -m pip install 'git+https://github.com/facebookresearch/sam3.git'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Display GPU Status", + "origin": "ai" + }, + "customOutput": null, + "executionStartTime": 1762496607874, + "executionStopTime": 1762496609713, + "isCommentPanelOpen": false, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Check GPU Status", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950741471, + "executionStopTime": 1761950742878, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "c3f5a31b-c8df-45db-ae6d-0d219ee60382", + "outputsInitialized": true, + "requestMsgId": "cea571e0-345b-461d-b2ee-8f95e0ea4b4e", + "serverExecutionDuration": 1091.9901590096, + "showInput": true + }, + "originalKey": "fb0eb6a0-acd2-4c80-bacd-bafd09669e7e", + "output": { + "id": "794918370206651", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "fb0eb6a0-acd2-4c80-bacd-bafd09669e7e", + "serverExecutionDuration": 1123.5525669999 + }, + "outputs": [], + "source": [ + "!nvidia-smi" 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "attachments": [], + "bentoAICellStatus": "none", + "isCommentPanelOpen": false, + "language": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "collapsed": false, + "customInput": null, + "executionStartTime": 1761927188199, + "executionStopTime": 1761927188659, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": "8304fc58-e145-4f5f-8bdc-a6d2dfba8a04", + "outputsInitialized": false, + "requestMsgId": "8304fc58-e145-4f5f-8bdc-a6d2dfba8a04", + "serverExecutionDuration": 3.739742009202, + "showInput": false + }, + "originalKey": "6702b9f4-54e9-46be-aca8-82ad9f96e9cc", + "outputsInitialized": false, + "showInput": false + }, + "source": [ + "## Set-up\n", + "\n", + "In this example, we allow running inference either on a single GPU or multiple GPUs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Configure GPU Usage", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496609726, + "executionStopTime": 1762496617098, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Import iopath library", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1761950742885, + "executionStopTime": 1761950745386, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "5faf15c7-5bbb-4b86-a9a8-70abbc76cba2", + "outputsInitialized": true, + "requestMsgId": "e63f2826-d537-4849-82ac-fe7280cc9de0", + "serverExecutionDuration": 2117.1291570063 + }, + "originalKey": "5d0ad6b6-0225-4371-9455-e6291e92604c", + "output": { + "id": "1459804151757142", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "5d0ad6b6-0225-4371-9455-e6291e92604c", + 
"serverExecutionDuration": 6628.1851309996 + }, + "outputs": [], + "source": [ + "import os\n", + "import sam3\n", + "import torch\n", + "\n", + "sam3_root = os.path.join(os.path.dirname(sam3.__file__), \"..\")\n", + "\n", + "# use all available GPUs on the machine\n", + "gpus_to_use = range(torch.cuda.device_count())\n", + "# # use only a single GPU\n", + "# gpus_to_use = [torch.cuda.current_device()]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Initialize Video Predictor", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496617103, + "executionStopTime": 1762496677439, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Import Video Predictor", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950750102, + "executionStopTime": 1761950806603, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "01683eda-e85f-4af6-9d91-86b3f1822170", + "outputsInitialized": true, + "requestMsgId": "822fb211-d78e-4d1c-92fa-848e0e755100", + "serverExecutionDuration": 55998.664824001, + "showInput": true + }, + "originalKey": "aea5a4b9-de9f-46ed-9fd1-20928ab60d2e", + "output": { + "id": "1581259049706846", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "aea5a4b9-de9f-46ed-9fd1-20928ab60d2e", + "serverExecutionDuration": 59514.871851999 + }, + "outputs": [], + "source": [ + "from sam3.model_builder import build_sam3_video_predictor\n", + "\n", + "predictor = build_sam3_video_predictor(gpus_to_use=gpus_to_use)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "collapsed": false, + "customInput": null, + "executionStartTime": 
1762140878760, + "executionStopTime": 1762140879318, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "markdown", + "originalKey": "2cb37dda-a58c-46ae-85ff-118bb3ff4c02", + "outputsInitialized": false, + "requestMsgId": "2cb37dda-a58c-46ae-85ff-118bb3ff4c02", + "serverExecutionDuration": 3.7622059462592, + "showInput": false + }, + "source": [ + "#### Inference and visualization utils" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Set Up Video Processing", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496677450, + "executionStopTime": 1762496679879, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "10d98ae4-dd65-4824-8469-960a9801ec72", + "output": { + "id": "1183417547004803", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "10d98ae4-dd65-4824-8469-960a9801ec72", + "serverExecutionDuration": 1535.9860829994, + "showInput": true + }, + "outputs": [], + "source": [ + "import glob\n", + "import os\n", + "\n", + "import cv2\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "from PIL import Image\n", + "from sam3.visualization_utils import (\n", + " load_frame,\n", + " prepare_masks_for_visualization,\n", + " visualize_formatted_frame_output,\n", + ")\n", + "\n", + "# font size for axes titles\n", + "plt.rcParams[\"axes.titlesize\"] = 12\n", + "plt.rcParams[\"figure.titlesize\"] = 12\n", + "\n", + "\n", + "def propagate_in_video(predictor, session_id):\n", + " # we will just propagate from frame 0 to the end of the video\n", + " outputs_per_frame = {}\n", + " for response in predictor.handle_stream_request(\n", + " request=dict(\n", + " type=\"propagate_in_video\",\n", + " session_id=session_id,\n", + " )\n", + " ):\n", + " 
outputs_per_frame[response[\"frame_index\"]] = response[\"outputs\"]\n", + "\n", + " return outputs_per_frame\n", + "\n", + "\n", + "def abs_to_rel_coords(coords, IMG_WIDTH, IMG_HEIGHT, coord_type=\"point\"):\n", + " \"\"\"Convert absolute coordinates to relative coordinates (0-1 range)\n", + "\n", + " Args:\n", + " coords: List of coordinates\n", + " coord_type: 'point' for [x, y] or 'box' for [x, y, w, h]\n", + " \"\"\"\n", + " if coord_type == \"point\":\n", + " return [[x / IMG_WIDTH, y / IMG_HEIGHT] for x, y in coords]\n", + " elif coord_type == \"box\":\n", + " return [\n", + " [x / IMG_WIDTH, y / IMG_HEIGHT, w / IMG_WIDTH, h / IMG_HEIGHT]\n", + " for x, y, w, h in coords\n", + " ]\n", + " else:\n", + " raise ValueError(f\"Unknown coord_type: {coord_type}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "attachments": [], + "bentoAICellStatus": "none", + "isCommentPanelOpen": false, + "language": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "customInput": null, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": "2e38c5fe-1aa7-4000-9778-25e240daf5e5", + "outputsInitialized": false, + "showInput": false + }, + "originalKey": "7f803ec4-a343-43c9-9be3-5d3b9b66ae9a", + "outputsInitialized": false, + "showInput": false + }, + "source": [ + "### Loading an example video\n", + "\n", + "We assume that the video is stored as either **a list of JPEG frames with filenames like `.jpg`** or **an MP4 video**.\n", + "\n", + "Note that you can extract their JPEG frames using ffmpeg (https://ffmpeg.org/) as follows:\n", + "```\n", + "ffmpeg -i .mp4 -q:v 2 -start_number 0 /'%05d.jpg'\n", + "```\n", + "where `-q:v` generates high-quality JPEG frames and `-start_number 0` asks ffmpeg to start the JPEG file from `00000.jpg`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Set video path", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496679887, + "executionStopTime": 1762496680687, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Print SAM3 Directory Path", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950806610, + "executionStopTime": 1761950807097, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "59540fba-3749-4498-bd14-c28fb7d61dbc", + "outputsInitialized": true, + "requestMsgId": "2807b3b8-a4fb-41e9-9976-67e2cd1f2ca2", + "serverExecutionDuration": 5.3407719824463, + "showInput": true + }, + "originalKey": "92d7b964-a3e4-4efb-98d4-202344994413", + "outputsInitialized": false, + "requestMsgId": "92d7b964-a3e4-4efb-98d4-202344994413", + "serverExecutionDuration": 3.5114740003337 + }, + "outputs": [], + "source": [ + "# \"video_path\" needs to be either a JPEG folder or a MP4 video file\n", + "video_path = f\"{sam3_root}/assets/videos/0001\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Load Video Frames for Visualization", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496680695, + "executionStopTime": 1762496681428, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Load Video Frames for Visualization", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950807101, + 
"executionStopTime": 1761950807480, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "86f88968-93c4-4ca1-81fc-7fad55ccd566", + "outputsInitialized": true, + "requestMsgId": "96b17a72-b889-4a1f-af98-b40c773780f7", + "serverExecutionDuration": 7.6176410075277, + "showInput": true + }, + "originalKey": "10fe83fd-eb12-4400-a8ac-b6e137819136", + "outputsInitialized": false, + "requestMsgId": "10fe83fd-eb12-4400-a8ac-b6e137819136", + "serverExecutionDuration": 8.1744140006776 + }, + "outputs": [], + "source": [ + "# load \"video_frames_for_vis\" for visualization purposes (they are not used by the model)\n", + "if isinstance(video_path, str) and video_path.endswith(\".mp4\"):\n", + " cap = cv2.VideoCapture(video_path)\n", + " video_frames_for_vis = []\n", + " while True:\n", + " ret, frame = cap.read()\n", + " if not ret:\n", + " break\n", + " video_frames_for_vis.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n", + " cap.release()\n", + "else:\n", + " video_frames_for_vis = glob.glob(os.path.join(video_path, \"*.jpg\"))\n", + " try:\n", + " # integer sort instead of string sort (so that e.g. 
\"2.jpg\" is before \"11.jpg\")\n", + " video_frames_for_vis.sort(\n", + " key=lambda p: int(os.path.splitext(os.path.basename(p))[0])\n", + " )\n", + " except ValueError:\n", + " # fallback to lexicographic sort if the format is not \".jpg\"\n", + " print(\n", + " f'frame names are not in \".jpg\" format: {video_frames_for_vis[:5]=}, '\n", + " f\"falling back to lexicographic sort.\"\n", + " )\n", + " video_frames_for_vis.sort()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "attachments": [], + "bentoAICellStatus": "none", + "isCommentPanelOpen": false, + "language": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "customInput": null, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": "2f67df56-df50-470d-bb6d-f736a592dd47", + "outputsInitialized": false, + "showInput": false + }, + "originalKey": "552fbb00-7387-4014-9161-7f9c32418701", + "outputsInitialized": false, + "showInput": false + }, + "source": [ + "### Opening an inference session on this video\n", + "\n", + "SAM 3 requires stateful inference for interactive video segmentation, so we need to initialize an **inference session** on this video.\n", + "\n", + "During initialization, it loads all the video frames and stores their pixels in the session state." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Start Video Session", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496681434, + "executionStopTime": 1762496694273, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Start Video Session", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950807485, + "executionStopTime": 1761950821459, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "eec3ee85-e95a-4fa7-8401-921fba35d6ee", + "outputsInitialized": true, + "requestMsgId": "262d936d-486c-4a75-8f25-ad2a2832c3f8", + "serverExecutionDuration": 13503.750298987, + "showInput": true + }, + "originalKey": "2b5917e0-95da-409a-b2c6-4868fe5ff88e", + "output": { + "id": "816533627765185", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "2b5917e0-95da-409a-b2c6-4868fe5ff88e", + "serverExecutionDuration": 11971.027362999 + }, + "outputs": [], + "source": [ + "response = predictor.handle_request(\n", + " request=dict(\n", + " type=\"start_session\",\n", + " resource_path=video_path,\n", + " )\n", + ")\n", + "session_id = response[\"session_id\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "attachments": [], + "bentoAICellStatus": "none", + "isCommentPanelOpen": false, + "language": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "customInput": null, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": "4d26ec1b-d78f-4054-879a-cbf06b84a79f", + "outputsInitialized": false, + "showInput": false + }, + "originalKey": "1da36b65-5759-4803-be41-ec6d2ec8c5d9", + "outputsInitialized": false, + "showInput": false + }, + "source": [ + "### Video 
promptable concept segmentation with text\n", + "\n", + "Using SAM 3 you can describe objects using natural language, and the model will automatically detect and track all instances of that object throughout the video.\n", + "\n", + "In the example below, we add a text prompt on frame 0 and propagation throughout the video. Here we use the text prompt \"person\" to detect all people in the video. SAM 3 will automatically identify multiple person instances and assign each a unique object ID.\n", + "\n", + "Note that the first call might be slower due to setting up buffers. **You can rerun all the cells below when measuring speed.**" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Reset Session", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496694278, + "executionStopTime": 1762496694675, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Reset Prediction Session", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950821467, + "executionStopTime": 1761950821992, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "be47ff32-7721-43c2-a1ad-b9d1689cf1c1", + "outputsInitialized": true, + "requestMsgId": "c5eb2442-0530-4408-9ace-ba74e7441cf6", + "serverExecutionDuration": 10.527236998314, + "showInput": true + }, + "originalKey": "f61b9bc5-5ec7-4c72-9071-e50bedb89f0a", + "output": { + "id": "4255598051433217", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "f61b9bc5-5ec7-4c72-9071-e50bedb89f0a", + "serverExecutionDuration": 9.9740919995384 + }, + "outputs": [], + "source": [ + "# note: in case you already ran one text prompt and now want to switch to another text prompt\n", 
+ "# it's required to reset the session first (otherwise the results would be wrong)\n", + "_ = predictor.handle_request(\n", + " request=dict(\n", + " type=\"reset_session\",\n", + " session_id=session_id,\n", + " )\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Add Text Prompt", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496694678, + "executionStopTime": 1762496699791, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Add Prompt to Frame", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950821995, + "executionStopTime": 1761950825751, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "ca780545-a450-4ed8-a192-f6033e0288ba", + "outputsInitialized": true, + "requestMsgId": "d784487d-5758-40a4-8a66-1ea26ad3bcfd", + "serverExecutionDuration": 3358.8370049838, + "showInput": true + }, + "originalKey": "55538638-8336-4b1c-9daf-517c0dc31806", + "output": { + "id": "2083947459075035", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "55538638-8336-4b1c-9daf-517c0dc31806", + "serverExecutionDuration": 3957.4137909985 + }, + "outputs": [], + "source": [ + "prompt_text_str = \"person\"\n", + "frame_idx = 0 # add a text prompt on frame 0\n", + "response = predictor.handle_request(\n", + " request=dict(\n", + " type=\"add_prompt\",\n", + " session_id=session_id,\n", + " frame_index=frame_idx,\n", + " text=prompt_text_str,\n", + " )\n", + ")\n", + "out = response[\"outputs\"]\n", + "\n", + "plt.close(\"all\")\n", + "visualize_formatted_frame_output(\n", + " frame_idx,\n", + " video_frames_for_vis,\n", + " outputs_list=[prepare_masks_for_visualization({frame_idx: 
out})],\n", + " titles=[\"SAM 3 Dense Tracking outputs\"],\n", + " figsize=(6, 4),\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Visualize Video Outputs", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496699796, + "executionStopTime": 1762496734325, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Visualize Video Outputs", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950827402, + "executionStopTime": 1761950861260, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "06edc7da-9dd1-42e8-b502-0afdbfbaddc2", + "outputsInitialized": true, + "requestMsgId": "33659e9c-a817-40fe-b3b7-727b7a344d74", + "serverExecutionDuration": 33374.819626013, + "showInput": true + }, + "originalKey": "41e1b095-80a4-4997-9b3e-2baa28dba05f", + "output": { + "id": "2605449593181328", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "41e1b095-80a4-4997-9b3e-2baa28dba05f", + "serverExecutionDuration": 33588.144188001 + }, + "outputs": [], + "source": [ + "# now we propagate the outputs from frame 0 to the end of the video and collect all outputs\n", + "outputs_per_frame = propagate_in_video(predictor, session_id)\n", + "\n", + "# finally, we reformat the outputs for visualization and plot the outputs every 60 frames\n", + "outputs_per_frame = prepare_masks_for_visualization(outputs_per_frame)\n", + "\n", + "vis_frame_stride = 60\n", + "plt.close(\"all\")\n", + "for frame_idx in range(0, len(outputs_per_frame), vis_frame_stride):\n", + " visualize_formatted_frame_output(\n", + " frame_idx,\n", + " video_frames_for_vis,\n", + " outputs_list=[outputs_per_frame],\n", + " 
titles=[\"SAM 3 Dense Tracking outputs\"],\n", + " figsize=(6, 4),\n", + " )" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "customInput": null, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": "e341c66f-6083-4731-b8e1-ebc7b4177a43", + "outputsInitialized": false, + "showInput": false + }, + "source": [ + "### Removing objects\n", + "\n", + "We can remove individual objects using their id.\n", + "\n", + "As an example, let's remove object 2 (which is the dancer in the front)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Remove Front Dancer", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496734333, + "executionStopTime": 1762496735487, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "cf97330e-68f5-4f4e-931f-bc5b6faa77ff", + "output": { + "id": "1345936250272478", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "cf97330e-68f5-4f4e-931f-bc5b6faa77ff", + "serverExecutionDuration": 127.66883199947, + "showInput": true + }, + "outputs": [], + "source": [ + "# we pick id 2, which is the dancer in the front\n", + "obj_id = 2\n", + "response = predictor.handle_request(\n", + " request=dict(\n", + " type=\"remove_object\",\n", + " session_id=session_id,\n", + " obj_id=obj_id,\n", + " )\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Visualize Video Outputs", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496735493, + "executionStopTime": 1762496742056, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + 
"language": "python", + "originalKey": "a4552ab0-1b08-42b6-b90d-bfd1814c9398", + "output": { + "id": "1747332999309403", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "a4552ab0-1b08-42b6-b90d-bfd1814c9398", + "serverExecutionDuration": 5491.8713629995, + "showInput": true + }, + "outputs": [], + "source": [ + "# now we propagate the outputs from frame 0 to the end of the video and collect all outputs\n", + "outputs_per_frame = propagate_in_video(predictor, session_id)\n", + "\n", + "# finally, we reformat the outputs for visualization and plot the outputs every 60 frames\n", + "outputs_per_frame = prepare_masks_for_visualization(outputs_per_frame)\n", + "\n", + "vis_frame_stride = 60\n", + "plt.close(\"all\")\n", + "for frame_idx in range(0, len(outputs_per_frame), vis_frame_stride):\n", + " visualize_formatted_frame_output(\n", + " frame_idx,\n", + " video_frames_for_vis,\n", + " outputs_list=[outputs_per_frame],\n", + " titles=[\"SAM 3 Dense Tracking outputs\"],\n", + " figsize=(6, 4),\n", + " )" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "customInput": null, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": "4606cb85-5007-4b11-baa6-41ce9017de8e", + "outputsInitialized": false, + "showInput": false + }, + "source": [ + "### Adding new objects with point prompts\n", + "\n", + "We can add new objects through point prompts.\n", + "\n", + "Assuming that we've changed our mind, and now that we want to add back the dancer in the front (whom we just removed in the step above). We can use interactive clicks to add her back." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Get image dimensions", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496742064, + "executionStopTime": 1762496743435, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "2ad8f8e9-49d8-4003-ad2f-f289ab5befc8", + "outputsInitialized": false, + "requestMsgId": "2ad8f8e9-49d8-4003-ad2f-f289ab5befc8", + "serverExecutionDuration": 15.222899999571, + "showInput": true + }, + "outputs": [], + "source": [ + "sample_img = Image.fromarray(load_frame(video_frames_for_vis[0]))\n", + "\n", + "IMG_WIDTH, IMG_HEIGHT = sample_img.size" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Convert Absolute to Relative Coordinates", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496743442, + "executionStopTime": 1762496744333, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "3baca8f9-8e19-46da-a320-29524ddbf950", + "outputsInitialized": false, + "requestMsgId": "3baca8f9-8e19-46da-a320-29524ddbf950", + "serverExecutionDuration": 3.9295850001508, + "showInput": true + }, + "outputs": [], + "source": [ + "# let's add back the dancer via point prompts.\n", + "# we will use a single positive click to add the dancer back.\n", + "\n", + "frame_idx = 0\n", + "obj_id = 2\n", + "points_abs = np.array(\n", + " [\n", + " [760, 550], # positive click\n", + " ]\n", + ")\n", + "# positive clicks have label 1, while negative clicks have label 0\n", + "labels = np.array([1])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + 
"bentoCellName": { + "name": "Display Data Points", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496744337, + "executionStopTime": 1762496748117, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "865e951f-8d0a-41bf-8d51-72092703e3cf", + "output": { + "id": "1240547311311464", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "865e951f-8d0a-41bf-8d51-72092703e3cf", + "serverExecutionDuration": 1224.9363859992, + "showInput": true + }, + "outputs": [], + "source": [ + "# convert points and labels to tensors; also convert to relative coordinates\n", + "points_tensor = torch.tensor(\n", + " abs_to_rel_coords(points_abs, IMG_WIDTH, IMG_HEIGHT, coord_type=\"point\"),\n", + " dtype=torch.float32,\n", + ")\n", + "points_labels_tensor = torch.tensor(labels, dtype=torch.int32)\n", + "\n", + "response = predictor.handle_request(\n", + " request=dict(\n", + " type=\"add_prompt\",\n", + " session_id=session_id,\n", + " frame_index=frame_idx,\n", + " points=points_tensor,\n", + " point_labels=points_labels_tensor,\n", + " obj_id=obj_id,\n", + " )\n", + ")\n", + "out = response[\"outputs\"]\n", + "\n", + "plt.close(\"all\")\n", + "visualize_formatted_frame_output(\n", + " frame_idx,\n", + " video_frames_for_vis,\n", + " outputs_list=[prepare_masks_for_visualization({frame_idx: out})],\n", + " titles=[\"SAM 3 Dense Tracking outputs\"],\n", + " figsize=(6, 4),\n", + " points_list=[points_abs],\n", + " points_labels_list=[labels],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Process Video Frames", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496748120, + "executionStopTime": 1762496774486, + "isCommentPanelOpen": false, + 
"jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "cf1d2dda-464b-4a6a-aff3-d729aa486ec3", + "output": { + "id": "824678093489712", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "cf1d2dda-464b-4a6a-aff3-d729aa486ec3", + "serverExecutionDuration": 25528.605932001, + "showInput": true + }, + "outputs": [], + "source": [ + "# now we propagate the outputs from frame 0 to the end of the video and collect all outputs\n", + "outputs_per_frame = propagate_in_video(predictor, session_id)\n", + "\n", + "# finally, we reformat the outputs for visualization and plot the outputs every 60 frames\n", + "outputs_per_frame = prepare_masks_for_visualization(outputs_per_frame)\n", + "\n", + "vis_frame_stride = 60\n", + "plt.close(\"all\")\n", + "for frame_idx in range(0, len(outputs_per_frame), vis_frame_stride):\n", + " visualize_formatted_frame_output(\n", + " frame_idx,\n", + " video_frames_for_vis,\n", + " outputs_list=[outputs_per_frame],\n", + " titles=[\"SAM 3 Dense Tracking outputs\"],\n", + " figsize=(6, 4),\n", + " )" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "customInput": null, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": "79b12d4e-cdf0-41b8-9939-bcc1d5422115", + "outputsInitialized": false, + "showInput": false + }, + "source": [ + "### Refining an existing object with point prompts\n", + "\n", + "We can also refine the segmentation mask of an existing object through point prompts.\n", + "\n", + "Assuming that we've changed our mind (again) -- for Object ID 2 (the dancer in the front whom we just added back in the step above), now we only want to segment her T-shirt instead of her whole body. We can adjust the segmentation mask with a few more positive and negative clicks." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Segment T-shirt with Clicks", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496774494, + "executionStopTime": 1762496775380, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "8114fb12-386d-45e0-b875-f74eee630d96", + "outputsInitialized": false, + "requestMsgId": "8114fb12-386d-45e0-b875-f74eee630d96", + "serverExecutionDuration": 4.0469909999956, + "showInput": true + }, + "outputs": [], + "source": [ + "# For the dancer in the front, suppose now we only want to segment her T-shirt instead of her whole body\n", + "# we will use 2 positive clicks and 2 negative clicks to select her shirt.\n", + "\n", + "frame_idx = 0\n", + "obj_id = 2\n", + "points_abs = np.array(\n", + " [\n", + " [740, 450], # positive click\n", + " [760, 630], # negative click\n", + " [840, 640], # negative click\n", + " [760, 550], # positive click\n", + " ]\n", + ")\n", + "# positive clicks have label 1, while negative clicks have label 0\n", + "labels = np.array([1, 0, 0, 1])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Process and Visualize Frame Outputs", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496775385, + "executionStopTime": 1762496777685, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "5e66f671-aa71-42d7-a68d-8dc6430df8fe", + "output": { + "id": "25486291537675748", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "5e66f671-aa71-42d7-a68d-8dc6430df8fe", + "serverExecutionDuration": 1227.9235379992, + "showInput": true + }, + 
"outputs": [], + "source": [ + "# convert points and labels to tensors; also convert to relative coordinates\n", + "points_tensor = torch.tensor(\n", + " abs_to_rel_coords(points_abs, IMG_WIDTH, IMG_HEIGHT, coord_type=\"point\"),\n", + " dtype=torch.float32,\n", + ")\n", + "points_labels_tensor = torch.tensor(labels, dtype=torch.int32)\n", + "\n", + "response = predictor.handle_request(\n", + " request=dict(\n", + " type=\"add_prompt\",\n", + " session_id=session_id,\n", + " frame_index=frame_idx,\n", + " points=points_tensor,\n", + " point_labels=points_labels_tensor,\n", + " obj_id=obj_id,\n", + " )\n", + ")\n", + "out = response[\"outputs\"]\n", + "\n", + "plt.close(\"all\")\n", + "visualize_formatted_frame_output(\n", + " frame_idx,\n", + " video_frames_for_vis,\n", + " outputs_list=[prepare_masks_for_visualization({frame_idx: out})],\n", + " titles=[\"SAM 3 Dense Tracking outputs\"],\n", + " figsize=(6, 4),\n", + " points_list=[points_abs],\n", + " points_labels_list=[labels],\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Visualize Video Tracking Outputs", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1762496777688, + "executionStopTime": 1762496803927, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "5c4a35a7-e5cc-4c7d-ba05-25540665d125", + "output": { + "id": "1222230279729631", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "5c4a35a7-e5cc-4c7d-ba05-25540665d125", + "serverExecutionDuration": 25325.393453, + "showInput": true + }, + "outputs": [], + "source": [ + "# now we propagate the outputs from frame 0 to the end of the video and collect all outputs\n", + "outputs_per_frame = propagate_in_video(predictor, session_id)\n", + "\n", + "# finally, we reformat the outputs for 
visualization and plot the outputs every 60 frames\n", + "outputs_per_frame = prepare_masks_for_visualization(outputs_per_frame)\n", + "\n", + "vis_frame_stride = 60\n", + "plt.close(\"all\")\n", + "for frame_idx in range(0, len(outputs_per_frame), vis_frame_stride):\n", + " visualize_formatted_frame_output(\n", + " frame_idx,\n", + " video_frames_for_vis,\n", + " outputs_list=[outputs_per_frame],\n", + " titles=[\"SAM 3 Dense Tracking outputs\"],\n", + " figsize=(6, 4),\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "attachments": [], + "bentoAICellStatus": "none", + "isCommentPanelOpen": false, + "language": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "customInput": null, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": "3f8d998b-4585-4956-b15d-1d4078cf8927", + "outputsInitialized": false, + "showInput": false + }, + "originalKey": "b1d99d9b-c26e-4d5a-a4a2-70aab345fd77", + "outputsInitialized": false, + "showInput": false + }, + "source": [ + "### Close session\n", + "\n", + "Each session is tied to a single video. 
We can close the session after inference to free up its resources.\n", + "\n", + "(Then, you may start a new session on another video.)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Close inference session", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496803937, + "executionStopTime": 1762496805854, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Reset Session Request", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950861264, + "executionStopTime": 1761950863082, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "d08a9e5f-3563-4aa1-ba8a-9f94615e4d7e", + "outputsInitialized": true, + "requestMsgId": "6e872e9a-3803-418f-827d-f1d817ef9cb9", + "serverExecutionDuration": 926.3133899949, + "showInput": true + }, + "originalKey": "4f94819e-de3c-49bc-a25f-0790b2fa2cfb", + "output": { + "id": "1532190008092267", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "4f94819e-de3c-49bc-a25f-0790b2fa2cfb", + "serverExecutionDuration": 941.08029199924 + }, + "outputs": [], + "source": [ + "# finally, close the inference session to free its GPU resources\n", + "# (you may start a new session on another video)\n", + "_ = predictor.handle_request(\n", + " request=dict(\n", + " type=\"close_session\",\n", + " session_id=session_id,\n", + " )\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "attachments": [], + "bentoAICellStatus": "none", + "isCommentPanelOpen": false, + "language": "markdown", + "metadata": { + "bentoAICellStatus": "none", + "customInput": null, + "isCommentPanelOpen": false, + "language": "markdown", + "originalKey": 
"d79e36cd-a691-420d-8061-ad5222913770", + "outputsInitialized": false, + "showInput": false + }, + "originalKey": "5f1ced4a-7f9b-4a3f-88bb-77fd601466eb", + "outputsInitialized": false, + "showInput": false + }, + "source": [ + "### Clean-up\n", + "\n", + "After all inference is done, we can shutdown the predictor to free up the multi-GPU process group." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Shutdown Predictor", + "origin": "ai" + }, + "collapsed": false, + "customOutput": null, + "executionStartTime": 1762496805866, + "executionStopTime": 1762496807085, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Shutdown Predictor", + "origin": "ai" + }, + "collapsed": false, + "customInput": null, + "customOutput": null, + "executionStartTime": 1761950863090, + "executionStopTime": 1761950863987, + "isCommentPanelOpen": false, + "language": "python", + "originalKey": "2de9666e-d888-4b2b-955a-82727363fc59", + "outputsInitialized": true, + "requestMsgId": "59e250c1-3d4c-436d-aa6e-fc0429fe4d8f", + "serverExecutionDuration": 282.37027197611, + "showInput": true + }, + "originalKey": "bb5f6b72-d945-4193-8988-2490e9168882", + "output": { + "id": "1866958724222293", + "loadingStatus": "before loading" + }, + "outputsInitialized": true, + "requestMsgId": "bb5f6b72-d945-4193-8988-2490e9168882", + "serverExecutionDuration": 284.71523799999 + }, + "outputs": [], + "source": [ + "# after all inference is done, we can shutdown the predictor\n", + "# to free up the multi-GPU process group\n", + "predictor.shutdown()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "bentoAICellStatus": "none", + "bentoCellName": { + "name": "Cell 33", + "origin": "initial" + }, + "collapsed": false, + "customInput": null, + "customOutput": 
null, + "executionStartTime": 1762496807093, + "executionStopTime": 1762496807812, + "isCommentPanelOpen": false, + "jupyter": { + "outputs_hidden": false + }, + "language": "python", + "originalKey": "e4ad9f5f-c0df-4e30-97a2-40d389ba92ac", + "outputsInitialized": false, + "requestMsgId": "e4ad9f5f-c0df-4e30-97a2-40d389ba92ac", + "serverExecutionDuration": 3.5742059990298, + "showInput": true + }, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "bento_stylesheets": { + "bento/extensions/flow/main.css": true, + "bento/extensions/kernel_selector/main.css": true, + "bento/extensions/kernel_ui/main.css": true, + "bento/extensions/new_kernel/main.css": true, + "bento/extensions/system_usage/main.css": true, + "bento/extensions/theme/main.css": true + }, + "captumWidgetMessage": [], + "fileHeader": "", + "fileUid": "8685c221-c143-4b84-98ec-b1f023cedd6c", + "isAdHoc": false, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.11" + }, + "last_base_url": "https://bento.edge.x2p.facebook.net/", + "last_kernel_id": "b57809cb-57de-4b58-a47a-2cd14cd7dc51", + "last_msg_id": "be2245fc-daa1cc5649ef79144c475c5d_1965", + "last_server_session_id": "4fb65252-bdbd-4eea-b3c3-4a9f2995ad48", + "notebookId": "825823386977069", + "notebookNumber": "N8482762" + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/source_code/sam3/medsam3_brats/4_predict_sam3.py b/source_code/sam3/medsam3_brats/4_predict_sam3.py new file mode 100644 index 0000000000000000000000000000000000000000..eb14bd99eadcbc50e24ae8c19ed06f0dadc36378 --- /dev/null +++ 
#!/usr/bin/env python3
"""
SAM3 + LoRA inference script — mirrors SegMamba's 4_predict.py evaluation flow.

Reads SegMamba-format preprocessed data (.npz), runs inference with
SAM3 + LoRA + a lightweight decoder, and writes predictions in the same
format as SegMamba.
"""

import argparse
import glob
import os
import sys
from pathlib import Path

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import SimpleITK as sitk
from tqdm import tqdm
import random

sys.path.insert(0, "/root/githubs/sam3")
sys.path.insert(0, "/root/githubs/SegMamba")

# Set determinism
random.seed(123)
np.random.seed(123)
torch.manual_seed(123)


def dice(pred, gt):
    """Return the Dice coefficient between two binary masks.

    Two empty masks count as a perfect match (1.0).
    """
    pred = pred.astype(bool)
    gt = gt.astype(bool)
    union = np.sum(pred) + np.sum(gt)
    if union == 0:
        # union == 0 implies both masks are empty -> perfect agreement.
        # (The original `0.0` branch here was unreachable.)
        return 1.0
    return 2.0 * np.sum(pred & gt) / union


class MedSAM3DetectorSeg(nn.Module):
    """Same architecture as training: SAM3 detector backbone -> lightweight
    decoder -> mask logits.

    4-class segmentation: 0=background, 1=NCR, 2=ED, 3=ET.
    """

    def __init__(self, sam3_detector: nn.Module, image_size: int = 1008, num_classes: int = 4):
        super().__init__()
        self.detector = sam3_detector
        self.image_size = int(image_size)
        self.num_classes = num_classes
        # Normalization buffers map inputs from [0, 1] to [-1, 1].
        self.register_buffer("mean", torch.tensor([0.5, 0.5, 0.5]).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor([0.5, 0.5, 0.5]).view(1, 3, 1, 1))

        # lightweight decoder; expects a 256-channel feature map from SAM3 backbone
        # and upsamples 8x, emitting num_classes channels (4-class segmentation)
        self.decoder = nn.Sequential(
            nn.Conv2d(256, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(128, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(64, 32, 3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(32, num_classes, 1),  # 4-class output
        )

    def _preprocess(self, images: torch.Tensor) -> torch.Tensor:
        """Resize to the backbone input size and normalize to [-1, 1]."""
        _, _, h, w = images.shape
        if h != self.image_size or w != self.image_size:
            images = F.interpolate(
                images, size=(self.image_size, self.image_size), mode="bilinear", align_corners=False
            )
        images = (images - self.mean.to(images.device)) / self.std.to(images.device)
        return images

    def _pick_feat(self, backbone_out) -> torch.Tensor:
        """Extract a 4D (B, C, H, W) feature map from the backbone output.

        Raises:
            RuntimeError: if no 4D tensor can be located in the output.
        """
        feat = None
        if isinstance(backbone_out, dict):
            if "sam3_features" in backbone_out:
                feat = backbone_out["sam3_features"]
            elif "features" in backbone_out:
                feat = backbone_out["features"]
            else:
                # Fall back to the first 4D tensor found in the dict.
                for v in backbone_out.values():
                    if isinstance(v, torch.Tensor) and v.ndim == 4:
                        feat = v
                        break
        elif isinstance(backbone_out, torch.Tensor):
            feat = backbone_out
        if feat is None or not isinstance(feat, torch.Tensor) or feat.ndim != 4:
            raise RuntimeError("Could not find a 4D feature map in SAM3 backbone output")
        return feat

    def forward(self, images: torch.Tensor) -> torch.Tensor:
        """Return per-pixel class logits at the input resolution."""
        orig_h, orig_w = images.shape[-2:]
        x = self._preprocess(images)
        backbone_out = self.detector.backbone.forward_image(x)
        feat = self._pick_feat(backbone_out)
        logits = self.decoder(feat)  # (B, num_classes, ?, ?)
        if logits.shape[-2:] != (orig_h, orig_w):
            logits = F.interpolate(logits, size=(orig_h, orig_w), mode="bilinear", align_corners=False)
        return logits  # (B, num_classes, H, W)


def load_model(checkpoint_path: str, lora_weights: str, decoder_weights: str = None, device: str = "cuda"):
    """Load the SAM3 + LoRA segmentation model.

    Args:
        checkpoint_path: path to the base SAM3 checkpoint.
        lora_weights: path to the trained LoRA weights.
        decoder_weights: path to the decoder weights; defaults to
            "best_decoder_weights.pt" next to the LoRA weights.
        device: device to place the model on.

    Returns:
        The model in eval mode on `device`.
    """
    from sam3.model_builder import build_sam3_video_model
    from lora import apply_lora_to_model, load_lora_weights

    print(f"Loading SAM3 from: {checkpoint_path}")
    sam3 = build_sam3_video_model(
        checkpoint_path=checkpoint_path,
        load_from_HF=False,
        device=device,
        apply_temporal_disambiguation=True,
    )

    # Build the segmentation model (4 classes: background + 3 tumor classes).
    model = MedSAM3DetectorSeg(sam3.detector, image_size=1008, num_classes=4)

    # Inject LoRA adapters into the detector.
    target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "qkv", "proj"]
    print("Applying LoRA to detector...")
    apply_lora_to_model(
        model.detector,
        rank=8,
        alpha=16.0,
        dropout=0.0,
        target_modules=target_modules,
        exclude_modules=[],
    )

    # Load the trained LoRA weights.
    print(f"Loading LoRA weights from: {lora_weights}")
    load_lora_weights(model.detector, lora_weights)

    # Resolve the decoder weights path (default: sibling of the LoRA weights).
    if decoder_weights is not None:
        decoder_path = Path(decoder_weights)
    else:
        decoder_path = Path(lora_weights).parent / "best_decoder_weights.pt"

    if decoder_path.exists():
        print(f"Loading decoder weights from: {decoder_path}")
        decoder_state = torch.load(decoder_path, map_location="cpu")
        model.decoder.load_state_dict(decoder_state)
    else:
        print(f"WARNING: Decoder weights not found at {decoder_path}")
        print("The model will use randomly initialized decoder - predictions will be wrong!")
        print("Please retrain with the updated training script that saves decoder weights.")

    model = model.to(device)
    model.eval()
    return model


def convert_labels(labels):
    """Convert a label volume into 3 binary channels (TC, WT, ET).

    - TC (Tumor Core): label==1 or label==3
    - WT (Whole Tumor): label==1 or label==2 or label==3
    - ET (Enhancing Tumor): label==3
    """
    result = [
        (labels == 1) | (labels == 3),  # TC
        (labels == 1) | (labels == 2) | (labels == 3),  # WT
        labels == 3,  # ET
    ]
    return np.stack(result, axis=0).astype(np.float32)


def predict_volume(model, volume_4d: np.ndarray, modality: int = 0,
                   target_size: int = 512, device: str = "cuda") -> np.ndarray:
    """Run 4-class slice-wise segmentation over a 4-modality 3D volume.

    Args:
        model: segmentation model producing 4-class logits.
        volume_4d: (4, D, H, W) array holding the four modalities.
        modality: modality index to use (0=T1, 1=T1ce, 2=T2, 3=FLAIR).
        target_size: square size the slices are resized to for inference.
        device: compute device.

    Returns:
        pred_3d: (D, H, W) uint8 class predictions (0=background, 1=NCR, 2=ED, 3=ET).
    """
    # Select the requested modality.
    volume = volume_4d[modality]  # (D, H, W)
    D, H, W = volume.shape

    pred_3d = np.zeros((D, H, W), dtype=np.uint8)

    with torch.no_grad():
        for z in range(D):
            slice_2d = volume[z]  # (H, W)

            # Min-max normalize each slice to [0, 1]; constant slices map to 0.
            v_min, v_max = slice_2d.min(), slice_2d.max()
            if v_max > v_min:
                slice_2d = (slice_2d - v_min) / (v_max - v_min)
            else:
                slice_2d = np.zeros_like(slice_2d)

            # Replicate the grayscale slice to 3 channels (RGB-style input).
            slice_rgb = np.stack([slice_2d] * 3, axis=0)  # (3, H, W)

            # Resize to the inference size.
            slice_tensor = torch.from_numpy(slice_rgb).float().unsqueeze(0)  # (1, 3, H, W)
            if H != target_size or W != target_size:
                slice_tensor = F.interpolate(slice_tensor, size=(target_size, target_size),
                                             mode="bilinear", align_corners=False)

            slice_tensor = slice_tensor.to(device)

            # Inference — 4-class logits.
            logits = model(slice_tensor)  # (1, 4, H, W)
            pred_class = logits.argmax(dim=1)  # (1, H, W)

            # Resize back with nearest-neighbor so labels stay discrete.
            if H != target_size or W != target_size:
                pred_class = F.interpolate(pred_class.unsqueeze(1).float(), size=(H, W),
                                           mode="nearest").squeeze(1).long()

            pred_3d[z] = pred_class[0].cpu().numpy()

    return pred_3d
def labels_to_regions(pred_3d: np.ndarray) -> dict:
    """Convert 4-class predictions into BraTS TC/WT/ET region masks.

    BraTS labels:
        0: background
        1: NCR (necrotic tumor core)
        2: ED (peritumoral edema)
        3: ET (enhancing tumor)

    Region definitions:
        TC (tumor core)      = NCR + ET      = labels {1, 3}
        WT (whole tumor)     = NCR + ED + ET = labels {1, 2, 3}
        ET (enhancing tumor) = label 3
    """
    is_ncr = pred_3d == 1
    is_ed = pred_3d == 2
    is_et = pred_3d == 3
    return {
        "TC": (is_ncr | is_et).astype(np.uint8),
        "WT": (is_ncr | is_ed | is_et).astype(np.uint8),
        "ET": is_et.astype(np.uint8),
    }


def main():
    """CLI entry point: SAM3+LoRA inference over a BraTS2023 split."""
    parser = argparse.ArgumentParser(description="SAM3+LoRA inference for BraTS2023 (SegMamba-compatible)")

    # Data arguments
    parser.add_argument("--data_dir", type=str, default="/data/yty/brats23_processed",
                        help="Preprocessed data directory (contains *.npz)")
    parser.add_argument("--split", type=str, default="test", choices=["train", "val", "test", "all"])
    parser.add_argument("--train_rate", type=float, default=0.7)
    parser.add_argument("--val_rate", type=float, default=0.1)
    parser.add_argument("--test_rate", type=float, default=0.2)
    parser.add_argument("--seed", type=int, default=42)

    # Model arguments
    parser.add_argument("--checkpoint", type=str, default="/data/yty/sam3/sam3.pt",
                        help="SAM3 checkpoint path")
    parser.add_argument("--lora_weights", type=str,
                        default="/data/yty/brats23_sam3_video_lora_bestonly_122/checkpoints/best_lora_weights.pt",
                        help="LoRA weights path")
    parser.add_argument("--decoder_weights", type=str, default=None,
                        help="Decoder weights path (default: auto-detect from lora_weights dir)")
    parser.add_argument("--modality", type=int, default=0,
                        help="Which modality to use (0=T1, 1=T1ce, 2=T2, 3=FLAIR)")
    parser.add_argument("--target_size", type=int, default=512,
                        help="Target image size for inference")

    # Output arguments
    parser.add_argument("--save_dir", type=str, default="/data/yty/brats23_sam3_predictions",
                        help="Directory to save predictions")
    parser.add_argument("--device", type=str, default="cuda:0")
    parser.add_argument("--raw_spacing", type=str, default="1,1,1")
    parser.add_argument("--print_dice", action="store_true", help="Print dice for each case")

    args = parser.parse_args()

    raw_spacing = [float(x) for x in args.raw_spacing.split(",")]

    # Load the model once up front.
    model = load_model(args.checkpoint, args.lora_weights, args.decoder_weights, args.device)

    # Enumerate preprocessed cases and select the requested split.
    all_paths = sorted(glob.glob(os.path.join(args.data_dir, "*.npz")))
    all_names = [os.path.splitext(os.path.basename(p))[0] for p in all_paths]

    if args.split == "all":
        cases = list(zip(all_names, all_paths))
    else:
        # Deterministic shuffled split by rate (seeded for reproducibility).
        n = len(all_names)
        indices = list(range(n))
        rng = np.random.RandomState(args.seed)
        rng.shuffle(indices)

        n_train = int(n * args.train_rate)
        n_val = int(n * args.val_rate)
        split_map = {
            "train": indices[:n_train],
            "val": indices[n_train:n_train + n_val],
            "test": indices[n_train + n_val:],
        }
        cases = [(all_names[i], all_paths[i]) for i in split_map[args.split]]

    print(f"Found {len(cases)} cases for split '{args.split}'")

    os.makedirs(args.save_dir, exist_ok=True)

    all_dices = []

    for case_name, npz_path in tqdm(cases, desc="Predicting"):
        data = np.load(npz_path)
        image_4d = data["data"]  # (4, D, H, W)
        seg = data.get("seg", None)  # (1, D, H, W) or None

        # 4-class prediction, then conversion to TC/WT/ET region masks.
        pred_classes = predict_volume(model, image_4d, modality=args.modality,
                                      target_size=args.target_size, device=args.device)
        pred_regions = labels_to_regions(pred_classes)
        pred_3c = np.stack([pred_regions["TC"], pred_regions["WT"], pred_regions["ET"]], axis=0)

        # Per-case dice (only when ground truth exists and reporting was requested).
        if seg is not None and args.print_dice:
            gt = seg[0]  # (D, H, W)
            gt_3c = convert_labels(gt)

            dices = [dice(pred_3c[i], gt_3c[i]) for i in range(3)]

            # Overall tumor-vs-background dice as a single summary number.
            gt_binary = (gt > 0).astype(np.float32)
            pred_binary = (pred_classes > 0).astype(np.float32)
            overall_dice = dice(pred_binary, gt_binary)

            print(f"{case_name}: TC={dices[0]:.4f}, WT={dices[1]:.4f}, ET={dices[2]:.4f}, Overall={overall_dice:.4f}")
            all_dices.append({
                "case": case_name,
                "TC": dices[0],
                "WT": dices[1],
                "ET": dices[2],
                "Overall": overall_dice,
            })

        # Save the 3-channel region prediction as NIfTI.
        out_path = os.path.join(args.save_dir, f"{case_name}.nii.gz")
        pred_itk = sitk.GetImageFromArray(pred_3c, isVector=False)
        pred_itk.SetSpacing((raw_spacing[0], raw_spacing[1], raw_spacing[2], 1.0))
        sitk.WriteImage(pred_itk, out_path)

    # Summarize the collected metrics.
    if all_dices:
        print("\n" + "=" * 60)
        print(f"Results Summary ({len(all_dices)} cases):")
        avg_tc = np.mean([d["TC"] for d in all_dices])
        avg_wt = np.mean([d["WT"] for d in all_dices])
        avg_et = np.mean([d["ET"] for d in all_dices])
        avg_overall = np.mean([d["Overall"] for d in all_dices])
        print(f"  Average TC Dice: {avg_tc:.4f}")
        print(f"  Average WT Dice: {avg_wt:.4f}")
        print(f"  Average ET Dice: {avg_et:.4f}")
        print(f"  Average Overall Dice: {avg_overall:.4f}")
        print("=" * 60)

        # Persist the per-case metrics next to the predictions.
        np.save(os.path.join(args.save_dir, "metrics.npy"), all_dices, allow_pickle=True)


if __name__ == "__main__":
    main()
volume + + +def load_brats_case(case_dir): + """ + 加载单个BraTS病例的所有模态 + + Args: + case_dir: 病例文件夹路径 + + Returns: + data: 形状为 (4, D, H, W) 的numpy数组,4个模态 + seg: 形状为 (D, H, W) 的分割标签 + affine: NIfTI仿射矩阵 + """ + case_dir = Path(case_dir) + case_name = case_dir.name + + # BraTS2023 模态 + modalities = ['t1c', 't1n', 't2f', 't2w'] + + data = [] + affine = None + + for mod in modalities: + # 尝试不同的命名格式 + possible_names = [ + f"{mod}.nii.gz", + f"{case_name}-{mod}.nii.gz", + f"{case_name}_{mod}.nii.gz" + ] + + nii_path = None + for name in possible_names: + p = case_dir / name + if p.exists(): + nii_path = p + break + + if nii_path is None: + raise FileNotFoundError(f"Cannot find {mod} file in {case_dir}") + + nii = nib.load(str(nii_path)) + if affine is None: + affine = nii.affine + + volume = nii.get_fdata().astype(np.float32) + volume = normalize_intensity(volume) + data.append(volume) + + data = np.stack(data, axis=0) # (4, D, H, W) + + # 加载分割标签 + seg_names = [ + "seg.nii.gz", + f"{case_name}-seg.nii.gz", + f"{case_name}_seg.nii.gz" + ] + + seg = None + for name in seg_names: + p = case_dir / name + if p.exists(): + seg_nii = nib.load(str(p)) + seg = seg_nii.get_fdata().astype(np.int32) + break + + return data, seg, affine + + +def convert_to_frames(data, output_dir, case_name, modality_idx=0, target_size=(512, 512)): + """ + 将3D数据转换为帧序列(JPEG图像) + + Args: + data: 形状为 (4, D, H, W) 的数据 + output_dir: 输出目录 + case_name: 病例名称 + modality_idx: 使用哪个模态 (0=t1c, 1=t1n, 2=t2f, 3=t2w) + target_size: 目标图像大小 + """ + frames_dir = Path(output_dir) / case_name / "frames" + frames_dir.mkdir(parents=True, exist_ok=True) + + # 选择模态 + volume = data[modality_idx] # (D, H, W) + + # 转换为 (D, H, W) 格式,D为深度(切片数) + # BraTS数据通常是 (H, W, D),需要转置 + if volume.shape[0] > volume.shape[2]: + volume = np.transpose(volume, (2, 0, 1)) + + num_slices = volume.shape[0] + + for i in range(num_slices): + slice_2d = volume[i] # (H, W) + + # 归一化到 0-255 + slice_2d = (slice_2d * 255).astype(np.uint8) + + # 
转为RGB(SAM3需要RGB输入) + slice_rgb = np.stack([slice_2d, slice_2d, slice_2d], axis=-1) + + # 调整大小 + if target_size is not None: + slice_rgb = cv2.resize(slice_rgb, target_size, interpolation=cv2.INTER_LINEAR) + + # 保存为JPEG + frame_path = frames_dir / f"{i:05d}.jpg" + Image.fromarray(slice_rgb).save(str(frame_path), quality=95) + + return frames_dir, num_slices + + +def save_segmentation_masks(seg, output_dir, case_name, target_size=(512, 512)): + """ + 保存分割标签为帧序列 + + BraTS标签: + 0: 背景 + 1: NCR (Necrotic Core) - 坏死核心 + 2: ED (Edema) - 水肿 + 3: ET (Enhancing Tumor) - 强化肿瘤 + + 合并为: + - Whole Tumor (WT): 1+2+3 + - Tumor Core (TC): 1+3 + - Enhancing Tumor (ET): 3 + """ + if seg is None: + return None + + masks_dir = Path(output_dir) / case_name / "masks" + masks_dir.mkdir(parents=True, exist_ok=True) + + # 转置如果需要 + if seg.shape[0] > seg.shape[2]: + seg = np.transpose(seg, (2, 0, 1)) + + num_slices = seg.shape[0] + + # 创建不同区域的mask + for i in range(num_slices): + slice_seg = seg[i] # (H, W) + + # Whole Tumor (包含所有肿瘤区域) + wt_mask = ((slice_seg == 1) | (slice_seg == 2) | (slice_seg == 3)).astype(np.uint8) * 255 + + # 调整大小 + if target_size is not None: + wt_mask = cv2.resize(wt_mask, target_size, interpolation=cv2.INTER_NEAREST) + + # 保存mask + mask_path = masks_dir / f"{i:05d}.png" + Image.fromarray(wt_mask).save(str(mask_path)) + + return masks_dir + + +def get_tumor_bbox_and_center(seg, slice_idx=None): + """ + 获取肿瘤的边界框和中心点 + + Args: + seg: 分割标签 (D, H, W) + slice_idx: 指定切片索引,如果为None则找最大肿瘤面积的切片 + + Returns: + slice_idx: 选中的切片索引 + bbox: (x_min, y_min, x_max, y_max) + center: (x, y) + """ + if seg is None: + return None, None, None + + # 转置如果需要 + if seg.shape[0] > seg.shape[2]: + seg = np.transpose(seg, (2, 0, 1)) + + # 整个肿瘤区域 + tumor_mask = (seg > 0) + + # 如果没有指定切片,找最大肿瘤面积的切片 + if slice_idx is None: + tumor_areas = tumor_mask.sum(axis=(1, 2)) + slice_idx = int(np.argmax(tumor_areas)) + + # 获取该切片的肿瘤mask + slice_tumor = tumor_mask[slice_idx] + + if slice_tumor.sum() == 0: + return 
slice_idx, None, None + + # 找边界框 + rows = np.any(slice_tumor, axis=1) + cols = np.any(slice_tumor, axis=0) + y_min, y_max = np.where(rows)[0][[0, -1]] + x_min, x_max = np.where(cols)[0][[0, -1]] + + bbox = (int(x_min), int(y_min), int(x_max), int(y_max)) + center = ((x_min + x_max) // 2, (y_min + y_max) // 2) + + return slice_idx, bbox, center + + +def save_prompt_info(output_dir, case_name, slice_idx, bbox, center, + original_size, target_size=(512, 512)): + """ + 保存提示信息(用于SAM3推理) + """ + info_dir = Path(output_dir) / case_name + info_dir.mkdir(parents=True, exist_ok=True) + + # 计算缩放比例 + scale_x = target_size[0] / original_size[1] + scale_y = target_size[1] / original_size[0] + + # 缩放bbox和center + if bbox is not None: + scaled_bbox = ( + int(bbox[0] * scale_x), + int(bbox[1] * scale_y), + int(bbox[2] * scale_x), + int(bbox[3] * scale_y) + ) + else: + scaled_bbox = None + + if center is not None: + scaled_center = ( + int(center[0] * scale_x), + int(center[1] * scale_y) + ) + else: + scaled_center = None + + info = { + 'slice_idx': slice_idx, + 'bbox': scaled_bbox, + 'center': scaled_center, + 'original_size': original_size, + 'target_size': target_size, + 'scale': (scale_x, scale_y) + } + + np.save(str(info_dir / 'prompt_info.npy'), info, allow_pickle=True) + + return info + + +def process_single_case(case_dir, output_dir, modality_idx=0, target_size=(512, 512)): + """ + 处理单个病例 + """ + case_name = Path(case_dir).name + print(f"Processing {case_name}...") + + try: + # 加载数据 + data, seg, affine = load_brats_case(case_dir) + original_size = data.shape[2:4] # (H, W) + + # 转换为帧 + frames_dir, num_slices = convert_to_frames( + data, output_dir, case_name, + modality_idx=modality_idx, + target_size=target_size + ) + + # 保存mask + masks_dir = save_segmentation_masks(seg, output_dir, case_name, target_size=target_size) + + # 获取提示信息 + slice_idx, bbox, center = get_tumor_bbox_and_center(seg) + + # 保存提示信息 + prompt_info = save_prompt_info( + output_dir, case_name, slice_idx, 
bbox, center, + original_size, target_size + ) + + print(f" - {num_slices} slices processed") + print(f" - Tumor center slice: {slice_idx}") + if bbox: + print(f" - BBox: {prompt_info['bbox']}") + print(f" - Center: {prompt_info['center']}") + + return True + + except Exception as e: + print(f"Error processing {case_name}: {e}") + import traceback + traceback.print_exc() + return False + + +def _make_splits(case_names, train_ratio: float, val_ratio: float, test_ratio: float, seed: int): + """Deterministic case-level split into train/val/test.""" + if not (0 < train_ratio < 1) or not (0 <= val_ratio < 1) or not (0 <= test_ratio < 1): + raise ValueError("ratios must be in [0,1) and train_ratio in (0,1)") + if abs((train_ratio + val_ratio + test_ratio) - 1.0) > 1e-6: + raise ValueError(f"train/val/test ratios must sum to 1.0, got {train_ratio+val_ratio+test_ratio:.6f}") + + case_names = list(case_names) + rng = np.random.RandomState(seed) + rng.shuffle(case_names) + + n = len(case_names) + n_train = int(round(n * train_ratio)) + n_val = int(round(n * val_ratio)) + # ensure non-negative and all used + n_train = min(max(n_train, 0), n) + n_val = min(max(n_val, 0), n - n_train) + n_test = n - n_train - n_val + + train = case_names[:n_train] + val = case_names[n_train : n_train + n_val] + test = case_names[n_train + n_val :] + return {"train": train, "val": val, "test": test} + + +def _already_processed(output_dir: Path, case_name: str) -> bool: + """Heuristic for resuming: prompt_info.npy exists AND frames/masks dirs exist.""" + case_dir = output_dir / case_name + if not (case_dir / "prompt_info.npy").exists(): + return False + if not (case_dir / "frames").is_dir(): + return False + if not (case_dir / "masks").is_dir(): + return False + return True + + +def _worker_process_case(args): + case_dir, output_dir, modality_idx, target_size, skip_existing = args + case_name = Path(case_dir).name + output_dir = Path(output_dir) + if skip_existing and 
_already_processed(output_dir, case_name): + return case_name, True, "skipped" + ok = process_single_case(case_dir, output_dir, modality_idx=modality_idx, target_size=target_size) + return case_name, ok, "processed" if ok else "failed" + + +def main(): + parser = argparse.ArgumentParser(description='Preprocess BraTS data for SAM3') + parser.add_argument('--input_dir', type=str, required=True, + help='Input directory containing BraTS cases') + parser.add_argument('--output_dir', type=str, required=True, + help='Output directory for processed data') + parser.add_argument('--modality', type=int, default=0, + help='Modality index: 0=t1c, 1=t1n, 2=t2f, 3=t2w') + parser.add_argument('--target_size', type=int, nargs=2, default=[512, 512], + help='Target image size (width height)') + parser.add_argument('--num_cases', type=int, default=None, + help='Number of cases to process (None for all)') + parser.add_argument('--num_processes', type=int, default=1, + help='Number of parallel worker processes for preprocessing') + parser.add_argument('--skip_existing', action='store_true', + help='Skip cases that already have frames/masks/prompt_info.npy in output_dir') + + # split file generation (case-level) + parser.add_argument('--write_splits', action='store_true', + help='Write train/val/test split json to output_dir/splits.json') + parser.add_argument('--train_ratio', type=float, default=0.7) + parser.add_argument('--val_ratio', type=float, default=0.1) + parser.add_argument('--test_ratio', type=float, default=0.2) + parser.add_argument('--seed', type=int, default=42) + + args = parser.parse_args() + + input_dir = Path(args.input_dir) + output_dir = Path(args.output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + # 获取所有病例 + case_dirs = sorted([d for d in input_dir.iterdir() if d.is_dir()]) + + if args.num_cases is not None: + case_dirs = case_dirs[:args.num_cases] + + print(f"Found {len(case_dirs)} cases to process") + + # write split file (based on all available case 
dirs) + if args.write_splits: + splits = _make_splits( + [Path(d).name for d in case_dirs], + train_ratio=args.train_ratio, + val_ratio=args.val_ratio, + test_ratio=args.test_ratio, + seed=args.seed, + ) + split_path = output_dir / "splits.json" + with open(split_path, "w") as f: + json.dump( + { + "seed": args.seed, + "train_ratio": args.train_ratio, + "val_ratio": args.val_ratio, + "test_ratio": args.test_ratio, + "splits": splits, + }, + f, + indent=2, + ) + print( + f"Wrote splits to {split_path} " + f"(train={len(splits['train'])}, val={len(splits['val'])}, test={len(splits['test'])})" + ) + + success_count = 0 + target_size = tuple(args.target_size) + + if args.num_processes <= 1: + for case_dir in tqdm(case_dirs, desc="Processing cases"): + case_name = Path(case_dir).name + if args.skip_existing and _already_processed(output_dir, case_name): + continue + if process_single_case(case_dir, output_dir, modality_idx=args.modality, target_size=target_size): + success_count += 1 + else: + work = [ + (str(case_dir), str(output_dir), int(args.modality), target_size, bool(args.skip_existing)) + for case_dir in case_dirs + ] + with mp.get_context("spawn").Pool(processes=int(args.num_processes)) as pool: + for case_name, ok, status in tqdm( + pool.imap_unordered(_worker_process_case, work), + total=len(work), + desc="Processing cases (mp)", + ): + if ok and status != "skipped": + success_count += 1 + + print(f"\nProcessed {success_count}/{len(case_dirs)} cases successfully") + + +if __name__ == "__main__": + main() diff --git a/source_code/sam3/medsam3_brats/quick_test.py b/source_code/sam3/medsam3_brats/quick_test.py new file mode 100644 index 0000000000000000000000000000000000000000..21c1b12c2664125ce028d74976878117f58e293a --- /dev/null +++ b/source_code/sam3/medsam3_brats/quick_test.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 +""" +快速测试脚本 - 验证MedSAM3流程是否正常工作 +处理单个病例并显示结果 +""" + +import os +import sys +import numpy as np +import torch +from pathlib import Path + +# 
添加SAM3路径 +sys.path.insert(0, '/root/githubs/sam3') + + +def test_preprocessing(): + """测试数据预处理""" + print("\n" + "="*50) + print("Testing Data Preprocessing...") + print("="*50) + + from preprocess_brats import load_brats_case, convert_to_frames, \ + save_segmentation_masks, get_tumor_bbox_and_center, save_prompt_info + + # 配置路径 + case_dir = "/data/yty/brats2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData/BraTS-GLI-00000-000" + output_dir = "/data/yty/brats23_sam3_test" + + if not Path(case_dir).exists(): + print(f"Test case not found: {case_dir}") + return None + + print(f"Loading case: {case_dir}") + + # 加载数据 + data, seg, affine = load_brats_case(case_dir) + print(f" Data shape: {data.shape}") + print(f" Seg shape: {seg.shape if seg is not None else 'None'}") + + # 转换为帧 + case_name = Path(case_dir).name + frames_dir, num_slices = convert_to_frames( + data, output_dir, case_name, + modality_idx=0, # T1ce + target_size=(512, 512) + ) + print(f" Converted to {num_slices} frames: {frames_dir}") + + # 保存mask + masks_dir = save_segmentation_masks( + seg, output_dir, case_name, + target_size=(512, 512) + ) + print(f" Saved masks: {masks_dir}") + + # 获取提示信息 + original_size = data.shape[2:4] + slice_idx, bbox, center = get_tumor_bbox_and_center(seg) + print(f" Tumor center slice: {slice_idx}") + print(f" Original bbox: {bbox}") + print(f" Original center: {center}") + + # 保存提示信息 + prompt_info = save_prompt_info( + output_dir, case_name, slice_idx, bbox, center, + original_size, target_size=(512, 512) + ) + print(f" Scaled bbox: {prompt_info['bbox']}") + print(f" Scaled center: {prompt_info['center']}") + + print("\n✅ Preprocessing test passed!") + return output_dir + + +def test_sam3_loading(): + """测试SAM3模型加载""" + print("\n" + "="*50) + print("Testing SAM3 Model Loading...") + print("="*50) + + checkpoint_path = "/data/yty/sam3/sam3.pt" + + if not Path(checkpoint_path).exists(): + print(f"Checkpoint not found: {checkpoint_path}") + return False + + print(f"Loading 
checkpoint: {checkpoint_path}") + + try: + from sam3.model_builder import build_sam3_video_model + + model = build_sam3_video_model( + checkpoint_path=checkpoint_path, + load_from_HF=False, + device='cuda' if torch.cuda.is_available() else 'cpu' + ) + + print(f" Model loaded successfully!") + print(f" Device: {next(model.parameters()).device}") + + print("\n✅ Model loading test passed!") + return True + + except Exception as e: + print(f"Error loading model: {e}") + import traceback + traceback.print_exc() + return False + + +def test_inference(processed_dir): + """测试推理""" + print("\n" + "="*50) + print("Testing SAM3 Inference...") + print("="*50) + + if processed_dir is None: + print("Skipping inference test (no processed data)") + return + + checkpoint_path = "/data/yty/sam3/sam3.pt" + + try: + from infer_brats_sam3 import MedSAM3VideoInference, load_prompt_info + + # 初始化模型 + print("Initializing MedSAM3VideoInference...") + model = MedSAM3VideoInference( + checkpoint_path=checkpoint_path, + device='cuda' if torch.cuda.is_available() else 'cpu' + ) + + # 获取测试病例 + case_dirs = sorted([d for d in Path(processed_dir).iterdir() if d.is_dir()]) + if not case_dirs: + print("No processed cases found") + return + + case_dir = case_dirs[0] + case_name = case_dir.name + frames_dir = case_dir / "frames" + + print(f"Testing on case: {case_name}") + + # 加载提示信息 + prompt_info = load_prompt_info(case_dir) + if prompt_info is None: + print("No prompt info found") + return + + print(f" Prompt slice: {prompt_info['slice_idx']}") + print(f" Bbox: {prompt_info['bbox']}") + + # 运行推理 + print("Running inference...") + pred_masks = model.segment_3d_volume( + frames_dir=str(frames_dir), + prompt_slice_idx=prompt_info['slice_idx'], + prompt_type='box', + bbox=prompt_info['bbox'] + ) + + print(f" Output shape: {pred_masks.shape}") + print(f" Non-zero slices: {np.sum(pred_masks.sum(axis=(1,2)) > 0)}") + + print("\n✅ Inference test passed!") + + except Exception as e: + print(f"Error in 
inference: {e}") + import traceback + traceback.print_exc() + + +def main(): + print("="*60) + print(" MedSAM3 BraTS Quick Test") + print("="*60) + + # 检查CUDA + print(f"\nCUDA available: {torch.cuda.is_available()}") + if torch.cuda.is_available(): + print(f"CUDA device: {torch.cuda.get_device_name(0)}") + + # 测试预处理 + processed_dir = test_preprocessing() + + # 测试模型加载 + model_ok = test_sam3_loading() + + # 测试推理(如果模型加载成功) + if model_ok: + test_inference(processed_dir) + + print("\n" + "="*60) + print(" Quick Test Complete!") + print("="*60) + + +if __name__ == "__main__": + main() diff --git a/source_code/sam3/pyproject.toml b/source_code/sam3/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..e4998de0f969cd0302f96c38cb355bcc0d4287c6 --- /dev/null +++ b/source_code/sam3/pyproject.toml @@ -0,0 +1,131 @@ +[build-system] +requires = ["setuptools>=61", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "sam3" +dynamic = ["version"] +description = "SAM3 (Segment Anything Model 3) implementation" +readme = "README.md" +requires-python = ">=3.8" +license = {file = "LICENSE"} +authors = [ + {name = "Meta AI Research"} +] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +dependencies = [ + "timm>=1.0.17", + "numpy==1.26", + "tqdm", + "ftfy==6.1.1", + "regex", + "iopath>=0.1.10", + "typing_extensions", + "huggingface_hub", +] + +[project.optional-dependencies] +dev = [ + "pytest", + "pytest-cov", + "black==24.2.0", + "ufmt==2.8.0", + "ruff-api==0.1.0", + "usort==1.0.2", + "gitpython==3.1.31", + "yt-dlp", + "pandas", 
+ "opencv-python", + "pycocotools", + "numba", + "python-rapidjson", +] +notebooks = [ + "matplotlib", + "jupyter", + "notebook", + "ipywidgets", + "ipycanvas", + "ipympl", + "pycocotools", + "decord", + "opencv-python", + "einops", + "scikit-image", + "scikit-learn", +] +train = [ + "hydra-core", + "submitit", + "tensorboard", + "zstandard", + "scipy", + "torchmetrics", + "fvcore", + "fairscale", + "scikit-image", + "scikit-learn", +] + +[project.urls] +"Homepage" = "https://github.com/facebookresearch/sam3" +"Bug Tracker" = "https://github.com/facebookresearch/sam3/issues" + +[tool.setuptools] +packages = ["sam3", "sam3.model"] + +[tool.setuptools.dynamic] +version = {attr = "sam3.__version__"} + +[tool.black] +line-length = 88 +target-version = ['py38', 'py39', 'py310', 'py311', 'py312'] +include = '\.pyi?$' + +[tool.isort] +profile = "black" +multi_line_output = 3 + +[tool.usort] +first_party_detection = false + +[tool.ufmt] +formatter = "ruff-api" + +[tool.mypy] +python_version = "3.12" +warn_return_any = true +warn_unused_configs = true +disallow_untyped_defs = true +disallow_incomplete_defs = true + +[[tool.mypy.overrides]] +module = [ + "torch.*", + "torchvision.*", + "timm.*", + "numpy.*", + "PIL.*", + "tqdm.*", + "ftfy.*", + "regex.*", + "iopath.*", +] +ignore_missing_imports = true + +[tool.pytest.ini_options] +testpaths = ["tests"] +python_files = "test_*.py" +python_classes = "Test*" +python_functions = "test_*" diff --git a/source_code/sam3/sam3.egg-info/PKG-INFO b/source_code/sam3/sam3.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..23d15c4147d23fca76c6e8181eb68890b1014702 --- /dev/null +++ b/source_code/sam3/sam3.egg-info/PKG-INFO @@ -0,0 +1,525 @@ +Metadata-Version: 2.4 +Name: sam3 +Version: 0.1.0 +Summary: SAM3 (Segment Anything Model 3) implementation +Author: Meta AI Research +License: SAM License + Last Updated: November 19, 2025 + + “Agreement” means the terms and conditions for use, reproduction, 
distribution and modification of the SAM Materials set forth herein. + + + “SAM Materials” means, collectively, Documentation and the models, software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code, and other elements of the foregoing distributed by Meta and made available under this Agreement. + + “Documentation” means the specifications, manuals and documentation accompanying + SAM Materials distributed by Meta. + + + “Licensee” or “you” means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity’s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. + + + “Meta” or “we” means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) or Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). + + + “Sanctions” means any economic or trade sanctions or restrictions administered or enforced by the United States (including the Office of Foreign Assets Control of the U.S. Department of the Treasury (“OFAC”), the U.S. Department of State and the U.S. Department of Commerce), the United Nations, the European Union, or the United Kingdom. + + + “Trade Controls” means any of the following: Sanctions and applicable export and import controls. + + By using or distributing any portion or element of the SAM Materials, you agree to be bound by this Agreement. + + + 1. License Rights and Redistribution. + + + a. Grant of Rights. 
You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta’s intellectual property or other rights owned by Meta embodied in the SAM Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the SAM Materials. + + b. Redistribution and Use. + i. Distribution of SAM Materials, and any derivative works thereof, are subject to the terms of this Agreement. If you distribute or make the SAM Materials, or any derivative works thereof, available to a third party, you may only do so under the terms of this Agreement and you shall provide a copy of this Agreement with any such SAM Materials. + + + ii. If you submit for publication the results of research you perform on, using, or otherwise in connection with SAM Materials, you must acknowledge the use of SAM Materials in your publication. + + + iii. Your use of the SAM Materials must comply with applicable laws and regulations, including Trade Control Laws and applicable privacy and data protection laws. + iv. Your use of the SAM Materials will not involve or encourage others to reverse engineer, decompile or discover the underlying components of the SAM Materials. + v. You are not the target of Trade Controls and your use of SAM Materials must comply with Trade Controls. You agree not to use, or permit others to use, SAM Materials for any activities subject to the International Traffic in Arms Regulations (ITAR) or end uses prohibited by Trade Controls, including those related to military or warfare purposes, nuclear industries or applications, espionage, or the development or use of guns or illegal weapons. + 2. User Support. Your use of the SAM Materials is done at your own discretion; Meta does not process any information nor provide any service in relation to such use. Meta is under no obligation to provide any support services for the SAM Materials. 
Any support provided is “as is”, “with all faults”, and without warranty of any kind. + + + 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE SAM MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN “AS IS” BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE SAM MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE SAM MATERIALS AND ANY OUTPUT AND RESULTS. + + 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY DIRECT OR INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. + + 5. Intellectual Property. + + + a. Subject to Meta’s ownership of SAM Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the SAM Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. + + b. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the SAM Materials, outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. 
You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the SAM Materials. + + 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the SAM Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the SAM Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. + + 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. + + + 8. Modifications and Amendments. Meta may modify this Agreement from time to time; provided that they are similar in spirit to the current version of the Agreement, but may differ in detail to address new problems or concerns. All such changes will be effective immediately. Your continued use of the SAM Materials after any modification to this Agreement constitutes your agreement to such modification. Except as provided in this Agreement, no modification or addition to any provision of this Agreement will be binding unless it is in writing and signed by an authorized representative of both you and Meta. 
+ +Project-URL: Homepage, https://github.com/facebookresearch/sam3 +Project-URL: Bug Tracker, https://github.com/facebookresearch/sam3/issues +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: timm>=1.0.17 +Requires-Dist: numpy==1.26 +Requires-Dist: tqdm +Requires-Dist: ftfy==6.1.1 +Requires-Dist: regex +Requires-Dist: iopath>=0.1.10 +Requires-Dist: typing_extensions +Requires-Dist: huggingface_hub +Provides-Extra: dev +Requires-Dist: pytest; extra == "dev" +Requires-Dist: pytest-cov; extra == "dev" +Requires-Dist: black==24.2.0; extra == "dev" +Requires-Dist: ufmt==2.8.0; extra == "dev" +Requires-Dist: ruff-api==0.1.0; extra == "dev" +Requires-Dist: usort==1.0.2; extra == "dev" +Requires-Dist: gitpython==3.1.31; extra == "dev" +Requires-Dist: yt-dlp; extra == "dev" +Requires-Dist: pandas; extra == "dev" +Requires-Dist: opencv-python; extra == "dev" +Requires-Dist: pycocotools; extra == "dev" +Requires-Dist: numba; extra == "dev" +Requires-Dist: python-rapidjson; extra == "dev" +Provides-Extra: notebooks +Requires-Dist: matplotlib; extra == "notebooks" +Requires-Dist: jupyter; extra == "notebooks" +Requires-Dist: notebook; extra == "notebooks" +Requires-Dist: ipywidgets; extra == "notebooks" +Requires-Dist: ipycanvas; extra == "notebooks" +Requires-Dist: ipympl; extra == "notebooks" +Requires-Dist: pycocotools; extra == "notebooks" +Requires-Dist: decord; extra == "notebooks" +Requires-Dist: 
opencv-python; extra == "notebooks" +Requires-Dist: einops; extra == "notebooks" +Requires-Dist: scikit-image; extra == "notebooks" +Requires-Dist: scikit-learn; extra == "notebooks" +Provides-Extra: train +Requires-Dist: hydra-core; extra == "train" +Requires-Dist: submitit; extra == "train" +Requires-Dist: tensorboard; extra == "train" +Requires-Dist: zstandard; extra == "train" +Requires-Dist: scipy; extra == "train" +Requires-Dist: torchmetrics; extra == "train" +Requires-Dist: fvcore; extra == "train" +Requires-Dist: fairscale; extra == "train" +Requires-Dist: scikit-image; extra == "train" +Requires-Dist: scikit-learn; extra == "train" +Dynamic: license-file + +# SAM 3: Segment Anything with Concepts + +Meta Superintelligence Labs + +[Nicolas Carion](https://www.nicolascarion.com/)\*, +[Laura Gustafson](https://scholar.google.com/citations?user=c8IpF9gAAAAJ&hl=en)\*, +[Yuan-Ting Hu](https://scholar.google.com/citations?user=E8DVVYQAAAAJ&hl=en)\*, +[Shoubhik Debnath](https://scholar.google.com/citations?user=fb6FOfsAAAAJ&hl=en)\*, +[Ronghang Hu](https://ronghanghu.com/)\*, +[Didac Suris](https://www.didacsuris.com/)\*, +[Chaitanya Ryali](https://scholar.google.com/citations?user=4LWx24UAAAAJ&hl=en)\*, +[Kalyan Vasudev Alwala](https://scholar.google.co.in/citations?user=m34oaWEAAAAJ&hl=en)\*, +[Haitham Khedr](https://hkhedr.com/)\*, Andrew Huang, +[Jie Lei](https://jayleicn.github.io/), +[Tengyu Ma](https://scholar.google.com/citations?user=VeTSl0wAAAAJ&hl=en), +[Baishan Guo](https://scholar.google.com/citations?user=BC5wDu8AAAAJ&hl=en), +Arpit Kalla, [Markus Marks](https://damaggu.github.io/), +[Joseph Greer](https://scholar.google.com/citations?user=guL96CkAAAAJ&hl=en), +Meng Wang, [Peize Sun](https://peizesun.github.io/), +[Roman Rädle](https://scholar.google.com/citations?user=Tpt57v0AAAAJ&hl=en), +[Triantafyllos Afouras](https://www.robots.ox.ac.uk/~afourast/), +[Effrosyni Mavroudi](https://scholar.google.com/citations?user=vYRzGGEAAAAJ&hl=en), +[Katherine 
Xu](https://k8xu.github.io/)°, +[Tsung-Han Wu](https://patrickthwu.com/)°, +[Yu Zhou](https://yu-bryan-zhou.github.io/)°, +[Liliane Momeni](https://scholar.google.com/citations?user=Lb-KgVYAAAAJ&hl=en)°, +[Rishi Hazra](https://rishihazra.github.io/)°, +[Shuangrui Ding](https://mark12ding.github.io/)°, +[Sagar Vaze](https://sgvaze.github.io/)°, +[Francois Porcher](https://scholar.google.com/citations?user=LgHZ8hUAAAAJ&hl=en)°, +[Feng Li](https://fengli-ust.github.io/)°, +[Siyuan Li](https://siyuanliii.github.io/)°, +[Aishwarya Kamath](https://ashkamath.github.io/)°, +[Ho Kei Cheng](https://hkchengrex.com/)°, +[Piotr Dollar](https://pdollar.github.io/)†, +[Nikhila Ravi](https://nikhilaravi.com/)†, +[Kate Saenko](https://ai.bu.edu/ksaenko.html)†, +[Pengchuan Zhang](https://pzzhang.github.io/pzzhang/)†, +[Christoph Feichtenhofer](https://feichtenhofer.github.io/)† + +\* core contributor, ° intern, † project lead, order is random within groups + +[[`Paper`](https://ai.meta.com/research/publications/sam-3-segment-anything-with-concepts/)] +[[`Project`](https://ai.meta.com/sam3)] +[[`Demo`](https://segment-anything.com/)] +[[`Blog`](https://ai.meta.com/blog/segment-anything-model-3/)] +[[`BibTeX`](#citing-sam-3)] + +![SAM 3 architecture](assets/model_diagram.png?raw=true) SAM 3 is a unified foundation model for promptable segmentation in images and videos. It can detect, segment, and track objects using text or visual prompts such as points, boxes, and masks. Compared to its predecessor [SAM 2](https://github.com/facebookresearch/sam2), SAM 3 introduces the ability to exhaustively segment all instances of an open-vocabulary concept specified by a short text phrase or exemplars. Unlike prior work, SAM 3 can handle a vastly larger set of open-vocabulary prompts. 
It achieves 75-80% of human performance on our new [SA-CO benchmark](https://github.com/facebookresearch/sam3?tab=readme-ov-file#sa-co-dataset) which contains 270K unique concepts, over 50 times more than existing benchmarks. + +This breakthrough is driven by an innovative data engine that has automatically annotated over 4 million unique concepts, creating the largest high-quality open-vocabulary segmentation dataset to date. In addition, SAM 3 introduces a new model architecture featuring a presence token that improves discrimination between closely related text prompts (e.g., “a player in white” vs. “a player in red”), as well as a decoupled detector–tracker design that minimizes task interference and scales efficiently with data. + +

+ + +

+ +## Installation + +### Prerequisites + +- Python 3.12 or higher +- PyTorch 2.7 or higher +- CUDA-compatible GPU with CUDA 12.6 or higher + +1. **Create a new Conda environment:** + +```bash +conda create -n sam3 python=3.12 +conda deactivate +conda activate sam3 +``` + +2. **Install PyTorch with CUDA support:** + +```bash +pip install torch==2.7.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu126 +``` + +3. **Clone the repository and install the package:** + +```bash +git clone https://github.com/facebookresearch/sam3.git +cd sam3 +pip install -e . +``` + +4. **Install additional dependencies for example notebooks or development:** + +```bash +# For running example notebooks +pip install -e ".[notebooks]" + +# For development +pip install -e ".[train,dev]" +``` + +## Getting Started + +⚠️ Before using SAM 3, please request access to the checkpoints on the SAM 3 +Hugging Face [repo](https://huggingface.co/facebook/sam3). Once accepted, you +need to be authenticated to download the checkpoints. You can do this by running +the following [steps](https://huggingface.co/docs/huggingface_hub/en/quick-start#authentication) +(e.g. `hf auth login` after generating an access token.) 
+ +### Basic Usage + +```python +import torch +#################################### For Image #################################### +from PIL import Image +from sam3.model_builder import build_sam3_image_model +from sam3.model.sam3_image_processor import Sam3Processor +# Load the model +model = build_sam3_image_model() +processor = Sam3Processor(model) +# Load an image +image = Image.open("") +inference_state = processor.set_image(image) +# Prompt the model with text +output = processor.set_text_prompt(state=inference_state, prompt="") + +# Get the masks, bounding boxes, and scores +masks, boxes, scores = output["masks"], output["boxes"], output["scores"] + +#################################### For Video #################################### + +from sam3.model_builder import build_sam3_video_predictor + +video_predictor = build_sam3_video_predictor() +video_path = "" # a JPEG folder or an MP4 video file +# Start a session +response = video_predictor.handle_request( + request=dict( + type="start_session", + resource_path=video_path, + ) +) +response = video_predictor.handle_request( + request=dict( + type="add_prompt", + session_id=response["session_id"], + frame_index=0, # Arbitrary frame index + text="", + ) +) +output = response["outputs"] +``` + +## Examples + +The `examples` directory contains notebooks demonstrating how to use SAM3 with +various types of prompts: + +- [`sam3_image_predictor_example.ipynb`](examples/sam3_image_predictor_example.ipynb) + : Demonstrates how to prompt SAM 3 with text and visual box prompts on images. +- [`sam3_video_predictor_example.ipynb`](examples/sam3_video_predictor_example.ipynb) + : Demonstrates how to prompt SAM 3 with text prompts on videos, and doing + further interactive refinements with points. +- [`sam3_image_batched_inference.ipynb`](examples/sam3_image_batched_inference.ipynb) + : Demonstrates how to run batched inference with SAM 3 on images. 
+- [`sam3_agent.ipynb`](examples/sam3_agent.ipynb): Demonstrates the use of SAM
+  3 Agent to segment complex text prompts on images.
+- [`saco_gold_silver_vis_example.ipynb`](examples/saco_gold_silver_vis_example.ipynb)
+  : Shows a few examples from SA-Co image evaluation set.
+- [`saco_veval_vis_example.ipynb`](examples/saco_veval_vis_example.ipynb) :
+  Shows a few examples from SA-Co video evaluation set.
+
+There are additional notebooks in the examples directory that demonstrate how to
+use SAM 3 for interactive instance segmentation in images and videos (SAM 1/2
+tasks), or as a tool for an MLLM, and how to run evaluations on the SA-Co
+dataset.
+
+To run the Jupyter notebook examples:
+
+```bash
+# Make sure you have the notebooks dependencies installed
+pip install -e ".[notebooks]"
+
+# Start Jupyter notebook
+jupyter notebook examples/sam3_image_predictor_example.ipynb
+```
+
+## Model
+
+SAM 3 consists of a detector and a tracker that share a vision encoder. It has 848M parameters. The
+detector is a DETR-based model conditioned on text, geometry, and image
+exemplars. The tracker inherits the SAM 2 transformer encoder-decoder
+architecture, supporting video segmentation and interactive refinement.
+
+## Image Results
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ModelInstance SegmentationBox Detection
LVISSA-Co/GoldLVISCOCOSA-Co/Gold
cgF1APcgF1cgF1APAPAPo +cgF1
Human--72.8----74.0
OWLv2*29.343.424.630.245.546.123.924.5
DINO-X-38.521.3-52.456.0-22.5
Gemini 2.513.4-13.016.1---14.4
SAM 337.248.554.140.653.656.455.755.7
+ +

* Partially trained on LVIS, APo refers to COCO-O accuracy

+ +
+ +## Video Results + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
ModelSA-V testYT-Temporal-1B testSmartGlasses testLVVIS testBURST test
cgF1pHOTAcgF1pHOTAcgF1pHOTAmAPHOTA
Human53.170.571.278.458.572.3--
SAM 330.358.050.869.936.463.636.344.5
+
+ +## SA-Co Dataset + +We release 2 image benchmarks, [SA-Co/Gold](scripts/eval/gold/README.md) and +[SA-Co/Silver](scripts/eval/silver/README.md), and a video benchmark +[SA-Co/VEval](scripts/eval/veval/README.md). The datasets contain images (or videos) with annotated noun phrases. Each image/video and noun phrase pair is annotated with instance masks and unique IDs of each object matching the phrase. Phrases that have no matching objects (negative prompts) have no masks, shown in red font in the figure. See the linked READMEs for more details on how to download and run evaluations on the datasets. + +* HuggingFace host: [SA-Co/Gold](https://huggingface.co/datasets/facebook/SACo-Gold), [SA-Co/Silver](https://huggingface.co/datasets/facebook/SACo-Silver) and [SA-Co/VEval](https://huggingface.co/datasets/facebook/SACo-VEval) +* Roboflow host: [SA-Co/Gold](https://universe.roboflow.com/sa-co-gold), [SA-Co/Silver](https://universe.roboflow.com/sa-co-silver) and [SA-Co/VEval](https://universe.roboflow.com/sa-co-veval) + +![SA-Co dataset](assets/sa_co_dataset.jpg?raw=true) + +## Development + +To set up the development environment: + +```bash +pip install -e ".[dev,train]" +``` + +To format the code: + +```bash +ufmt format . +``` + +## Contributing + +See [contributing](CONTRIBUTING.md) and the +[code of conduct](CODE_OF_CONDUCT.md). + +## License + +This project is licensed under the SAM License - see the [LICENSE](LICENSE) file +for details. 
+
+## Acknowledgements
+
+We would like to thank the following people for their contributions to the SAM 3 project: Alex He, Alexander Kirillov,
+Alyssa Newcomb, Ana Paula Kirschner Mofarrej, Andrea Madotto, Andrew Westbury, Ashley Gabriel, Azita Shokpour,
+Ben Samples, Bernie Huang, Carleigh Wood, Ching-Feng Yeh, Christian Puhrsch, Claudette Ward, Daniel Bolya,
+Daniel Li, Facundo Figueroa, Fazila Vhora, George Orlin, Hanzi Mao, Helen Klein, Hu Xu, Ida Cheng, Jake Kinney,
+Jiale Zhi, Jo Sampaio, Joel Schlosser, Justin Johnson, Kai Brown, Karen Bergan, Karla Martucci, Kenny Lehmann,
+Maddie Mintz, Mallika Malhotra, Matt Ward, Michelle Chan, Michelle Restrepo, Miranda Hartley, Muhammad Maaz,
+Nisha Deo, Peter Park, Phillip Thomas, Raghu Nayani, Rene Martinez Doehner, Robbie Adkins, Ross Girshick, Sasha
+Mitts, Shashank Jain, Spencer Whitehead, Ty Toledano, Valentin Gabeur, Vincent Cho, Vivian Lee, William Ngan,
+Xuehai He, Yael Yungster, Ziqi Pang, Ziyi Dou, Zoe Quake.
+
+## Citing SAM 3
+
+If you use SAM 3 or the SA-Co dataset in your research, please use the following BibTeX entry.
+ +```bibtex +@misc{carion2025sam3segmentconcepts, + title={SAM 3: Segment Anything with Concepts}, + author={Nicolas Carion and Laura Gustafson and Yuan-Ting Hu and Shoubhik Debnath and Ronghang Hu and Didac Suris and Chaitanya Ryali and Kalyan Vasudev Alwala and Haitham Khedr and Andrew Huang and Jie Lei and Tengyu Ma and Baishan Guo and Arpit Kalla and Markus Marks and Joseph Greer and Meng Wang and Peize Sun and Roman Rädle and Triantafyllos Afouras and Effrosyni Mavroudi and Katherine Xu and Tsung-Han Wu and Yu Zhou and Liliane Momeni and Rishi Hazra and Shuangrui Ding and Sagar Vaze and Francois Porcher and Feng Li and Siyuan Li and Aishwarya Kamath and Ho Kei Cheng and Piotr Dollár and Nikhila Ravi and Kate Saenko and Pengchuan Zhang and Christoph Feichtenhofer}, + year={2025}, + eprint={2511.16719}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, + url={https://arxiv.org/abs/2511.16719}, +} +``` diff --git a/source_code/sam3/sam3.egg-info/dependency_links.txt b/source_code/sam3/sam3.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/source_code/sam3/sam3.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/source_code/sam3/sam3/agent/agent_core.py b/source_code/sam3/sam3/agent/agent_core.py new file mode 100644 index 0000000000000000000000000000000000000000..f0016c7ca668b651b09a609d518432ff6307867a --- /dev/null +++ b/source_code/sam3/sam3/agent/agent_core.py @@ -0,0 +1,563 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved + +import copy +import json +import os + +import cv2 +from PIL import Image + +from .client_llm import send_generate_request +from .client_sam3 import call_sam_service +from .viz import visualize + + +def save_debug_messages(messages_list, debug, debug_folder_path, debug_jsonl_path): + """Save messages to debug jsonl file if debug is enabled""" + if debug and debug_jsonl_path: + # Ensure the debug directory exists before writing + os.makedirs(debug_folder_path, exist_ok=True) + with open(debug_jsonl_path, "w") as f: + for msg in messages_list: + f.write(json.dumps(msg, indent=4) + "\n") + + +def cleanup_debug_files(debug, debug_folder_path, debug_jsonl_path): + """Clean up debug files when function successfully returns""" + if debug and debug_folder_path: + try: + if os.path.exists(debug_jsonl_path): + os.remove(debug_jsonl_path) + if os.path.exists(debug_folder_path): + os.rmdir(debug_folder_path) + except Exception as e: + print(f"Warning: Could not clean up debug files: {e}") + + +def count_images(messages): + """Count the total number of images present in the messages history.""" + total = 0 + for message in messages: + # Check if message has content (should be a list) + if "content" in message and isinstance(message["content"], list): + # Iterate through each content item + for content_item in message["content"]: + # Check if content item is a dict with type "image" + if ( + isinstance(content_item, dict) + and content_item.get("type") == "image" + ): + total += 1 + return total + + +def _prune_messages_for_next_round( + messages_list, + used_text_prompts, + latest_sam3_text_prompt, + img_path, + initial_text_prompt, +): + """Return a new messages list that contains only: + 1) messages[:2] (with optional warning text added to the second message's content) + 2) the latest assistant message (and everything after it) that contains a segment_phrase tool call + """ + # There should not be more than 10 messages in the conversation history + 
assert len(messages_list) < 10 + + # Part 1: always keep the first two message JSONs + part1 = copy.deepcopy(messages_list[:2]) + + # Part 2: search backwards for the latest assistant message containing a segment_phrase tool call + part2_start_idx = None + for idx in range(len(messages_list) - 1, 1, -1): + msg = messages_list[idx] + # We only consider assistant messages with a "content" list + if msg.get("role") != "assistant" or "content" not in msg: + continue + # Look for any content element that is a text containing the segment_phrase tool call + for content in msg["content"]: + if ( + isinstance(content, dict) + and content.get("type") == "text" + and "" in content.get("text", "") + and "segment_phrase" in content.get("text", "") + ): + part2_start_idx = idx + break + if part2_start_idx is not None: + break + + part2 = messages_list[part2_start_idx:] if part2_start_idx is not None else [] + + # Part 3: decide whether to add warning text to the second message in part1 + previously_used = ( + [p for p in used_text_prompts if p != latest_sam3_text_prompt] + if latest_sam3_text_prompt + else list(used_text_prompts) + ) + if part2 and len(previously_used) > 0: + warning_text = f'Note that we have previously called the segment_phrase tool with each "text_prompt" in this list: {list(previously_used)}, but none of the generated results were satisfactory. So make sure that you do not use any of these phrases as the "text_prompt" to call the segment_phrase tool again.' + # Replace the second message entirely to keep exactly 2 content items + part1[1] = { + "role": "user", + "content": [ + {"type": "image", "image": img_path}, + { + "type": "text", + "text": f"The above image is the raw input image. The initial user input query is: '{initial_text_prompt}'." 
+ + " " + + warning_text, + }, + ], + } + assert len(part1[1]["content"]) == 2 + + # Build the new messages list: part1 (with optional warning), then part2 + new_messages = list(part1) + new_messages.extend(part2) + return new_messages + + +def agent_inference( + img_path: str, + initial_text_prompt: str, + debug: bool = False, + send_generate_request=send_generate_request, + call_sam_service=call_sam_service, + max_generations: int = 100, + output_dir="../../sam3_agent_out", +): + """ + Given a text prompt and an image, this tool will perform all aspects of agentic problem solving, + while saving sam3 and MLLM outputs to their respective directories. + + Args: + img_path: Path to the input image + initial_text_prompt: Initial text prompt from the user + debug: Whether to enable debug mode + max_generations: Maximum number of send_generate_request calls allowed (default: 100) + """ + # setup dir + sam_output_dir = os.path.join(output_dir, "sam_out") + error_save_dir = os.path.join(output_dir, "none_out") + debug_save_dir = os.path.join(output_dir, "agent_debug_out") + os.makedirs(sam_output_dir, exist_ok=True) + os.makedirs(error_save_dir, exist_ok=True) + os.makedirs(debug_save_dir, exist_ok=True) + current_dir = os.path.dirname(os.path.abspath(__file__)) + MLLM_SYSTEM_PROMPT_PATH = os.path.join( + current_dir, "system_prompts/system_prompt.txt" + ) + ITERATIVE_CHECKING_SYSTEM_PROMPT_PATH = os.path.join( + current_dir, "system_prompts/system_prompt_iterative_checking.txt" + ) + # init variables + PATH_TO_LATEST_OUTPUT_JSON = "" + LATEST_SAM3_TEXT_PROMPT = "" + USED_TEXT_PROMPTS = ( + set() + ) # Track all previously used text prompts for segment_phrase + generation_count = 0 # Counter for number of send_generate_request calls + + # debug setup + debug_folder_path = None + debug_jsonl_path = None + if debug: + debug_folder_path = os.path.join( + debug_save_dir, f"{img_path.rsplit('/', 1)[-1].rsplit('.', 1)[0]}" + ) + debug_jsonl_path = 
os.path.join(debug_folder_path, "debug_history.json") + os.makedirs(debug_folder_path, exist_ok=True) + + # The helper functions are now defined outside the agent_inference function + with open(MLLM_SYSTEM_PROMPT_PATH, "r") as f: + system_prompt = f.read().strip() + with open(ITERATIVE_CHECKING_SYSTEM_PROMPT_PATH, "r") as f: + iterative_checking_system_prompt = f.read().strip() + + # Construct the initial message list + messages = [ + {"role": "system", "content": system_prompt}, + { + "role": "user", + "content": [ + {"type": "image", "image": img_path}, + { + "type": "text", + "text": f"The above image is the raw input image. The initial user input query is: '{initial_text_prompt}'.", + }, + ], + }, + ] + print(f"> Text prompt: {initial_text_prompt}") + print(f"> Image path: {img_path}") + + print("\n\n") + print("-" * 30 + f" Round {str(generation_count + 1)}" + "-" * 30) + print("\n\n") + generated_text = send_generate_request(messages) + print(f"\n>>> MLLM Response [start]\n{generated_text}\n<<< MLLM Response [end]\n") + while generated_text is not None: + save_debug_messages(messages, debug, debug_folder_path, debug_jsonl_path) + assert ( + "" in generated_text, + f"Generated text does not contain tag: {generated_text}", + ) + generated_text = generated_text.split("", 1)[0] + "" + tool_call_json_str = ( + generated_text.split("")[-1] + .split("")[0] + .strip() + .replace(r"}}}", r"}}") # remove extra } if any + ) + try: + tool_call = json.loads(tool_call_json_str) + except json.JSONDecodeError: + raise ValueError(f"Invalid JSON in tool call: {tool_call_json_str}") + + if PATH_TO_LATEST_OUTPUT_JSON == "": + # The first tool call must be segment_phrase or report_no_mask + assert ( + tool_call["name"] == "segment_phrase" + or tool_call["name"] == "report_no_mask" + ) + + if tool_call["name"] == "segment_phrase": + print("🔍 Calling segment_phrase tool...") + assert list(tool_call["parameters"].keys()) == ["text_prompt"] + + # Check if this text_prompt has been 
used before + current_text_prompt = tool_call["parameters"]["text_prompt"] + if current_text_prompt in USED_TEXT_PROMPTS: + print( + f"❌ Text prompt '{current_text_prompt}' has been used before. Requesting a different prompt." + ) + duplicate_prompt_message = f"You have previously used '{current_text_prompt}' as your text_prompt to call the segment_phrase tool. You may not use it again. Please call the segment_phrase tool again with a different, perhaps more general, or more creative simple noun phrase prompt, while adhering to all the rules stated in the system prompt. You must also never use any of the following text_prompt(s): {str(list(USED_TEXT_PROMPTS))}." + messages.append( + { + "role": "assistant", + "content": [{"type": "text", "text": generated_text}], + } + ) + messages.append( + { + "role": "user", + "content": [{"type": "text", "text": duplicate_prompt_message}], + } + ) + else: + # Add the text_prompt to the set of used prompts + USED_TEXT_PROMPTS.add(current_text_prompt) + LATEST_SAM3_TEXT_PROMPT = current_text_prompt + PATH_TO_LATEST_OUTPUT_JSON = call_sam_service( + image_path=img_path, + text_prompt=current_text_prompt, + output_folder_path=sam_output_dir, + ) + sam3_outputs = json.load(open(PATH_TO_LATEST_OUTPUT_JSON, "r")) + sam3_output_image_path = sam3_outputs["output_image_path"] + num_masks = len(sam3_outputs["pred_boxes"]) + + messages.append( + { + "role": "assistant", + "content": [{"type": "text", "text": generated_text}], + } + ) + if num_masks == 0: + print("❌ No masks generated by SAM3, reporting no mask to Qwen.") + sam3_output_text_message = f"The segment_phrase tool did not generate any masks for the text_prompt '{current_text_prompt}'. Now, please call the segment_phrase tool again with a different, perhaps more general, or more creative simple noun phrase text_prompt, while adhering to all the rules stated in the system prompt. Please be reminded that the original user query was '{initial_text_prompt}'." 
+ messages.append( + { + "role": "user", + "content": [ + {"type": "text", "text": sam3_output_text_message} + ], + } + ) + else: + sam3_output_text_message = rf"The segment_phrase tool generated {num_masks} available masks. All {num_masks} available masks are rendered in this image below, now you must analyze the {num_masks} available mask(s) carefully, compare them against the raw input image and the original user query, and determine your next action. Please be reminded that the original user query was '{initial_text_prompt}'." + messages.append( + { + "role": "user", + "content": [ + {"type": "text", "text": sam3_output_text_message}, + {"type": "image", "image": sam3_output_image_path}, + ], + } + ) + print("\n\n>>> sam3_output_text_message:\n", sam3_output_text_message) + + elif tool_call["name"] == "examine_each_mask": + print("🔍 Calling examine_each_mask tool...") + assert LATEST_SAM3_TEXT_PROMPT != "" + + # Make sure that the last message is a image + assert ( + messages[-1]["content"][1]["type"] == "image" + ), "Second content element should be an image" + messages.pop() # Remove the last user message + # Add simplified replacement message + simplified_message = { + "role": "user", + "content": [ + { + "type": "text", + "text": "The segment_phrase tool generated several masks. 
Now you must analyze the mask(s) carefully, compare them against the raw input image and the original user query, and determine your next action.", + } + ], + } + messages.append(simplified_message) + + current_outputs = json.load(open(PATH_TO_LATEST_OUTPUT_JSON, "r")) + num_masks = len(current_outputs["pred_masks"]) + masks_to_keep = [] + + # MLLM check the mask one by one + for i in range(num_masks): + print(f"🔍 Checking mask {i+1}/{num_masks}...") + image_w_mask_i, image_w_zoomed_in_mask_i = visualize(current_outputs, i) + + image_w_zoomed_in_mask_i_path = os.path.join( + sam_output_dir, rf"{LATEST_SAM3_TEXT_PROMPT}.png".replace("/", "_") + ).replace(".png", f"_zoom_in_mask_{i + 1}.png") + image_w_mask_i_path = os.path.join( + sam_output_dir, rf"{LATEST_SAM3_TEXT_PROMPT}.png".replace("/", "_") + ).replace(".png", f"_selected_mask_{i + 1}.png") + image_w_zoomed_in_mask_i.save(image_w_zoomed_in_mask_i_path) + image_w_mask_i.save(image_w_mask_i_path) + + iterative_checking_messages = [ + {"role": "system", "content": iterative_checking_system_prompt}, + { + "role": "user", + "content": [ + {"type": "text", "text": f"The raw input image: "}, + {"type": "image", "image": img_path}, + { + "type": "text", + "text": f"The initial user input query is: '{initial_text_prompt}'", + }, + { + "type": "text", + "text": f"Image with the predicted segmentation mask rendered on it: ", + }, + {"type": "image", "image": image_w_mask_i_path}, + { + "type": "text", + "text": f"Image with the zoomed-in mask: ", + }, + {"type": "image", "image": image_w_zoomed_in_mask_i_path}, + ], + }, + ] + checking_generated_text = send_generate_request( + iterative_checking_messages + ) + + # Process the generated text to determine if the mask should be kept or rejected + if checking_generated_text is None: + raise ValueError( + "Generated text is None, which is unexpected. Please check the Qwen server and the input parameters." 
+ ) + print(f"Generated text for mask {i+1}: {checking_generated_text}") + verdict = ( + checking_generated_text.split("")[-1] + .split("")[0] + .strip() + ) + if "Accept" in verdict: + assert not "Reject" in verdict + print(f"Mask {i+1} accepted, keeping it in the outputs.") + masks_to_keep.append(i) + elif "Reject" in verdict: + assert not "Accept" in verdict + print(f"Mask {i+1} rejected, removing it from the outputs.") + else: + raise ValueError( + f"Unexpected verdict in generated text: {checking_generated_text}. Expected 'Accept' or 'Reject'." + ) + + updated_outputs = { + "original_image_path": current_outputs["original_image_path"], + "orig_img_h": current_outputs["orig_img_h"], + "orig_img_w": current_outputs["orig_img_w"], + "pred_boxes": [current_outputs["pred_boxes"][i] for i in masks_to_keep], + "pred_scores": [ + current_outputs["pred_scores"][i] for i in masks_to_keep + ], + "pred_masks": [current_outputs["pred_masks"][i] for i in masks_to_keep], + } + + image_w_check_masks = visualize(updated_outputs) + image_w_check_masks_path = os.path.join( + sam_output_dir, rf"{LATEST_SAM3_TEXT_PROMPT}.png" + ).replace( + ".png", + f"_selected_masks_{'-'.join(map(str, [i+1 for i in masks_to_keep]))}.png".replace( + "/", "_" + ), + ) + image_w_check_masks.save(image_w_check_masks_path) + # save the updated json outputs and append to message history + messages.append( + { + "role": "assistant", + "content": [{"type": "text", "text": generated_text}], + } + ) + if len(masks_to_keep) == 0: + messages.append( + { + "role": "user", + "content": [ + { + "type": "text", + "text": f"The original user query was: '{initial_text_prompt}'. The examine_each_mask tool examined and rejected all of the masks generated by the segment_phrase tool. 
Now, please call the segment_phrase tool again with a different, perhaps more general, or more creative simple noun phrase text_prompt, while adhering to all the rules stated in the system prompt.", + } + ], + } + ) + else: + messages.append( + { + "role": "user", + "content": [ + { + "type": "text", + "text": f"The original user query was: '{initial_text_prompt}'. After calling the examine_each_mask tool on the available masks, the number of available masks is now {len(masks_to_keep)}. All {len(masks_to_keep)} available masks are rendered in this image below, now you must analyze the {len(masks_to_keep)} available mask(s) carefully, compare them against the raw input image and the original user query, and determine your next action.", + }, + {"type": "image", "image": image_w_check_masks_path}, + ], + } + ) + + # Create a new filename based on the original path to avoid filename length issues + base_path = PATH_TO_LATEST_OUTPUT_JSON + # Remove any existing "masks_" suffix to avoid duplication + if "masks_" in base_path: + base_path = base_path.split("masks_")[0] + ".json" + # Create new filename with current masks; use a clearer suffix when empty + if len(masks_to_keep) == 0: + PATH_TO_LATEST_OUTPUT_JSON = base_path.replace( + ".json", "masks_none.json" + ) + else: + PATH_TO_LATEST_OUTPUT_JSON = base_path.replace( + ".json", f"masks_{'_'.join(map(str, masks_to_keep))}.json" + ) + json.dump(updated_outputs, open(PATH_TO_LATEST_OUTPUT_JSON, "w"), indent=4) + + elif tool_call["name"] == "select_masks_and_return": + print("🔍 Calling select_masks_and_return tool...") + current_outputs = json.load(open(PATH_TO_LATEST_OUTPUT_JSON, "r")) + + assert list(tool_call["parameters"].keys()) == ["final_answer_masks"] + masks_to_keep = tool_call["parameters"]["final_answer_masks"] + + # Keep only valid mask indices, remove duplicates, and preserve deterministic ascending order + available_masks = set(range(1, len(current_outputs["pred_masks"]) + 1)) + masks_to_keep = sorted({i 
for i in masks_to_keep if i in available_masks}) + # Change this to a update message telling the model to try again along with information about errors made. + + final_outputs = { + "original_image_path": current_outputs["original_image_path"], + "orig_img_h": current_outputs["orig_img_h"], + "orig_img_w": current_outputs["orig_img_w"], + "pred_boxes": [ + current_outputs["pred_boxes"][i - 1] for i in masks_to_keep + ], + "pred_scores": [ + current_outputs["pred_scores"][i - 1] for i in masks_to_keep + ], + "pred_masks": [ + current_outputs["pred_masks"][i - 1] for i in masks_to_keep + ], + } + + rendered_final_output = visualize(final_outputs) + messages.append( + { + "role": "assistant", + "content": [{"type": "text", "text": generated_text}], + } + ) + + # Clean up debug files before successful return + cleanup_debug_files(debug, debug_folder_path, debug_jsonl_path) + return messages, final_outputs, rendered_final_output + + elif tool_call["name"] == "report_no_mask": + print("🔍 Calling report_no_mask tool...") + height, width = cv2.imread(img_path).shape[:2] + final_outputs = { + "original_image_path": img_path, + "orig_img_h": height, + "orig_img_w": width, + "pred_boxes": [], + "pred_scores": [], + "pred_masks": [], + } + rendered_final_output = Image.open(img_path) + messages.append( + { + "role": "assistant", + "content": [{"type": "text", "text": generated_text}], + } + ) + return messages, final_outputs, rendered_final_output + + else: + raise ValueError(f"Unknown tool call: {tool_call['name']}") + + # sometimes the MLLM don't know when to stop, and generates multiple tool calls in one round, so we need to split the generated text by and only keep the first one + + for message in messages: + if message["role"] == "assistant" and "content" in message: + for content in message["content"]: + if ( + isinstance(content, dict) + and content.get("type") == "text" + and "text" in content + ): + content["text"] = ( + content["text"].split("", 1)[0] + "\n\n" + ) + 
# Prune the messages history before the next MLLM generation round according to the 3-part rules. + # This keeps history compact and ensures the model sees only the allowed parts. + messages = _prune_messages_for_next_round( + messages, + USED_TEXT_PROMPTS, + LATEST_SAM3_TEXT_PROMPT, + img_path, + initial_text_prompt, + ) + # make sure there can never be more than 2 images in the context + assert count_images(messages) <= 2 + generation_count += 1 + if generation_count > max_generations: + raise ValueError( + f"Exceeded maximum number of allowed generation requests ({max_generations})" + ) + + print("\n\n") + print("-" * 30 + f" Round {str(generation_count + 1)}" + "-" * 30) + print("\n\n") + generated_text = send_generate_request(messages) + print( + f"\n>>> MLLM Response [start]\n{generated_text}\n<<< MLLM Response [end]\n" + ) + + print("\n\n>>> SAM 3 Agent execution ended.\n\n") + + error_save_path = os.path.join( + error_save_dir, + f"{img_path.rsplit('/', 1)[-1].rsplit('.', 1)[0]}_error_history.json", + ) + with open(error_save_path, "w") as f: + json.dump(messages, f, indent=4) + print("Saved messages history that caused error to:", error_save_path) + raise ValueError( + rf"Generated text is None, which is unexpected. Please check the Qwen server and the input parameters for image path: {img_path} and initial text prompt: {initial_text_prompt}." + ) diff --git a/source_code/sam3/sam3/agent/inference.py b/source_code/sam3/sam3/agent/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..0aac1165121ce3abac5d6e49225e3fdbddfa08b1 --- /dev/null +++ b/source_code/sam3/sam3/agent/inference.py @@ -0,0 +1,65 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved

import json
import os


def run_single_image_inference(
    image_path,
    text_prompt,
    llm_config,
    send_generate_request,
    call_sam_service,
    output_dir="agent_output",
    debug=False,
):
    """Run the SAM 3 agent on a single image with the given text prompt.

    Args:
        image_path: Path to the input image file.
        text_prompt: Natural-language prompt describing what to segment.
        llm_config: Dict with at least a "name" key identifying the LLM
            (used only to build output file names).
        send_generate_request: Callable sending a chat history to the MLLM
            and returning its generated text.
        call_sam_service: Callable invoking the SAM segmentation service.
        output_dir: Directory where prediction JSON/PNG and agent history
            are written.
        debug: Forwarded to the agent; enables debug artifacts.

    Returns:
        Path to the rendered prediction image, or None when the output JSON
        already exists and the run is skipped.

    Raises:
        FileNotFoundError: If image_path does not exist.
    """
    llm_name = llm_config["name"]

    if not os.path.exists(image_path):
        raise FileNotFoundError(f"Image file not found: {image_path}")

    os.makedirs(output_dir, exist_ok=True)

    # Deterministic output names derived from image, prompt and LLM name
    # ("/" and " " are unsafe in file names).
    image_basename = os.path.splitext(os.path.basename(image_path))[0]
    prompt_for_filename = text_prompt.replace("/", "_").replace(" ", "_")

    base_filename = f"{image_basename}_{prompt_for_filename}_agent_{llm_name}"
    output_json_path = os.path.join(output_dir, f"{base_filename}_pred.json")
    output_image_path = os.path.join(output_dir, f"{base_filename}_pred.png")
    agent_history_path = os.path.join(output_dir, f"{base_filename}_history.json")

    # Idempotency: skip work that has already been done.
    if os.path.exists(output_json_path):
        print(f"Output JSON {output_json_path} already exists. Skipping.")
        return

    # Imported lazily so that merely probing for existing outputs (the skip
    # path above) does not require the full agent stack.
    from sam3.agent.agent_core import agent_inference

    print(f"{'-'*30} Starting SAM 3 Agent Session... {'-'*30} ")
    agent_history, final_output_dict, rendered_final_output = agent_inference(
        image_path,
        text_prompt,
        send_generate_request=send_generate_request,
        call_sam_service=call_sam_service,
        output_dir=output_dir,
        debug=debug,
    )
    print(f"{'-'*30} End of SAM 3 Agent Session... {'-'*30} ")

    final_output_dict["text_prompt"] = text_prompt
    final_output_dict["image_path"] = image_path

    # Context managers close handles deterministically; the original
    # `json.dump(..., open(...))` pattern leaked the file objects.
    with open(output_json_path, "w") as f:
        json.dump(final_output_dict, f, indent=4)
    with open(agent_history_path, "w") as f:
        json.dump(agent_history, f, indent=4)
    rendered_final_output.save(output_image_path)

    print("\n✅ Successfully processed single image!")
    print(f"Output JSON: {output_json_path}")
    print(f"Output Image: {output_image_path}")
    print(f"Agent History: {agent_history_path}")
    return output_image_path
+ """ + # Common fields + orig_h = int(input_json["orig_img_h"]) + orig_w = int(input_json["orig_img_w"]) + img_path = input_json["original_image_path"] + + # ---------- Mode A: Full-scene render ---------- + if zoom_in_index is None: + boxes = np.array(input_json["pred_boxes"]) + rle_masks = [ + {"size": (orig_h, orig_w), "counts": rle} + for rle in input_json["pred_masks"] + ] + binary_masks = [mask_utils.decode(rle) for rle in rle_masks] + + img_bgr = cv2.imread(img_path) + if img_bgr is None: + raise FileNotFoundError(f"Could not read image: {img_path}") + img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB) + + viz = Visualizer( + img_rgb, + font_size_multiplier=font_size_multiplier, + boarder_width_multiplier=boarder_width_multiplier, + ) + viz.overlay_instances( + boxes=boxes, + masks=rle_masks, + binary_masks=binary_masks, + assigned_colors=None, + alpha=mask_alpha, + label_mode=label_mode, + ) + pil_all_masks = Image.fromarray(viz.output.get_image()) + return pil_all_masks + + # ---------- Mode B: Zoom-in pair ---------- + else: + idx = int(zoom_in_index) + num_masks = len(input_json.get("pred_masks", [])) + if idx < 0 or idx >= num_masks: + raise ValueError(f"zoom_in_index {idx} is out of range (0..{num_masks-1}).") + + # (1) Replicate zoom_in_and_visualize + object_data = { + "labels": [{"noun_phrase": f"mask_{idx}"}], + "segmentation": { + "counts": input_json["pred_masks"][idx], + "size": [orig_h, orig_w], + }, + } + pil_img = Image.open(img_path) + pil_mask_i_zoomed, color_hex = render_zoom_in( + object_data, pil_img, mask_alpha=mask_alpha + ) + + # (2) Single-instance render with the same color + boxes_i = np.array([input_json["pred_boxes"][idx]]) + rle_i = {"size": (orig_h, orig_w), "counts": input_json["pred_masks"][idx]} + bin_i = mask_utils.decode(rle_i) + + img_bgr_i = cv2.imread(img_path) + if img_bgr_i is None: + raise FileNotFoundError(f"Could not read image: {img_path}") + img_rgb_i = cv2.cvtColor(img_bgr_i, cv2.COLOR_BGR2RGB) + + viz_i = 
Visualizer( + img_rgb_i, + font_size_multiplier=font_size_multiplier, + boarder_width_multiplier=boarder_width_multiplier, + ) + viz_i.overlay_instances( + boxes=boxes_i, + masks=[rle_i], + binary_masks=[bin_i], + assigned_colors=[color_hex], + alpha=mask_alpha, + label_mode=label_mode, + ) + pil_mask_i = Image.fromarray(viz_i.output.get_image()) + + return pil_mask_i, pil_mask_i_zoomed diff --git a/source_code/sam3/sam3/eval/coco_reindex.py b/source_code/sam3/sam3/eval/coco_reindex.py new file mode 100644 index 0000000000000000000000000000000000000000..49cd94429d0bd6d6b23e6a732028922ae5b17c38 --- /dev/null +++ b/source_code/sam3/sam3/eval/coco_reindex.py @@ -0,0 +1,230 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +""" +Self-contained COCO JSON re-indexing function that creates temporary files. +""" + +import json +import os +import tempfile +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple + + +def reindex_coco_to_temp(input_json_path: str) -> Optional[str]: + """ + Convert 0-indexed COCO JSON file to 1-indexed and save to temporary location. + + Args: + input_json_path: Path to the input COCO JSON file + + Returns: + Path to the new 1-indexed JSON file in temporary directory, or None if no conversion needed + + Raises: + FileNotFoundError: If input file doesn't exist + json.JSONDecodeError: If input file is not valid JSON + ValueError: If input file is not a valid COCO format + """ + + def is_coco_json(data: Dict[str, Any]) -> bool: + """Check if data appears to be a COCO format file.""" + if not isinstance(data, dict): + return False + # A COCO file should have at least one of these keys + coco_keys = {"images", "annotations", "categories"} + return any(key in data for key in coco_keys) + + def check_zero_indexed(data: Dict[str, Any]) -> Tuple[bool, bool, bool]: + """ + Check if annotations, images, or categories start from index 0. 
import json
import os
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple


def reindex_coco_to_temp(input_json_path: str) -> Optional[str]:
    """
    Convert a 0-indexed COCO JSON file to 1-indexed and save it to a temporary
    location.

    Args:
        input_json_path: Path to the input COCO JSON file.

    Returns:
        Path to a 1-indexed copy of the JSON file in a fresh temporary
        directory. A copy is written even when no re-indexing was needed, so
        the caller always owns the returned file.

    Raises:
        FileNotFoundError: If the input file doesn't exist.
        json.JSONDecodeError: If the input file is not valid JSON.
        ValueError: If the input file is not in COCO format.
    """

    def is_coco_json(data: Dict[str, Any]) -> bool:
        """Heuristically check whether `data` looks like a COCO dict."""
        if not isinstance(data, dict):
            return False
        # A COCO file should have at least one of these keys.
        coco_keys = {"images", "annotations", "categories"}
        return any(key in data for key in coco_keys)

    def check_zero_indexed(data: Dict[str, Any]) -> Tuple[bool, bool, bool]:
        """Return (annotations, images, categories) flags: True iff any id == 0."""
        annotations_zero = any(
            ann.get("id", -1) == 0 for ann in data.get("annotations", [])
        )
        images_zero = any(img.get("id", -1) == 0 for img in data.get("images", []))
        categories_zero = any(
            cat.get("id", -1) == 0 for cat in data.get("categories", [])
        )
        return annotations_zero, images_zero, categories_zero

    def reindex_coco_data(data: Dict[str, Any]) -> Dict[str, Any]:
        """Shift 0-based ids to 1-based, keeping cross-references consistent."""
        # NOTE: .copy() is shallow, so nested lists/dicts of the input are
        # mutated in place. That is acceptable here because `data` is loaded
        # locally by the enclosing function and never reused afterwards.
        modified_data = data.copy()

        annotations_zero, images_zero, categories_zero = check_zero_indexed(data)

        # old-id -> new-id maps so annotation references stay consistent.
        image_id_mapping = {}
        category_id_mapping = {}

        # Images first, since annotations reference image ids.
        if images_zero and "images" in modified_data:
            for img in modified_data["images"]:
                new_id = img["id"] + 1
                image_id_mapping[img["id"]] = new_id
                img["id"] = new_id

        # Categories next, since annotations reference category ids.
        if categories_zero and "categories" in modified_data:
            for cat in modified_data["categories"]:
                new_id = cat["id"] + 1
                category_id_mapping[cat["id"]] = new_id
                cat["id"] = new_id

        if "annotations" in modified_data:
            for ann in modified_data["annotations"]:
                if annotations_zero:
                    ann["id"] = ann["id"] + 1
                if images_zero and ann.get("image_id") is not None:
                    if ann["image_id"] in image_id_mapping:
                        ann["image_id"] = image_id_mapping[ann["image_id"]]
                if categories_zero and ann.get("category_id") is not None:
                    if ann["category_id"] in category_id_mapping:
                        ann["category_id"] = category_id_mapping[ann["category_id"]]

        return modified_data

    def write_temp_copy(payload: Dict[str, Any]) -> str:
        """Write `payload` to <stem>_1_indexed<suffix> in a fresh temp dir."""
        input_path = Path(input_json_path)
        temp_dir = tempfile.mkdtemp()
        temp_path = os.path.join(
            temp_dir, f"{input_path.stem}_1_indexed{input_path.suffix}"
        )
        with open(temp_path, "w", encoding="utf-8") as f:
            json.dump(payload, f, indent=2, ensure_ascii=False)
        return temp_path

    if not os.path.exists(input_json_path):
        raise FileNotFoundError(f"Input file not found: {input_json_path}")

    try:
        with open(input_json_path, "r", encoding="utf-8") as f:
            data = json.load(f)
    except json.JSONDecodeError as e:
        # BUG FIX: the original re-raised json.JSONDecodeError with a single
        # string argument, but its constructor requires (msg, doc, pos) and
        # would itself fail with a TypeError. Preserve the exception type and
        # position info while adding the file path to the message.
        raise json.JSONDecodeError(
            f"Invalid JSON in {input_json_path}: {e.msg}", e.doc, e.pos
        ) from e

    if not is_coco_json(data):
        raise ValueError(
            f"File does not appear to be in COCO format: {input_json_path}"
        )

    # Re-index only when some collection actually starts at id 0; either way
    # a copy is written so the caller always gets an independent file.
    annotations_zero, images_zero, categories_zero = check_zero_indexed(data)
    if annotations_zero or images_zero or categories_zero:
        data = reindex_coco_data(data)
    return write_temp_copy(data)


# Example usage and smoke test.
def test_reindex_function():
    """Exercise the reindex function with a small 0-indexed COCO file."""
    test_data = {
        "info": {"description": "Test COCO dataset", "version": "1.0", "year": 2023},
        "images": [
            {"id": 0, "width": 640, "height": 480, "file_name": "test1.jpg"},
            {"id": 1, "width": 640, "height": 480, "file_name": "test2.jpg"},
        ],
        "categories": [
            {"id": 0, "name": "person", "supercategory": "person"},
            {"id": 1, "name": "car", "supercategory": "vehicle"},
        ],
        "annotations": [
            {
                "id": 0,
                "image_id": 0,
                "category_id": 0,
                "bbox": [100, 100, 50, 75],
                "area": 3750,
                "iscrowd": 0,
            },
            {
                "id": 1,
                "image_id": 1,
                "category_id": 1,
                "bbox": [200, 150, 120, 80],
                "area": 9600,
                "iscrowd": 0,
            },
        ],
    }

    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
        json.dump(test_data, f, indent=2)
        test_file_path = f.name

    try:
        result_path = reindex_coco_to_temp(test_file_path)
        print(f"Original file: {test_file_path}")
        print(f"Converted file: {result_path}")

        with open(result_path, "r") as f:
            result_data = json.load(f)

        print("\nConverted data sample:")
        print(f"First image ID: {result_data['images'][0]['id']}")
        print(f"First category ID: {result_data['categories'][0]['id']}")
        print(f"First annotation ID: {result_data['annotations'][0]['id']}")
        print(f"First annotation image_id: {result_data['annotations'][0]['image_id']}")
        print(
            f"First annotation category_id: {result_data['annotations'][0]['category_id']}"
        )

        # Clean up the converted file and its temp directory.
        os.unlink(result_path)
        os.rmdir(os.path.dirname(result_path))
    finally:
        os.unlink(test_file_path)


if __name__ == "__main__":
    test_reindex_function()
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
import argparse
import json
import os
from collections import defaultdict

from iopath.common.file_io import g_pathmgr
from sam3.eval.saco_veval_evaluators import (
    VideoCGF1Evaluator,
    VideoPhraseApEvaluator,
    VideoPhraseHotaEvaluator,
    VideoTetaEvaluator,
    YTVISPredFileEvaluator,
)


class VEvalEvaluator:
    """Runs the full suite of video-grounding evaluators and persists one JSON."""

    def __init__(self, gt_annot_file: str, eval_res_file: str):
        self.gt_annot_file = gt_annot_file
        self.eval_res_file = eval_res_file
        # Metric suite, all sharing the same GT annotation file.
        self.evaluators = [
            YTVISPredFileEvaluator(gt_annot_file),  # mAP
            VideoPhraseApEvaluator(gt_annot_file),  # Phrase AP
            VideoTetaEvaluator(gt_annot_file, use_mask=True, is_exhaustive=True),  # TETA
            VideoPhraseHotaEvaluator(gt_annot_file),  # HOTA
            VideoCGF1Evaluator(gt_annot_file),  # cgF1
        ]

    def run_eval(self, pred_file: str):
        """Evaluate `pred_file`, merge all metric outputs, save and return them."""
        dataset_results = {}
        # (video_id, category_id) -> merged per-video/phrase metrics.
        video_np_results = defaultdict(dict)
        for evaluator in self.evaluators:
            d_res, v_np_res = evaluator.evaluate(pred_file)
            dataset_results.update(d_res)
            for key, res in v_np_res.items():
                video_np_results[key].update(res)

        # NOTE(review): an empty placeholder entry is emitted when no metric
        # produced dataset-level results — presumably so downstream consumers
        # never see an empty dict; confirm this sentinel is expected.
        if not dataset_results:
            dataset_results = {"": 0.0}

        formatted_video_np_results = [
            {"video_id": video_id, "category_id": category_id, **res}
            for (video_id, category_id), res in video_np_results.items()
        ]
        eval_metrics = {
            "dataset_results": dataset_results,
            "video_np_results": formatted_video_np_results,
        }

        with g_pathmgr.open(self.eval_res_file, "w") as f:
            json.dump(eval_metrics, f)

        return eval_metrics


def run_main_all(dataset_name, args):
    """Evaluate one named dataset using the directory layout from `args`."""
    gt_annot_file = os.path.join(args.gt_annot_dir, dataset_name + ".json")
    pred_file = os.path.join(args.pred_dir, dataset_name + "_preds.json")
    eval_res_file = os.path.join(args.eval_res_dir, dataset_name + "_eval_res.json")
    print(f"=== Running evaluation for Pred {pred_file} vs GT {gt_annot_file} ===")
    VEvalEvaluator(gt_annot_file=gt_annot_file, eval_res_file=eval_res_file).run_eval(
        pred_file=pred_file
    )
    print(f"=== Results saved to {eval_res_file} ===")


def main_all(args):
    """Evaluate every known SA-Co/VEval split, one after the other."""
    # The inner evaluators already use multiprocessing, so datasets are
    # processed sequentially rather than in an outer process pool.
    for dataset_name in (
        "saco_veval_sav_test",
        "saco_veval_sav_val",
        "saco_veval_yt1b_test",
        "saco_veval_yt1b_val",
        "saco_veval_smartglasses_test",
        "saco_veval_smartglasses_val",
    ):
        print(f"=== Running evaluation for dataset {dataset_name} ===")
        run_main_all(dataset_name=dataset_name, args=args)


def main_one(args):
    """Evaluate a single prediction file against a single GT file."""
    gt_annot_file = args.gt_annot_file
    pred_file = args.pred_file
    eval_res_file = args.eval_res_file

    print(f"=== Running evaluation for Pred {pred_file} vs GT {gt_annot_file} ===")
    VEvalEvaluator(gt_annot_file=gt_annot_file, eval_res_file=eval_res_file).run_eval(
        pred_file=pred_file
    )
    print(f"=== Results saved to {eval_res_file} ===")


def main():
    """CLI entry point: `all` for every dataset, `one` for a single pair."""
    parser = argparse.ArgumentParser(description="Run video grounding evaluators")
    subparsers = parser.add_subparsers(dest="command", required=True)

    # "all": evaluate every dataset found under a directory triple.
    all_parser = subparsers.add_parser("all", help="Run evaluation for all datasets")
    for flag, help_text in (
        (
            "--gt_annot_dir",
            "Directory that contains the ground truth annotation files",
        ),
        ("--pred_dir", "Directory that contains the prediction files"),
        ("--eval_res_dir", "Directory that contains the eval results files"),
    ):
        all_parser.add_argument(flag, type=str, help=help_text)
    all_parser.set_defaults(func=main_all)

    # "one": evaluate a single (gt, pred) file pair.
    one_parser = subparsers.add_parser("one", help="Run evaluation for one dataset")
    for flag, help_text in (
        ("--gt_annot_file", "Path to the ground truth annotation file"),
        ("--pred_file", "Path to the prediction file"),
        ("--eval_res_file", "Path to the eval results file"),
    ):
        one_parser.add_argument(flag, type=str, help=help_text)
    one_parser.set_defaults(func=main_one)

    # Parse and dispatch to the selected sub-command.
    args = parser.parse_args()
    args.func(args)


if __name__ == "__main__":
    main()
"--gt_annot_file", + type=str, + help="Path to the ground truth annotation file", + ) + one_parser.add_argument( + "--pred_file", + type=str, + help="Path to the prediction file", + ) + one_parser.add_argument( + "--eval_res_file", + type=str, + help="Path to the eval results file", + ) + one_parser.set_defaults(func=main_one) + + # Parse and dispatch + args = parser.parse_args() + args.func(args) + + +if __name__ == "__main__": + main() diff --git a/source_code/sam3/sam3/eval/teta_eval_toolkit/utils.py b/source_code/sam3/sam3/eval/teta_eval_toolkit/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aa688e777afd3ed774ae200ff355b3418b1d389f --- /dev/null +++ b/source_code/sam3/sam3/eval/teta_eval_toolkit/utils.py @@ -0,0 +1,46 @@ +# fmt: off +# flake8: noqa + +import csv +import os +from collections import OrderedDict + + +def validate_metrics_list(metrics_list): + """Get names of metric class and ensures they are unique, further checks that the fields within each metric class + do not have overlapping names. + """ + metric_names = [metric.get_name() for metric in metrics_list] + # check metric names are unique + if len(metric_names) != len(set(metric_names)): + raise TrackEvalException( + "Code being run with multiple metrics of the same name" + ) + fields = [] + for m in metrics_list: + fields += m.fields + # check metric fields are unique + if len(fields) != len(set(fields)): + raise TrackEvalException( + "Code being run with multiple metrics with fields of the same name" + ) + return metric_names + + +def get_track_id_str(ann): + """Get name of track ID in annotation.""" + if "track_id" in ann: + tk_str = "track_id" + elif "instance_id" in ann: + tk_str = "instance_id" + elif "scalabel_id" in ann: + tk_str = "scalabel_id" + else: + assert False, "No track/instance ID." + return tk_str + + +class TrackEvalException(Exception): + """Custom exception for catching expected errors.""" + + ... 
diff --git a/source_code/sam3/sam3/logger.py b/source_code/sam3/sam3/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..db9c0a61b76292e941804b233cc6c184b641158a --- /dev/null +++ b/source_code/sam3/sam3/logger.py @@ -0,0 +1,54 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +import logging +import os + +LOG_LEVELS = { + "DEBUG": logging.DEBUG, + "INFO": logging.INFO, + "WARNING": logging.WARNING, + "ERROR": logging.ERROR, + "CRITICAL": logging.CRITICAL, +} + + +class ColoredFormatter(logging.Formatter): + """A command line formatter with different colors for each level.""" + + def __init__(self): + super().__init__() + reset = "\033[0m" + colors = { + logging.DEBUG: f"{reset}\033[36m", # cyan, + logging.INFO: f"{reset}\033[32m", # green + logging.WARNING: f"{reset}\033[33m", # yellow + logging.ERROR: f"{reset}\033[31m", # red + logging.CRITICAL: f"{reset}\033[35m", # magenta + } + fmt_str = "{color}%(levelname)s %(asctime)s %(process)d %(filename)s:%(lineno)4d:{reset} %(message)s" + self.formatters = { + level: logging.Formatter(fmt_str.format(color=color, reset=reset)) + for level, color in colors.items() + } + self.default_formatter = self.formatters[logging.INFO] + + def format(self, record): + formatter = self.formatters.get(record.levelno, self.default_formatter) + return formatter.format(record) + + +def get_logger(name, level=logging.INFO): + """A command line logger.""" + if "LOG_LEVEL" in os.environ: + level = os.environ["LOG_LEVEL"].upper() + assert ( + level in LOG_LEVELS + ), f"Invalid LOG_LEVEL: {level}, must be one of {list(LOG_LEVELS.keys())}" + level = LOG_LEVELS[level] + logger = logging.getLogger(name) + logger.setLevel(level) + logger.propagate = False + ch = logging.StreamHandler() + ch.setLevel(level) + ch.setFormatter(ColoredFormatter()) + logger.addHandler(ch) + return logger diff --git a/source_code/sam3/sam3/model/io_utils.py b/source_code/sam3/sam3/model/io_utils.py new file mode 
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved

import contextlib
import os
import queue
import re
import time
from threading import Condition, get_ident, Lock, Thread

import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF

from PIL import Image

from sam3.logger import get_logger
from tqdm import tqdm

logger = get_logger(__name__)

# Process-role flags read from the environment (defaults: single main process).
IS_MAIN_PROCESS = os.getenv("IS_MAIN_PROCESS", "1") == "1"
RANK = int(os.getenv("RANK", "0"))

# Extensions (compared lower-cased) treated as single images vs. video files.
IMAGE_EXTS = [".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".webp"]
VIDEO_EXTS = [".mp4", ".mov", ".avi", ".mkv", ".webm"]


def load_resource_as_video_frames(
    resource_path,
    image_size,
    offload_video_to_cpu,
    img_mean=(0.5, 0.5, 0.5),
    img_std=(0.5, 0.5, 0.5),
    async_loading_frames=False,
    video_loader_type="cv2",
):
    """
    Load video frames from either a video or an image (as a single-frame video).

    Alternatively, if the input is a list of PIL images, convert it to a
    normalized float16 frame tensor of shape (T, 3, image_size, image_size).

    Returns:
        (frames, orig_height, orig_width); `frames` is moved to GPU unless
        offload_video_to_cpu is True.
    """
    if isinstance(resource_path, list):
        # Per-channel mean/std shaped for broadcasting over (C, H, W).
        img_mean = torch.tensor(img_mean, dtype=torch.float16)[:, None, None]
        img_std = torch.tensor(img_std, dtype=torch.float16)[:, None, None]
        assert all(isinstance(img_pil, Image.Image) for img_pil in resource_path)
        # BUG FIX: the original asserted `len(resource_path) is not None`,
        # which is always true (len() never returns None). The intent was
        # clearly a non-empty check, which also guards resource_path[0] below.
        assert len(resource_path) > 0
        # PIL's Image.size is (width, height); swap to (height, width).
        orig_height, orig_width = resource_path[0].size
        orig_height, orig_width = (
            orig_width,
            orig_height,
        )
        images = []
        for img_pil in resource_path:
            img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size)))
            assert img_np.dtype == np.uint8, "np.uint8 is expected for JPEG images"
            img_np = img_np / 255.0
            img = torch.from_numpy(img_np).permute(2, 0, 1)
            # float16 precision should be sufficient for image tensor storage
            img = img.to(dtype=torch.float16)
            # normalize by mean and std
            img -= img_mean
            img /= img_std
            images.append(img)
        images = torch.stack(images)
        if not offload_video_to_cpu:
            images = images.cuda()
        return images, orig_height, orig_width

    # A string path is routed by extension: single image vs. video resource.
    is_image = (
        isinstance(resource_path, str)
        and os.path.splitext(resource_path)[-1].lower() in IMAGE_EXTS
    )
    if is_image:
        return load_image_as_single_frame_video(
            image_path=resource_path,
            image_size=image_size,
            offload_video_to_cpu=offload_video_to_cpu,
            img_mean=img_mean,
            img_std=img_std,
        )
    else:
        return load_video_frames(
            video_path=resource_path,
            image_size=image_size,
            offload_video_to_cpu=offload_video_to_cpu,
            img_mean=img_mean,
            img_std=img_std,
            async_loading_frames=async_loading_frames,
            video_loader_type=video_loader_type,
        )


def load_image_as_single_frame_video(
    image_path,
    image_size,
    offload_video_to_cpu,
    img_mean=(0.5, 0.5, 0.5),
    img_std=(0.5, 0.5, 0.5),
):
    """Load an image as a single-frame video."""
    images, image_height, image_width = _load_img_as_tensor(image_path, image_size)
images = images.unsqueeze(0).half() + + img_mean = torch.tensor(img_mean, dtype=torch.float16)[:, None, None] + img_std = torch.tensor(img_std, dtype=torch.float16)[:, None, None] + if not offload_video_to_cpu: + images = images.cuda() + img_mean = img_mean.cuda() + img_std = img_std.cuda() + # normalize by mean and std + images -= img_mean + images /= img_std + return images, image_height, image_width + + +def load_video_frames( + video_path, + image_size, + offload_video_to_cpu, + img_mean=(0.5, 0.5, 0.5), + img_std=(0.5, 0.5, 0.5), + async_loading_frames=False, + video_loader_type="cv2", +): + """ + Load the video frames from video_path. The frames are resized to image_size as in + the model and are loaded to GPU if offload_video_to_cpu=False. This is used by the demo. + """ + assert isinstance(video_path, str) + if video_path.startswith(" where N is an integer + match = re.match(r"", video_path) + num_frames = int(match.group(1)) if match else 60 + return load_dummy_video(image_size, offload_video_to_cpu, num_frames=num_frames) + elif os.path.isdir(video_path): + return load_video_frames_from_image_folder( + image_folder=video_path, + image_size=image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=img_mean, + img_std=img_std, + async_loading_frames=async_loading_frames, + ) + elif os.path.splitext(video_path)[-1].lower() in VIDEO_EXTS: + return load_video_frames_from_video_file( + video_path=video_path, + image_size=image_size, + offload_video_to_cpu=offload_video_to_cpu, + img_mean=img_mean, + img_std=img_std, + async_loading_frames=async_loading_frames, + video_loader_type=video_loader_type, + ) + else: + raise NotImplementedError("Only video files and image folders are supported") + + +def load_video_frames_from_image_folder( + image_folder, + image_size, + offload_video_to_cpu, + img_mean, + img_std, + async_loading_frames, +): + """ + Load the video frames from a directory of image files ("." 
def load_video_frames_from_image_folder(
    image_folder,
    image_size,
    offload_video_to_cpu,
    img_mean,
    img_std,
    async_loading_frames,
):
    """Load a video given as a folder of per-frame image files."""
    # Collect image files; prefer numeric sort on the file stem, falling back
    # to plain lexicographic order when stems are not integers.
    frame_names = [
        name
        for name in os.listdir(image_folder)
        if os.path.splitext(name)[-1].lower() in IMAGE_EXTS
    ]
    try:
        frame_names.sort(key=lambda name: int(os.path.splitext(name)[0]))
    except ValueError:
        # NOTE(review): the quoted format in this warning looks garbled in the
        # source (likely originally something like "<frame_idx>.<ext>").
        logger.warning(
            f'frame names are not in "." format: {frame_names[:5]=}, '
            f"falling back to lexicographic sort."
        )
        frame_names.sort()
    num_frames = len(frame_names)
    if num_frames == 0:
        raise RuntimeError(f"no images found in {image_folder}")
    img_paths = [os.path.join(image_folder, name) for name in frame_names]
    img_mean = torch.tensor(img_mean, dtype=torch.float16)[:, None, None]
    img_std = torch.tensor(img_std, dtype=torch.float16)[:, None, None]

    if async_loading_frames:
        # Background-thread loader; frames become available lazily.
        lazy_images = AsyncImageFrameLoader(
            img_paths, image_size, offload_video_to_cpu, img_mean, img_std
        )
        return lazy_images, lazy_images.video_height, lazy_images.video_width

    # Synchronous path: float16 is sufficient for image tensor storage.
    frames = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float16)
    video_height, video_width = None, None
    progress = tqdm(img_paths, desc=f"frame loading (image folder) [rank={RANK}]")
    for idx, img_path in enumerate(progress):
        frames[idx], video_height, video_width = _load_img_as_tensor(
            img_path, image_size
        )
    if not offload_video_to_cpu:
        frames = frames.cuda()
        img_mean = img_mean.cuda()
        img_std = img_std.cuda()
    # normalize in place by mean and std
    frames -= img_mean
    frames /= img_std
    return frames, video_height, video_width


def load_video_frames_from_video_file(
    video_path,
    image_size,
    offload_video_to_cpu,
    img_mean,
    img_std,
    async_loading_frames,
    gpu_acceleration=False,
    gpu_device=None,
    video_loader_type="cv2",
):
    """Dispatch video-file loading to the requested backend ('cv2' or 'torchcodec')."""
    if video_loader_type == "cv2":
        return load_video_frames_from_video_file_using_cv2(
            video_path=video_path,
            image_size=image_size,
            img_mean=img_mean,
            img_std=img_std,
            offload_video_to_cpu=offload_video_to_cpu,
        )
    if video_loader_type == "torchcodec":
        logger.info("Using torchcodec to load video file")
        lazy_images = AsyncVideoFileLoaderWithTorchCodec(
            video_path=video_path,
            image_size=image_size,
            offload_video_to_cpu=offload_video_to_cpu,
            img_mean=img_mean,
            img_std=img_std,
            gpu_acceleration=gpu_acceleration,
            gpu_device=gpu_device,
        )
        # The `AsyncVideoFileLoaderWithTorchCodec` class always loads the
        # videos asynchronously, so we just wait for its loading thread to
        # finish if async_loading_frames=False.
        if not async_loading_frames:
            async_thread = lazy_images.thread
            if async_thread is not None:
                async_thread.join()
        return lazy_images, lazy_images.video_height, lazy_images.video_width
    raise RuntimeError("video_loader_type must be either 'cv2' or 'torchcodec'")


def load_video_frames_from_video_file_using_cv2(
    video_path: str,
    image_size: int,
    img_mean: tuple = (0.5, 0.5, 0.5),
    img_std: tuple = (0.5, 0.5, 0.5),
    offload_video_to_cpu: bool = False,
) -> torch.Tensor:
    """
    Decode a video with OpenCV into a normalized (T, C, H, W) tensor.

    Args:
        video_path: Path to the video file.
        image_size: Target size for square frames (height and width).
        img_mean: Normalization mean (RGB).
        img_std: Normalization standard deviation (RGB).
        offload_video_to_cpu: Keep the tensor on CPU instead of moving to GPU.

    Returns:
        (video_tensor, original_height, original_width).
    """
    import cv2  # delay OpenCV import to avoid unnecessary dependency

    capture = cv2.VideoCapture(video_path)
    if not capture.isOpened():
        raise ValueError(f"Could not open video: {video_path}")

    original_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    original_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    # CAP_PROP_FRAME_COUNT may report a non-positive value for some
    # containers; use an open-ended progress bar in that case.
    reported_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    num_frames = reported_frames if reported_frames > 0 else None

    frames = []
    progress = tqdm(desc=f"frame loading (OpenCV) [rank={RANK}]", total=num_frames)
    while True:
        ok, frame_bgr = capture.read()
        if not ok:
            break
        # Convert BGR to RGB and resize to the square model input size.
        frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
        frames.append(
            cv2.resize(
                frame_rgb, (image_size, image_size), interpolation=cv2.INTER_CUBIC
            )
        )
        progress.update(1)
    capture.release()
    progress.close()

    frames_np = np.stack(frames, axis=0).astype(np.float32)  # (T, H, W, C)
    video_tensor = torch.from_numpy(frames_np).permute(0, 3, 1, 2)  # (T, C, H, W)

    img_mean = torch.tensor(img_mean, dtype=torch.float16).view(1, 3, 1, 1)
    img_std = torch.tensor(img_std, dtype=torch.float16).view(1, 3, 1, 1)
    if not offload_video_to_cpu:
        video_tensor = video_tensor.cuda()
        img_mean = img_mean.cuda()
        img_std = img_std.cuda()
    # normalize in place by mean and std
    video_tensor -= img_mean
    video_tensor /= img_std
    return video_tensor, original_height, original_width


def load_dummy_video(image_size, offload_video_to_cpu, num_frames=60):
    """
    Produce a random (num_frames, 3, image_size, image_size) float16 clip for
    testing and compilation warmup; reports a fake 480x640 source size.
    """
    video_height, video_width = 480, 640  # dummy original video sizes
    frames = torch.randn(num_frames, 3, image_size, image_size, dtype=torch.float16)
    if not offload_video_to_cpu:
        frames = frames.cuda()
    return frames, video_height, video_width


def _load_img_as_tensor(img_path, image_size):
    """Read one image, resize to (image_size, image_size), return (tensor, H, W)."""
    img = Image.open(img_path).convert("RGB")
    orig_width, orig_height = img.width, img.height
    resized = TF.to_tensor(TF.resize(img, size=(image_size, image_size)))
    return resized, orig_height, orig_width
+ """ + + def __init__(self, img_paths, image_size, offload_video_to_cpu, img_mean, img_std): + self.img_paths = img_paths + self.image_size = image_size + self.offload_video_to_cpu = offload_video_to_cpu + self.img_mean = img_mean + self.img_std = img_std + # items in `self._images` will be loaded asynchronously + self.images = [None] * len(img_paths) + # catch and raise any exceptions in the async loading thread + self.exception = None + # video_height and video_width be filled when loading the first image + self.video_height = None + self.video_width = None + + # load the first frame to fill video_height and video_width and also + # to cache it (since it's most likely where the user will click) + self.__getitem__(0) + + # load the rest of frames asynchronously without blocking the session start + def _load_frames(): + try: + for n in tqdm( + range(len(self.images)), + desc=f"frame loading (image folder) [rank={RANK}]", + ): + self.__getitem__(n) + except Exception as e: + self.exception = e + + self.thread = Thread(target=_load_frames, daemon=True) + self.thread.start() + + def __getitem__(self, index): + if self.exception is not None: + raise RuntimeError("Failure in frame loading thread") from self.exception + + img = self.images[index] + if img is not None: + return img + + img, video_height, video_width = _load_img_as_tensor( + self.img_paths[index], self.image_size + ) + self.video_height = video_height + self.video_width = video_width + # float16 precision should be sufficient for image tensor storage + img = img.to(dtype=torch.float16) + # normalize by mean and std + img -= self.img_mean + img /= self.img_std + if not self.offload_video_to_cpu: + img = img.cuda() + self.images[index] = img + return img + + def __len__(self): + return len(self.images) + + +class TorchCodecDecoder: + """ + A wrapper to support GPU device and num_threads in TorchCodec decoder, + which are not supported by `torchcodec.decoders.SimpleVideoDecoder` yet. 
+ """ + + def __init__(self, source, dimension_order="NCHW", device="cpu", num_threads=1): + from torchcodec import _core as core + + self._source = source # hold a reference to the source to prevent it from GC + if isinstance(source, str): + self._decoder = core.create_from_file(source, "exact") + elif isinstance(source, bytes): + self._decoder = core.create_from_bytes(source, "exact") + else: + raise TypeError(f"Unknown source type: {type(source)}.") + assert dimension_order in ("NCHW", "NHWC") + + device_string = str(device) + core.scan_all_streams_to_update_metadata(self._decoder) + core.add_video_stream( + self._decoder, + dimension_order=dimension_order, + device=device_string, + num_threads=(1 if "cuda" in device_string else num_threads), + ) + video_metadata = core.get_container_metadata(self._decoder) + best_stream_index = video_metadata.best_video_stream_index + assert best_stream_index is not None + self.metadata = video_metadata.streams[best_stream_index] + assert self.metadata.num_frames_from_content is not None + self._num_frames = self.metadata.num_frames_from_content + + def __len__(self) -> int: + return self._num_frames + + def __getitem__(self, key: int): + from torchcodec import _core as core + + if key < 0: + key += self._num_frames + if key >= self._num_frames or key < 0: + raise IndexError( + f"Index {key} is out of bounds; length is {self._num_frames}" + ) + frame_data, *_ = core.get_frame_at_index( + self._decoder, + frame_index=key, + ) + return frame_data + + +class FIFOLock: + """A lock that ensures FIFO ordering of lock acquisitions.""" + + def __init__(self): + self._lock = Lock() + self._waiters = queue.Queue() + self._condition = Condition() + + def acquire(self): + ident = get_ident() + with self._condition: + self._waiters.put(ident) + while self._waiters.queue[0] != ident or not self._lock.acquire( + blocking=False + ): + self._condition.wait() + # got the lock and it's our turn + + def release(self): + with self._condition: + 
            # release the lock, pop ourselves off the FIFO, and wake all waiters
            # so the new front-of-queue thread can proceed
            self._lock.release()
            self._waiters.get()
            self._condition.notify_all()

    def __enter__(self):
        self.acquire()

    def __exit__(self, t, v, tb):
        self.release()


class AsyncVideoFileLoaderWithTorchCodec:
    """
    Loading frames from video files asynchronously without blocking session start.

    Unlike `AsyncVideoFileLoader`, this class uses PyTorch's offical TorchCodec library
    for video decoding, which is more efficient and supports more video formats.
    """

    def __init__(
        self,
        video_path,
        image_size,
        offload_video_to_cpu,
        img_mean,
        img_std,
        gpu_acceleration=True,
        gpu_device=None,
        use_rand_seek_in_loading=False,
    ):
        # Check and possibly infer the output device (and also get its GPU id when applicable)
        assert gpu_device is None or gpu_device.type == "cuda"
        gpu_id = (
            gpu_device.index
            if gpu_device is not None and gpu_device.index is not None
            else torch.cuda.current_device()
        )
        if offload_video_to_cpu:
            out_device = torch.device("cpu")
        else:
            out_device = torch.device("cuda") if gpu_device is None else gpu_device
        self.out_device = out_device
        self.gpu_acceleration = gpu_acceleration
        self.gpu_id = gpu_id
        self.image_size = image_size
        self.offload_video_to_cpu = offload_video_to_cpu
        # reshape scalar mean/std tuples to (C, 1, 1) so they broadcast over (C, H, W)
        if not isinstance(img_mean, torch.Tensor):
            img_mean = torch.tensor(img_mean, dtype=torch.float16)[:, None, None]
        self.img_mean = img_mean
        if not isinstance(img_std, torch.Tensor):
            img_std = torch.tensor(img_std, dtype=torch.float16)[:, None, None]
        self.img_std = img_std

        if gpu_acceleration:
            # decode on GPU; keep normalization stats on the same device
            self.img_mean = self.img_mean.to(f"cuda:{self.gpu_id}")
            self.img_std = self.img_std.to(f"cuda:{self.gpu_id}")
            decoder_option = {"device": f"cuda:{self.gpu_id}"}
        else:
            self.img_mean = self.img_mean.cpu()
            self.img_std = self.img_std.cpu()
            decoder_option = {"num_threads": 1}  # use a single thread to save memory

        self.rank = int(os.environ.get("RANK", "0"))
        self.world_size = int(os.environ.get("WORLD_SIZE", "1"))
        self.async_reader = TorchCodecDecoder(video_path, **decoder_option)

        # `num_frames_from_content` is the true number of frames in the video content
        # from the scan operation (rather than from the metadata, which could be wrong)
        self.num_frames = self.async_reader.metadata.num_frames_from_content
        self.video_height = self.async_reader.metadata.height
        self.video_width = self.async_reader.metadata.width

        # items in `self.images` will be loaded asynchronously; `images_loaded[i]`
        # flags whether row i of the preallocated tensor holds a decoded frame
        self.images_loaded = [False] * self.num_frames
        self.images = torch.zeros(
            self.num_frames,
            3,
            self.image_size,
            self.image_size,
            dtype=torch.float16,
            device=self.out_device,
        )
        # catch and raise any exceptions in the async loading thread
        self.exception = None
        self.use_rand_seek_in_loading = use_rand_seek_in_loading
        self.rand_seek_idx_queue = queue.Queue()
        # use a lock to avoid race condition between concurrent access to torchcodec
        # libs (which are not thread-safe); the lock is replaced with a nullcontext
        # when the video is fully loaded
        self.torchcodec_access_lock = FIFOLock()
        self._start_video_loading()

    def _load_one_frame(self, idx):
        # Decode frame `idx` and apply resize + normalization.
        frame_resized = self._transform_frame(self.async_reader[idx])
        return frame_resized

    @torch.inference_mode()
    def _start_video_loading(self):
        desc = f"frame loading (TorchCodec w/ {'GPU' if self.gpu_acceleration else 'CPU'}) [rank={RANK}]"
        pbar = tqdm(desc=desc, total=self.num_frames)
        self.num_loaded_frames = 0
        # load the first frame synchronously to cache it before the session is opened
        idx = self.num_loaded_frames
        self.images[idx] = self._load_one_frame(idx)
        self.images_loaded[idx] = True
        self.num_loaded_frames += 1
        pbar.update(n=1)
        self.all_frames_loaded = self.num_loaded_frames == self.num_frames

        # load the frames asynchronously without blocking the session start
        def _load_frames():
            finished = self.all_frames_loaded
            chunk_size = 16
            while not finished:
                # asynchronously load `chunk_size`
                # frames each time we acquire the lock (releasing it between chunks
                # lets __getitem__ callers interleave with the background loader)
                with self.torchcodec_access_lock, torch.inference_mode():
                    for _ in range(chunk_size):
                        try:
                            idx = self.num_loaded_frames
                            self.images[idx] = self._load_one_frame(idx)
                            self.images_loaded[idx] = True
                            self.num_loaded_frames += 1
                            pbar.update(n=1)
                            if self.num_loaded_frames >= self.num_frames:
                                finished = True
                                break
                        except Exception as e:
                            # stash the exception for the main thread, then re-raise
                            # to stop this loader thread
                            self.exception = e
                            raise

                    # also read the frame that is being randomly seeked to
                    while True:
                        try:
                            idx = self.rand_seek_idx_queue.get_nowait()
                            if not self.images_loaded[idx]:
                                self.images[idx] = self._load_one_frame(idx)
                                self.images_loaded[idx] = True
                        except queue.Empty:
                            break
                        except Exception as e:
                            self.exception = e
                            raise

            # finished -- check whether we have loaded the total number of frames
            if self.num_loaded_frames != self.num_frames:
                raise RuntimeError(
                    f"There are {self.num_frames} frames in the video, but only "
                    f"{self.num_loaded_frames} frames can be loaded successfully."
                )
            else:
                self.all_frames_loaded = True
            pbar.close()
            with self.torchcodec_access_lock:
                import gc

                # all frames have been loaded, so we can release the readers and free their memory
                # also remove pbar and thread (which shouldn't be a part of session saving)
                reader = self.async_reader
                if reader is not None:
                    reader._source = None
                self.async_reader = None
                self.pbar = None
                self.thread = None
                self.rand_seek_idx_queue = None
                gc.collect()
                # remove the lock (replace it with nullcontext) when the video is fully loaded
                self.torchcodec_access_lock = contextlib.nullcontext()

        self.thread = Thread(target=_load_frames, daemon=True)
        self.thread.start()

    def _transform_frame(self, frame):
        # Resize a decoded (C, H, W) frame to (3, image_size, image_size) and
        # normalize it into float16 on the output device.
        frame = frame.clone()  # make a copy to avoid modifying the original frame bytes
        frame = frame.float()  # convert to float32 before interpolation
        frame_resized = F.interpolate(
            frame[None, :],
            size=(self.image_size, self.image_size),
            mode="bicubic",
            align_corners=False,
        )[0]
        # float16 precision should be sufficient for image tensor storage
        frame_resized = frame_resized.half()  # uint8 -> float16
        frame_resized /= 255
        frame_resized -= self.img_mean
        frame_resized /= self.img_std
        if self.offload_video_to_cpu:
            frame_resized = frame_resized.cpu()
        elif frame_resized.device != self.out_device:
            frame_resized = frame_resized.to(device=self.out_device, non_blocking=True)
        return frame_resized

    def __getitem__(self, index):
        # Block (polling at 0.1 s intervals) until the requested frame is available.
        if self.exception is not None:
            raise RuntimeError("Failure in frame loading thread") from self.exception

        max_tries = 1200  # ~2 minutes of polling before giving up
        for _ in range(max_tries):
            # use a lock to avoid race condition between concurrent access to torchcodec
            # libs (which are not thread-safe); the lock is replaced with a nullcontext
            # when the video is fully loaded
            with self.torchcodec_access_lock:
                if self.images_loaded[index]:
                    return self.images[index]

                if self.use_rand_seek_in_loading:
                    # async loading hasn't reached
                    # this frame yet, so we load this frame individually
                    # (it will be loaded by the _load_frames thread and added to self.images[index])
                    self.rand_seek_idx_queue.put(index)

            time.sleep(0.1)

        raise RuntimeError(f"Failed to load frame {index} after {max_tries} tries")

    def __len__(self):
        return len(self.images)

    def __getstate__(self):
        """
        Remove a few attributes during pickling, so that this async video loader can be
        saved and loaded as a part of the model session.
        """
        # wait for async video loading to finish before pickling
        async_thread = self.thread
        if async_thread is not None:
            async_thread.join()
        # release a few objects that cannot be pickled
        reader = self.async_reader
        if reader is not None:
            reader._source = None
        self.async_reader = None
        self.pbar = None
        self.thread = None
        self.rand_seek_idx_queue = None
        self.torchcodec_access_lock = contextlib.nullcontext()
        return self.__dict__.copy()
diff --git a/source_code/sam3/sam3/model/memory.py b/source_code/sam3/sam3/model/memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfde5487d85006ab1aa35044fd431260dff2870e
--- /dev/null
+++ b/source_code/sam3/sam3/model/memory.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved

import math
from typing import Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

try:
    from timm.layers import DropPath
except ModuleNotFoundError:
    # compatibility for older timm versions
    from timm.models.layers import DropPath

from .model_misc import get_clones, LayerNorm2d


class SimpleMaskDownSampler(nn.Module):
    """
    Progressively downsample a mask by total_stride, each time by stride.
    Note that LayerNorm is applied per *token*, like in ViT.

    With each downsample (by a factor stride**2), channel capacity increases by the same factor.
    In the end, we linearly project to embed_dim channels.
    """

    def __init__(
        self,
        embed_dim=256,
        kernel_size=4,
        stride=4,
        padding=0,
        total_stride=16,
        activation=nn.GELU,
        # Option to interpolate the input mask first before downsampling using convs. In that case, the total_stride is assumed to be after interpolation.
        # If set to input resolution or None, we don't interpolate. We default to None to be safe (for older configs or if not explicitly set)
        interpol_size=None,
    ):
        super().__init__()
        # number of conv stages needed so that stride**num_layers == total_stride
        num_layers = int(math.log2(total_stride) // math.log2(stride))
        assert stride**num_layers == total_stride
        self.encoder = nn.Sequential()
        mask_in_chans, mask_out_chans = 1, 1
        for _ in range(num_layers):
            # each downsample by stride**2 grows channels by the same factor
            mask_out_chans = mask_in_chans * (stride**2)
            self.encoder.append(
                nn.Conv2d(
                    mask_in_chans,
                    mask_out_chans,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                )
            )
            self.encoder.append(LayerNorm2d(mask_out_chans))
            self.encoder.append(activation())
            mask_in_chans = mask_out_chans

        # final 1x1 projection to the target embedding width
        self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1))
        self.interpol_size = interpol_size
        if self.interpol_size is not None:
            assert isinstance(
                self.interpol_size, (list, tuple)
            ), f"Unsupported type {type(self.interpol_size)}. Should be a list or tuple."
            self.interpol_size = list(interpol_size)
            assert len(self.interpol_size) == 2

    def forward(self, x: torch.Tensor):
        # optionally resize the mask first (skipped when already at target size)
        if self.interpol_size is not None and self.interpol_size != list(x.shape[-2:]):
            x = F.interpolate(
                x.float(),
                size=self.interpol_size,
                align_corners=False,
                mode="bilinear",
                antialias=True,
            )
        return self.encoder(x)


# Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
class CXBlock(nn.Module):
    r"""ConvNeXt Block.
    There are two equivalent implementations:
    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
    (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
    We use (2) as we find it slightly faster in PyTorch

    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
    """

    def __init__(
        self,
        dim,
        kernel_size=7,
        padding=3,
        drop_path=0.0,
        layer_scale_init_value=1e-6,
        use_dwconv=True,
    ):
        super().__init__()
        self.dwconv = nn.Conv2d(
            dim,
            dim,
            kernel_size=kernel_size,
            padding=padding,
            groups=dim if use_dwconv else 1,
        )  # depthwise conv
        self.norm = LayerNorm2d(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(
            dim, 4 * dim
        )  # pointwise/1x1 convs, implemented with linear layers
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        # learnable per-channel scaling (Layer Scale); disabled when init value <= 0
        self.gamma = (
            nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
            if layer_scale_init_value > 0
            else None
        )
        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()

    def forward(self, x):
        input = x
        x = self.dwconv(x)
        x = self.norm(x)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.pwconv1(x)
        x = self.act(x)
        x = self.pwconv2(x)
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)

        # residual connection with stochastic depth
        x = input + self.drop_path(x)
        return x


class SimpleFuser(nn.Module):
    # Applies `num_layers` clones of `layer` sequentially, with an optional 1x1
    # input projection.
    def __init__(self, layer, num_layers, dim=None, input_projection=False):
        super().__init__()
        self.proj = nn.Identity()
        self.layers = get_clones(layer, num_layers)

        if input_projection:
            assert dim is not None
            self.proj = nn.Conv2d(dim, dim, kernel_size=1)

    def forward(self, x):
        # normally x: (N, C, H, W)
        x = self.proj(x)
        for layer in self.layers:
            x = layer(x)
        return x


class SimpleMaskEncoder(nn.Module):
    # Encodes a predicted mask together with pixel features into a memory feature map.
    def __init__(
        self,
        out_dim,
        mask_downsampler,
        fuser,
        position_encoding,
        in_dim=256,  # in_dim of pix_feats
    ):
        super().__init__()

        self.mask_downsampler = mask_downsampler

        self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
        self.fuser = fuser
        self.position_encoding = position_encoding
        self.out_proj = nn.Identity()
        if out_dim != in_dim:
            self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)

    def forward(
        self,
        pix_feat: torch.Tensor,
        masks: torch.Tensor,
        skip_mask_sigmoid: bool = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        ## Process masks
        # sigmoid, so that less domain shift from gt masks which are bool
        if not skip_mask_sigmoid:
            masks = F.sigmoid(masks)
        masks = self.mask_downsampler(masks)

        ## Fuse pix_feats and downsampled masks
        # in case the visual features are on CPU, cast them to CUDA
        pix_feat = pix_feat.to(masks.device)

        x = self.pix_feat_proj(pix_feat)
        x = x + masks
        x = self.fuser(x)
        x = self.out_proj(x)

        pos = self.position_encoding(x).to(x.dtype)

        # NOTE: despite the Tuple annotation above, a dict is returned here
        return {"vision_features": x, "vision_pos_enc": [pos]}
diff --git a/source_code/sam3/sam3/model/position_encoding.py b/source_code/sam3/sam3/model/position_encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb3f4055a7bae489de46368d9b94cf9d33595370
--- /dev/null
+++ b/source_code/sam3/sam3/model/position_encoding.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved

import math
from typing import Optional

import torch
from torch import nn


class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
+ """ + + def __init__( + self, + num_pos_feats, + temperature: int = 10000, + normalize: bool = True, + scale: Optional[float] = None, + precompute_resolution: Optional[int] = None, + ): + super().__init__() + assert num_pos_feats % 2 == 0, "Expecting even model width" + self.num_pos_feats = num_pos_feats // 2 + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + self.cache = {} + # Precompute positional encodings under `precompute_resolution` to fill the cache + # and avoid symbolic shape tracing errors in torch.compile in PyTorch 2.4 nightly. + if precompute_resolution is not None: + # We precompute pos enc for stride 4, 8, 16 and 32 to fill `self.cache`. + precompute_sizes = [ + (precompute_resolution // 4, precompute_resolution // 4), + (precompute_resolution // 8, precompute_resolution // 8), + (precompute_resolution // 16, precompute_resolution // 16), + (precompute_resolution // 32, precompute_resolution // 32), + ] + for size in precompute_sizes: + tensors = torch.zeros((1, 1) + size, device="cuda") + self.forward(tensors) + # further clone and detach it in the cache (just to be safe) + self.cache[size] = self.cache[size].clone().detach() + + def _encode_xy(self, x, y): + # The positions are expected to be normalized + assert len(x) == len(y) and x.ndim == y.ndim == 1 + x_embed = x * self.scale + y_embed = y * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, None] / dim_t + pos_y = y_embed[:, None] / dim_t + pos_x = torch.stack( + (pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2 + ).flatten(1) + pos_y = torch.stack( + (pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2 + ).flatten(1) + return pos_x, pos_y + + @torch.no_grad() + def 
encode_boxes(self, x, y, w, h): + pos_x, pos_y = self._encode_xy(x, y) + pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1) + return pos + + encode = encode_boxes # Backwards compatibility + + @torch.no_grad() + def encode_points(self, x, y, labels): + (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape + assert bx == by and nx == ny and bx == bl and nx == nl + pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten()) + pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1) + pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2) + return pos + + @torch.no_grad() + def forward(self, x): + cache_key = None + cache_key = (x.shape[-2], x.shape[-1]) + if cache_key in self.cache: + return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1) + y_embed = ( + torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device) + .view(1, -1, 1) + .repeat(x.shape[0], 1, x.shape[-1]) + ) + x_embed = ( + torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device) + .view(1, 1, -1) + .repeat(x.shape[0], x.shape[-2], 1) + ) + + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4 + ).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + if cache_key is not None: + self.cache[cache_key] = pos[0] + return pos diff --git a/source_code/sam3/sam3/model/text_encoder_ve.py b/source_code/sam3/sam3/model/text_encoder_ve.py new file mode 100644 index 
0000000000000000000000000000000000000000..b1cf145ca5eae383e0ecce8d7358570125eaadb3
--- /dev/null
+++ b/source_code/sam3/sam3/model/text_encoder_ve.py
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved

from collections import OrderedDict
from typing import Callable, List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

from .model_misc import LayerScale


class ResidualAttentionBlock(nn.Module):
    # Pre-norm transformer block: self/cross attention + MLP, each with an
    # optional LayerScale and a residual connection.
    def __init__(
        self,
        d_model: int,
        n_head: int,
        mlp_ratio: float = 4.0,
        ls_init_value: Optional[float] = None,
        act_layer: Callable[[], nn.Module] = nn.GELU,
        norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
    ):
        super().__init__()
        # Attention
        self.attn = nn.MultiheadAttention(d_model, n_head, batch_first=True)

        # LayerNorm, LayerScale
        self.ln_1 = norm_layer(d_model)
        self.ln_2 = norm_layer(d_model)

        self.ls_1 = (
            LayerScale(d_model, ls_init_value)
            if ls_init_value is not None
            else nn.Identity()
        )
        self.ls_2 = (
            LayerScale(d_model, ls_init_value)
            if ls_init_value is not None
            else nn.Identity()
        )

        # MLP
        mlp_width = int(d_model * mlp_ratio)
        self.mlp = nn.Sequential(
            OrderedDict(
                [
                    ("c_fc", nn.Linear(d_model, mlp_width)),
                    ("gelu", act_layer()),
                    ("c_proj", nn.Linear(mlp_width, d_model)),
                ]
            )
        )

    def attention(
        self,
        q_x: torch.Tensor,
        k_x: Optional[torch.Tensor] = None,
        v_x: Optional[torch.Tensor] = None,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # default to self-attention when no keys/values are given
        k_x = k_x if k_x is not None else q_x
        v_x = v_x if v_x is not None else q_x
        if attn_mask is not None:
            # Leave boolean masks as is
            if not attn_mask.dtype == torch.bool:
                attn_mask = attn_mask.to(q_x.dtype)

        return self.attn(q_x, k_x, v_x, need_weights=False, attn_mask=attn_mask)[0]

    def forward(
        self,
        q_x: torch.Tensor,
        k_x: Optional[torch.Tensor] = None,
        v_x: Optional[torch.Tensor] = None,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # `ln_1_kv` is only present on subclasses/variants that define it —
        # in this class, k_x/v_x fall through as None (self-attention)
        k_x = (
            self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
        )
        v_x = (
            self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None
        )
        x = q_x + self.ls_1(
            self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask)
        )
        x = x + self.ls_2(self.mlp(self.ln_2(x)))
        return x


class Transformer(nn.Module):
    # Stack of ResidualAttentionBlocks with optional torch.compile and
    # activation checkpointing (training only).
    def __init__(
        self,
        width: int,
        layers: int,
        heads: int,
        mlp_ratio: float = 4.0,
        ls_init_value: Optional[float] = None,
        act_layer: Callable[[], nn.Module] = nn.GELU,
        norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
        compile_mode: Optional[str] = None,
        use_act_checkpoint: bool = False,
    ):
        super().__init__()
        self.width = width
        self.layers = layers
        self.grad_checkpointing = use_act_checkpoint
        self.resblocks = nn.ModuleList(
            [
                ResidualAttentionBlock(
                    width,
                    heads,
                    mlp_ratio,
                    ls_init_value=ls_init_value,
                    act_layer=act_layer,
                    norm_layer=norm_layer,
                )
                for _ in range(layers)
            ]
        )

        if compile_mode is not None:
            self.forward = torch.compile(
                self.forward, mode=compile_mode, fullgraph=True
            )
            if self.grad_checkpointing:
                torch._dynamo.config.optimize_ddp = False

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        for _, r in enumerate(self.resblocks):
            if (
                self.grad_checkpointing
                and not torch.jit.is_scripting()
                and self.training
            ):
                # positional args mirror ResidualAttentionBlock.forward(q_x, k_x, v_x, attn_mask)
                x = checkpoint(r, x, None, None, attn_mask, use_reentrant=False)
            else:
                x = r(
                    x,
                    attn_mask=attn_mask,
                )
        return x


def text_global_pool(
    x: torch.Tensor, text: Optional[torch.Tensor] = None, pool_type: str = "argmax"
) -> Tuple[torch.Tensor, torch.Tensor]:
    # Pool token features into a single per-sequence vector, returning
    # (pooled, tokens).
    if pool_type == "first":
        pooled, tokens = x[:, 0], x[:, 1:]
    elif pool_type == "last":
        pooled, tokens = x[:, -1], x[:, :-1]
    elif pool_type == "argmax":
        # take features from the eot embedding
        # (eot_token is the highest number in each sequence)
        assert text is not None
        pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x
    else:
        # "none": no pooling; pooled is the full token tensor
        pooled = tokens = x
    return pooled, tokens


class TextTransformer(nn.Module):
    # CLIP-style text transformer: token + positional embeddings, causal
    # transformer stack, final LayerNorm, pooling, and output projection.
    def __init__(
        self,
        context_length: int = 77,
        vocab_size: int = 49408,
        width: int = 512,
        heads: int = 8,
        layers: int = 12,
        mlp_ratio: float = 4.0,
        ls_init_value: Optional[float] = None,
        output_dim: int = 512,
        no_causal_mask: bool = False,
        pool_type: str = "none",  # no pooling
        proj_bias: bool = False,
        act_layer: Callable = nn.GELU,
        norm_layer: Callable = nn.LayerNorm,
        output_tokens: bool = False,
        use_ln_post: bool = True,
        compile_mode: Optional[str] = None,
        use_act_checkpoint: bool = False,
    ):
        super().__init__()
        assert pool_type in ("first", "last", "argmax", "none")
        self.output_tokens = output_tokens
        self.num_pos = self.context_length = context_length
        self.vocab_size = vocab_size
        self.width = width
        self.output_dim = output_dim
        self.heads = heads
        self.pool_type = pool_type

        self.token_embedding = nn.Embedding(self.vocab_size, width)
        # NOTE: initialized with torch.empty — presumably filled by an external
        # init / checkpoint load; verify before training from scratch
        self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))
        self.transformer = Transformer(
            width=width,
            layers=layers,
            heads=heads,
            mlp_ratio=mlp_ratio,
            ls_init_value=ls_init_value,
            act_layer=act_layer,
            norm_layer=norm_layer,
            compile_mode=compile_mode,
            use_act_checkpoint=use_act_checkpoint,
        )
        self.ln_final = norm_layer(width) if use_ln_post else nn.Identity()
        if no_causal_mask:
            self.attn_mask = None
        else:
            # non-persistent buffer: rebuilt on load rather than stored in state_dict
            self.register_buffer(
                "attn_mask", self.build_causal_mask(), persistent=False
            )
        if proj_bias:
            self.text_projection = nn.Linear(width, output_dim)
        else:
            self.text_projection = nn.Parameter(torch.empty(width, output_dim))

    def build_causal_mask(self) -> torch.Tensor:
        # lazily create causal attention mask, with full attention between the tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.num_pos, self.num_pos)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    def forward(
        self, text: torch.Tensor
    ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        seq_len = text.shape[1]
        x = self.token_embedding(text)  # [batch_size, n_ctx, d_model]

        attn_mask = self.attn_mask
        if attn_mask is not None:
            # crop the precomputed mask to the actual sequence length
            attn_mask = attn_mask[:seq_len, :seq_len]

        x = x + self.positional_embedding[:seq_len]
        x = self.transformer(x, attn_mask=attn_mask)

        x = self.ln_final(x)
        pooled, tokens = text_global_pool(x, text, pool_type=self.pool_type)
        if self.text_projection is not None:
            if isinstance(self.text_projection, nn.Linear):
                pooled = self.text_projection(pooled)
            else:
                pooled = pooled @ self.text_projection
        if self.output_tokens:
            return pooled, tokens
        return pooled


class VETextEncoder(nn.Module):
    # Wraps a TextTransformer with a tokenizer and a linear resizer that maps
    # encoder width down to the decoder's d_model.
    def __init__(
        self,
        d_model: int,
        tokenizer: Callable,
        width: int = 1024,
        heads: int = 16,
        layers: int = 24,
        context_length: int = 32,
        vocab_size: int = 49408,
        use_ln_post: bool = True,
        compile_mode: Optional[str] = None,
        use_act_checkpoint: bool = True,
    ):
        super().__init__()
        self.context_length = context_length
        self.use_ln_post = use_ln_post
        self.tokenizer = tokenizer

        self.encoder = TextTransformer(
            context_length=self.context_length,
            vocab_size=vocab_size,
            width=width,
            heads=heads,
            layers=layers,
            # we want the tokens, not just the pooled output
            output_tokens=True,
            use_ln_post=use_ln_post,
            compile_mode=compile_mode,
            use_act_checkpoint=use_act_checkpoint,
        )
        self.resizer = nn.Linear(self.encoder.width, d_model)

    def forward(
        self,
        text: Union[List[str], Tuple[torch.Tensor, torch.Tensor, dict]],
        input_boxes: Optional[List] = None,
        device: torch.device = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        if isinstance(text[0], str):
            # no use case for this
            assert input_boxes is None or len(input_boxes) == 0, "not supported"

            # Encode the text
            tokenized = self.tokenizer(text, context_length=self.context_length).to(
                device
            )  # [b, seq_len]
            # padding positions (token id 0) are masked out
            text_attention_mask = (tokenized != 0).bool()

            # manually embed the tokens
            inputs_embeds = self.encoder.token_embedding(
                tokenized
            )  # [b, seq_len, d=1024]
            _, text_memory = self.encoder(tokenized)  # [b, seq_len, d=1024]

            assert text_memory.shape[1] == inputs_embeds.shape[1]
            # Invert attention mask because its the opposite in pytorch transformer
            text_attention_mask = text_attention_mask.ne(1)
            # Transpose memory because pytorch's attention expects sequence first
            text_memory = text_memory.transpose(0, 1)
            # Resize the encoder hidden states to be of the same d_model as the decoder
            text_memory_resized = self.resizer(text_memory)
        else:
            # The text is already encoded, use as is.
            text_attention_mask, text_memory_resized, tokenized = text
            inputs_embeds = tokenized["inputs_embeds"]
            assert (
                input_boxes is None or len(input_boxes) == 0
            ), "Can't replace boxes in text if it's already encoded"

        # Note that the input_embeds are returned in pytorch's convention (sequence first)
        return (
            text_attention_mask,
            text_memory_resized,
            inputs_embeds.transpose(0, 1),
        )
diff --git a/source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_sa1b_nps.yaml b/source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_sa1b_nps.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..52c87ee30545d24502160e7e8e3a565ce8d83bf2
--- /dev/null
+++ b/source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_sa1b_nps.yaml
# @package _global_
defaults:
  - /configs/eval_base.yaml
  - _self_

# ============================================================================
# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct)
# ============================================================================
paths:
  experiment_log_dir: ${paths.base_experiment_log_dir}/gold_sa1b_nps/
  coco_gt: ${paths.base_annotation_path}/gold_sa1b_merged_a_release_test.json
  coco_gts:
    - ${paths.base_annotation_path}/gold_sa1b_merged_a_release_test.json
    - ${paths.base_annotation_path}/gold_sa1b_merged_b_release_test.json
    - ${paths.base_annotation_path}/gold_sa1b_merged_c_release_test.json


# ============================================================================
# Trainer Configuration
# ============================================================================

trainer:
  data:
    val:
      _target_: sam3.train.data.torch_dataset.TorchDataset
      dataset:
        _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
        coco_json_loader:
          _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
          _partial_: true
        img_folder: ${paths.sa1b_img_path}
        ann_file: ${paths.coco_gt}
        transforms: ${scratch.base_val_transform}
        max_ann_per_img: 100000
        multiplier: 1
        training: false

      shuffle: False
      batch_size: ${scratch.val_batch_size}
      num_workers: ${scratch.num_val_workers}
      pin_memory: False
      drop_last: False
      collate_fn:
        _target_: sam3.train.data.collator.collate_fn_api
        _partial_: true
        repeats: ${scratch.hybrid_repeats}
        dict_key: gold_sa1b_nps

  meters:
    val:
      gold_sa1b_nps: # this key matches the "dict_key" in the dataloader's collate function
        cgf1:
          _target_: sam3.eval.coco_writer.PredictionDumper
          iou_type: "segm"
          dump_dir: ${launcher.experiment_log_dir}/dumps/gold_sa1b_nps
          merge_predictions: True
          postprocessor: ${scratch.mask_postprocessor_thresholded}
          gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
          maxdets: 1000000 # no limit
          pred_file_evaluators:
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gts}
              iou_type: "bbox"
            - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
              gt_path: ${paths.coco_gts}
iou_type: "segm" diff --git a/source_code/sam3/sam3/train/configs/odinw13/odinw_text_and_visual.yaml b/source_code/sam3/sam3/train/configs/odinw13/odinw_text_and_visual.yaml new file mode 100644 index 0000000000000000000000000000000000000000..51e93b457c471c861fc6a3a4fd65ced25119aca1 --- /dev/null +++ b/source_code/sam3/sam3/train/configs/odinw13/odinw_text_and_visual.yaml @@ -0,0 +1,255 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +# python sam3/train/train.py -c configs/odinw_text_only.yaml --use-cluster 1 --partition ${PARTITION} --account ${ACCOUNT} --qos ${QoS} + +paths: + odinw_data_root: + experiment_log_dir: + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + +supercategory_tuple: ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}} +# Validation transforms pipeline +val_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: False + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + - _target_: sam3.train.transforms.filter_query_transforms.TextQueryToVisual + keep_text_queries: true # Note: set this to false if you only want visual + probability: 1.0 # always + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + enable_segmentation: True 
+ # Box processing + use_presence_eval: True + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + # Image processing parameters + resolution: 1008 + # Normalization parameters + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + val_batch_size: 2 + num_val_workers: 0 + gather_pred_via_filesys: false + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + max_epochs: 1 + accelerator: cuda + seed_value: 123 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + prompts: ${odinw35_prompts.${supercategory_tuple.name}} + include_negatives: true + category_chunk_size: 20 # Note: Since we are doing AP +ve we need to include all categories! 
+ _partial_: true + img_folder: ${paths.odinw_data_root}/${supercategory_tuple.val.img_folder} + ann_file: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + transforms: ${val_transforms} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: 1 + dict_key: odinw35 + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: true # Set to false if training + enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation. + + meters: + val: + odinw35: + detection: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "bbox" + dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${supercategory_tuple.name} + merge_predictions: True + postprocessor: ${scratch.original_box_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 100 + pred_file_evaluators: + - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators + gt_path: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json} + tide: False + iou_type: "bbox" + positive_split: true + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/${supercategory_tuple.name} + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null + + job_array: + num_tasks: 13 + task_index: 0 + +# ============================================================================ +# ODinW13 Supercategories +# ============================================================================ + +all_odinw_supercategories: + - name: AerialMaritimeDrone_large + val: + img_folder: AerialMaritimeDrone/large/test/ + json: AerialMaritimeDrone/large/test/annotations_without_background.json + - name: Aquarium + val: + img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/ + json: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json + - name: CottontailRabbits + val: + img_folder: CottontailRabbits/test/ + json: CottontailRabbits/test/annotations_without_background.json + - name: EgoHands_generic + val: + img_folder: EgoHands/generic/test/ + json: EgoHands/generic/test/annotations_without_background.json + - name: NorthAmericaMushrooms + val: + img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/ + json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/annotations_without_background.json + - name: Packages + val: + img_folder: Packages/Raw/test/ + json: 
Packages/Raw/test/annotations_without_background.json + - name: PascalVOC + val: + img_folder: PascalVOC/valid/ + json: PascalVOC/valid/annotations_without_background.json + - name: Raccoon + val: + img_folder: Raccoon/Raccoon.v2-raw.coco/test/ + json: Raccoon/Raccoon.v2-raw.coco/test/annotations_without_background.json + - name: ShellfishOpenImages + val: + img_folder: ShellfishOpenImages/raw/test/ + json: ShellfishOpenImages/raw/test/annotations_without_background.json + - name: VehiclesOpenImages + val: + img_folder: VehiclesOpenImages/416x416/test/ + json: VehiclesOpenImages/416x416/test/annotations_without_background.json + - name: pistols + val: + img_folder: pistols/export/ + json: pistols/export/test_annotations_without_background.json + - name: pothole + val: + img_folder: pothole/test/ + json: pothole/test/annotations_without_background.json + - name: thermalDogsAndPeople + val: + img_folder: thermalDogsAndPeople/test/ + json: thermalDogsAndPeople/test/annotations_without_background.json + + +odinw35_prompts: + AerialMaritimeDrone_large: '[{"id": 1, "name": "boat", "supercategory": "movable-objects"}, + {"id": 2, "name": "car", "supercategory": "movable-objects"}, {"id": 3, "name": "dock", + "supercategory": "movable-objects"}, {"id": 4, "name": "jet ski", "supercategory": "movable-objects"}, + {"id": 5, "name": "boat lift", "supercategory": "movable-objects"}]' + Aquarium: null + CottontailRabbits: null + EgoHands_generic: null + NorthAmericaMushrooms: '[{''id'': 1, ''name'': + ''chicken of the woods'', ''supercategory'': ''mushroom''}, {''id'': 2, ''name'': ''chanterelle'', ''supercategory'': ''mushroom''}]' + Packages: null + PascalVOC: null + Raccoon: null + ShellfishOpenImages: null + VehiclesOpenImages: null + pistols: null + pothole: null + thermalDogsAndPeople: null diff --git a/source_code/sam3/sam3/train/configs/odinw13/odinw_text_only_train.yaml b/source_code/sam3/sam3/train/configs/odinw13/odinw_text_only_train.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..eb03cdf908df88e95c74742e68cb7f243db1ebe1 --- /dev/null +++ b/source_code/sam3/sam3/train/configs/odinw13/odinw_text_only_train.yaml @@ -0,0 +1,591 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +# python sam3/train/train.py -c configs/odinw_text_only.yaml --use-cluster 1 --partition ${PARTITION} --account ${ACCOUNT} --qos ${QoS} + +paths: + odinw_data_root: + experiment_log_dir: + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + + +odinw_train: + train_file: fewshot_train_shot10_seed300 + num_images: null + supercategory_tuple: ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}} + # Training transforms pipeline + train_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterCrowds + - _target_: sam3.train.transforms.point_sampling.RandomizeInputBbox + box_noise_std: 0.1 + box_noise_max: 20 + - _target_: sam3.train.transforms.segmentation.DecodeRle + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: + _target_: sam3.train.transforms.basic.get_random_resize_scales + size: ${scratch.resolution} + min_size: 480 + rounded: false + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: ${scratch.consistent_transform} + - _target_: sam3.train.transforms.basic_for_api.PadToSizeAPI + size: ${scratch.resolution} + consistent_transform: ${scratch.consistent_transform} + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: 
sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.train_norm_mean} + std: ${scratch.train_norm_std} + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets + - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries + query_filter: + _target_: sam3.train.transforms.filter_query_transforms.FilterFindQueriesWithTooManyOut + max_num_objects: ${scratch.max_ann_per_img} + + # Validation transforms pipeline + val_transforms: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} + max_size: + _target_: sam3.train.transforms.basic.get_random_resize_max_size + size: ${scratch.resolution} + square: true + consistent_transform: False + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # loss config (no mask loss) + loss: + _target_: sam3.train.loss.sam3_loss.Sam3LossWrapper + matcher: ${scratch.matcher} + o2m_weight: 2.0 + o2m_matcher: + _target_: sam3.train.matcher.BinaryOneToManyMatcher + alpha: 0.3 + threshold: 0.4 + topk: 4 + use_o2m_matcher_on_o2m_aux: ${scratch.use_o2m_matcher_on_o2m_aux} + loss_fns_find: + - _target_: sam3.train.loss.loss_fns.Boxes + weight_dict: + loss_bbox: 5.0 + loss_giou: 2.0 + - _target_: sam3.train.loss.loss_fns.IABCEMdetr + weak_loss: False + weight_dict: + loss_ce: ${scratch.loss_ce_weight} # Change + presence_loss: ${scratch.presence_weight} # Change + pos_weight: ${scratch.iabce_pos_weight} + alpha: ${scratch.iabce_alpha} + gamma: 2 + 
use_presence: True # Change + pos_focal: ${scratch.iabce_pos_focal} + pad_n_queries: ${scratch.num_queries} + pad_scale_pos: ${scratch.instance_query_loss_pad_scale_pos} + + loss_fn_semantic_seg: null + scale_by_find_batch_size: ${scratch.scale_by_find_batch_size} + + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + enable_segmentation: False + use_act_checkpoint_geo_encoder: True + input_geometry_encoder: + _target_: sam3.model.geometry_encoders.SequenceGeometryEncoder + pos_enc: ${scratch.pos_embed} + encode_boxes_as_points: False + points_direct_project: True + points_pool: True + points_pos_enc: True + boxes_direct_project: True + boxes_pool: True + boxes_pos_enc: True + d_model: ${scratch.d_model} + num_layers: 3 + use_act_ckpt: ${scratch.use_act_checkpoint_geo_encoder} + layer: + _target_: sam3.model.encoder.TransformerEncoderLayer + activation: "relu" + d_model: ${scratch.d_model} + dim_feedforward: 2048 + dropout: ${scratch.encoder_dropout} + pos_enc_at_attn: false + pre_norm: True + pos_enc_at_cross_attn_queries: false + pos_enc_at_cross_attn_keys: true + self_attention: + _target_: sam3.model.attention.MultiheadAttention + attn_type: Vanilla + num_heads: 8 + dropout: ${scratch.encoder_dropout} + embed_dim: ${scratch.d_model} + batch_first: False + cross_attention: + _target_: sam3.model.attention.MultiheadAttention + attn_type: Vanilla + num_heads: 8 + dropout: ${scratch.encoder_dropout} + embed_dim: ${scratch.d_model} + batch_first: False + add_cls: true + add_post_encode_proj: True + + boxRPB: "log" + dac: True + use_early_fusion: true + o2m_mask: false + num_feature_levels: 1 # > 1 not implemented + encoder_dropout: 0.1 + decoder_dropout: 0.1 + + tokenizer_ve: + _target_: sam3.model.tokenizer_ve.SimpleTokenizer + bpe_path: ${paths.bpe_path} + + + freeze_text_tower: False + 
freeze_image_tower: NoFreeze + vis_backbone_dp: 0.0 + # Activation checkpointing (Save memory) + use_act_checkpoint_vision_backbone: True + use_act_checkpoint_text_backbone: True + use_act_checkpoint_encoder: True + use_act_checkpoint_decoder: True + + loss: null + # Loss parameters + num_queries: 200 + presence_weight: 20.0 + loss_ce_weight: 20.0 + iabce_pos_weight: 5.0 + iabce_pos_focal: false + iabce_alpha: 0.25 + instance_query_loss_pad_scale_pos: 1.0 + use_o2m_matcher_on_o2m_aux: false + + # Model parameters + use_instance_query: true + d_model: 256 + pos_embed: + _target_: sam3.model.position_encoding.PositionEmbeddingSine + num_pos_feats: ${scratch.d_model} + normalize: true + scale: null + temperature: 10000 + + # Box processing + use_presence_eval: True + original_box_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessImage + max_dets_per_img: -1 # infinite detections + use_original_ids: true + use_original_sizes_box: true + use_presence: ${scratch.use_presence_eval} + + + # Matcher configuration + matcher: + _target_: sam3.train.matcher.BinaryHungarianMatcherV2 + focal: true + cost_class: 2.0 + cost_bbox: 5.0 + cost_giou: 2.0 + alpha: 0.25 + gamma: 2 + stable: False + scale_by_find_batch_size: True + + # Image processing parameters + resolution: 1008 + consistent_transform: False + max_ann_per_img: 200 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + # Training parameters + train_batch_size: 1 + val_batch_size: 1 + num_train_workers: 0 + num_val_workers: 0 + max_data_epochs: 40 + target_epoch_size: 1500 + hybrid_repeats: 1 + context_length: 2 + gather_pred_via_filesys: false + + # Learning rate and scheduler parameters + lr_scale: 0.1 + lr_transformer: ${times:8e-4,${scratch.lr_scale}} + lr_vision_backbone: ${times:2.5e-4,${scratch.lr_scale}} + lr_language_backbone: ${times:5e-5,${scratch.lr_scale}} + lrd_vision_backbone: 0.9 
+ wd: 0.1 + scheduler_timescale: 20 + scheduler_warmup: 20 + scheduler_cooldown: 20 + + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + # _target_: sam3.train.trainer.Trainer + # skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: train + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: ${odinw_train.loss} + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + limit_ids: ${odinw_train.num_images} + transforms: ${odinw_train.train_transforms} + load_segmentation: ${scratch.enable_segmentation} + max_ann_per_img: 500000 + multiplier: 1 + max_train_queries: 50000 + max_val_queries: 50000 + training: true + use_caching: False + img_folder: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.train.img_folder} + ann_file: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.train.json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + prompts: ${odinw35_prompts.${odinw_train.supercategory_tuple.name}} #${odinw_train.supercategory_tuple.name) + _partial_: true + shuffle: True + batch_size: ${scratch.train_batch_size} + num_workers: ${scratch.num_train_workers} + pin_memory: False + drop_last: True + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: all + with_seg_masks: ${scratch.enable_segmentation} + + 
val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + load_segmentation: ${scratch.enable_segmentation} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON + prompts: ${odinw35_prompts.${odinw_train.supercategory_tuple.name}} + include_negatives: true + category_chunk_size: 20 # Note: Since we are doing AP +ve we need to include all categories! + _partial_: true + img_folder: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.val.img_folder} + ann_file: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.val.json} + transforms: ${odinw_train.val_transforms} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: 1 + dict_key: odinw35 + with_seg_masks: ${scratch.enable_segmentation} + + model: + _target_: sam3.model_builder.build_sam3_image_model + bpe_path: ${paths.bpe_path} + device: cpus + eval_mode: false # Set to false if training + enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation. 
+ + meters: + val: + odinw35: + detection: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "bbox" + dump_dir: ${launcher.experiment_log_dir}/dumps/odinw/${odinw_train.supercategory_tuple.name} + merge_predictions: True + postprocessor: ${scratch.original_box_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 100 + pred_file_evaluators: + - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators + gt_path: + _target_: sam3.eval.coco_reindex.reindex_coco_to_temp + input_json_path: ${paths.odinw_data_root}/${odinw_train.supercategory_tuple.val.json} + tide: False + iou_type: "bbox" + positive_split: False + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + optimizer: + _target_: torch.optim.AdamW + + gradient_clip: + _target_: sam3.train.optim.optimizer.GradientClipper + max_norm: 0.1 + norm_type: 2 + + param_group_modifiers: + - _target_: sam3.train.optim.optimizer.layer_decay_param_modifier + _partial_: True + layer_decay_value: ${scratch.lrd_vision_backbone} + apply_to: 'backbone.vision_backbone.trunk' + overrides: + - pattern: '*pos_embed*' + value: 1.0 + + options: + lr: + - scheduler: # transformer and class_embed + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_transformer} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_vision_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.vision_backbone.*' + - scheduler: + _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler + base_lr: ${scratch.lr_language_backbone} + timescale: ${scratch.scheduler_timescale} + warmup_steps: ${scratch.scheduler_warmup} + 
cooldown_steps: ${scratch.scheduler_cooldown} + param_names: + - 'backbone.language_backbone.*' + + weight_decay: + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: ${scratch.wd} + - scheduler: + _target_: fvcore.common.param_scheduler.ConstantParamScheduler + value: 0.0 + param_names: + - '*bias*' + module_cls_names: ['torch.nn.LayerNorm'] + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/${odinw_train.supercategory_tuple.name} + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 1 + gpus_per_node: 2 + experiment_log_dir: null #${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null + + # task_index: 2 + # Uncomment for job array configuration + job_array: + num_tasks: 13 + task_index: 0 + + +# ============================================================================ +# ODinW13 Supercategories +# ============================================================================ + +all_odinw_supercategories: + - name: AerialMaritimeDrone_large + val: + img_folder: AerialMaritimeDrone/large/test/ + json: AerialMaritimeDrone/large/test/annotations_without_background.json + train: + img_folder: AerialMaritimeDrone/large/train/ + json: AerialMaritimeDrone/large/train/${odinw_train.train_file}.json + - name: Aquarium + val: + img_folder: Aquarium/Aquarium 
Combined.v2-raw-1024.coco/test/ + json: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json + train: + img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/train/ + json: Aquarium/Aquarium Combined.v2-raw-1024.coco/train/${odinw_train.train_file}.json + - name: CottontailRabbits + val: + img_folder: CottontailRabbits/test/ + json: CottontailRabbits/test/annotations_without_background.json + train: + img_folder: CottontailRabbits/train/ + json: CottontailRabbits/train/${odinw_train.train_file}.json + - name: EgoHands_generic + val: + img_folder: EgoHands/generic/test/ + json: EgoHands/generic/test/annotations_without_background.json + train: + img_folder: EgoHands/generic/train/ + json: EgoHands/generic/train/${odinw_train.train_file}.json + - name: NorthAmericaMushrooms + val: + img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/ + json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/annotations_without_background.json + train: + img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/train/ + json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/train/${odinw_train.train_file}.json + - name: Packages + val: + img_folder: Packages/Raw/test/ + json: Packages/Raw/test/annotations_without_background.json + train: + img_folder: Packages/Raw/train/ + json: Packages/Raw/train/${odinw_train.train_file}.json + - name: PascalVOC + val: + img_folder: PascalVOC/valid/ + json: PascalVOC/valid/annotations_without_background.json + train: + img_folder: PascalVOC/train/ + json: PascalVOC/train/${odinw_train.train_file}.json + - name: Raccoon + val: + img_folder: Raccoon/Raccoon.v2-raw.coco/test/ + json: Raccoon/Raccoon.v2-raw.coco/test/annotations_without_background.json + train: + img_folder: Raccoon/Raccoon.v2-raw.coco/train/ + json: Raccoon/Raccoon.v2-raw.coco/train/${odinw_train.train_file}.json + - name: ShellfishOpenImages + val: + img_folder: 
ShellfishOpenImages/raw/test/ + json: ShellfishOpenImages/raw/test/annotations_without_background.json + train: + img_folder: ShellfishOpenImages/raw/train/ + json: ShellfishOpenImages/raw/train/${odinw_train.train_file}.json + - name: VehiclesOpenImages + val: + img_folder: VehiclesOpenImages/416x416/test/ + json: VehiclesOpenImages/416x416/test/annotations_without_background.json + train: + img_folder: VehiclesOpenImages/416x416/train/ + json: VehiclesOpenImages/416x416/train/${odinw_train.train_file}.json + - name: pistols + val: + img_folder: pistols/export/ + json: pistols/export/test_annotations_without_background.json + train: + img_folder: pistols/export/ + json: pistols/export/${odinw_train.train_file}.json + - name: pothole + val: + img_folder: pothole/test/ + json: pothole/test/annotations_without_background.json + train: + img_folder: pothole/train/ + json: pothole/train/${odinw_train.train_file}.json + - name: thermalDogsAndPeople + val: + img_folder: thermalDogsAndPeople/test/ + json: thermalDogsAndPeople/test/annotations_without_background.json + train: + img_folder: thermalDogsAndPeople/train/ + json: thermalDogsAndPeople/train/${odinw_train.train_file}.json + + +odinw35_prompts: + AerialMaritimeDrone_large: '[{"id": 1, "name": "boat", "supercategory": "movable-objects"}, + {"id": 2, "name": "car", "supercategory": "movable-objects"}, {"id": 3, "name": "dock", + "supercategory": "movable-objects"}, {"id": 4, "name": "jet ski", "supercategory": "movable-objects"}, + {"id": 5, "name": "boat lift", "supercategory": "movable-objects"}]' + Aquarium: null + CottontailRabbits: null + EgoHands_generic: null + NorthAmericaMushrooms: '[{''id'': 1, ''name'': + ''chicken of the woods'', ''supercategory'': ''mushroom''}, {''id'': 2, ''name'': ''chanterelle'', ''supercategory'': ''mushroom''}]' + Packages: null + PascalVOC: null + Raccoon: null + ShellfishOpenImages: null + VehiclesOpenImages: null + pistols: null + pothole: null + thermalDogsAndPeople: null diff 
--git a/source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val.yaml b/source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a9bffda589381a9785eecf5cef4b3958a7840816 --- /dev/null +++ b/source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_smartglasses_val + experiment_log_dir: + ytvis_json: /saco_veval_smartglasses_val.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + 
val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: True + + meters: + val: + ytvis_val: + pred_file: # key + _target_: 
sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. + + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val_noheur.yaml b/source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val_noheur.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e1f64436d606cbd6fc82f103397e147f3e65345f --- /dev/null +++ b/source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_smartglasses_val_noheur.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_smartglasses_val + experiment_log_dir: + ytvis_json: 
/saco_veval_smartglasses_val.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: 
sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: ${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: False + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test_noheur.yaml b/source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test_noheur.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f8df6aeec0b574a767255b027aa68a8336c60e8e --- /dev/null +++ b/source_code/sam3/sam3/train/configs/saco_video_evals/saco_veval_yt1b_test_noheur.yaml @@ -0,0 +1,174 @@ +# @package _global_ +defaults: + - _self_ + +# ============================================================================ +# Paths Configuration (Chage this to your own paths) +# ============================================================================ +paths: + + dump_file_name: saco_veval_yt1b_test + experiment_log_dir: + ytvis_json: /saco_veval_yt1b_test.json + ytvis_dir : + bpe_path: # This should be under assets/bpe_simple_vocab_16e6.txt.gz + num_videos: null + +# ============================================================================ +# Different helper parameters and functions +# ============================================================================ +scratch: + vid_mask_postprocessor: + _target_: sam3.eval.postprocessors.PostProcessNullOp + + use_presence_eval: True + + 
video_transforms_val: + - _target_: sam3.train.transforms.basic_for_api.ComposeAPI + transforms: + - _target_: sam3.train.transforms.segmentation.DecodeRle + # resize the image to 1024x1024 resolution + - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI + sizes: ${scratch.resolution} # originally `resolution: 1024` + square: true + consistent_transform: true + - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI + - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI + mean: ${scratch.val_norm_mean} + std: ${scratch.val_norm_std} + + # Model parameters + d_model: 256 + + # Image processing parameters + resolution: 1008 + + # Normalization parameters + train_norm_mean: [0.5, 0.5, 0.5] + train_norm_std: [0.5, 0.5, 0.5] + val_norm_mean: [0.5, 0.5, 0.5] + val_norm_std: [0.5, 0.5, 0.5] + + val_batch_size: 1 + num_val_workers: 0 + max_data_epochs: 20 + hybrid_repeats: 1 + gather_pred_via_filesys: false + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + _target_: sam3.train.trainer.Trainer + skip_saving_ckpts: true + empty_gpu_mem_cache_after_eval: True + skip_first_val: True + max_epochs: ${scratch.max_data_epochs} + accelerator: cuda + seed_value: 123 + val_epoch_freq: 10 + mode: val + + distributed: + backend: nccl + find_unused_parameters: True + gradient_as_bucket_view: True + + loss: + all: + _target_: sam3.train.loss.sam3_loss.DummyLoss + default: + _target_: sam3.train.loss.sam3_loss.DummyLoss + + data: + train: null + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + dataset: + _target_: sam3.train.data.sam3_video_dataset.VideoGroundingDataset + limit_ids: ${paths.num_videos} + img_folder: ${paths.ytvis_dir} + ann_file: ${paths.ytvis_json} + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_VEVAL_API_FROM_JSON_NP + _partial_: true + + transforms: 
${scratch.video_transforms_val} + max_ann_per_img: 100000 # filtered in transforms + max_val_queries: 100000 + multiplier: 1 + load_segmentation: true + training: false + + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: True + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: ytvis_val + with_seg_masks: true + + + model: + _target_: sam3.model_builder.build_sam3_video_model + bpe_path: ${paths.bpe_path} + has_presence_token: True + geo_encoder_use_img_cross_attn: True + apply_temporal_disambiguation: False + + meters: + val: + ytvis_val: + pred_file: # key + _target_: sam3.eval.ytvis_eval.YTVISResultsWriter + dump_file: ${launcher.experiment_log_dir}/preds/${paths.dump_file_name}.json + postprocessor: ${scratch.vid_mask_postprocessor} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + + optim: + amp: + enabled: True + amp_dtype: bfloat16 + + + checkpoint: + save_dir: ${launcher.experiment_log_dir}/checkpoints + save_freq: 0 # 0 only last checkpoint is saved. 
+ + + logging: + tensorboard_writer: + _target_: sam3.train.utils.logger.make_tensorboard_logger + log_dir: ${launcher.experiment_log_dir}/tensorboard + flush_secs: 120 + should_log: True + wandb_writer: null + log_dir: ${launcher.experiment_log_dir}/logs/ + log_freq: 10 + +# ============================================================================ +# Launcher and Submitit Configuration +# ============================================================================ + +launcher: + num_nodes: 8 + gpus_per_node: 8 + experiment_log_dir: ${paths.experiment_log_dir} + multiprocessing_context: forkserver + +submitit: + account: null + partition: null + qos: null + timeout_hour: 72 + use_cluster: True + cpus_per_task: 10 + port_range: [10000, 65000] + constraint: null diff --git a/source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_sav.yaml b/source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_sav.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7ebbb0f2bbee9b06221a46d362b8ca719bed9b4b --- /dev/null +++ b/source_code/sam3/sam3/train/configs/silver_image_evals/sam3_silver_image_sav.yaml @@ -0,0 +1,64 @@ +# @package _global_ +defaults: + - /configs/eval_base.yaml + - _self_ + +# ============================================================================ +# Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct +# ============================================================================ +paths: + experiment_log_dir: ${paths.base_experiment_log_dir}/silver_sav/ + coco_gt: ${paths.base_annotation_path_silver}/silver_sav_merged_test.json + img_path: ${paths.silver_img_path}/sav/ + + + +# ============================================================================ +# Trainer Configuration +# ============================================================================ + +trainer: + data: + val: + _target_: sam3.train.data.torch_dataset.TorchDataset + 
dataset: + _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset + coco_json_loader: + _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP + _partial_: true + img_folder: ${paths.img_path} + ann_file: ${paths.coco_gt} + transforms: ${scratch.base_val_transform} + max_ann_per_img: 100000 + multiplier: 1 + training: false + + shuffle: False + batch_size: ${scratch.val_batch_size} + num_workers: ${scratch.num_val_workers} + pin_memory: False + drop_last: False + collate_fn: + _target_: sam3.train.data.collator.collate_fn_api + _partial_: true + repeats: ${scratch.hybrid_repeats} + dict_key: silver_sav + + meters: + val: + silver_sav: # this key matches the "dict_key" in the dataloader's collate function + cgf1: + _target_: sam3.eval.coco_writer.PredictionDumper + iou_type: "segm" + dump_dir: ${launcher.experiment_log_dir}/dumps/silver_sav + merge_predictions: True + postprocessor: ${scratch.mask_postprocessor_thresholded} + gather_pred_via_filesys: ${scratch.gather_pred_via_filesys} + maxdets: 1000000 # no limit + pred_file_evaluators: + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "bbox" + - _target_: sam3.eval.cgf1_eval.CGF1Evaluator + gt_path: ${paths.coco_gt} + iou_type: "segm" diff --git a/source_code/sam3/sam3/train/transforms/filter_query_transforms.py b/source_code/sam3/sam3/train/transforms/filter_query_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..2d6708f453d34a1ba05de8a461c9aee5d0636f45 --- /dev/null +++ b/source_code/sam3/sam3/train/transforms/filter_query_transforms.py @@ -0,0 +1,607 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# All Rights Reserved

import logging
import random

from collections import defaultdict
from typing import List, Optional

import torch

from sam3.train.data.sam3_image_dataset import Datapoint, FindQuery, Object


class FilterDataPointQueries:
    """
    Base class for policies that select find queries / objects to drop from a
    Datapoint. Subclasses fill the ``*_ids_to_filter`` sets in
    ``identify_queries_to_filter``; ``FlexibleFilterFindGetQueries`` then
    applies the selection and garbage-collects unreferenced objects/images.
    """

    # indices into datapoint.find_queries to drop
    find_ids_to_filter: Optional[set] = None
    # indices of get queries to drop (not populated by the filters below)
    get_ids_to_filter: Optional[set] = None
    # objects to drop, stored as pairs (img_id, obj_id)
    obj_ids_to_filter: Optional[set] = None

    def identify_queries_to_filter(self, datapoint: Datapoint) -> None:
        """
        Compute set of query ids to keep, for both find and get queries
        """
        raise NotImplementedError

    def _do_filter_query(self, query: FindQuery, query_id: int) -> bool:
        """Return True if the find query at index ``query_id`` should be dropped."""
        # identify_queries_to_filter() must have been called first
        assert self.find_ids_to_filter is not None

        return query_id in self.find_ids_to_filter


class FilterQueryWithText(FilterDataPointQueries):
    """
    Filter all find queries whose query text is in a specified list of excluded terms
    """

    def __init__(
        self, exclude_find_keys: List[str] = None, exclude_get_keys: List[str] = None
    ):
        # Note: mutable defaults are avoided by normalizing None to [] here
        self.find_filter_keys = exclude_find_keys if exclude_find_keys else []
        self.get_filter_keys = exclude_get_keys if exclude_get_keys else []

    def identify_queries_to_filter(self, datapoint):
        self.obj_ids_to_filter = set()
        self.find_ids_to_filter = {
            i
            for i, f_q in enumerate(datapoint.find_queries)
            if f_q.query_text in self.find_filter_keys
        }


class KeepMaxNumFindQueries(FilterDataPointQueries):
    """
    Subsample find queries so that at most ``max_num_find_queries`` remain.

    If ``retain_positive_queries`` is set, positive find queries (those with a
    non-empty ``object_ids_output``) are preferred over negative ones.
    """

    def __init__(
        self, max_num_find_queries: int, retain_positive_queries: bool = False
    ):
        self.max_num_find_queries = max_num_find_queries
        self.retain_positive_queries = retain_positive_queries

    def identify_queries_to_filter(self, datapoint: Datapoint) -> None:
        self.obj_ids_to_filter = set()
        num_find_queries = len(datapoint.find_queries)
        if num_find_queries <= self.max_num_find_queries:
            self.find_ids_to_filter = set()  # keep all find queries
            return

        if not self.retain_positive_queries:
            all_find_query_ids = list(range(num_find_queries))
            num_queries_to_filter = max(0, num_find_queries - self.max_num_find_queries)
            query_ids_to_filter = random.sample(
                all_find_query_ids, k=num_queries_to_filter
            )
        else:
            # keep up to max_num_find_queries positive find queries and fill
            # the remaining slots (if any) with negative find queries
            pos_find_ids, neg_find_ids = [], []
            for i, f_q in enumerate(datapoint.find_queries):
                # Negative finds return an empty list of object_ids_output
                if len(f_q.object_ids_output) == 0:
                    neg_find_ids.append(i)
                else:
                    pos_find_ids.append(i)

            if len(pos_find_ids) >= self.max_num_find_queries:
                # we have more positive find queries than `max_num_find_queries`,
                # so we subsample positive find queries and remove all negative find queries
                num_queries_to_filter = len(pos_find_ids) - self.max_num_find_queries
                query_ids_to_filter = random.sample(
                    pos_find_ids, k=num_queries_to_filter
                )
                query_ids_to_filter.extend(neg_find_ids)
            else:
                # we have fewer positive find queries than `max_num_find_queries`
                # so we need to fill the remaining with negative find queries
                num_queries_to_filter = num_find_queries - self.max_num_find_queries
                query_ids_to_filter = random.sample(
                    neg_find_ids, k=num_queries_to_filter
                )

        assert len(query_ids_to_filter) == num_find_queries - self.max_num_find_queries
        self.find_ids_to_filter = set(query_ids_to_filter)


class KeepMaxNumFindQueriesVideo(FilterDataPointQueries):
    """
    Per-frame variant of KeepMaxNumFindQueries for video mosaics: the find
    queries selected (by index within frame 0) are dropped at the same
    within-frame indices on every frame, so all frames keep aligned queries.

    NOTE(review): the final assertion assumes every frame carries the same
    number of find queries as frame 0 — TODO confirm this invariant upstream.
    """

    def __init__(
        self,
        video_mosaic_max_num_find_queries_per_frame: int,
        retain_positive_queries: bool = False,
    ):
        self.video_mosaic_max_num_find_queries_per_frame = (
            video_mosaic_max_num_find_queries_per_frame
        )
        self.retain_positive_queries = retain_positive_queries

    def identify_queries_to_filter(self, datapoint: Datapoint) -> None:
        self.obj_ids_to_filter = set()
        num_find_queries = len(datapoint.find_queries)

        # Group find-query indices by the frame (image_id) they belong to,
        # tracking whether every frame stays within the per-frame budget.
        findQueries_to_imageIds = defaultdict(list)
        within_per_frame_limit = True
        for i, f_q in enumerate(datapoint.find_queries):
            findQueries_to_imageIds[f_q.image_id].append(i)
            if (
                len(findQueries_to_imageIds[f_q.image_id])
                > self.video_mosaic_max_num_find_queries_per_frame
            ):
                within_per_frame_limit = False

        if within_per_frame_limit:
            self.find_ids_to_filter = set()
            return

        num_frames = len(findQueries_to_imageIds)
        findQueries_0 = findQueries_to_imageIds[0]
        num_find_queries_0 = len(findQueries_0)
        max_num_find_queries_per_frame = (
            self.video_mosaic_max_num_find_queries_per_frame
        )
        if not self.retain_positive_queries:
            find_query_ids_0 = list(range(num_find_queries_0))
            num_queries_to_filter = max(
                0, num_find_queries_0 - max_num_find_queries_per_frame
            )
            query_ids_to_filter_0 = random.sample(
                find_query_ids_0, k=num_queries_to_filter
            )
        else:
            # keep up to max_num_find_queries positive find queries and fill
            # the remaining slots (if any) with negative find queries
            pos_find_ids_0, neg_find_ids_0 = [], []
            for i, f_q_id in enumerate(findQueries_0):
                f_q = datapoint.find_queries[f_q_id]
                # Negative finds return an empty list of object_ids_output
                if len(f_q.object_ids_output) == 0:
                    neg_find_ids_0.append(i)
                else:
                    pos_find_ids_0.append(i)

            if len(pos_find_ids_0) >= max_num_find_queries_per_frame:
                # we have more positive find queries than `max_num_find_queries`,
                # so we subsample positive find queries and remove all negative find queries
                num_queries_to_filter = (
                    len(pos_find_ids_0) - max_num_find_queries_per_frame
                )
                query_ids_to_filter_0 = random.sample(
                    pos_find_ids_0, k=num_queries_to_filter
                )
                query_ids_to_filter_0.extend(neg_find_ids_0)
            else:
                # we have fewer positive find queries than `max_num_find_queries`
                # so we need to fill the remaining with negative find queries
                num_queries_to_filter = (
                    num_find_queries_0 - max_num_find_queries_per_frame
                )
                query_ids_to_filter_0 = random.sample(
                    neg_find_ids_0, k=num_queries_to_filter
                )

        # get based on frame 0 all find queries from all the frames with the same indices as in frame 0
        query_ids_to_filter = []
        for i in range(num_frames):
            findQueries_i = findQueries_to_imageIds[i]
            query_ids_to_filter.extend(
                [findQueries_i[j] for j in query_ids_to_filter_0]
            )

        assert (
            len(query_ids_to_filter)
            == num_find_queries
            - self.video_mosaic_max_num_find_queries_per_frame * num_frames
        )
        self.find_ids_to_filter = set(query_ids_to_filter)


class KeepSemanticFindQueriesOnly(FilterDataPointQueries):
    """Drop geometric find queries, keeping only semantic (text) ones."""

    def identify_queries_to_filter(self, datapoint: Datapoint) -> None:
        self.obj_ids_to_filter = set()
        self.find_ids_to_filter = {
            i for i, q in enumerate(datapoint.find_queries) if q.input_bbox is not None
        }  # filter (remove) geometric find queries (whose input_bbox is not None)

        # Keep all get queries which don't depend on filtered finds


class KeepUnaryFindQueriesOnly(FilterDataPointQueries):
    """No-op filter: keeps every find query and every object."""

    def identify_queries_to_filter(self, datapoint: Datapoint) -> None:
        self.obj_ids_to_filter = set()
        self.find_ids_to_filter = set()

        # Keep all get queries which don't depend on filtered finds


class FilterZeroBoxQueries(FilterDataPointQueries):
    """
    Filters all find queries which predict a box with zero area
    """

    @staticmethod
    def _is_zero_area_object(obj: Object) -> bool:
        # Check if height or width of bounding box is zero
        bbox = obj.bbox  # Assume in XYXY format
        height = bbox[..., 3].item() - bbox[..., 1].item()
        width = bbox[..., 2].item() - bbox[..., 0].item()

        return height == 0 or width == 0

    def identify_queries_to_filter(self, datapoint):
        self.obj_ids_to_filter = set()

        # Find objects with zero area
        # Assume only one image per datapoint
        image_objects = datapoint.images[0].objects
        exclude_objects = {
            obj_id
            for obj_id, obj in enumerate(image_objects)
            if self._is_zero_area_object(obj)
        }

        # If a query predicts an object with zero area, drop the whole find query
        self.find_ids_to_filter = {
            i
            for i, f_q in enumerate(datapoint.find_queries)
            if exclude_objects.intersection(f_q.object_ids_output)
        }


class FilterFindQueriesWithTooManyOut(FilterDataPointQueries):
    """
    Filters all find queries which have more than a specified number of objects in the output
    """

    def __init__(self, max_num_objects: int):
        self.max_num_objects = max_num_objects

    def identify_queries_to_filter(self, datapoint):
        self.obj_ids_to_filter = set()

        # If a query predicts more than max_num_objects, drop the whole find query
        self.find_ids_to_filter = {
            i
            for i, f_q in enumerate(datapoint.find_queries)
            if len(f_q.object_ids_output) > self.max_num_objects
        }


class FilterEmptyTargets(FilterDataPointQueries):
    """
    Filters all targets which have zero area
    """

    def identify_queries_to_filter(self, datapoint):
        self.obj_ids_to_filter = set()

        for img_id in range(len(datapoint.images)):
            for obj_id, obj in enumerate(datapoint.images[img_id].objects):
                # treat near-zero float areas as empty
                if obj.area < 1e-6:
                    self.obj_ids_to_filter.add((img_id, obj_id))
        self.find_ids_to_filter = set()


class FilterNonExhaustiveFindQueries(FilterDataPointQueries):
    """
    Filters all find queries which are non-exhaustive
    """

    def __init__(self, exhaustivity_type: str):
        """
        Args:
            exhaustivity_type: Can be "pixel" or "instance":
                -pixel: filter queries where the union of all segments covers every pixel belonging to target class
                -instance: filter queries where there are non-separable or non annotated instances
            Note that instance exhaustivity implies pixel exhaustivity
        """
        assert exhaustivity_type in ["pixel", "instance"]
        self.exhaustivity_type = exhaustivity_type

    def identify_queries_to_filter(self, datapoint):
        self.obj_ids_to_filter = set()

        # Drop any find query whose annotation is not exhaustive at the
        # requested level ("instance" implies "pixel" exhaustivity).
        del_find_ids = []
        for i, f_q in enumerate(datapoint.find_queries):
            if self.exhaustivity_type == "instance":
                if not f_q.is_exhaustive:
                    del_find_ids.append(i)
            elif self.exhaustivity_type == "pixel":
                # is_pixel_exhaustive may be unset (None); only filter on an explicit False
                if f_q.is_pixel_exhaustive is not None and not f_q.is_pixel_exhaustive:
                    del_find_ids.append(i)
            else:
                raise RuntimeError(
                    f"Unknown exhaustivity type {self.exhaustivity_type}"
                )

        self.find_ids_to_filter = set(del_find_ids)


class FilterInvalidGeometricQueries(FilterDataPointQueries):
    """
    Filters geometric queries whose output got deleted (eg due to cropping)
    """

    def identify_queries_to_filter(self, datapoint):
        self.obj_ids_to_filter = set()

        # A geometric query with no remaining output objects is invalid
        del_find_ids = []
        for i, f_q in enumerate(datapoint.find_queries):
            if f_q.input_bbox is not None and f_q.query_text == "geometric":
                if len(f_q.object_ids_output) == 0:
                    del_find_ids.append(i)
        self.find_ids_to_filter = set(del_find_ids)


class FlexibleFilterFindGetQueries:
    """
    Apply a FilterDataPointQueries policy to a datapoint: drop the selected
    find queries, then garbage-collect objects and images that are no longer
    referenced, remapping ids so they stay contiguous.
    """

    def __init__(
        self, query_filter: FilterDataPointQueries, enabled: bool = True
    ) -> None:
        self.query_filter = query_filter
        self.enabled = enabled

    def __call__(self, datapoint, **kwargs):
        if not self.enabled:
            return datapoint

        # Identify all queries to filter
        self.query_filter.identify_queries_to_filter(datapoint=datapoint)

        # Mark dropped find queries with None, then compact the survivors
        for i, f_q in enumerate(datapoint.find_queries):
            if self.query_filter._do_filter_query(f_q, i):
                datapoint.find_queries[i] = None

        new_find_queries = [f_q for f_q in datapoint.find_queries if f_q is not None]

        # The surviving queries must still include a first-stage query
        # (query_processing_order == 0); an empty list trivially passes here
        # and is rejected with a clearer error below.
        start_with_zero_check = False
        for n_f_q in new_find_queries:
            if n_f_q.query_processing_order == 0:
                start_with_zero_check = True
                break

        if len(new_find_queries) == 0:
            start_with_zero_check = True

        assert (
            start_with_zero_check
        ), "Invalid Find queries, they need to start at query_processing_order = 0"

        datapoint.find_queries = new_find_queries

        if len(datapoint.find_queries) == 0:
            print("Warning: No find queries left in datapoint, this is not allowed")
            print("Filtering function:", self.query_filter)
            print("Datapoint:", datapoint)
            raise ValueError

        # The deletion may have removed intermediate steps, so we need to remap to make them contiguous again
        all_stages = sorted(
            list(set(q.query_processing_order for q in datapoint.find_queries))
        )
        stage_map = {qpo: i for i, qpo in enumerate(all_stages)}
        for i in range(len(datapoint.find_queries)):
            qpo = datapoint.find_queries[i].query_processing_order
            datapoint.find_queries[i].query_processing_order = stage_map[qpo]

        # Final step, clear up objects that are not used anymore
        for img_id in range(len(datapoint.images)):
            all_objects_ids = set(
                i
                for find in datapoint.find_queries
                for i in find.object_ids_output
                if find.image_id == img_id
            )
            unused_ids = (
                set(range(len(datapoint.images[img_id].objects))) - all_objects_ids
            )
            # also drop objects the filter explicitly marked for removal
            for tgt_img_id, tgt_obj_id in self.query_filter.obj_ids_to_filter:
                if tgt_img_id == img_id:
                    unused_ids.add(tgt_obj_id)

            if len(unused_ids) > 0:
                old_objects = datapoint.images[img_id].objects
                object_old_to_new_map = {}
                new_objects = []
                for i, o in enumerate(old_objects):
                    if i not in unused_ids:
                        object_old_to_new_map[i] = len(new_objects)
                        new_objects.append(o)

                datapoint.images[img_id].objects = new_objects

                # Remap the outputs of the find queries to the compacted ids
                for find in datapoint.find_queries:
                    if find.image_id == img_id:
                        old_object_ids_output = find.object_ids_output
                        find.object_ids_output = []
                        for old_obj_id in old_object_ids_output:
                            if old_obj_id not in unused_ids:
                                new_obj_id = object_old_to_new_map[old_obj_id]
                                find.object_ids_output.append(new_obj_id)

        # finally remove unused images
        all_imgs_to_keep = set()
        for f_q in datapoint.find_queries:
            all_imgs_to_keep.add(f_q.image_id)

        old_img_id_to_new_img_id = {}
        new_images = []
        for img_id, img in enumerate(datapoint.images):
            if img_id in all_imgs_to_keep:
                old_img_id_to_new_img_id[img_id] = len(new_images)
                new_images.append(img)
        datapoint.images = new_images

        for f_q in datapoint.find_queries:
            f_q.image_id = old_img_id_to_new_img_id[f_q.image_id]

        return datapoint


class AddPrefixSuffixToFindText:
    """
    Add prefix or suffix strings to find query text on the fly.

    If `condition_on_text` is True, the prefix or suffix strings are only added
    to those find query text in `condition_text_list` (case-insensitive).
    """

    def __init__(
        self,
        prefix: Optional[str] = None,
        suffix: Optional[str] = None,
        condition_on_text: bool = False,
        condition_text_list: Optional[List[str]] = None,
        enabled: bool = True,
    ) -> None:
        self.prefix = prefix
        self.suffix = suffix
        self.condition_on_text = condition_on_text
        if self.condition_on_text:
            assert condition_text_list is not None
            # normalize once so matching in __call__ is case/whitespace-insensitive
            self.condition_text_set = {s.lower().strip() for s in condition_text_list}
        self.enabled = enabled
        if self.enabled:
            logging.info(
                f"AddPrefixSuffixToFindText: prefix={prefix}, suffix={suffix}, "
                f"condition_on_text={condition_on_text}, condition_text_list={condition_text_list}"
            )

    def __call__(self, datapoint, **kwargs):
        if not self.enabled:
            return datapoint

        for find in datapoint.find_queries:
            if find.query_text == "geometric":
                # skip geometric find queries
                continue
            if (
                self.condition_on_text
                and find.query_text.lower().strip() not in self.condition_text_set
            ):
                # if condition_on_text is True, skip those queries not in condition_text_set
                continue

            # add prefix and/or suffix strings to the find query text
            if self.prefix is not None:
                find.query_text = self.prefix + find.query_text
            if self.suffix is not None:
                find.query_text = find.query_text + self.suffix

        return datapoint


class FilterCrowds(FilterDataPointQueries):
    """Mark all crowd-annotated objects for removal; keep every find query."""

    def identify_queries_to_filter(self, datapoint: Datapoint) -> None:
        """
        Compute set of query ids to keep, for both find and get queries
        """
        self.obj_ids_to_filter = set()
        self.find_ids_to_filter = set()
        for img_id, img in enumerate(datapoint.images):
            for obj_id, obj in enumerate(img.objects):
                if obj.is_crowd:
                    self.obj_ids_to_filter.add((img_id, obj_id))


class TextQueryToVisual:
    """
    Transform a text query to a visual query (with some proba), using any of the output targets as the prompt
    """

    def __init__(self, probability, keep_text_queries=False) -> None:
        self.probability = probability
        assert 0 <= probability <= 1
        self.keep_text_queries = keep_text_queries

    def __call__(self, datapoint: Datapoint, **kwargs):
        for find in datapoint.find_queries:
            if find.input_bbox is not None or find.input_points is not None:
                # skip geometric find queries
                continue

            if len(find.object_ids_output) == 0:
                # Can't create a visual query, skip
                continue

            if find.query_processing_order > 0:
                # Second stage query, can't use
                continue

            if random.random() > self.probability:
                continue

            # promote one ground-truth object's box to be the visual prompt
            selected_vq_id = random.choice(find.object_ids_output)
            img_id = find.image_id

            find.input_bbox = datapoint.images[img_id].objects[selected_vq_id].bbox
            find.input_bbox_label = torch.ones(1, dtype=torch.bool)
            if not self.keep_text_queries:
                find.query_text = "visual"

        return datapoint


class RemoveInputBoxes:
    """
    Remove input boxes from find queries
    """

    def __call__(self, datapoint: Datapoint, **kwargs):
        for find in datapoint.find_queries:
            if find.input_bbox is None:
                continue

            if find.query_text == "geometric":
                # a geometric query without its box is likely unusable downstream
                print("Warning: removing input box from geometric find query")

            find.input_bbox = None
        return datapoint


class OverwriteTextQuery:
    """
    With some probability, overwrite the text query with a custom text
    """

    def __init__(self, target_text, probability=1.0) -> None:
        self.probability = probability
        self.target_text = target_text
        assert 0 <= probability <= 1

    def __call__(self, datapoint: Datapoint, **kwargs):
        for find in datapoint.find_queries:
            if random.random() > self.probability:
                continue

            find.query_text = self.target_text

        return datapoint
b/source_code/sam3/sam3/visualization_utils.py @@ -0,0 +1,941 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved +import json +import os +import subprocess +from pathlib import Path + +import cv2 +import matplotlib.patches as patches +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import pycocotools.mask as mask_utils +import torch +from matplotlib.colors import to_rgb +from PIL import Image +from skimage.color import lab2rgb, rgb2lab +from sklearn.cluster import KMeans +from torchvision.ops import masks_to_boxes +from tqdm import tqdm + + +def generate_colors(n_colors=256, n_samples=5000): + # Step 1: Random RGB samples + np.random.seed(42) + rgb = np.random.rand(n_samples, 3) + # Step 2: Convert to LAB for perceptual uniformity + # print(f"Converting {n_samples} RGB samples to LAB color space...") + lab = rgb2lab(rgb.reshape(1, -1, 3)).reshape(-1, 3) + # print("Conversion to LAB complete.") + # Step 3: k-means clustering in LAB + kmeans = KMeans(n_clusters=n_colors, n_init=10) + # print(f"Fitting KMeans with {n_colors} clusters on {n_samples} samples...") + kmeans.fit(lab) + # print("KMeans fitting complete.") + centers_lab = kmeans.cluster_centers_ + # Step 4: Convert LAB back to RGB + colors_rgb = lab2rgb(centers_lab.reshape(1, -1, 3)).reshape(-1, 3) + colors_rgb = np.clip(colors_rgb, 0, 1) + return colors_rgb + + +COLORS = generate_colors(n_colors=128, n_samples=5000) + + +def show_img_tensor(img_batch, vis_img_idx=0): + MEAN_IMG = np.array([0.5, 0.5, 0.5]) + STD_IMG = np.array([0.5, 0.5, 0.5]) + im_tensor = img_batch[vis_img_idx].detach().cpu() + assert im_tensor.dim() == 3 + im_tensor = im_tensor.numpy().transpose((1, 2, 0)) + im_tensor = (im_tensor * STD_IMG) + MEAN_IMG + im_tensor = np.clip(im_tensor, 0, 1) + plt.imshow(im_tensor) + + +def draw_box_on_image(image, box, color=(0, 255, 0)): + """ + Draws a rectangle on a given PIL image using the provided box coordinates in xywh format. 
+ :param image: PIL.Image - The image on which to draw the rectangle. + :param box: tuple - A tuple (x, y, w, h) representing the top-left corner, width, and height of the rectangle. + :param color: tuple - A tuple (R, G, B) representing the color of the rectangle. Default is red. + :return: PIL.Image - The image with the rectangle drawn on it. + """ + # Ensure the image is in RGB mode + image = image.convert("RGB") + # Unpack the box coordinates + x, y, w, h = box + x, y, w, h = int(x), int(y), int(w), int(h) + # Get the pixel data + pixels = image.load() + # Draw the top and bottom edges + for i in range(x, x + w): + pixels[i, y] = color + pixels[i, y + h - 1] = color + pixels[i, y + 1] = color + pixels[i, y + h] = color + pixels[i, y - 1] = color + pixels[i, y + h - 2] = color + # Draw the left and right edges + for j in range(y, y + h): + pixels[x, j] = color + pixels[x + 1, j] = color + pixels[x - 1, j] = color + pixels[x + w - 1, j] = color + pixels[x + w, j] = color + pixels[x + w - 2, j] = color + return image + + +def plot_bbox( + img_height, + img_width, + box, + box_format="XYXY", + relative_coords=True, + color="r", + linestyle="solid", + text=None, + ax=None, +): + if box_format == "XYXY": + x, y, x2, y2 = box + w = x2 - x + h = y2 - y + elif box_format == "XYWH": + x, y, w, h = box + elif box_format == "CxCyWH": + cx, cy, w, h = box + x = cx - w / 2 + y = cy - h / 2 + else: + raise RuntimeError(f"Invalid box_format {box_format}") + + if relative_coords: + x *= img_width + w *= img_width + y *= img_height + h *= img_height + + if ax is None: + ax = plt.gca() + rect = patches.Rectangle( + (x, y), + w, + h, + linewidth=1.5, + edgecolor=color, + facecolor="none", + linestyle=linestyle, + ) + ax.add_patch(rect) + if text is not None: + facecolor = "w" + ax.text( + x, + y - 5, + text, + color=color, + weight="bold", + fontsize=8, + bbox={"facecolor": facecolor, "alpha": 0.75, "pad": 2}, + ) + + +def plot_mask(mask, color="r", ax=None): + im_h, im_w = 
mask.shape + mask_img = np.zeros((im_h, im_w, 4), dtype=np.float32) + mask_img[..., :3] = to_rgb(color) + mask_img[..., 3] = mask * 0.5 + # Use the provided ax or the current axis + if ax is None: + ax = plt.gca() + ax.imshow(mask_img) + + +def normalize_bbox(bbox_xywh, img_w, img_h): + # Assumes bbox_xywh is in XYWH format + if isinstance(bbox_xywh, list): + assert ( + len(bbox_xywh) == 4 + ), "bbox_xywh list must have 4 elements. Batching not support except for torch tensors." + normalized_bbox = bbox_xywh.copy() + normalized_bbox[0] /= img_w + normalized_bbox[1] /= img_h + normalized_bbox[2] /= img_w + normalized_bbox[3] /= img_h + else: + assert isinstance( + bbox_xywh, torch.Tensor + ), "Only torch tensors are supported for batching." + normalized_bbox = bbox_xywh.clone() + assert ( + normalized_bbox.size(-1) == 4 + ), "bbox_xywh tensor must have last dimension of size 4." + normalized_bbox[..., 0] /= img_w + normalized_bbox[..., 1] /= img_h + normalized_bbox[..., 2] /= img_w + normalized_bbox[..., 3] /= img_h + return normalized_bbox + + +def visualize_frame_output(frame_idx, video_frames, outputs, figsize=(12, 8)): + plt.figure(figsize=figsize) + plt.title(f"frame {frame_idx}") + img = load_frame(video_frames[frame_idx]) + img_H, img_W, _ = img.shape + plt.imshow(img) + for i in range(len(outputs["out_probs"])): + box_xywh = outputs["out_boxes_xywh"][i] + prob = outputs["out_probs"][i] + obj_id = outputs["out_obj_ids"][i] + binary_mask = outputs["out_binary_masks"][i] + color = COLORS[obj_id % len(COLORS)] + plot_bbox( + img_H, + img_W, + box_xywh, + text=f"(id={obj_id}, {prob=:.2f})", + box_format="XYWH", + color=color, + ) + plot_mask(binary_mask, color=color) + + +def visualize_formatted_frame_output( + frame_idx, + video_frames, + outputs_list, + titles=None, + points_list=None, + points_labels_list=None, + figsize=(12, 8), + title_suffix="", + prompt_info=None, +): + """Visualize up to three sets of segmentation masks on a video frame. 
+
+    Args:
+        frame_idx: Frame index to visualize
+        video_frames: List of video frame data (ndarray, PIL image, or image file path)
+        outputs_list: List of {frame_idx: {obj_id: mask_tensor}} or single dict {obj_id: mask_tensor}
+        titles: List of titles for each set of outputs_list
+        points_list: Optional list of point coordinates
+        points_labels_list: Optional list of point labels
+        figsize: Figure size tuple
+        title_suffix: Additional title suffix
+        prompt_info: Dictionary with prompt information (boxes, points, etc.)
+    """
+    # Handle single output dict case
+    if isinstance(outputs_list, dict) and frame_idx in outputs_list:
+        # This is a single outputs dict with frame indices as keys
+        outputs_list = [outputs_list]
+    elif isinstance(outputs_list, dict) and not any(
+        isinstance(k, int) for k in outputs_list.keys()
+    ):
+        # This is a single frame's outputs {obj_id: mask}
+        single_frame_outputs = {frame_idx: outputs_list}
+        outputs_list = [single_frame_outputs]
+
+    num_outputs = len(outputs_list)
+    if titles is None:
+        titles = [f"Set {i+1}" for i in range(num_outputs)]
+    assert (
+        len(titles) == num_outputs
+    ), "length of `titles` should match that of `outputs_list` if not None."
+ + _, axes = plt.subplots(1, num_outputs, figsize=figsize) + if num_outputs == 1: + axes = [axes] # Make it iterable + + img = load_frame(video_frames[frame_idx]) + img_H, img_W, _ = img.shape + + for idx in range(num_outputs): + ax, outputs_set, ax_title = axes[idx], outputs_list[idx], titles[idx] + ax.set_title(f"Frame {frame_idx} - {ax_title}{title_suffix}") + ax.imshow(img) + + if frame_idx in outputs_set: + _outputs = outputs_set[frame_idx] + else: + print(f"Warning: Frame {frame_idx} not found in outputs_set") + continue + + if prompt_info and frame_idx == 0: # Show prompts on first frame + if "boxes" in prompt_info: + for box in prompt_info["boxes"]: + # box is in [x, y, w, h] normalized format + x, y, w, h = box + plot_bbox( + img_H, + img_W, + [x, y, x + w, y + h], # Convert to XYXY + box_format="XYXY", + relative_coords=True, + color="yellow", + linestyle="dashed", + text="PROMPT BOX", + ax=ax, + ) + + if "points" in prompt_info and "point_labels" in prompt_info: + points = np.array(prompt_info["points"]) + labels = np.array(prompt_info["point_labels"]) + # Convert normalized to pixel coordinates + points_pixel = points * np.array([img_W, img_H]) + + # Draw positive points (green stars) + pos_points = points_pixel[labels == 1] + if len(pos_points) > 0: + ax.scatter( + pos_points[:, 0], + pos_points[:, 1], + color="lime", + marker="*", + s=200, + edgecolor="white", + linewidth=2, + label="Positive Points", + zorder=10, + ) + + # Draw negative points (red stars) + neg_points = points_pixel[labels == 0] + if len(neg_points) > 0: + ax.scatter( + neg_points[:, 0], + neg_points[:, 1], + color="red", + marker="*", + s=200, + edgecolor="white", + linewidth=2, + label="Negative Points", + zorder=10, + ) + + objects_drawn = 0 + for obj_id, binary_mask in _outputs.items(): + mask_sum = ( + binary_mask.sum() + if hasattr(binary_mask, "sum") + else np.sum(binary_mask) + ) + + if mask_sum > 0: # Only draw if mask has content + # Convert to torch tensor if it's not 
already + if not isinstance(binary_mask, torch.Tensor): + binary_mask = torch.tensor(binary_mask) + + # Find bounding box from mask + if binary_mask.any(): + box_xyxy = masks_to_boxes(binary_mask.unsqueeze(0)).squeeze() + box_xyxy = normalize_bbox(box_xyxy, img_W, img_H) + else: + # Fallback: create a small box at center + box_xyxy = [0.45, 0.45, 0.55, 0.55] + + color = COLORS[obj_id % len(COLORS)] + + plot_bbox( + img_H, + img_W, + box_xyxy, + text=f"(id={obj_id})", + box_format="XYXY", + color=color, + ax=ax, + ) + + # Convert back to numpy for plotting + mask_np = ( + binary_mask.numpy() + if isinstance(binary_mask, torch.Tensor) + else binary_mask + ) + plot_mask(mask_np, color=color, ax=ax) + objects_drawn += 1 + + if objects_drawn == 0: + ax.text( + 0.5, + 0.5, + "No objects detected", + transform=ax.transAxes, + fontsize=16, + ha="center", + va="center", + color="red", + weight="bold", + ) + + # Draw additional points if provided + if points_list is not None and points_list[idx] is not None: + show_points( + points_list[idx], points_labels_list[idx], ax=ax, marker_size=200 + ) + + ax.axis("off") + + plt.tight_layout() + plt.show() + + +def render_masklet_frame(img, outputs, frame_idx=None, alpha=0.5): + """ + Overlays masklets and bounding boxes on a single image frame. 
+ Args: + img: np.ndarray, shape (H, W, 3), uint8 or float32 in [0,255] or [0,1] + outputs: dict with keys: out_boxes_xywh, out_probs, out_obj_ids, out_binary_masks + frame_idx: int or None, for overlaying frame index text + alpha: float, mask overlay alpha + Returns: + overlay: np.ndarray, shape (H, W, 3), uint8 + """ + if img.dtype == np.float32 or img.max() <= 1.0: + img = (img * 255).astype(np.uint8) + img = img[..., :3] # drop alpha if present + height, width = img.shape[:2] + overlay = img.copy() + + for i in range(len(outputs["out_probs"])): + obj_id = outputs["out_obj_ids"][i] + color = COLORS[obj_id % len(COLORS)] + color255 = (color * 255).astype(np.uint8) + mask = outputs["out_binary_masks"][i] + if mask.shape != img.shape[:2]: + mask = cv2.resize( + mask.astype(np.float32), + (img.shape[1], img.shape[0]), + interpolation=cv2.INTER_NEAREST, + ) + mask_bool = mask > 0.5 + for c in range(3): + overlay[..., c][mask_bool] = ( + alpha * color255[c] + (1 - alpha) * overlay[..., c][mask_bool] + ).astype(np.uint8) + + # Draw bounding boxes and text + for i in range(len(outputs["out_probs"])): + box_xywh = outputs["out_boxes_xywh"][i] + obj_id = outputs["out_obj_ids"][i] + prob = outputs["out_probs"][i] + color = COLORS[obj_id % len(COLORS)] + color255 = tuple(int(x * 255) for x in color) + x, y, w, h = box_xywh + x1 = int(x * width) + y1 = int(y * height) + x2 = int((x + w) * width) + y2 = int((y + h) * height) + cv2.rectangle(overlay, (x1, y1), (x2, y2), color255, 2) + if prob is not None: + label = f"id={obj_id}, p={prob:.2f}" + else: + label = f"id={obj_id}" + cv2.putText( + overlay, + label, + (x1, max(y1 - 10, 0)), + cv2.FONT_HERSHEY_SIMPLEX, + 0.5, + color255, + 1, + cv2.LINE_AA, + ) + + # Overlay frame index at the top-left corner + if frame_idx is not None: + cv2.putText( + overlay, + f"Frame {frame_idx}", + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 1.0, + (255, 255, 255), + 2, + cv2.LINE_AA, + ) + + return overlay + + +def save_masklet_video(video_frames, 
outputs, out_path, alpha=0.5, fps=10): + # Each outputs dict has keys: "out_boxes_xywh", "out_probs", "out_obj_ids", "out_binary_masks" + # video_frames: list of video frame data, same length as outputs_list + + # Read first frame to get size + first_img = load_frame(video_frames[0]) + height, width = first_img.shape[:2] + if first_img.dtype == np.float32 or first_img.max() <= 1.0: + first_img = (first_img * 255).astype(np.uint8) + # Use 'mp4v' for best compatibility with VSCode playback (.mp4 files) + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + writer = cv2.VideoWriter("temp.mp4", fourcc, fps, (width, height)) + + outputs_list = [ + (video_frames[frame_idx], frame_idx, outputs[frame_idx]) + for frame_idx in sorted(outputs.keys()) + ] + + for frame, frame_idx, frame_outputs in tqdm(outputs_list): + img = load_frame(frame) + overlay = render_masklet_frame( + img, frame_outputs, frame_idx=frame_idx, alpha=alpha + ) + writer.write(cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR)) + + writer.release() + + # Re-encode the video for VSCode compatibility using ffmpeg + subprocess.run(["ffmpeg", "-y", "-i", "temp.mp4", out_path]) + print(f"Re-encoded video saved to {out_path}") + + os.remove("temp.mp4") # Clean up temporary file + + +def save_masklet_image(frame, outputs, out_path, alpha=0.5, frame_idx=None): + """ + Save a single image with masklet overlays. 
+ """ + img = load_frame(frame) + overlay = render_masklet_frame(img, outputs, frame_idx=frame_idx, alpha=alpha) + Image.fromarray(overlay).save(out_path) + print(f"Overlay image saved to {out_path}") + + +def prepare_masks_for_visualization(frame_to_output): + # frame_to_obj_masks --> {frame_idx: {'output_probs': np.array, `out_obj_ids`: np.array, `out_binary_masks`: np.array}} + for frame_idx, out in frame_to_output.items(): + _processed_out = {} + for idx, obj_id in enumerate(out["out_obj_ids"].tolist()): + if out["out_binary_masks"][idx].any(): + _processed_out[obj_id] = out["out_binary_masks"][idx] + frame_to_output[frame_idx] = _processed_out + return frame_to_output + + +def convert_coco_to_masklet_format( + annotations, img_info, is_prediction=False, score_threshold=0.5 +): + """ + Convert COCO format annotations to format expected by render_masklet_frame + """ + outputs = { + "out_boxes_xywh": [], + "out_probs": [], + "out_obj_ids": [], + "out_binary_masks": [], + } + + img_h, img_w = img_info["height"], img_info["width"] + + for idx, ann in enumerate(annotations): + # Get bounding box in relative XYWH format + if "bbox" in ann: + bbox = ann["bbox"] + if max(bbox) > 1.0: # Convert absolute to relative coordinates + bbox = [ + bbox[0] / img_w, + bbox[1] / img_h, + bbox[2] / img_w, + bbox[3] / img_h, + ] + else: + mask = mask_utils.decode(ann["segmentation"]) + rows = np.any(mask, axis=1) + cols = np.any(mask, axis=0) + if np.any(rows) and np.any(cols): + rmin, rmax = np.where(rows)[0][[0, -1]] + cmin, cmax = np.where(cols)[0][[0, -1]] + # Convert to relative XYWH + bbox = [ + cmin / img_w, + rmin / img_h, + (cmax - cmin + 1) / img_w, + (rmax - rmin + 1) / img_h, + ] + else: + bbox = [0, 0, 0, 0] + + outputs["out_boxes_xywh"].append(bbox) + + # Get probability/score + if is_prediction: + prob = ann["score"] + else: + prob = 1.0 # GT has no probability + outputs["out_probs"].append(prob) + + outputs["out_obj_ids"].append(idx) + mask = 
mask_utils.decode(ann["segmentation"]) + mask = (mask > score_threshold).astype(np.uint8) + + outputs["out_binary_masks"].append(mask) + + return outputs + + +def save_side_by_side_visualization(img, gt_anns, pred_anns, noun_phrase): + """ + Create side-by-side visualization of GT and predictions + """ + + # Create side-by-side visualization + fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 7)) + + main_title = f"Noun phrase: '{noun_phrase}'" + fig.suptitle(main_title, fontsize=16, fontweight="bold") + + gt_overlay = render_masklet_frame(img, gt_anns, alpha=0.5) + ax1.imshow(gt_overlay) + ax1.set_title("Ground Truth", fontsize=14, fontweight="bold") + ax1.axis("off") + + pred_overlay = render_masklet_frame(img, pred_anns, alpha=0.5) + ax2.imshow(pred_overlay) + ax2.set_title("Predictions", fontsize=14, fontweight="bold") + ax2.axis("off") + + plt.subplots_adjust(top=0.88) + plt.tight_layout() + + +def bitget(val, idx): + return (val >> idx) & 1 + + +def pascal_color_map(): + colormap = np.zeros((512, 3), dtype=int) + ind = np.arange(512, dtype=int) + for shift in reversed(list(range(8))): + for channel in range(3): + colormap[:, channel] |= bitget(ind, channel) << shift + ind >>= 3 + + return colormap.astype(np.uint8) + + +def draw_masks_to_frame( + frame: np.ndarray, masks: np.ndarray, colors: np.ndarray +) -> np.ndarray: + masked_frame = frame + for mask, color in zip(masks, colors): + curr_masked_frame = np.where(mask[..., None], color, masked_frame) + masked_frame = cv2.addWeighted(masked_frame, 0.75, curr_masked_frame, 0.25, 0) + + if int(cv2.__version__[0]) > 3: + contours, _ = cv2.findContours( + np.array(mask, dtype=np.uint8).copy(), + cv2.RETR_TREE, + cv2.CHAIN_APPROX_NONE, + ) + else: + _, contours, _ = cv2.findContours( + np.array(mask, dtype=np.uint8).copy(), + cv2.RETR_TREE, + cv2.CHAIN_APPROX_NONE, + ) + + cv2.drawContours( + masked_frame, contours, -1, (255, 255, 255), 7 + ) # White outer contour + cv2.drawContours( + masked_frame, contours, -1, 
(0, 0, 0), 5 + ) # Black middle contour + cv2.drawContours( + masked_frame, contours, -1, color.tolist(), 3 + ) # Original color inner contour + return masked_frame + + +def get_annot_df(file_path: str): + with open(file_path, "r") as f: + data = json.load(f) + + dfs = {} + + for k, v in data.items(): + if k in ("info", "licenses"): + dfs[k] = v + continue + df = pd.DataFrame(v) + dfs[k] = df + + return dfs + + +def get_annot_dfs(file_list: list[str]): + dfs = {} + for annot_file in tqdm(file_list): + dataset_name = Path(annot_file).stem + dfs[dataset_name] = get_annot_df(annot_file) + + return dfs + + +def get_media_dir(media_dir: str, dataset: str): + if dataset in ["saco_veval_sav_test", "saco_veval_sav_val"]: + return os.path.join(media_dir, "saco_sav", "JPEGImages_24fps") + elif dataset in ["saco_veval_yt1b_test", "saco_veval_yt1b_val"]: + return os.path.join(media_dir, "saco_yt1b", "JPEGImages_6fps") + elif dataset in ["saco_veval_smartglasses_test", "saco_veval_smartglasses_val"]: + return os.path.join(media_dir, "saco_sg", "JPEGImages_6fps") + elif dataset == "sa_fari_test": + return os.path.join(media_dir, "sa_fari", "JPEGImages_6fps") + else: + raise ValueError(f"Dataset {dataset} not found") + + +def get_all_annotations_for_frame( + dataset_df: pd.DataFrame, video_id: int, frame_idx: int, data_dir: str, dataset: str +): + media_dir = os.path.join(data_dir, "media") + + # Load the annotation and video data + annot_df = dataset_df["annotations"] + video_df = dataset_df["videos"] + + # Get the frame + video_df_current = video_df[video_df.id == video_id] + assert ( + len(video_df_current) == 1 + ), f"Expected 1 video row, got {len(video_df_current)}" + video_row = video_df_current.iloc[0] + file_name = video_row.file_names[frame_idx] + file_path = os.path.join( + get_media_dir(media_dir=media_dir, dataset=dataset), file_name + ) + frame = cv2.imread(file_path) + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + + # Get the masks and noun phrases annotated in 
this video in this frame + annot_df_current_video = annot_df[annot_df.video_id == video_id] + if len(annot_df_current_video) == 0: + print(f"No annotations found for video_id {video_id}") + return frame, None, None + else: + empty_mask = np.zeros(frame.shape[:2], dtype=np.uint8) + mask_np_pairs = annot_df_current_video.apply( + lambda row: ( + ( + mask_utils.decode(row.segmentations[frame_idx]) + if row.segmentations[frame_idx] + else empty_mask + ), + row.noun_phrase, + ), + axis=1, + ) + # sort based on noun_phrases + mask_np_pairs = sorted(mask_np_pairs, key=lambda x: x[1]) + masks, noun_phrases = zip(*mask_np_pairs) + + return frame, masks, noun_phrases + + +def visualize_prompt_overlay( + frame_idx, + video_frames, + title="Prompt Visualization", + text_prompt=None, + point_prompts=None, + point_labels=None, + bounding_boxes=None, + box_labels=None, + obj_id=None, +): + """Simple prompt visualization function""" + img = Image.fromarray(load_frame(video_frames[frame_idx])) + fig, ax = plt.subplots(1, figsize=(6, 4)) + ax.imshow(img) + + img_w, img_h = img.size + + if text_prompt: + ax.text( + 0.02, + 0.98, + f'Text: "{text_prompt}"', + transform=ax.transAxes, + fontsize=12, + color="white", + weight="bold", + bbox=dict(boxstyle="round,pad=0.3", facecolor="red", alpha=0.7), + verticalalignment="top", + ) + + if point_prompts: + for i, point in enumerate(point_prompts): + x, y = point + # Convert relative to absolute coordinates + x_img, y_img = x * img_w, y * img_h + + # Use different colors for positive/negative points + if point_labels and len(point_labels) > i: + color = "green" if point_labels[i] == 1 else "red" + marker = "o" if point_labels[i] == 1 else "x" + else: + color = "green" + marker = "o" + + ax.plot( + x_img, + y_img, + marker=marker, + color=color, + markersize=10, + markeredgewidth=2, + markeredgecolor="white", + ) + ax.text( + x_img + 5, + y_img - 5, + f"P{i+1}", + color=color, + fontsize=10, + weight="bold", + 
bbox=dict(boxstyle="round,pad=0.2", facecolor="white", alpha=0.8), + ) + + if bounding_boxes: + for i, box in enumerate(bounding_boxes): + x, y, w, h = box + # Convert relative to absolute coordinates + x_img, y_img = x * img_w, y * img_h + w_img, h_img = w * img_w, h * img_h + + # Use different colors for positive/negative boxes + if box_labels and len(box_labels) > i: + color = "green" if box_labels[i] == 1 else "red" + else: + color = "green" + + rect = patches.Rectangle( + (x_img, y_img), + w_img, + h_img, + linewidth=2, + edgecolor=color, + facecolor="none", + ) + ax.add_patch(rect) + ax.text( + x_img, + y_img - 5, + f"B{i+1}", + color=color, + fontsize=10, + weight="bold", + bbox=dict(boxstyle="round,pad=0.2", facecolor="white", alpha=0.8), + ) + + # Add object ID info if provided + if obj_id is not None: + ax.text( + 0.02, + 0.02, + f"Object ID: {obj_id}", + transform=ax.transAxes, + fontsize=10, + color="white", + weight="bold", + bbox=dict(boxstyle="round,pad=0.3", facecolor="blue", alpha=0.7), + verticalalignment="bottom", + ) + + ax.set_title(title) + ax.axis("off") + plt.tight_layout() + plt.show() + + +def plot_results(img, results): + plt.figure(figsize=(12, 8)) + plt.imshow(img) + nb_objects = len(results["scores"]) + print(f"found {nb_objects} object(s)") + for i in range(nb_objects): + color = COLORS[i % len(COLORS)] + plot_mask(results["masks"][i].squeeze(0).cpu(), color=color) + w, h = img.size + prob = results["scores"][i].item() + plot_bbox( + h, + w, + results["boxes"][i].cpu(), + text=f"(id={i}, {prob=:.2f})", + box_format="XYXY", + color=color, + relative_coords=False, + ) + + +def single_visualization(img, anns, title): + """ + Create a single image visualization with overlays. 
+ """ + fig, ax = plt.subplots(figsize=(7, 7)) + fig.suptitle(title, fontsize=16, fontweight="bold") + overlay = render_masklet_frame(img, anns, alpha=0.5) + ax.imshow(overlay) + ax.axis("off") + plt.tight_layout() + + +def show_mask(mask, ax, obj_id=None, random_color=False): + if random_color: + color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0) + else: + cmap = plt.get_cmap("tab10") + cmap_idx = 0 if obj_id is None else obj_id + color = np.array([*cmap(cmap_idx)[:3], 0.6]) + h, w = mask.shape[-2:] + mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1) + ax.imshow(mask_image) + + +def show_box(box, ax): + x0, y0 = box[0], box[1] + w, h = box[2] - box[0], box[3] - box[1] + ax.add_patch( + plt.Rectangle((x0, y0), w, h, edgecolor="green", facecolor=(0, 0, 0, 0), lw=2) + ) + + +def show_points(coords, labels, ax, marker_size=375): + pos_points = coords[labels == 1] + neg_points = coords[labels == 0] + ax.scatter( + pos_points[:, 0], + pos_points[:, 1], + color="green", + marker="*", + s=marker_size, + edgecolor="white", + linewidth=1.25, + ) + ax.scatter( + neg_points[:, 0], + neg_points[:, 1], + color="red", + marker="*", + s=marker_size, + edgecolor="white", + linewidth=1.25, + ) + + +def load_frame(frame): + if isinstance(frame, np.ndarray): + img = frame + elif isinstance(frame, Image.Image): + img = np.array(frame) + elif isinstance(frame, str) and os.path.isfile(frame): + img = plt.imread(frame) + else: + raise ValueError(f"Invalid video frame type: {type(frame)=}") + return img diff --git a/source_code/sam3/scripts/eval/silver/download_videos.py b/source_code/sam3/scripts/eval/silver/download_videos.py new file mode 100644 index 0000000000000000000000000000000000000000..352f5e9935bfdb838b45eee4dcaa4050d678a1c5 --- /dev/null +++ b/source_code/sam3/scripts/eval/silver/download_videos.py @@ -0,0 +1,260 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
All Rights Reserved +import ast +import concurrent.futures +import os +import shutil +import subprocess +import sys +from concurrent.futures import as_completed, ThreadPoolExecutor +from pathlib import Path + +import yt_dlp + +from utils import ( + annotation_files, + config, + load_json, + run_command, + save_json, + update_annotations, +) + + +def construct_gcs_path(original_video): + """ + Convert original_video string to GCS path. + Example: + 'AUTOLab_failure_2023-07-07_Fri_Jul__7_18:50:36_2023_recordings_MP4_22008760.mp4' + -> + 'gs://gresearch/robotics/droid_raw/1.0.1/AUTOLab/failure/2023-07-07/Fri_Jul__7_18:50:36_2023/recordings/MP4/22008760.mp4' + """ + parts = original_video.split("_") + lab = parts[0] + failure = parts[1] + date = parts[2] + time = "_".join(parts[3:-3]) + recordings = parts[-3] + mp4 = parts[-2] + file_id = parts[-1].split(".")[0] + gcs_path = ( + f"gs://gresearch/robotics/droid_raw/1.0.1/" + f"{lab}/{failure}/{date}/{time}/{recordings}/{mp4}/{file_id}.mp4" + ) + return gcs_path + + +def download_video(args): + gcs_path, dst_dir, json_file = args + # Ensure subdirectory exists + subdir = Path(dst_dir) + os.makedirs(subdir, exist_ok=True) + # Save file with its original name inside the subdir + print(json_file) + local_path = subdir / json_file + cmd = f'gsutil cp "{gcs_path}" "{local_path}"' + print(f"Running: {cmd}") + try: + run_command(cmd) + return (gcs_path, True, None) + except Exception as e: + return (gcs_path, False, str(e)) + + +def download_youtube_video(youtube_id, output_path=None): + try: + if output_path is None: + output_path = os.path.join( + config["yt1b_path"], "downloaded_videos", f"video_{youtube_id}.mp4" + ) + url = f"https://www.youtube.com/watch?v={youtube_id}" + if os.path.exists(output_path): + return youtube_id, None + format = "best[height<=720][fps<=30]/best[height<=720]/best" # 720p or lower, max 30fps + ydl_opts = { + "format": format, + "outtmpl": output_path, + "merge_output_format": "mp4", + "quiet": 
True, + "cookiefile": config["cookies_path"], + "socket_timeout": 60, # Increase timeout to 60 seconds (default is 10) + } + with yt_dlp.YoutubeDL(ydl_opts) as ydl: + ydl.download([url]) + return youtube_id, None + except Exception as e: + return youtube_id, str(e) + + +def download_youtube(): + all_videos_to_download = set() + for annotation_file in annotation_files["yt1b"]: + ann = load_json(os.path.join(config["path_annotations"], annotation_file)) + for video_info in ann["images"]: + youtube_id = video_info["original_video"] + all_videos_to_download.add(youtube_id) + + videos_to_download_still = all_videos_to_download + videos_downloaded = set() + videos_unavailable = set() + num_download_retries = 3 + for _ in range(num_download_retries): + if len(videos_to_download_still) == 0: + break + videos_error = set() + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [ + executor.submit(download_youtube_video, youtube_id) + for youtube_id in videos_to_download_still + ] + for future in concurrent.futures.as_completed(futures): + youtube_id, exception = future.result() + if exception is None: + videos_downloaded.add(youtube_id) + elif "unavailable" in exception or "members-only" in exception: + videos_unavailable.add(youtube_id) + else: + videos_error.add(youtube_id) + videos_to_download_still = ( + all_videos_to_download - videos_downloaded - videos_unavailable + ) + assert videos_to_download_still == videos_error + + if len(videos_unavailable) + len(videos_to_download_still) > 0: + message = "Some videos are either no longer available on YouTube, or are set to private, or resulted in some other error. " + if config["update_annotation_yt1b"]: + message += "The unavailable videos will be ***REMOVED*** from the annotation file. This will make the test results NOT DIRECTLY COMPARABLE to other reported results." 
+ print(message) + update_annotations("yt1b", videos_downloaded) + else: + message += "You may want to either re-try the download, or remove these videos from the evaluation json" + print(message) + + +def download_droid(): + ann_dir = Path(config["path_annotations"]) + dst_dir = Path(config["droid_path"]) / "downloaded_videos" + json_files = annotation_files["droid"] + + download_tasks = [] + original_videos = set() + for json_file in json_files: + json_path = ann_dir / json_file + data = load_json(json_path) + for img in data["images"]: + original_video = img["original_video"] + original_videos.add(original_video) + + print(len(original_videos)) + for original_video in original_videos: + gcs_path = construct_gcs_path(original_video) + download_tasks.append((gcs_path, dst_dir, original_video)) + + max_workers = min(16, len(download_tasks)) + with ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_task = { + executor.submit(download_video, task): task for task in download_tasks + } + for future in as_completed(future_to_task): + gcs_path, success, error = future.result() + if not success: + print(f"Failed to download {gcs_path}: {error}") + + +def download_ego4d(): + output_dir = os.path.join(config["ego4d_path"], "downloaded_videos") + + ann_dir = Path(config["path_annotations"]) + json_files = annotation_files["ego4d"] + original_videos = set() + for json_file in json_files: + json_path = ann_dir / json_file + data = load_json(json_path) + for img in data["images"]: + original_video = img["original_video"] + original_videos.add(original_video) + + original_video_uids = [ + video_uid.replace(".mp4", "") for video_uid in original_videos + ] + video_ids_download = original_video_uids + num_download_retries = 2 + download_correct = False + message = "" + for _ in range(num_download_retries): + cmd = ( + [ + # "python", "-m", "ego4d.cli.cli", + "ego4d", + "--output_directory", + output_dir, + "--datasets", + "clips", + "--version", + "v1", + 
"--video_uids", + ] + + video_ids_download + + ["--yes"] + ) + + # Run the command + result = subprocess.run(cmd, capture_output=True, text=True) + message = result.stderr + if ( + "RuntimeError: The following requested video UIDs could not be found in the manifest for version:" + in result.stderr + ): + not_findable_videos = ast.literal_eval(result.stderr.split("\n")[-2]) + video_ids_download = [ + video_uid + for video_uid in video_ids_download + if video_uid not in not_findable_videos + ] + else: + download_correct = True + break + + if not download_correct: + print(f"There was an error downloading the Ego4D data: {message}") + + if len(video_ids_download) != len(original_video_uids): + message = "Some videos are no longer available. " + if config["update_annotation_ego4d"]: + message += "The unavailable videos will be ***REMOVED*** from the annotation file. This will make the test results NOT DIRECTLY COMPARABLE to other reported results." + print(message) + update_annotations("ego4d", video_ids_download) + else: + message += "You may want to either re-try the download, or remove these videos from the evaluation json" + print(message) + + +def download_sav(): + tar_url = config["sav_videos_fps_6_download_path"] + tar_file = "videos_fps_6.tar" + sav_data_dir = os.path.join(config["sav_path"], "downloaded_videos") + os.makedirs(sav_data_dir, exist_ok=True) + + subprocess.run(["wget", tar_url, "-O", tar_file], cwd=sav_data_dir, check=True) + subprocess.run(["tar", "-xvf", tar_file], cwd=sav_data_dir, check=True) + subprocess.run(["rm", tar_file], cwd=sav_data_dir, check=True) + + +def main(): + assert len(sys.argv) > 1, "You have to provide the name of the dataset" + dataset_name = sys.argv[1] + assert ( + dataset_name in annotation_files + ), f"The dataset can be one of {list(annotation_files.keys())}" + + if dataset_name == "yt1b": + download_youtube() + elif dataset_name == "droid": + download_droid() + elif dataset_name == "ego4d": + download_ego4d() + elif 
dataset_name == "sav": + download_sav() + + +if __name__ == "__main__": + main() diff --git a/source_code/sam3/scripts/eval/silver/fathomnet_image_uuids.json b/source_code/sam3/scripts/eval/silver/fathomnet_image_uuids.json new file mode 100644 index 0000000000000000000000000000000000000000..3c263e86a1a9afa9de8838f057c3a89de4a0111f --- /dev/null +++ b/source_code/sam3/scripts/eval/silver/fathomnet_image_uuids.json @@ -0,0 +1,9224 @@ +[ + "44973e24-09da-4366-9a24-d17e4a12b73d", + "a602c8ba-4d61-41a5-a4a0-80d680eb8972", + "8cad4907-094c-4509-a473-8f8bdf6c91d8", + "c1fee045-5e83-41fc-b9b5-eff847ccd69b", + "7bf5c26a-8026-4449-a3f9-baa486d0d16f", + "39a7ee48-76da-4af6-9c84-9c6d1e8278c8", + "420d2fb9-603c-45b8-92da-7827d9d3aee3", + "f757d96c-d009-41e7-9793-ebff210da547", + "dd94dc70-7ee2-4b6a-aa16-c27dde6350af", + "d7b1e1ed-4afe-4f3b-9b1e-22a7ed33538e", + "3e07dd50-c624-4274-8588-2238e8812f0e", + "d5532829-6f82-4813-86cc-4071b5dbdb2f", + "cc0dfcdd-3d92-43f8-bf26-45b84f73cabe", + "1088c10a-57ae-4bc3-924e-6f9a7f1ca505", + "93246182-a44d-4b4a-99b6-b28852d7fe42", + "e0c6e21f-19ec-4460-a15f-9fa9f0b17831", + "06338b9d-0993-4b9e-9fc2-635a3676a97c", + "270b2bb8-d5d8-41f1-b3cf-b565b2c71524", + "0c8fa41d-d4cb-46a1-ac8f-bc3f2b653a75", + "3bd40736-b0a5-4d8a-a38b-15af6aad2799", + "55cf3c50-bdba-4a0d-aaf3-193fb03d8478", + "7a2c269f-7c99-4e68-ad73-03e3b228b629", + "454c05b6-1fd0-4446-8a45-8d9b13f8c08b", + "d9524a6e-630f-42de-bcab-cdcae900f7ac", + "e9bbb302-befd-4fd1-bc57-dddc247f8c7a", + "f6257f8d-2d3f-479f-aa3e-f99ffacd753c", + "29f87187-9365-416e-8a9f-802b903a868e", + "825cd523-b4a1-4cb4-81cc-d8c14a0b65b2", + "563c22a7-c0e0-423d-9744-6f5882876295", + "d4794251-ef3c-4115-b35b-085e477bf364", + "204a90bc-35a4-480f-a1dc-e52cad34e85d", + "5e7d492e-1e65-4f5e-a1ec-72d4782362c6", + "522b0b65-ae5b-4709-b8c8-43657672f15e", + "6587710c-3e92-416d-a8bd-bd0c55f52454", + "ee56ca7f-553f-4b95-ab0e-13ff68360ed1", + "54cd215f-0d87-41ac-99a8-2f48b1a4299c", + "c4e3253a-a7c6-42a6-93a7-180cd219f077", + 
"c63cb2c3-1f23-4de5-93f5-08a309cde1c7", + "ddfb11bc-737b-4bbc-83de-a44c0476bc65", + "ac6b464f-ef5c-41b7-952b-8b5deec7d969", + "55b096b4-4529-4512-a187-4c5f3b1c6a9a", + "5a0a706c-5cab-47e5-b366-2de909dcf8b4", + "6ef5d99b-fea5-412f-94af-fbec84aa0b39", + "abfea99c-5e27-4fe5-9fc6-65cd9d7ecb45", + "1f843bca-5180-4b58-8eec-0f973a8c3643", + "0c1289d9-eabe-4bf5-b829-f7a1eab0e95d", + "befe9f12-442b-433d-af81-469626142a00", + "1a53fce4-a814-4924-9d1f-f7d04271b7e6", + "3afb1561-4dc1-4209-8267-f175ff5ef267", + "bd5fe049-42db-430c-b691-da55a09d5696", + "67a23937-af2d-477b-bfd9-689dff8c025a", + "b4b588f9-56f7-45a7-933d-127772e18922", + "fb19adf3-f6d7-4280-9490-bedf298bea30", + "06b1ef0a-3c13-44a5-b022-8f812910a4aa", + "ddfa006f-cca2-4814-ae6e-f12602fd7a1e", + "89682ad5-03d2-4c46-8e15-9ff54b03389c", + "9559eff6-2522-4714-a485-0d95d5b43974", + "84185e80-b943-4ffe-9b07-8010d7a42e38", + "c79ab88b-106d-4133-8c05-63990a8b8586", + "d1c9380f-c6a4-4706-8ad9-0cb1284393c8", + "e2c79403-977d-40ed-8491-637bd1724849", + "fba86954-f64d-4b25-8930-48a4b85dd3ad", + "58fb247c-eb20-4ffa-9f4c-23da0bd310cd", + "eea50043-49f1-48da-8480-e08253a0df9e", + "b5d03702-ca24-4836-9855-4343837b4c1b", + "82cb54ad-23a5-490e-aea9-1914ea355f00", + "b19ba54e-8589-413e-b16f-cdc694db45cd", + "cf81a495-ca2e-4202-9176-de42fc2de5f8", + "06f9a650-cbcb-4638-91cb-cbf99781be79", + "42591570-8846-4f8a-af42-048924fb6936", + "79672528-e8d3-4118-b576-16158924ecdf", + "04539a5c-850c-45ac-beff-d8a4e451d8b4", + "ff66bbae-c27f-45a9-99e2-8b289d9f2227", + "94f513ca-1bb3-4671-9c9c-6fc2d840a2b9", + "90a1bfb3-7692-4d5b-a698-d2b3f6e74257", + "bb6a48db-434e-489c-89ed-2812086b2542", + "510e3e3c-9580-4e1f-8f3b-2f7187ee05f8", + "9c3d0587-52cf-422c-b8cc-b4f42d4f41c0", + "c8930f95-cbe0-41ab-9e16-78199471bd16", + "554fac1c-31b8-4986-b5d8-6788c96e0f2f", + "dc0873d2-a05f-4f6c-a500-879f9d0d28cf", + "0142be4c-c49f-4db6-98b5-c38ff6d829f9", + "e884e923-e7e8-4574-89f1-b181c12ee46d", + "6c27d2cb-e9b1-46af-9788-cd3062da90f4", + 
"b438d26c-a33e-4c28-9036-576534ffd9c3", + "aeb62274-eeb4-493a-a162-31ce614486e2", + "1d813d76-fdc3-4f22-ae82-75e4962c3552", + "8563b16a-6690-4052-ad94-f33a4f439a58", + "f6cb5f83-aba4-4497-a10d-caf4d7690456", + "92ec365a-150b-4e4d-b8f5-8a36564f0ba1", + "a5b4f569-a239-454b-b031-7775036f8dd1", + "02713482-d953-4d2c-97d6-58e88f80aa4f", + "f39d582f-867c-4617-8b7a-b15f91d1bc94", + "6613c3bd-c86a-4c06-9882-8e970ee9ab98", + "691fd07b-2056-48cd-8516-f87a08d8307d", + "f5debb51-384f-4629-872b-86c17a6838ce", + "60c6b3f3-2c53-47d5-92da-3214ee895a63", + "9088fbe0-da76-4a39-9eb3-95cd3b4a06b9", + "10a56c9f-7f3a-4920-b3c1-49cf63ab2d34", + "c2801b9b-0ca7-45e1-a72d-3d44d48b53e4", + "33a3f615-f528-4acf-8ea8-b99e3e9c8048", + "c720fb78-430a-4fde-9613-e31f2b50f3cf", + "78a2b2cb-e6c2-4405-b6af-a4173e675bfd", + "4230e14d-83da-430f-b90a-b4e47071eebd", + "8d8e0b14-7d46-40a9-8262-7c63eb503d42", + "11868667-c04b-436a-aeb1-0b909317e076", + "57d16a32-c136-42e3-ac7b-fea17784cd04", + "c2c60cb1-3b19-4c47-b24e-3c8bd977184c", + "e4c1f9d7-71dc-45f9-b98a-f7399e4bbe5d", + "8485e3cb-fba8-44ed-a024-3ae6b42db55b", + "1d2c4171-2697-46d2-83fb-a57b911adc57", + "72c38240-eaaa-454b-b7ff-8269c9e66d2b", + "73725d47-6cfc-4adc-a80c-75351a43f5f8", + "38e26821-a55b-49a2-b1b4-b6fc9394b619", + "5db57488-6b39-451a-87ab-2a78716ae535", + "4e10bbc3-b400-4b79-ba5b-aba4a40c4594", + "fb99327d-217f-4f95-9ec7-5c82a5ddb20c", + "164e33ba-56de-4e09-a731-6f900e652480", + "9a003294-691e-4a3f-b140-0a4b72399835", + "3badec5a-d18c-4fe8-8af8-eccf6c5413af", + "0f3b806a-17a0-496e-8dd5-9415e58eaa3c", + "332d84e4-7ea5-47e0-94c0-43973165fb5a", + "5b9a4253-53e6-4b4f-a388-6871dc3c99ba", + "0a95e37f-6ea6-480c-b2d2-6a95420153a0", + "e4c16eff-8a05-41eb-98e3-f47bb4cc264f", + "2d1e4008-438e-4448-8c76-b0f8f42d4f5c", + "b65f0194-98a2-49dd-88a3-ce4180b681c8", + "d8bb8271-f5e6-4785-b940-9a328af1b227", + "07a343ae-ecf5-4b6d-86be-c2f00b3d19e5", + "cff847ec-375a-46a6-a003-2e25c7b295f9", + "cb1f2da1-dc31-40a8-91cb-c019b285f581", + 
"e3eaaaeb-da4f-4b71-8e7b-c6a10f68a923", + "b71f189d-edeb-4374-9cab-13ba0541ace1", + "d31fc28d-c832-4743-9d32-1e5f3fc2d1b3", + "5c3e6f92-9c77-4dc1-addf-95bd8324b727", + "f32209d8-8f76-4281-9cb7-27f9dac46e72", + "fb206041-5728-4392-bd14-1b1255ed5f61", + "9d241cf7-bf1e-487f-b451-795266c80269", + "67e6ac67-1fdd-4439-b4ed-788dff59f127", + "983030ef-bc7c-461d-b798-e062c6e44bdf", + "4a3e162d-d489-42ab-a437-5390f7463be3", + "5267ed67-ad0a-4ca4-9f18-80df09568733", + "bef5af9e-08da-4652-ac56-2fa92075f5c0", + "f56a08f0-925b-4610-ab4a-53ca5051d35d", + "ad73c5c7-9b8c-498a-aec5-5508bf5783ba", + "b4fcb998-3aba-4651-84cf-9e8ee2fec02f", + "fc98b7ae-d3d0-4e62-819b-99160aafe406", + "e1a653a9-7eb6-4c95-aa4f-eb6a81f15fbb", + "7119e96e-1f62-4dba-b432-6fd52c2cffe3", + "ad47d117-3e75-476b-950f-8f3d4d0d247c", + "7b396141-504e-41bb-aa0f-a06bcce16dd1", + "599ccf89-0fd6-4ecd-a2e4-dd47b14de119", + "f0cd7210-896a-450a-9742-3d57c86c8241", + "262154d9-3d3e-4250-84da-608242084a2e", + "b195737b-e7da-466e-9b71-6053f7cad893", + "9b7317a8-bed8-4ff2-9e13-948ff0b03fa2", + "45d22cbf-3866-4eca-8558-13f837477976", + "4c8c0d59-4108-4a0d-a5b6-ac0712e49e3f", + "056fbd6d-edc8-44fe-8073-6e569aeda685", + "7861b9a8-4420-40f0-a5e0-f442bc1e8981", + "e8bdadd6-8431-445c-9633-f0ca895fbb5f", + "c4594215-3818-4583-99be-97b5e05e7e53", + "635db8f9-864f-48aa-a527-5abe34973cce", + "2b13d82e-b2bf-4d46-84c6-7c0c971fa239", + "a1e9ea3a-74ee-46d9-b5c1-eb9c95d47f95", + "8668bd29-293a-424b-95a7-e7acb8c3e9d9", + "2c0d65e3-a294-42db-bc86-cac66f59fd50", + "d6c8174a-6cd2-4ab6-909c-e3a8573f481f", + "f7fca24c-7c07-45c7-b05e-9c407963ed87", + "7856c97b-1f12-4db5-94fe-d014e7bffdc1", + "01e07bab-9f99-4201-8625-a60ee6d4406b", + "2fbf1d02-94db-4c6d-bce4-61dfc52abffb", + "525e5004-8f3e-4894-b569-377eb101dd7d", + "29026fd8-e619-4084-ae91-de3e631f9fa5", + "d7822f22-ffbb-479e-92a3-a5bd0214e8a1", + "e8307581-8848-4ffb-8e7c-2bb3f2116ad8", + "7d749d77-4e32-4ee0-8707-a4545daddeca", + "9db0f2d3-24eb-4fc0-a77b-b634aa5392c4", + 
"c8625032-8515-4219-844c-6c56fb111505", + "e4c11319-5a9b-46b1-bbf7-21a0a872c981", + "c2fe4136-277c-42f7-a21b-bfa85e133fbf", + "e17ec664-bf88-4269-96d8-402c4e238b1b", + "30158210-4d99-4c35-ae04-e5665bce0d81", + "44ed8a79-51c8-4a30-8397-d0e27c8bfadb", + "16eb78b2-4d1a-4628-a471-13e7b3781f7a", + "b2dab81d-cea8-48dd-a326-036417cbad8f", + "9832f4fd-07b2-4722-9b47-dc0493230e0b", + "cf10f7a1-d5ed-4b01-8b7a-168645c13ff8", + "a6e6f915-5155-4d05-b0cf-7729bb78908f", + "4553454f-8060-4e34-982c-5801df3070f3", + "31ae7ddb-ee6b-4ca2-a4c0-76a4c82b13ea", + "541a3d23-3800-41b7-b3e8-78ed8b9eeda7", + "7cf49f66-d311-4314-9544-144f5f212d39", + "66966a4f-4d0e-4f2b-bc75-b6b5ec29b5a7", + "7fd7b6e9-34ee-41b8-9831-d4b6605775bc", + "5e5cd0df-1f06-44ac-a94f-90dcf1606a1e", + "5c09bfbf-90bf-404c-8c25-3235e5f4033d", + "4c316cab-a01d-4764-b677-283340032da8", + "957dc413-c5b4-4551-8900-8ac663a72e6b", + "da58305b-a78b-4060-bc48-130292ab1b0f", + "25a4b2e4-9908-49e8-8d0e-02e99a0cca77", + "cd741a56-018e-4c0d-b8ac-34596e416a12", + "3ed6e231-f351-40e9-8def-ff72851234c0", + "c84a55ca-cc4c-40b0-82d3-c2617afaad92", + "fcebd6c8-8e2e-4bc1-a22e-a60dd57f80ee", + "004fe257-00ae-49a0-bd2a-80c9d68d124d", + "5fa64bb5-d340-4453-b2e0-b222408bb0e7", + "6de233d2-d9c3-4bfd-9380-dc7f2aae8fcb", + "0e0811ec-2ecb-4a0f-a80a-eef979fba1cf", + "2f8591c2-895d-402b-819d-30b9bf3dbc4f", + "77f04550-b1fd-4e6f-98a5-8c7393114398", + "6897500c-331e-4952-954c-562930a6bfef", + "68b90ce2-690f-4bcb-8cc0-dbd7ead74095", + "0bf6103e-d636-4cd8-97f6-a456b61591a9", + "f3fbe4f4-5a9b-41e2-9e94-87bb525544a1", + "4eafa131-3cad-4231-ade1-e60919865856", + "66cb5eb7-5f47-46a5-a32d-34801c415683", + "93a169c1-0bde-4e23-a3c1-3c27e8174a0e", + "fa9cc392-4a54-4258-9d98-81952816ef37", + "dbbf1abf-dbfc-4954-8328-aa4ef1cf1c8b", + "087e1a2e-3190-41af-b595-8511de29128a", + "cd7458b0-193a-40ef-bcd1-10fbc85c054a", + "f31316f4-edf7-4afc-9dc2-09438188a800", + "d5402bb9-c9f4-4e58-9f30-223c8f1efb91", + "d05eb6ef-1589-42bd-b644-9ab087921272", + 
"7c835e45-8e52-4604-aa80-97b834e88a35", + "49b8c394-7766-4e9c-98ab-04cd97425ea5", + "84de4228-c234-4ed4-a12f-7de70d527bca", + "d3905e1c-4ac9-4d1b-8f94-1dba73f29083", + "4a36f3c6-193e-41ef-acd7-9c38722ea972", + "4cbe570b-e442-4494-b2d0-b0bcfbf54d21", + "3d88e3e4-17ee-472e-b2ed-81a0b8abd311", + "033c80bc-bf7d-4d2b-af8b-6464bbf9dfb7", + "d55f7061-18db-4598-b9a1-c0925874f609", + "a86f47ad-d4d0-4409-b421-88573930bc8b", + "e21b5852-3c76-4dbd-a1a7-96eb4627fda8", + "4007c8de-1a2d-40f7-809f-91c51339405f", + "5ce6aba2-5ed0-4e63-a4ad-6f5eecbc0b09", + "e9df1132-b19b-4452-8a7b-2c5fe90dd30d", + "5b04df10-67b9-434b-b2df-ba567b00be1c", + "b19c0945-a719-4c50-8573-96bfc7d23dab", + "a8ffc419-a174-40cd-9bb3-5d643b08ab5b", + "7f721b75-c58e-4807-8c9d-85655b927e5c", + "bc7b5218-c3a1-4c51-8da6-5146dd8554e8", + "cdd9fc2e-7a21-48f7-af8b-1b618bdf9508", + "6da27ef2-4579-461a-a9e7-ddfdbeb19f13", + "cfa7dbd0-fa01-441b-b534-1cbadc332ffb", + "95db11c3-3af1-418f-b995-b26511c72ee6", + "83ba038c-a730-4794-9714-f8240e12ad81", + "2bb213f3-2b55-49db-9385-718ed66ce9c5", + "3e0ca5cf-ade3-465d-a74e-df24466330e4", + "70f3099f-c7be-4ae3-b035-ee7f101eb644", + "aec0bade-92e2-474d-afb7-965affb743bd", + "cd32d19e-8c2a-4580-8e2f-fc20822c4195", + "4abdfb3e-7eec-41ec-b5c0-1e1b2dea41a2", + "0ad7a123-fe26-4d0e-a77b-2a9c22edbd93", + "fa09d69b-ecf2-452b-af92-39eabdde6d47", + "cfcb60df-f60a-499c-8d61-a39e93daed78", + "aaaa9a7c-c75e-4b64-a038-0c2a7e812423", + "c0419c03-5f22-4841-9bcf-522d73663c39", + "3c65d484-8c33-46a2-a5d1-ceefbc453578", + "317558bf-e3c8-4018-8aad-c7d046c0e4bd", + "512b5977-ff7f-487f-8d4b-a2cbcc0b0ea2", + "90e43eaa-cad3-406d-b648-11caaefbd32d", + "8f9e23b9-ba6b-4488-aa43-cace05fa46e6", + "fd0b700a-9eb9-4d3e-8ec3-2d6205311f11", + "efba881e-d53c-44cb-a9c6-76b25454a23e", + "10e3b0e0-1e22-4a54-9436-309a81fb5154", + "5f7725c8-5d0d-4c06-8088-59d0a96fe8f3", + "d8ee8ca4-bf8d-499b-966b-91bd81805c27", + "9c226251-f94a-46d5-847b-0ad8facf8aab", + "1a66adcb-9998-4a6b-a099-f7e1f790c017", + 
"3d11250f-1502-4b1c-9123-8aebfedbd4e1", + "29f2c74a-2651-4b5f-a7a5-49b58060deec", + "42a62736-5b0c-439c-ae84-b5371162a33b", + "ea5ae3f6-68a8-4d5f-8863-0085f90a6a4b", + "fa8f7e29-ffe4-4de7-812f-b97f3eaf0cc9", + "8ebc614a-a660-4818-980c-ca3eb7a7a60f", + "44a153bb-baca-4638-87b1-b931fdd652f8", + "d331aff6-9b9f-47d2-af40-0a784d528ec2", + "b076836d-0487-4185-9562-accbc5992e68", + "b2b614be-262e-484a-9e45-2e0bdfacb75b", + "db72bd34-c24a-4e40-8d38-e4977a0e44c0", + "4bd26590-720b-4475-82bb-58e0620fef77", + "c7aaf46e-cc3b-4304-a468-f70363bcc913", + "af96f129-7ce7-4051-856d-f8552c4d9237", + "10c5ac54-d0a2-4415-8fbc-755dcf54307d", + "343feb67-4f06-4e5c-ab00-dc1a3ef34ec5", + "45028c0f-ce3b-42cc-a7aa-13b7b4ac8c88", + "9088ef58-78f1-4ae2-a7f5-ec9a1e84b671", + "65004fc3-5037-487b-9a8c-1cdb8b5a7ae5", + "08a60ebd-2638-4b76-a1a0-898ceb270b56", + "98433997-a432-42b1-8e52-09d3eafef520", + "0ceb6351-1b85-42d3-8d13-02e2e000f89d", + "e7588b85-0726-4857-a606-01d17eb6b52b", + "cb158049-27b7-437d-bfb9-fd3b6d0821f0", + "157708c5-e59d-468a-b2a5-a03ad9db1eb5", + "29cf36ff-7286-45ea-882f-a9f5a81ba146", + "75a27146-f06c-4181-b569-ab30196c270e", + "21023e6b-8afc-41db-8b4a-c0fb6521a579", + "110ef8cb-ab8f-4356-8b15-bd9595d02e15", + "41b88ecc-4d44-405d-92d9-21b3daf820cf", + "2e579b69-771a-4571-8110-47e500455042", + "68f8c874-7b56-4b31-9bd7-d2190de0e738", + "ca55fccc-9812-4301-89d0-464949c30e90", + "74710e92-5f4a-41a7-aed0-fd8eef727d62", + "795d9443-dc05-4b14-b655-110e923199c9", + "5e5306e3-ea40-421f-92ce-63cd92c0dce4", + "782d5f06-2e6d-425c-9dfd-0117dc98669d", + "f89659e7-ea2c-45c6-a8eb-bd4151e87372", + "28b13e6b-2957-435d-b3ba-836277facf2b", + "d3e59088-f04b-4348-8279-7a29e5bf3879", + "bed6626b-064e-4f58-9e4a-1240e2aa2f68", + "76909db0-8184-43e3-89aa-d735215cb300", + "197802ce-729f-4a88-b4a0-8478c0155e71", + "8b0418df-0005-4b26-b381-9cd431a2d571", + "fd5b9dc6-80eb-47fe-97d5-948c7a3cfcdd", + "ab44c943-7332-4b0a-9793-01a72b694f37", + "b0fabf09-a08d-4c79-a98e-3467c95aa984", + 
"2243d013-fd2e-475e-9521-3fa825365d99", + "577fddd8-46ae-4afb-ab7f-d3c92e443ab6", + "8916115f-49b4-4361-9275-1c3284632c78", + "4c33e38e-032c-4643-a8aa-42e5e59d83dc", + "818c9b1e-7fce-4207-9557-d24cf8a13188", + "bd343d1a-40ba-442f-9a83-1a55d572c0e5", + "b94977f7-824e-4418-9462-a4de2fc09f60", + "8670514c-d077-489b-a409-4aefa422bc87", + "66429593-4e84-48fd-8f3e-9260a9708d5e", + "a835db97-4108-4907-9a39-d4e51f06176d", + "a77bd244-1fb9-49fb-b2e9-b08dc7cee177", + "a2d75dda-0277-4e15-b574-bdf9c30305a1", + "24c91ccb-7257-4c39-b39b-368a530ddcc1", + "abc32ea3-3dc2-43f5-9bed-4e6a45aa4672", + "ac9c7bea-0424-4a43-a80c-d2072927af90", + "bac4de6e-83a3-4ab7-931f-ae09316c371f", + "eabe9df9-de8a-4dfc-932b-9669cb05b5b9", + "142e73f2-879e-4765-ae19-6ffe10901da2", + "33b7d70d-c7d4-4047-985a-984a25accbfe", + "331592d8-6a77-4ede-8cc5-fd66ebff6130", + "47edd8c4-ba8c-47e6-801e-b88798739c64", + "0f69b2fd-7f31-4e38-8db5-634dce4884cf", + "14f55eeb-641c-4a3c-94bb-2fc2dd002ebd", + "ab6093ee-7865-4551-a004-07c1bef562b4", + "19913ac9-da08-4008-a5a3-01848f68af02", + "294a8ae7-69e7-401f-9c14-afbfd67cf2f9", + "424f9ed1-d478-4ca2-9523-4fe6a28cc651", + "98092022-ef47-428e-b69e-f7ea340c4124", + "3bd1deac-551a-4f2c-8c83-4ccfafff6064", + "32405353-05fc-410b-b42c-c75b202420d1", + "038d6225-c410-45b2-a82c-32774d5f290b", + "1b2479bc-9923-4a37-88e7-becff4750fa2", + "74cbda25-d7a0-4e00-8651-0de0cf3fa6c4", + "a03d113f-ed06-4889-85f5-e93ea160fd3e", + "6529fdfd-cc70-40a5-8132-357aa8c4891b", + "784202c6-106b-42f1-b4d5-c8834334071e", + "012d3a98-11ab-4d9a-84e9-e7d38ed4b00d", + "e5321662-826a-45ec-9b58-dd1698e6c6c5", + "dd7a8360-52ab-4f92-bc8d-a4b7a2ce71a2", + "8ca595f9-2130-46e8-bf06-26a4e1a94198", + "95648436-4438-4abb-b564-bddf12f8145b", + "730c63b9-6814-45d5-8aec-b470eb353970", + "346ed9bc-1a6e-4436-b618-005e229d568f", + "f072e617-55cd-45c8-ac04-04feeeb83fdb", + "4a244da7-07ae-4377-a2c4-410450195145", + "1f25335b-05a3-4c4f-88e8-ce4a6430f893", + "0807c527-ec84-4547-9cac-736c8fa8858f", + 
"f0649d6b-af07-49f4-a89d-62ff9b32b278", + "e47c764d-8ee1-4f87-b9ac-911ae3167c8c", + "07de764d-ea86-4646-afad-984fc4df69b3", + "e3d1efb7-0a36-4d88-9c75-e6b4b95afe9f", + "7aa0ff4c-0982-4131-8e58-09b560890fd6", + "0dcc08ce-4901-4387-bd15-48063b28eb39", + "459333a8-dd14-4ef9-9c82-bb0e840a37f3", + "2c594df5-ae02-4125-81dd-ebaabdc81f31", + "e335bc36-e181-4a21-8d7e-b93fa8e20323", + "6b066319-c64c-408e-8bff-43c450224266", + "10e3d3ed-fe9e-44bf-baf6-478202f0bb32", + "20bcaa29-7cee-498e-8c13-fbd7b4e6a368", + "3ca42cc2-fbf2-473f-9de8-654aa2e6bdc0", + "cc37b26c-4d72-45aa-a0fc-637e08413d2c", + "e8256b45-51fa-4f70-870d-aa48bd67ec77", + "e92963f0-9b05-4347-800d-dbfebef486b0", + "a61c389a-3f65-4279-ae18-1bb714ae4415", + "9ec09b6b-d1fc-46d4-ae29-29704578ed78", + "a518941f-cb2f-414d-a5c6-fa2b9b2bfaeb", + "53c830aa-2aa9-4389-b0d6-ca649f6c101f", + "28c86c76-78f2-4959-a4f6-bbbd81cb8b50", + "45043d49-4c7d-4f84-b796-d7290fc3625f", + "e4c64fde-cf90-4cd3-b2cb-097c3c0f2a98", + "3a86bb59-6d4e-4021-9a0f-921aca749168", + "7c537c2c-05ad-47bf-8714-e2ca002c2212", + "7c3eb7f7-1345-466e-8e93-1d124830af2b", + "bd1d398f-9b9a-4720-a884-a12b1f4c1250", + "4347f724-9ee0-4174-bc65-9f4fb988c763", + "25a4251a-1e41-44f9-9f2a-35f1b51247a4", + "4054e4ba-3b20-422c-90d3-8fb30bf06c8f", + "15913a69-e02d-45d9-b18f-fadedc716a99", + "74966c71-acee-45da-9795-9ed471bfcf25", + "aac2c80c-21be-42f2-8c67-14c830b5beaa", + "ea6e9b08-5e27-440d-9489-6913a62aa837", + "08437e37-cd81-4d81-b009-236625958fcd", + "390cfd88-6e9b-4cef-82c5-e9aa24182f04", + "4df0299c-d3e7-41a3-a143-adc3b6745c3d", + "903894b8-d2fe-4939-92c0-e1e076f1e5fd", + "12033cab-70de-45e8-a71a-db6abaef9002", + "5d462326-be2b-47e4-85dc-ea33ec8ab9ad", + "1f4eb8a3-f0f7-4d39-b9ec-d9aca7f7bdbd", + "d16703b7-086b-4c45-a815-b81ba36b2764", + "47f5c0a8-b051-4b77-b2df-08dacdb1c621", + "5b35a804-4800-41ad-97d4-e5b7071efe70", + "0b92365c-051d-4710-972c-9a8420a4b7c0", + "502ea15d-ae06-46be-be28-770e41e081f9", + "d3e728d6-8842-4864-bcf3-9012184baae8", + 
"26e77e53-1b11-4a0a-96c5-4888cf8e049d", + "ed29ecfd-cb66-420f-9df7-e3ff61cbd68c", + "862119b6-de3e-41be-842f-4d66b5221845", + "9434488d-cf56-4d8f-8c8e-9b4eb1a10ed1", + "af899072-8ee8-4c33-96af-dd63a0d62cbe", + "3bf91b9e-5b2c-4b86-97e7-a37063a813cb", + "628e7648-73d5-4747-a799-ba51d7dd6ee1", + "82dcd121-fb4e-40ca-81ab-a49289a8dc19", + "7a3a6786-a854-4eda-a151-8495619b2e3e", + "498cce21-0f44-4f46-8278-bb6eda08d29a", + "00172dce-06af-4afd-999d-9f6b543a8a7c", + "082053b7-fe10-4303-b1e6-c04abea7417f", + "8e65a162-b480-4e69-8b6f-54191bb6c544", + "47e1e1a6-f412-4fb5-be90-ea164c3ab89d", + "35cff1f5-9765-4941-8c95-4930f8ff86fa", + "15b26a59-1ce9-421b-829b-6f49af667806", + "82883953-544e-4efb-b94c-04edf5c59e6c", + "9136e083-3c95-4a38-8ab6-d70df252c9c9", + "4d5c9d90-603f-42da-929f-83dd954eaea8", + "37d13f12-41a1-4a27-8f45-da26de8a165c", + "f689a9d3-28c6-405f-a719-402ce2016623", + "92dde72a-2b1e-4e21-b8eb-6c9e2ae15616", + "d3189907-6d7d-4e40-9287-887164ed4c2a", + "17d5ad55-86b4-4741-a5c4-a480117ec0c3", + "52df641d-b10a-456b-9334-b789313f62f6", + "2a38e41d-cae6-4fd1-974b-df1e5c11e00c", + "a6bd286e-3108-47b3-ad3d-1df46c7b28ac", + "76df2da7-1ea7-4ec0-ad67-3b5a80b276c6", + "e2545979-08e1-49d3-a9cf-89fd6042bacf", + "0b8a4913-272f-40e4-a523-111f665dd022", + "4dcae03b-088a-41b0-8d33-8f6c9a6152c9", + "eb5821d4-0843-4268-8cce-5f48f37a4d77", + "a0a7cdbb-f78d-4bd6-8b1c-f41136ac6b3f", + "1eac9ca5-4b15-47c7-852c-0787984085cf", + "b99848af-e525-4891-b57e-07414fc8fca8", + "d8d4738b-9b73-4ab7-b987-c2c9d74c3dae", + "a673a3bc-8092-481b-b708-c5c37ec34a97", + "0b08fbaa-02d3-40f9-be5e-954f38ffe7f9", + "f9064f5c-545c-4e53-b79d-99a531900290", + "bfaf6b9c-89a0-4462-b5c0-7ff2f6b8e934", + "9cdbdf4b-6617-447d-839f-d5e66164a89e", + "207f03e4-7f30-4a6f-b5f6-31e39cdbbacf", + "c4766f87-44c1-4f2f-9492-f668b8e013d4", + "d615971e-ab5e-4929-9317-e2b673fad2c1", + "11087360-d7bf-40c9-ab99-ff7eb2ff6e08", + "3101ed68-58a4-4ec1-8721-ccad93158d41", + "3a35205f-0397-412e-b52d-e76b30df8d0d", + 
"ed4d63ce-1026-4870-a06d-68ccc9b5b1a4", + "c4a6262a-cdab-421f-8b64-25aa99cb7279", + "3d704f56-443a-4576-a1f6-a06cc2fe72bc", + "c5fa1ccd-9db8-4a10-a266-d1c2991b23c9", + "8f0fe317-9b16-463c-965e-00fe1d728e87", + "6149af86-78e6-4d09-bfb0-df784f477596", + "f3806699-35cc-4426-b870-7d625e634c2b", + "733997b5-1fe0-4edb-aa7b-a0cf5d9546ec", + "9707c206-9ea3-4885-84da-1b094d28431e", + "86a1e61d-e7da-4570-8964-1dc3f3095c2e", + "9f6c8dc3-e8cc-4942-943f-09069f1b4b35", + "d548c0b6-6a27-422f-a1a4-7a9cae7871fb", + "dd674571-00c4-4248-aac4-f17cf1c4fe8c", + "8b80a997-07ad-4b89-8240-d5f34142e782", + "7453f4bd-e711-47cf-a244-29dd428f4d69", + "ee8e1b77-4b1e-41df-8abb-2ba89fcbcaba", + "cf42748f-5bfa-4967-abd1-0420d9f8a459", + "7333c72e-724e-4273-bbb1-bc268baf08fb", + "60a16c28-0148-421b-a317-65b0f98da802", + "6be63c9d-6c39-44e8-b8f2-39f2b38df425", + "7a961bf2-2d95-43fd-b80b-4a71468859c6", + "6968a885-0fa8-4a96-b78b-da1cb76f4a89", + "32acb2ba-782e-4303-8578-ad1e8b399cd4", + "1db57d32-adc8-468f-9bbf-c01e4bd79731", + "2e20d41e-b9a6-4874-acfd-322aec84c19b", + "0b755ba9-b090-473e-8477-2c9f26554500", + "6c4335f9-43ef-46fa-ab52-27c1c0a6cbd2", + "06e1bff1-29e1-4a24-8b2f-e56ea5396ddb", + "0e62b859-e50e-43e3-b77a-140795b32167", + "d8b37f44-919b-498a-9414-1690450cdf0f", + "65c57fb8-a268-4598-8c2f-9c2b0ce860de", + "dfd64904-2625-406f-a8f2-62585a505f0e", + "37acd3d5-2a92-453e-b7f9-1a694711d631", + "e3be661c-2969-42e7-b958-b9a01667b313", + "56acd4c3-a4dd-4411-8634-c359d78da5e8", + "a4dad784-c2ae-43d1-b5fc-045d4923d1cf", + "65bd509f-db6b-4b58-bb83-124ec86d8bf1", + "1c9b90af-51a3-4a99-9730-87897e82c8a7", + "8b0d1f84-74a2-439a-90bb-82e0ee11361f", + "0211eb06-6d1c-4613-87ae-5de1c099a501", + "d823abb1-d7e3-4920-b61e-03e77c795b22", + "fe5bfa8c-0eb5-4b86-885f-d4543482c534", + "79886315-4921-41ab-95a0-01443ccce78c", + "20e9da4b-a5ff-4d5a-a5d5-efcf027f5e18", + "9436cef9-668c-4abd-a71a-ee12695288ad", + "39eb593a-0111-4b0f-b1cc-461216650dee", + "a7440ee9-12f3-4dfb-a0ae-2e17ae60839b", + 
"e67a7a9b-6b40-43a9-8b7a-fa44dbef9fa3", + "913dfdc6-1e11-4f56-aebc-514c24a1075f", + "fc9a12de-7d75-40b8-84d3-2e94610a59e4", + "2d9f03cc-6f91-49c9-b1f8-9c8627c7ae0b", + "411cffc0-c2ff-4050-9513-0dea10779d0a", + "626417e9-bc1d-446c-98da-141cd625cae7", + "d5def074-8287-4799-93ba-ea337131ca37", + "8ce73d7c-de4b-40ea-80c5-3afed0806d8c", + "1dca4500-204f-4dba-9e20-2cc74735a00c", + "5aa98616-2109-41db-b0e1-4ae85f58fbc8", + "22751e26-4c99-4ad0-9c11-4b013b08bfb7", + "3f703179-464f-42f0-a7e8-ea5e01bf5cd3", + "fe206147-f01a-4fad-b015-f72466c0b397", + "3d3fa85d-de5e-47ff-aa95-c0c3f1331480", + "2b555a3a-e171-483f-852f-06e6819e06d9", + "2bb854e7-bf9a-4b84-ad0d-8d8c6e61790c", + "2a13bd43-945b-4372-8cce-d7d4f60162eb", + "5412941b-d004-4a8a-8f4a-794b18652b2f", + "23cf34f7-98dd-4cbc-baaa-fb225ba00c9e", + "7adac89c-3622-4b38-93ab-c9be67419e29", + "3a377488-428c-48b5-97ea-5949c738c581", + "8001f3b4-21eb-413a-8520-3f894a5aa0a0", + "94dd7110-e8c5-40a1-9521-c79da2d98cd4", + "0dbb1768-b503-4245-8a25-afc45505dcbc", + "c540f375-e2fe-43b6-97b3-217d52066cf6", + "2b914a73-1a70-4c2c-9dc5-ef61c775072f", + "c91ffe95-1486-46b5-b4f1-545e51127dfe", + "501924f1-db32-4ecd-9cc2-4947a2036458", + "01bbeeac-ede9-4ba4-98e0-0ea228102878", + "d615f7c1-186e-4580-8d66-ad7939c59eea", + "22b1085c-085b-4ef5-97ed-e773099b4b1c", + "dd1c4ee1-5b3f-4897-984b-fc7b6fb13efc", + "d67ceee1-884d-4947-ac51-3cd14d6b931a", + "bde0a43e-edb3-4a09-b554-e4a5e64ac0d5", + "49aa528c-e5ce-44a1-8ef4-3875e5b475d3", + "d0546df7-1792-4b74-a0ee-cffc7e07c884", + "6cf4da7d-27f3-4136-8bcc-b968e423ca88", + "a1568d9f-e95a-4dcc-8317-5dbada79fd95", + "727eac33-6e95-4065-8506-51ff68ebab2a", + "ec5cd8ef-01ce-44ae-8a82-15628948749c", + "ed99ccc6-1a5a-443a-bff0-5e84fcceee58", + "98691524-3582-46d9-be3b-64707a174ee8", + "703602f8-48b1-4bae-b252-4b4c35edae21", + "2e458e45-ce93-499b-a85c-d4a0c7df81f0", + "bc9dd4c2-8719-4c0b-88b7-5021ead49b10", + "f28a8e7e-8726-4d26-b8b6-ba113bb6bcdd", + "38c57cb9-bd16-4a17-998f-df87d5a6c7cc", + 
"72022cd6-b9ff-46ec-8852-3a3be89ae6c5", + "4890d990-2cac-4bb1-8585-5905fa6f9c29", + "69ca60ee-1074-4a54-8a3e-d1cff36b685c", + "bc0b33de-c2a0-427d-916a-85ff2d72eae9", + "c12c9344-d549-4ed0-a627-14deef9ff81c", + "56d6b2b1-6083-42e4-898a-bb7183f7c077", + "6195506b-9584-48ae-af0a-c62baf8d9f1e", + "e93f5efd-0ecc-42b9-8f16-eb4054d6c325", + "b639a7b5-8274-4ee9-84d1-ff9896a3a819", + "90ff07fe-b756-472d-a5fc-55a15c57fe90", + "11ca0db9-daf3-41ea-a3c5-b8897027ae9d", + "12bf5243-b633-482c-ad87-f226f39a6943", + "7aacf63b-a7aa-4ed4-bede-c6702cfc0ed4", + "f85d5b11-ef93-40b8-b63f-bcf800d46f74", + "ca23fc2e-9736-4d46-803b-9095624cf911", + "cbda89a0-5e8c-4ee4-8792-43d01d1b3233", + "7ba90389-3ead-474a-b6c9-ab17e769c418", + "a48d8ff0-78fd-4c2f-9afd-be5772a002f9", + "4271ed18-85dc-4cda-a2a5-35447188e732", + "32438378-24c4-4fd6-a00d-ed6a391330fb", + "72956dd1-0db8-43cc-aeac-2ea647b7d88e", + "f8c02c8c-d149-4800-8dbc-4121b7dbe9c1", + "b42f950a-f666-4307-bb03-dcd0c413fc21", + "3ba1ea7b-010c-4236-8ada-0d9d8c6a948b", + "fb52a98d-977b-4962-bdd2-20e15e236474", + "77ef53b8-77cd-428f-b625-b492da0ed27e", + "3d1542e7-8344-411a-8508-65a0ae3a13a8", + "a70d3ddf-112e-4609-845d-5c81796a6160", + "8e1363c0-5d20-4b4d-9799-0625f43552ef", + "4bafeed9-dbf8-4fcc-8fda-eee429bab7bb", + "2b5bacea-570a-413d-b64e-c6fd8921b182", + "87dfab98-a53d-4892-b43d-a73beff6fd45", + "f150ee96-0db3-48b0-aeda-1b74d2608a95", + "551c208b-9145-45a8-b852-4eede2a904f1", + "3f877d59-a272-4bb6-89b1-c92d45a09d3c", + "87bb58cd-cc0c-404c-b264-839e1ae21eef", + "46dcea58-6fab-4a94-9575-3674a790320e", + "eff69698-7d01-46d1-a8f6-bdc0a301408e", + "d9e02c6e-6b3c-48ec-b609-a20cd2dcf0d9", + "d2960021-fe8d-47ae-8d3f-b65fa7e13403", + "2be8ba50-50b7-44ce-b42f-d0f85230b4de", + "af43e5af-0f82-49c8-bb84-0b1b12c4cc50", + "fffe7856-edde-4a3b-bfd4-1300990b6c56", + "020e1109-0eb3-47d5-9ee7-b4d4f8a10e35", + "b8833b97-9b07-492c-aa19-5dcda1c1fcb0", + "42e923ca-982b-4365-a06b-72abff4ba4b0", + "8cce5fe6-b81c-4df8-afd0-3cb3be8041ec", + 
"7a21f1e6-14b9-44d0-ab66-09d1f05dd261", + "861e9e38-6f57-43de-b35d-09419e01bfc0", + "509f8251-6665-46ca-a2e4-b835357a69c6", + "d3210543-fe3f-4c49-8589-c1a4ac137e31", + "fb83fb95-7fd4-43f4-8594-a9cba3b54dcf", + "f7a5f1da-c166-4147-a6e6-c5c43d9dc434", + "ed9b0a57-946e-49ce-9263-b394627b7095", + "0335caac-96e4-4200-8284-2a6bed06d474", + "3fb2f631-e035-4d8d-8fba-984b89dc6650", + "54d4d030-e90a-4939-8707-fe0712465400", + "24ebe0a3-9798-47ca-bcbc-4977891621a6", + "75918e20-ad88-408d-969a-b7a77eb1dc35", + "063be159-590a-45a9-ba4e-aea9f08d7599", + "c47b7459-091c-4b43-a9f2-529b441a90cb", + "98d50a00-d5a9-47cb-9770-9bd6b3e3ca0c", + "4def40dd-9fc7-4298-a211-df5036690d98", + "1d5a86d5-8b94-4ad3-83bd-b55bfdef4253", + "da7f06f1-1f91-4ee9-b83c-1d28e0a240b7", + "66ead5c6-1d6a-467a-b5c7-3fd5f67d9814", + "ac168d7d-c6e8-437e-b30f-787850640f6f", + "71349d43-4512-4631-a40f-c4456e9607b8", + "e2f88b60-272e-450e-a213-725c16fe82a6", + "5b7a9edb-0c03-42a7-b211-1055b17459f6", + "9c5c8a14-a57a-4485-99fc-d353632cb652", + "1303d28a-ca83-4b3c-90db-a0f27eadee03", + "5161796f-e744-4f67-bb5e-9f9faab48048", + "3d3d42d1-7f45-4ba2-8566-719f57e5947a", + "5e7fd6aa-d897-4d85-a8d5-7ca16ced5562", + "9bcb88bf-48cc-4d1f-b3f3-4d1d9e9bff84", + "3d3bd7a6-514d-4dc9-a880-64603c126e1e", + "dc35b06e-59d9-4196-a9dc-c0908e765941", + "6c80ee0f-ed9e-45c4-a51f-1f9206113f52", + "5c8ecc63-efb2-416c-9673-3a4c98246ff7", + "215a933c-6909-49f7-a7bc-8d1999015cb8", + "b7426731-e834-49cc-ae96-ebc2c0e52703", + "df60f13c-1db0-4d5b-8a3c-bc77588f0620", + "14562134-7d9d-41d3-9158-7262acfe4051", + "2e50e5c5-ed8c-419f-b789-526f5713af78", + "24094030-3a70-4019-bc34-44fe026b877d", + "d3f64ba7-f63d-4b33-a006-27f5b270e0b0", + "5dc6e0d5-af1a-4507-b7cd-84a88194914f", + "567fe955-bde6-435f-aade-e99bb57571a7", + "821347b5-148f-4563-bc3f-499cffa8d0a0", + "88a2b2fe-9e0b-4673-b577-fa26194f2f66", + "fada400a-5186-4658-9ea1-1210564925f8", + "1ba7cf4b-383f-4106-b88e-1cc5ebcceb62", + "977d85bf-d17b-4ddb-96b2-48ef0cb3af7c", + 
"37d1cb64-abfb-47bb-80e3-b3de7fc031d2", + "99c38ea9-880a-4485-bacf-8609c1d8dc8d", + "2c26c02f-3d07-4f33-b332-d687ba4a9535", + "933a8e60-786f-4014-acaa-5b60d3d7072a", + "c54bc126-486d-487f-b0c0-3316d19c6512", + "38bd0154-53e1-4c3f-bd67-ca5b82c69569", + "31c6d364-3c5d-45fe-abd1-8c0fe0d5e96e", + "2ebcff0a-3fe4-4cdc-b86f-bd674b196aa6", + "ee36d606-fa89-4b11-8f0f-f0254da0d64e", + "2fefdb2c-cf92-40ae-92d7-eeb7afd28f26", + "da979f93-1f1f-4979-b222-5e6ece8e1472", + "bccd2502-0e16-43a3-93f3-607e89768791", + "c672174f-7d70-4879-9fe7-0f8e8962caa1", + "4e04973c-c171-42a4-b932-2addaa48e1a4", + "dc559340-2f31-4f78-8aa1-a544f8c246b5", + "02f1d858-5e0d-4734-ab67-62d32f4d0101", + "2d4c334b-f258-4521-a56b-26b6a3f8b80f", + "742f4861-f125-4a83-8bc7-242320bf455e", + "a20027fb-b1d5-4229-b316-aada8e9f7962", + "f8655645-c8bb-4b8c-8d71-c2748be29603", + "72e52e56-84ea-498b-8ad0-2deca6c6ed67", + "e400c58b-0e32-4a85-9f39-0b8fa6c4fe74", + "986c9e27-91f0-49cb-9b94-2826364d09c6", + "e2173600-fef0-4474-b354-fc5cfab16cfd", + "8af52a4f-10b2-4488-94a3-24c2c697ab16", + "590dcd40-97ce-4427-af2f-8ac29ee8a985", + "8d325f50-c035-4d5c-a843-cbbb42442934", + "bdc0955f-db28-4fad-8f7d-b18ecfd9706d", + "3d840984-0bd3-48b0-b6b7-503870c74ba0", + "0d8b0f7d-d063-4f01-9198-05d23b8032fe", + "fb73c814-bde1-4922-8655-66d86f96bea6", + "0af5f8cf-ba4d-4a50-a09a-324ba2e84267", + "d2c7d611-1733-4b0d-9907-24dd51c4008a", + "d19977a8-37b2-4a1a-add1-b14245cc399e", + "03382ac1-6360-44e1-af9b-4bbbee44e278", + "2161ebeb-2048-4309-afbf-8882dd8416cf", + "d84ec398-4115-4073-adc4-99d8d1fb0250", + "3585236b-be29-422a-96ca-aa37d691f367", + "9a49699f-bdbd-42f5-8eda-acc32dbe0b69", + "a4011245-8669-4077-911a-2a60c43d69b9", + "cea21ccc-6e79-4d66-a8d1-ea23309afedb", + "29ec97fc-245a-42cc-b405-5fc37ad425f0", + "16514efb-1ef9-4dc2-b411-72c7fa13d303", + "02647d75-0404-4313-a6ba-18c94a90eee4", + "397305ff-dde9-4e53-85f5-7b0ed6201fb8", + "e921b686-cb91-4ac5-9821-c8edaeb44920", + "58a735d5-ab3c-4eff-af69-e458f3de4886", + 
"aa0cdc9a-6ab0-4c87-b4f0-f06eb2ea3ceb", + "9228778d-ae44-409a-9aa4-4eca481c2fae", + "f4025c66-3b3b-4a37-ba6d-74907bd9c6fe", + "80da456e-63de-41be-8540-7990490aafaa", + "2fcbb79b-50c2-4ca5-9621-c2dbd1084225", + "95a503ac-e92c-4b6e-896d-7ab025c3bbd4", + "1cc1fd61-3627-49b6-9eec-20b2712cd6ea", + "0c791046-7a67-4747-8e27-b75d19a62495", + "563e8436-2b40-480f-b3d9-d1035cfc92d8", + "4511efb9-9771-4329-9172-8418007e91ca", + "e7b82742-adc3-4758-b7b6-6c0cb41d8f8e", + "8e986576-e099-4e6e-b5da-cd4eee04e255", + "c2ed92fb-b74f-4ffd-9511-4fe03aa3fe27", + "b68e05b9-fa18-44c0-94f6-0793b80aa2de", + "00d7e07f-e9a9-4c96-a50b-eda0ee6029d2", + "2d45c004-dce1-431a-804a-cf0179e98038", + "b9027cc5-8d78-4947-b041-c96922485ad0", + "bf3ee64a-adb7-4e99-979d-b4c07ce7beae", + "1ca750d9-7eb2-4a61-b93b-52b700b51fc5", + "65798488-405d-4e3c-83d9-2e6f102f2988", + "d11fb523-541b-4513-97d2-009bed124157", + "b7679783-740e-4859-b115-f7ca2ad2fd48", + "3c54fd16-5362-41e1-b6f7-188ea1ea7a79", + "b80699fa-4446-40bb-b8a6-ff8bb3124830", + "a2c81470-14d3-4e2d-93f5-daf221129215", + "871b919b-ab78-4b93-a870-97200822d4e5", + "c31fd8ba-c8b2-4ac9-ac69-928eeeb94b4d", + "3cdad89f-c90b-45cb-89e2-1acf4413302e", + "07f19530-a0c3-4a49-ab20-77f65c576d60", + "3ac64a27-1cd0-4110-8769-a35de199d58c", + "aa7036ad-aa5c-4f24-bfbd-9fc4c5fb131e", + "29efda69-114e-46a2-9da5-2eab8c56190d", + "f866a510-f244-45ab-8538-6e48fa0ad85e", + "893ad032-7629-472c-879c-dc7878cbf2a1", + "cff4155e-6d47-4796-8360-b09a2c8f4a42", + "57493c1c-0bc0-452e-8a1f-c72db95541af", + "68b49eab-b9a2-4d8e-847d-54814e0c6f54", + "0070e176-e05b-4909-b7db-9449fb531151", + "b16ad347-efa9-4167-b225-21ddb8fccb6f", + "053f521d-9e51-4a8f-b9c3-233067df75c1", + "02b63d16-2f68-47a6-befa-c3e56e06d382", + "1f6e5094-187a-4655-949e-827e805ff5ee", + "8ae65196-f2dc-414a-8441-262d23cb4bb1", + "2f91c57f-8422-4eaf-90e9-5b091ccf3ed2", + "26ba7e95-a9be-41b0-947b-8afe99dc40e9", + "ea36b53d-dc3d-4cf7-b749-b37d42e49e56", + "f5530ef5-6478-4eb0-a19f-4e661d83475e", + 
"b906b020-d168-463d-957a-3de6626afa2f", + "a84b9638-78d9-42ec-b3e2-2943a1ae2418", + "d4356ea1-0053-4379-8f3c-2b5e7420afd3", + "7648a063-ae2b-4f93-9eca-094908043bb9", + "ec2438c2-e598-4fb3-a8af-f251ec758069", + "23e2e947-4776-490c-b4ff-0a6b10dcd8fc", + "f684cebe-36c1-438e-b4b4-bce42bc830dc", + "0a50b450-1d17-403c-8066-37ea4d8a510c", + "9cc5ee2e-d51d-4930-905a-7622c26ad8de", + "d5d40705-6d99-4396-8a81-5106c2853f74", + "dbb25ade-61c8-4f81-8043-1649a3d8ce54", + "aa412d45-1882-4c13-a73b-8dc1a8de9e94", + "b632d371-b8cb-4df6-bf7a-8d9d03998727", + "2c20e0b2-d84f-4eaf-ae9f-01fa345cb815", + "1897355e-4eaf-4cf7-9da7-d7f995758842", + "fcf57bcd-c2ad-48ae-a46a-8e2de6f24c04", + "7fb150fa-b11a-4bed-8a6e-df41fdcdecab", + "2c211ad3-dc06-498f-b1b0-dc64f19b31ef", + "97fe94f4-55d9-4c65-85be-72a1a67a635d", + "a6dec074-7c37-410c-95b9-2ab17e870c88", + "44dbc86a-7dd6-4f0b-857b-84dfc294722b", + "8597b770-5a66-4064-84f0-205dd33c997e", + "286c91d1-cbec-4213-ac40-da8e4dabaf4f", + "44e9fc2b-9e7c-43a7-9c08-66cf2f0f7f50", + "60108c93-a745-44a9-ab10-684b9f926c55", + "298c30d6-4257-471c-b3a9-4b3112419d3b", + "365a9ecc-890c-49cf-b9d9-9857c273eaac", + "9f2f35ea-67d9-4072-945d-3460b2de3b56", + "45335ed2-a0ed-41c3-92e1-9a0b5f92c750", + "56d97a5e-b694-4eba-b87f-56dacb9fa05b", + "32897e49-2deb-4ba5-909a-73caa80dfe92", + "93c6a36f-1abb-4360-8090-0d25fb171123", + "3e0f92d9-b8c3-4c0b-9865-fa0ff39bcb36", + "701ab5d5-03f1-4f1b-a25a-b0279af41404", + "170fdb99-2639-4be6-aabf-ba839991544d", + "ca3c863f-d3fb-48e0-b7aa-467701ec111d", + "72d49e5e-b575-4e2e-ab47-182e34a90699", + "5a7f9dcf-5461-4454-b751-0827cf16044b", + "f8f29b1f-a792-45d2-8f08-54d3a5d8250c", + "a5172f30-7ef9-45ba-a827-0d5da69488fb", + "5e4126b3-24c0-4e98-b506-1da8a558b031", + "1c71ed37-0237-422f-9cd4-8dcbe23eb73e", + "259f69dd-d2c0-448a-bab5-0ec3c2b38456", + "9097c1b6-4b14-4723-9e75-abddccb06fc1", + "acc01724-e7fa-4cf4-a358-0c7c58ebd064", + "b7c7d614-540b-485f-8b89-93499fad7578", + "21899d4d-7e7f-4474-9f65-90d843993fbc", + 
"001e93c6-d8d8-47c3-b4e8-2beb70326356", + "329a8d10-6b2f-470e-a518-86523d55a238", + "5beb2e91-c19e-434d-90fa-c46bfcfd53c8", + "c9dd5b52-19ae-444b-8564-995eb91bd89f", + "25ad95dc-0ea0-4208-aa79-083e7ce35127", + "bba5601c-7108-4beb-b088-ec63c7badb79", + "1218d09a-d272-41a9-a312-24d8d762d3d4", + "712ce0d8-8b69-4812-94ca-cf482d377468", + "3db2f45f-e913-47de-857c-2ba95058e1bc", + "c2d9687a-4b7d-45d9-8e37-fe7c38f717c0", + "56fd8dd3-3c90-4ec8-b475-3d5a2767ae88", + "c92a7119-2965-407e-9abc-fb762641aab5", + "6378fbd5-6566-40cd-9917-275c426f2c8e", + "4f2a4caf-6b38-41a9-8026-54ccd080079a", + "317a3660-5cfb-45a3-94b1-9eac71b00560", + "16f9bc5c-2e0f-4f53-abff-0d959a9ccfee", + "3f62b1ea-66cd-469d-85f1-b8420a08ffc5", + "cbdd3d53-602c-48ba-82fe-dd9780bc7584", + "b0aabd05-bdb7-4714-a579-f1c7559d2be2", + "8fd23f0e-3d3d-43f7-b8c1-12e914b67ef0", + "a9449b70-6b4c-40ef-995c-9012b56afbae", + "217f6c2a-8248-455b-9206-4810d02b2566", + "209ae188-bb2e-4a7f-9ab9-a3c89974f43d", + "9875213c-e492-4c9f-be7a-a71428df238d", + "1316766f-c0e8-428e-bc34-1b3ccc397a8b", + "16910e6d-22f9-45da-8f20-a26d72861af6", + "940cd084-3882-4a9c-83b3-c0f48812b0f5", + "f03645c0-9a12-4a81-9e37-64774f86543a", + "326e2482-1799-40a0-b1d0-a570171b4b06", + "c9891ff0-e4c6-4063-97b8-f109a7787125", + "6ad9f997-7428-4539-95a2-ce43736ed87d", + "bfd59d86-4dde-46aa-bb26-1eee153629b3", + "8373a197-40ae-4f28-ac71-1f76a5e8963e", + "83e51f08-3669-4323-aa88-1dfda473f8d6", + "7a5c5beb-50f2-4340-acb5-6c0350951cca", + "1eac7c97-6fa9-47de-90a8-33d5c3b69e2f", + "822edd12-009e-43bf-86dd-f4c8cb692331", + "8f1d8d0d-f227-4c75-81b8-8312f20af225", + "7e7fa67e-610c-4f76-b216-babade80cfae", + "1c97164a-ddb0-4e33-b209-66ff8a4f3719", + "c1100e2c-78b5-4e90-a35b-c06bebb38437", + "a843b60c-9d59-43f4-ace0-0a6335485f29", + "8787f525-08bc-4691-a1ed-b5763153c5c8", + "e9ca7d6a-3173-40fb-ab2c-20aea3ee418c", + "3c047449-0cf8-41be-b26f-630b5ebb2fcb", + "101ef1b0-f0ad-40ef-a298-4a8851d10a97", + "9d31f59c-f3fd-4ba8-9371-a66c49a12799", + 
"a2896e7b-f15e-44de-bb4a-ba321c3cf0be", + "30073588-f79d-4b27-8f56-6ad63d2224c6", + "c27adb42-5071-49cf-a930-861047ac790b", + "688d4066-670e-4867-b9da-98dd41d3ba2c", + "1fbe0f92-2432-4f1d-ae17-65e451a7585f", + "8b43d41e-6740-411d-81a5-e526274af892", + "7c89059a-56c5-4a23-b366-a1032a7b6e03", + "8d44ede9-ab41-4e06-b207-6350ff7c419e", + "10051a0f-4adc-474a-8040-8bbed05060fe", + "b2fd9d49-03f6-4ea2-865a-a909291bd3da", + "6f305fd2-ecb3-4f78-83d7-f814dbedfe51", + "4b073263-ec9e-4815-9c5a-5f714a2afe5a", + "c8c2ab31-6e5b-427c-8994-7d390329a454", + "9a7f98ff-f2a8-41f1-a605-396c74324e71", + "6357faf5-f50e-47c6-b063-8c9a5499a461", + "2e590a27-c675-4656-8d66-5885fb083b5a", + "9ba90f64-f1e7-4bb6-9221-93ce71ea9019", + "e0b52e16-ca5f-4af5-8392-7a5206401204", + "33d07d70-3910-4af0-b585-3da23e08131d", + "0788df6b-fb16-4c8f-80c3-d812ec57f839", + "a4a624ad-a508-42b7-adf1-890f76f40d3a", + "d13c4f82-af52-4a87-b1f7-36333bba1cc3", + "c36f141c-0f3d-4f81-ace4-9c221df0b6d9", + "e6e3482d-ead4-428e-a69f-35d40aed1992", + "6707c30e-b515-4f76-92a3-61c78a3df69c", + "f185d106-1247-4e46-90b1-6c59e10e9d56", + "d05fa1c8-ca63-4c2e-b543-104c0e1a76bb", + "516f7a70-ef0b-4894-814f-e4dae02c92c3", + "17e91145-e745-453f-afb8-0776e0cac573", + "6845ac87-2db3-4f15-b7d5-ff809a7ecaa0", + "eb9b1e51-8abc-4e70-8259-9b81ec276dc3", + "f94d7478-483e-40ff-a23c-0020bab95cd0", + "1a642d8d-ccd3-4079-ba0b-c942dc00db49", + "90412895-5b03-43f5-8eab-f91ab2ce06cb", + "61c851f1-c30e-469c-ae62-747528970e0c", + "b4ab6126-e7ed-4501-98b1-4d3fa7271f29", + "1264dd64-aa8d-4e1a-bb25-276aa91d587a", + "39684591-ea5c-4f7f-9533-80571ab83236", + "e7dc0205-7833-421d-bb03-4fa114ff83bb", + "56fd9a23-229c-483e-ad67-238a143d23e1", + "5bae6525-af21-4cba-92d5-380c19b18165", + "836ab0b4-451f-4528-b589-f0337e4461cb", + "88650a72-e80d-44b2-a528-435959908fed", + "3b93771a-500a-49fe-b15c-8f5ffc4d9800", + "72be010f-d916-4952-b580-07dc63d3dfd5", + "2f3e123f-b96d-446d-a04e-ee2c0ffe58c0", + "e86b4e31-9e68-4df4-adcd-abd4c061c2e0", + 
"36a98546-4125-4fb6-9f96-1fc362b48273", + "2f41561b-a2c5-466c-97f5-3407c528eff8", + "f23fb016-ec6c-4549-9b2c-70be6a2fc9fe", + "c2680d32-aa4c-49d5-92ab-103a5d24a6d3", + "11de3c4c-4d23-4d75-88c4-7d94b058bbbf", + "b75e30f4-5cde-41fb-b5e5-8bbbd5c3b179", + "0a5e338a-3903-4d18-8b43-0d1c2054374f", + "9ac6ade7-05a2-40e3-aeae-1e27c22b0936", + "ba684a90-16c5-4a5f-b40e-7a200e15d06d", + "80f20dda-9ae9-4074-b72a-517080de45ab", + "093be937-e5b7-4f5d-b8dd-0c3ace7cfef1", + "63655aa3-6525-4116-a641-a29fc5541438", + "37951d92-d283-4ee9-8304-43165c81ede0", + "2fa0e695-f8ff-4237-9c73-f1936b9988a1", + "3ce37304-c715-4f70-b6cb-ce387abb2ed7", + "43eb18a2-c9c2-4c1e-96a2-5c86e4212c63", + "f9cb9185-b6e6-498e-879b-460685976be2", + "7e4b9793-5480-4548-b582-195020979fab", + "d09a514b-d833-462f-a596-761be04a8c25", + "9e68c555-73d3-4465-83e3-2e6cd96e8889", + "8c79ca96-638f-411f-80c9-458375dee052", + "7630748c-d9da-4538-8885-87a5ed4af5b8", + "b91de4cb-b636-4cc2-94c4-bd7eea7a8937", + "10453bb7-ae08-4189-84a2-f05ae0f15b1f", + "be36f7e6-dd4b-43d9-ba2f-cdf1fbf2f85c", + "c7365279-6b43-4add-91da-7e96dcbb1ebc", + "d95a166b-ab46-4e19-9a2b-5d52827b2c32", + "ea2dcd4c-0c33-4d63-9daa-a75e852bd77c", + "f07675c0-f88f-43a5-9bd1-ed05399e0707", + "a54e06be-c449-4d04-98a5-7ac65e06e383", + "30055d11-01fe-43d5-9296-c7ae5e3e3440", + "b6226927-bb58-420c-89f5-b061f3ecf29c", + "ffc1d561-520c-4e0c-b27a-b2f4e9ff8726", + "d9844bd6-8c1b-49b4-9725-f7002251cfef", + "92f60f29-572b-4d60-a864-3d18a6e7800b", + "19084ddc-4863-44c8-9e4a-232bb7e343ff", + "ddc61143-2694-4a79-bc21-0a7eedd99133", + "4b845675-e197-45b2-a2ab-9cf2295803ca", + "b59cb009-a339-4e34-a40e-6614636410db", + "638048ea-28e4-4f8b-a698-cb34cee91e3f", + "37cb93d4-0b07-4fda-b43b-fd27c3f18546", + "e3c6c5dc-de49-4e39-b8af-e2cb8c759459", + "2816bd28-6694-41e0-af3a-e074df8168b2", + "33af6b99-1995-4f00-8ded-193840f6dcc6", + "f52286c3-b31d-45b4-81fb-ec67e51382df", + "0e42e0e8-cae5-4fac-aeed-bb636f94fac7", + "9e155cab-584a-416d-9d2d-25fc68d3dc4e", + 
"aaabaef5-3e62-4748-ac56-fcfca9528060", + "961c2eb5-8f4f-4179-b754-b3a684fffca2", + "9f005c5c-4a62-4fcd-a10f-a0ffd69f4ee2", + "dce108e4-8e06-44ad-9d69-7fb203d3193d", + "850d3331-3a57-4c05-bf59-3f573d996b8f", + "b794595c-bc42-4393-8c90-a75516a1ffd6", + "6978f188-48eb-4da8-9ed6-b62b0ae64ee2", + "8fdc7309-9785-4bb4-9f2b-023ec28e14cb", + "06748921-20f6-4e91-9629-d734b78f3433", + "948354a8-fb3c-453e-81b6-e3851d5fdf04", + "299879a8-ac8f-445e-a42c-720911866c21", + "87c9c160-ec21-42bf-8313-170aa2628992", + "893fd79f-00bf-4390-8515-d48f622b7483", + "197262d9-fe55-47de-a9f3-eb2c27284c23", + "41bef055-c61c-4a67-8e35-097537d18145", + "94886cb8-b3f3-46bb-817f-945b71f97971", + "f41f2486-a3a9-48d9-a00e-b9ffb6dbcbff", + "389eacc4-1251-4035-94b1-616a850d419b", + "5e762eca-0ba1-4cfc-8749-6e5853c6342c", + "276a59c2-073d-4222-916b-f126fad39e00", + "ba1b71bb-7496-4f3a-b0b4-7e861dfbdc0d", + "495d75d4-eb7d-484e-8b59-c56432a5eded", + "2a53cc0c-31a7-49f4-a391-368860679ea9", + "7896d97b-7670-4729-a624-e24c6bc99398", + "1b5f4cda-5bd5-449c-99dc-51ecc7543b33", + "e193f608-6126-4b53-9d37-ac03d76f00e9", + "cad8262b-668f-4c3b-870f-1c698f0b6f96", + "230b549a-77b2-4351-bb8e-f86868f7c6f7", + "30247c8b-a5f3-41dc-8488-edc4264baa4a", + "b8e185b3-015b-4ba4-8c6a-241e27a13b2a", + "baebe396-4948-40ab-8d35-7f40415a9149", + "f7353d73-a65b-471c-b9e5-9f8b067bddda", + "16693955-3926-42db-84ad-ceba7173c792", + "ea5bea92-8580-4858-bf6e-8e7c1c13d316", + "94aaca19-f115-4575-acde-9c6134291f64", + "7973f5f3-33b4-4178-b72a-efa1759a4ce8", + "c6fcd349-e120-44e2-ae1f-bdb91c1a835a", + "794a6551-b918-43ca-8224-d8956e51ea92", + "6dcb75bb-88d2-4d6e-97c2-dfa6a1995aa3", + "ad0901f2-47aa-414e-94ad-a794c78713c2", + "0b277a1d-0df2-4eca-bab1-41d2b7fa7c45", + "304defcb-25e1-4107-be86-b826437b7c16", + "881fc77a-d362-432d-9ef2-bc38c389eaab", + "b7cfa468-61af-4054-bf07-ae8de68717ac", + "1b0b7898-d1d3-4ccf-8196-d8319e4cf572", + "656a9166-155d-435d-87c8-3f940613d6e4", + "e1882ccf-a09f-46f5-983f-829edf431276", + 
"239d9e08-6cc7-46cd-84be-7400500b3b32", + "42d6a751-f64d-4124-905a-ac88e094a717", + "dc3dbf11-025e-4e61-b71c-e7b3cd7795c0", + "8c5cddf7-2a7a-4baa-9a6e-fbbb103c83c9", + "d4d99596-52d9-427f-afde-e33135e0917f", + "8d8d177a-f60b-4e17-bf0d-445d85b741af", + "f53ac17f-614f-424e-af41-faab75fcfb10", + "11b9f300-3c62-4195-a5da-85065eb44d10", + "36081879-6c18-44c2-ae28-29c7bf2ca855", + "4e495dc2-c49c-4102-86a6-542bfc9c6394", + "0ae42cdb-3275-424e-b7ee-5b4e4e801dbb", + "9efa93ac-7bb4-4900-ab6c-5678f09859fa", + "017ee28c-e2eb-450e-8408-c76a733a8f91", + "99fc5b79-783e-4e70-9a88-a1b252f82e23", + "959e3185-4e95-4495-b008-3d08313c3aa7", + "a7c78f07-773c-4b58-8610-7bb9943ad210", + "d187a109-2da2-4c83-ad52-21d89a2cd860", + "bafd11c2-e198-4fff-bf7d-8513c7fb3026", + "a351b7f8-d084-43bb-9209-6bf5c44eb3de", + "ec15feec-b65a-487a-926c-109b88698b2e", + "f5637cdf-149a-4929-9efa-bcb92ec56b2b", + "b1b4b209-f702-4690-be3b-e36f23ae854d", + "5d8bee93-785a-498f-ac49-73f65cd774f7", + "f8a3a5c5-b0bc-4911-b2b0-dd0ec0d5b38a", + "f5de0dff-afd0-4c84-b4c2-b163e124fca8", + "b278397a-25b0-4474-8400-8bbd1b71a78b", + "3413d21e-06ef-4bf0-895a-b2a52db763da", + "be41e405-5949-4453-9dac-767f4fc4e58d", + "33957d37-6ac1-4a76-9cec-368639605c13", + "72cd4a8d-e2bf-40a3-8cce-526367e8f7bb", + "e6c1da72-9256-4846-bc21-73d955101b79", + "2a7f1580-11c6-45b1-bbc5-e73b9a5d35a5", + "c90bb19c-ee32-40ac-ac6c-005fae1ed8cd", + "5bc816cb-0427-491f-bbf6-0ad637f83f5f", + "f551743f-a017-488c-bfdb-44c7330bb695", + "a5ba8ae8-5961-428a-8b62-8eaa73d75144", + "e101e94b-75dc-44d2-b918-7f66a011320a", + "d7b0ba13-d6c7-4e12-be7b-753fbc562e91", + "ccc98fef-1a59-466c-9a8a-40805908ae42", + "54c27899-6b8a-4eb1-9392-6a19a37d5191", + "edec610a-d5f9-41cf-87f0-e181ff75cff2", + "e091d4fa-52b7-4fa8-bf90-4069b5dce030", + "a7d350e9-1eaa-44af-a442-99fe17b9d08f", + "d6754d52-d6ba-4066-b2d2-d47b7533b32f", + "4f9e2b16-fb06-4ab2-a8b0-00e697585f9a", + "cfbde6cc-6e61-42d5-9b31-9c6c82d22f15", + "37238dbf-3236-4201-993a-b73a313655e6", + 
"c22efc82-5b00-42d3-8de8-23d7fcb2dac7", + "501f36e0-8489-424b-a92f-daf0a46a39f7", + "1408a5ea-0696-49f6-9ad8-08888cf98d45", + "651cb190-890d-4dce-8023-55737104e09b", + "fb7bfd1f-174a-4453-8b80-1fc3ddb55b63", + "0bc81777-cb5e-4b66-be93-627676ee280d", + "855dfd77-475b-454d-8652-27be9be64aba", + "e9ef131c-c25c-47ee-b053-0eff8b0fd2ba", + "20fd4f76-77df-4323-b9f0-b42c66f4c188", + "5b8c8212-775d-47f8-815f-1a332ca1d52a", + "52a1a295-1fd0-48fb-bf66-5d0ab3a8b1aa", + "340e953a-8854-4e48-9271-88db51e5a949", + "c7261c46-5a0f-4b48-8f5b-ffc8dda883f3", + "8985aad7-636e-4d6d-8df5-425069f64a7b", + "da9a9699-8366-4f06-9c22-7063d491b0a7", + "bf080e89-dea2-4bea-a664-a33a4fa399c4", + "59cbf275-720c-49bf-a8f3-b803216ea4c9", + "8774660a-16f2-4aa7-b4d6-caa0cdd755ac", + "f52d74b4-1906-433c-adc2-a631c2cb9fb1", + "ebee3eab-f6d3-49b5-8a30-e47d021bdb37", + "b5e23741-8f34-4a7e-9a46-77949ae5b606", + "996d3f9a-6ac0-434c-9538-2cb2bc447c0e", + "7504134c-8a7a-4654-962a-2f5bb87cbdb3", + "b7f214aa-cc7d-412b-bb84-8f8779f3e80a", + "42b7fc82-0ca9-448c-a2d7-fb9ebcd0390c", + "d4598f6f-8bab-4ad5-83ca-69cb2e95f522", + "9a6713d8-9a5c-48ea-b3bd-6c9558873aae", + "59f67bee-a2d9-4848-af0c-5a8fbdb5516b", + "e2dc24ba-f615-475c-bda3-dc1cd7275004", + "bdb4ea74-810a-4551-ad73-878dad91e82b", + "9e01b72d-1745-46bf-9593-fba1f75c2682", + "68b7c525-c844-4f5b-8d37-9394370d0dc4", + "17e2c22f-7e5e-416d-9315-e363e4e11ee5", + "90bf91b1-460c-4672-84f4-d7257a0b57ad", + "601f954c-e86b-4e1f-92c7-5d41ca2491e1", + "9e0feca2-a18b-47e0-8699-95e1cd7735aa", + "2d4b2362-5560-45e9-bf93-257e505faee9", + "bb9e6ff0-d14f-4135-8f03-48b76db2d34d", + "cf356a35-54b7-4d41-8000-66295d8c3644", + "5cdfb49f-1be8-47d4-9d9d-20fd08d99473", + "2e8bd143-0714-4dc7-8911-91dc12d96825", + "da37984d-c5ad-48d3-a820-8a4ad7effa80", + "4621145e-23cf-49da-a49c-8b3d794c8d4d", + "9d31a43c-dd97-4945-92aa-2478340366a3", + "e5373cab-cbc0-4583-ad91-948e369623ce", + "c8810d67-6ebd-4803-967d-ea443a5a333a", + "b7e80926-b1d8-497a-932b-3d0669689075", + 
"02d5b648-e3ff-4a26-8671-b7c62ff8d811", + "328f2b8c-50d5-4d38-86f6-cd3d1d2564f4", + "faa72689-22c2-4de5-9a0b-bf2441ed5d9d", + "4cbe640d-6fd7-4fa5-9f5f-178c716693b6", + "e095e120-58fc-42f4-b6e0-8fce6661b7fb", + "be76a91c-4631-42e3-bcde-31ec00898081", + "8252361f-6176-4503-b36d-5d062ca75b3f", + "2c4a313d-0adc-4404-9d77-13e31e21b5cb", + "ac72a0b6-5028-4c56-aab4-208029a3908e", + "898fedf3-daa4-4b9f-a84e-38548b33769a", + "e3c80b80-9736-48c0-9d86-6ac563c1c4ba", + "ed2015ff-80f1-41cd-9386-c2939a806cc9", + "dcb9c142-9934-4c9d-b044-e8c767c90591", + "4799e457-3c34-4bea-b58b-86e705decc67", + "1162508a-c640-4c7d-84a5-a9dde21e5c96", + "74b41689-4fbe-44f6-b298-8e7a48f26c4a", + "858c86f6-003c-46f3-bcaf-ceeee2594ee1", + "41817bf3-c97f-4a4a-98f1-73423bbf0b8e", + "c0ad86ce-4aa6-47f9-a384-f553acca2167", + "be537425-4847-4bcf-b3f4-6b3c83b93e0e", + "3dfe955b-f418-4bc4-b220-cf131b2ada76", + "4adb9cbe-383d-44ae-8a8d-d8127a79f670", + "793a695e-0a16-4c15-9dc3-45437ebd0454", + "2e799c0a-125d-4632-a726-0fb5d853416a", + "0dc24d67-f18d-4556-a6eb-b614dcc81cb1", + "70efd6a5-3210-4cd9-87f3-25f393cfff53", + "dd4d168a-001b-4f33-bad5-7ab36f51394e", + "a3e73471-0cfc-4935-9ec8-22d1eb751a4f", + "d44b5531-b2cc-4ff2-afa5-f27f771783b6", + "edd0a22e-2766-408d-bf56-0fcd121c5428", + "68029a5d-e392-4d89-a181-038adb2969d2", + "202249f9-709a-443f-a212-cd8210c9b6e1", + "2e3c9abb-d810-4a2a-8e1b-5a62f4d9994f", + "32e8c685-7961-4bf3-88b5-9057237cbd9a", + "8c88eb1f-bd54-47c8-99c5-b75537fe3ca7", + "8b197965-dc44-422a-b0a5-8df182bcaeea", + "beb0f6e2-06d2-495b-8678-bfe42a9bfbb4", + "9a893a9f-6067-464c-80f7-71346e0800a8", + "0886d399-4da1-4f62-8047-4c315f1f71d6", + "da37025a-3201-44c8-b47a-9d65ab10c537", + "d15e54fc-b523-480a-b425-483e36ab8156", + "5d506945-ae93-430e-90a2-88ff36762b4a", + "9949d43e-4cbe-48f1-a1bb-de54a20dadf7", + "1fb18cb3-b9f1-4b2b-8648-a0973d471e2d", + "67f90ea0-1783-4856-84e7-f739247d3284", + "0ead2187-c948-4530-9e42-666563697384", + "8bfde91a-bb0b-40a1-af06-1efbcf1d825b", + 
"0064a068-9d03-4da2-a879-08f5e1748724", + "f82c2544-4b31-47d6-b5c0-97ad5b388c36", + "af1d9c35-621a-4c03-8a11-6d80dc62d5cb", + "cba90a32-d2f3-4a07-9b47-3ea0dee725c2", + "08b2055a-cff3-4b5e-b107-0b96ac221279", + "37a75aa1-abcd-44f7-a98f-26c948a93a0c", + "b75f7107-b852-4219-a543-1f7015d7f3f5", + "2c9eea25-66cc-4aa3-82fe-f812e17597fd", + "91411802-7b6a-441f-b69d-f16766fb0988", + "b6e22f95-1c65-4185-b66f-c0c2f0f6eef2", + "f44f26ee-a8ce-497c-b6fa-abe4ed53c0f3", + "89d86b13-8680-4509-8d92-cef223349150", + "76a4ff7e-5d8f-4432-8e6b-42bdfe096281", + "316e7d98-45f3-4b55-b182-179d9743b8b4", + "25ee3aa6-0df3-41f3-99ed-2b80bc5333f1", + "ce19236d-01a0-47d7-85b4-271b5874f935", + "617c1bfb-a9ba-40a8-9b75-52fbba7d3f6a", + "faa2fec0-d566-412c-aa57-fac8176d5738", + "b3e3b9e7-2e19-4b33-9c34-e81966777423", + "4e1eb338-bf9e-4a3b-9a90-f2ea2fa6bc26", + "cf1e1012-7150-4a61-8b71-5343abb9bf9d", + "78ebc27b-ffe5-40a8-b207-10969ca1a4e7", + "f8ba91cb-6eae-42b0-bdd3-19922fd6b97a", + "9101fc36-3733-458b-8e45-870b5a7714c7", + "76145bdd-10df-4903-978b-266ae7d017e8", + "8cb5d0ec-bf61-4acc-ab59-43db3f00fb2e", + "1f166b03-e745-494c-8954-71562a2b0156", + "04aec872-a16f-45b8-ae22-c1833a19bc9f", + "687bb169-a079-4895-a9a5-d560846a5d4c", + "5c593196-5921-4740-8b9a-eda027b58a7f", + "9bee4676-6fe2-482d-a46c-3080b1b19326", + "f7e1ce7f-b870-4dce-890d-632674863968", + "46bf53d7-db1e-4791-9cba-3b196dc9af12", + "443685b1-9eed-419c-9d03-cd4d1722eb5e", + "3337487a-bf1f-4b6a-a456-ede4595b7356", + "a2fe6345-94b4-493d-bbff-5c0c873ddfcf", + "e892525c-071b-4b82-997a-25d2fb8a2486", + "4bb17805-92f2-4e2d-8922-0d2e68d2d73b", + "3b26c300-500b-482a-99ff-496d3b193624", + "5969b9fb-e62f-4421-af8f-c1ab79806be2", + "c14ee136-51ed-46e5-8726-659dd0a2de96", + "f0fcb9d0-e595-4c94-a874-a0b7aa8c2e5a", + "51932311-5eae-46fb-b5dd-56b3ddb1d155", + "d13024eb-4204-42a3-ab5f-d3d84d054eee", + "cc6c3c39-f7fd-4aef-947c-0a2b01acdf2c", + "6ed1bde9-46d6-4db7-8763-15e0c0ed3774", + "01bbd4e9-250a-4c14-9f9a-aebc2e0fdac0", + 
"517ae2ec-deb4-4788-ac71-539ee424c1e5", + "20a493bf-6c30-4765-a4e9-0fc143095cd3", + "fc0fab89-3290-429d-a56c-c49bc6d9add4", + "485806e1-e307-4449-8309-7792df308d29", + "046e8b08-2959-4d72-ae7d-f2e6db82608d", + "74b5847a-2dcc-4d6f-907d-260c892e9d43", + "803db209-615f-4c91-a656-3169899e1202", + "8ccbecc4-ab45-410c-8625-2f58c6792dff", + "dfae2df4-09ae-424c-9e7f-1776be795195", + "12d56090-46f9-455f-ba72-552ef990bd3a", + "0f957aef-597f-43c3-bc08-1dc2d0910c8b", + "16c155bc-1cef-4800-8c2b-b4104665ab1d", + "85658c4a-a3d1-42de-a70b-bf6e8b16082a", + "0ac42b87-f62e-4951-91e3-58d6036c118d", + "fa77686a-789e-47a9-b476-c72febf85154", + "f2eb133f-ccf8-4745-b891-cc93b9141d62", + "cea0c7af-71ec-4660-a3b4-c40fe59c7c36", + "613fbaf3-addb-449f-b234-9326283cabf9", + "35123ffa-925e-4d6d-9e5a-0945dd431dbc", + "aa29853d-e4ad-4189-9560-4d49d3802255", + "419c97b3-abdf-4c5a-94ff-53ab75c7864b", + "3e674dca-5e77-4ede-8a11-adc77505d1eb", + "51938e58-310f-4916-9bd7-726be4e491b0", + "713d4b9f-3457-4ce8-ad09-16e726b4f762", + "850956af-ca8c-4880-afd3-eec219fca38e", + "a8a9260f-bcc5-4487-8428-6e329db1ed82", + "3cd958ba-58d1-47e3-b8e3-ad912f7a545f", + "6b005302-2a54-47bb-b69f-1adcbbccddfe", + "b91953bb-d4ac-4e17-bdc7-a07ed98f4360", + "2cff427b-0187-4d17-a562-47d4a7dea5c6", + "fdd62c24-53b4-46dc-9942-75be742d3f7d", + "b52d0866-3849-40ae-a79c-193d7501d574", + "e84b5ae0-9d6e-46f5-a925-f007d474c901", + "a11f76f6-539a-4737-b54f-d27a41a70ce0", + "c310372a-2591-4ef7-a18f-74bc5fdc1bc4", + "43a942af-85bf-41ed-9966-9927211ebb74", + "a31032f8-6466-4a8e-acf9-c8ac3e8ac28e", + "3622fa3d-a55c-4249-a49c-e480c68d7f12", + "d5eaf2b1-e32d-49d7-bcfa-5b63c145d9a3", + "00cf74f8-effb-49fb-8321-01f331a4302e", + "d10cba60-da96-457c-add1-1ccd28bbc398", + "dc997bd8-90df-474c-8490-51d692cd5b16", + "a9bc50dd-5bab-4c5e-a20a-115d2bb0e1b3", + "a4254e01-46ad-4dc8-82f9-c6fe68b2ed4e", + "437fb6c9-9a80-4793-9e75-a8b1ad334816", + "c8dde266-df17-45a6-ba87-6038ed51d887", + "329104be-287a-4b01-aa4a-371a543bb025", + 
"2a7ad809-4545-4ffb-9d0a-dbb8bdf11321", + "8f5827b0-6349-42e3-a826-cacfd1406007", + "51c89706-0216-48ab-be7a-a0f79d2968a9", + "0c48cb05-2d52-4155-9478-98cf081c0659", + "671e71be-e4c2-4dd7-8a4a-58494dcc8eea", + "95f98517-cc59-45dd-906c-51ae3034412d", + "df9b7f2a-1591-478e-a15e-09d8420fb186", + "6d040367-12aa-41bb-9766-00c10cb2eafc", + "f997b7b9-bbcb-42b1-abfc-e024cea5c0c3", + "f42ae4dc-2299-4843-9818-d22fa90b5624", + "20339366-ecef-4c16-a83a-11bd275ecef9", + "b63aa58d-8766-4b4c-ad9d-3bb83cb42363", + "0d717b1c-f9c9-4938-9023-6de04dfb9aaa", + "a2ecd17b-79c2-4082-8e14-3e2458eac655", + "585367f0-3508-4bd1-a2a3-ffe1bfe57a45", + "076fdc38-7d8c-4239-ae62-91493e24e2fd", + "3ab512e3-8e44-4736-bff3-dad834c3467e", + "3e0846ce-e0ab-4420-b20e-3cf6dba37d06", + "0efa72dd-676a-439e-80dd-08fa39e7e144", + "9186a18b-4689-4beb-994f-f1b548e306a5", + "b7a7ed39-5a6b-4494-97b1-0a6515bc4f68", + "b34eaa20-5c04-4323-af02-37f63915632d", + "3ddf0800-f1de-40a8-9339-cccfb2e30642", + "0af7068f-26e1-455f-ba24-ed3f47b8e7b2", + "91efe85d-83d6-431d-ba80-c23fd3e50226", + "15e21976-ac74-4cf8-b1c1-ea4ff201430f", + "61406a8f-4634-4238-b872-7424cf920d0f", + "442cebaf-21ed-4887-8f1f-af4ba3a1ae4c", + "3e99082e-a97a-4ddf-88d5-991642ac913f", + "59caa87c-dbcb-4849-adbf-7869fdad6003", + "a11740c5-e939-474b-a934-b7817b2e84c7", + "b2df8fab-1419-45a4-bca5-db95186f7277", + "4c29575a-9971-4583-b2b3-f1ba46ee9e60", + "bc0ef261-99e2-4320-b93d-fd73ed2d145a", + "71430e48-3f50-493e-b75b-b6aac902ee0b", + "8f729fc1-37d4-424b-9f5f-4fb294ee57f1", + "ba42e604-734d-4758-baa9-fab17ad7325a", + "f6d6692d-08a2-4e80-96fd-af4319a8d27d", + "7a32b36e-e409-4f54-997a-a3e7206e2abd", + "81bf3d89-be7d-4ab0-a28e-8a35828c6ce7", + "af7ed257-e97e-4a20-bc6b-42c5a0eaba70", + "ec41fcc4-5522-466d-840a-7b2b08f4161b", + "ec6d1d18-b495-4768-9120-857f53994018", + "675d359c-475a-4d28-b87c-2e355292c6dc", + "6808e635-ad88-4106-badb-0678aae4b1aa", + "4f364e9b-8b6e-46c4-8ea9-d31a32f6a2cf", + "62c9e9a9-7afd-44dc-85f4-140645c40939", + 
"6730b4aa-e1f3-485e-849e-366823c29754", + "f01db4d8-06f7-4d68-bb04-877a99ba669f", + "805b06c7-a21c-4d35-9a2a-27d695d5a60e", + "7e6b07c6-cd4c-4507-b940-381cecd69b62", + "b7cde8a6-0f6a-4bb0-89f6-a5a252a97460", + "12c265bb-f823-40ba-a2d2-50d75ba76f14", + "e641e611-34ae-4e81-aa45-04706e96e76b", + "013ce016-1e5c-4105-a35f-84ea8ad6fd68", + "a3ffb4bc-a6a3-4055-9642-a397213911f1", + "d3df1225-fae2-4037-a9c7-12f5f742346e", + "e41a4b08-1e04-4780-8171-dce4b5aabf71", + "6fac08fd-7567-4e44-9f2c-6eeb8f20ad8f", + "ef3c0e28-ee02-46de-b897-5582964f80d8", + "c01a3079-cf16-4ced-92a8-d055674b2f73", + "ae3fe582-c62a-47ce-8458-2406240b9cc1", + "50b5f576-30cc-4e3e-a3d0-6aedc0cac247", + "987d71d3-b4e2-464a-b3c0-419aaecf72b5", + "cd4eb8e4-3e09-4cbf-91b8-f094f6e11e26", + "e86eb620-a58a-4172-9dbb-c03424dfde55", + "913db653-c401-4e81-bae0-7e6896e69180", + "3a648960-f72e-4c86-9398-f27b05bf40a6", + "8c14f46a-d647-4b13-baf1-803df9cb771c", + "653a78c2-d701-4069-8e46-6ce10139cdf2", + "ce2e5a2c-9908-4160-8ebb-fcf4295380e9", + "1d411d4d-b0ca-4a64-a3ea-544a4c409011", + "df98a632-4390-4396-b5df-1a009dc383e7", + "8fc1161a-b4a1-473d-842b-77e11db44ffd", + "f710dc45-f587-4e81-a542-4a6f00cf9deb", + "f86628ad-1069-4742-ba11-5ce4f5f18846", + "e1837007-a78a-4a91-92bb-b5b1763e8440", + "6a9cfe93-6a87-4cbe-9778-7e104e02d1dd", + "430e20f8-3cb9-45b7-84df-a405b0e2839d", + "05ac31a4-add4-48bb-aaa5-82cfb7e0cfb3", + "be4ddeb6-6bdd-4188-8b1c-a11f0ac41d9c", + "11ae5587-10de-4336-a934-397c90db69a5", + "f2e282ff-4074-40ce-9cf3-6b3a9a41cae4", + "3d5315bc-8636-4e5d-a0ea-0a71a0b95e1c", + "15892a80-8067-4ca1-9dd6-c2147e4ba1d8", + "5858a91b-527f-45de-8512-17f60da076e8", + "989e2d8a-82f4-4335-a6f6-58f9c1d03226", + "90ff7395-9f5c-4d8b-81ed-fc15184e21c7", + "2ae40976-0f62-4add-9821-186f231c19e9", + "d4c03079-3340-4bb9-9623-4a2abe00f735", + "44b5f262-0f32-4efd-9da7-a2809636f757", + "b845693c-ffdd-4015-9278-851bb0d37193", + "6fda2c60-23d8-48ea-b390-a1d9d0dde54a", + "fc6ec81d-11a9-45ee-bb13-51b200fcffb4", + 
"92c67f16-498b-42dd-a16a-f0ee0b19c508", + "d1c78b2b-5dc1-4487-aed5-c68d0eca121c", + "fbc34383-7c0f-4f25-b605-b5646da68356", + "4b31ed2e-8f6e-442f-804b-e00e79da0569", + "a28cca89-801b-4f9d-98ea-68dfb57b3345", + "39331ea0-6bf3-40ec-9377-8240b2f56e15", + "1970429d-6205-48c7-a267-5d8959182621", + "87decf92-27c5-4c34-9054-efe1de4882dc", + "8f65c122-799c-4ff5-a8af-dcc69d584291", + "eb3e7a1a-3b1a-407a-b94c-e111a42ef5bb", + "5c08facc-41dd-4334-9194-17a815dc7abf", + "edaf7c3c-f1ad-4ed4-9791-879384474ec6", + "53899ab2-f974-4715-828b-59157329e7be", + "d003c392-35f1-419d-8088-dce67536af4c", + "d7bd1b0b-e8f6-48c8-875b-74d463775656", + "fa7105c4-a181-40eb-b5b6-d421625a3dea", + "4be2848d-d13f-4269-8be5-7e0700a69242", + "ddd55a50-11d6-46ea-aace-8d429cd6c457", + "6355056e-835c-44d5-b842-c11165ee405c", + "5023ced7-3d1b-4f94-a555-9b8f775b168f", + "a02ee8ae-fac7-4b95-9da9-f8b00fc3ffa5", + "707b3ff7-4c54-4f7f-9588-76946e84ef35", + "56b74fb1-419c-4897-b306-cfc5396cd189", + "178fa8c3-f1e3-4ad8-9b4d-3619f4a290a3", + "b9e28f5f-5e77-40cf-99e3-b58c145a1a45", + "e121ea1b-ca70-48c3-9f51-ff7af554645f", + "dae54527-63b6-400a-b19e-427d84179f04", + "6ffa994f-211c-49f8-9247-a107fdacce05", + "42dde56d-16e8-43ea-990d-112d26527f4a", + "1fa99da8-cec8-416c-aa93-5ff9ff06af99", + "4f8dc84b-ab70-4064-9fdb-bfa8ce9b3925", + "4a40b2ef-9138-4c9c-ac41-a5f408f0ad8f", + "674bf21a-55e8-422e-9714-f20180d6fa83", + "3bbaf66d-5890-4ef4-86f1-e12c7fa8d293", + "c61bc799-0bad-4e94-9c57-cb51d74d3658", + "f7d81172-1963-4884-93df-e512090868bc", + "5ae11e09-1e1c-4488-b40b-18ac3c7161b4", + "0692b327-d73f-49a9-903d-d39b9bf071b3", + "9d44564e-4cd5-4971-8716-5f8149054c22", + "479609c5-3fe5-429a-be20-8370121066c5", + "3f4a5f07-3282-4887-8ab1-598ffc23453b", + "3a2b578d-571c-4bd5-98df-96b189c66805", + "584ab364-ee54-4f6d-b334-d6ebd0b13d4c", + "bbb4c4a7-ea23-40e6-81b9-e838ae29499d", + "973f18c8-bd97-46ae-ae7f-9a8b5774dd58", + "ab9ff72d-c768-4f1a-aa4b-3f4b8c4e26cd", + "eb46463a-0659-46cd-bda7-1cb59f130281", + 
"67be7b6f-ac9f-4f96-9c09-7460a436728e", + "267da9fe-9a39-4096-bed3-b3fa0cf3a862", + "1410c018-ff04-4fec-9f18-4fcefc4107b6", + "8ed57641-3fe9-4180-893a-612b247789a1", + "6d3de7a6-1bec-412f-aa8a-13bc85e1e971", + "a7222f0e-2fda-4735-a405-e0c8d018fecd", + "6e3db241-187f-4b39-b4f5-2d74d7c74a25", + "3d7f026b-8b57-4e82-a27f-54d183b4d7b5", + "f9c215d0-3f6c-4e96-a87c-6aacd5d51609", + "24727ea3-d4b1-47f3-8d73-b5708a0fc15b", + "def141dd-1e6a-4bad-84ce-446a4d29bc70", + "b0430d57-de75-4611-9e70-11bdd7b18e4d", + "d660d4b7-1576-42c7-9989-d44165ef69c5", + "5d623a2a-86c6-446a-b11b-07ff25032edb", + "e0978bcc-c86e-4ba7-8a06-318b58980a98", + "c66d4780-ce11-4aeb-adff-4939974d14ad", + "9adde035-85f8-4789-95f3-8f3c77764e52", + "c472d932-d5cf-4d04-b2ba-ebbb45391146", + "843a5963-5a26-495d-bb20-5683c542eb8a", + "3d507b85-f45e-459e-86e8-f486473d7fc7", + "a09ae230-c78a-4af4-9dc6-9da86547ce52", + "71a41d8e-95b2-4277-b939-b853b0bd4b28", + "259f3765-7810-4d6d-be70-471c0f0fadaf", + "047fdd44-6d1a-4f88-bdad-602b4546afe5", + "91ca1015-48f6-4694-9345-b698791e6f28", + "9a07f9e6-12fd-461e-ae3e-3672ac7de3bd", + "d7fb2336-0308-4d28-b083-733e57237f6e", + "1dc14147-1f3a-4c74-ab0d-a7eadb8bd129", + "7a7a6d5e-0df0-497a-978b-9a16c4f7ddae", + "2715c940-f2e6-4345-a27a-0c2fa51a56fe", + "668201a2-0a17-4d62-b90a-9d2b8d7db7f5", + "17f4bb84-f5ec-4083-83a3-1038bcadfdf9", + "326858ff-832d-4252-ba04-f6045354f808", + "211b2b85-8c71-4395-ab6e-09a60f702b96", + "8e5815c2-ae9c-4636-8eee-9a7bd5ff56f6", + "ccc43ba3-69dc-4c69-855f-710545e417e6", + "40868843-9c6b-4021-b035-6c453c567dd1", + "76c43962-9607-4dcc-8154-889301679461", + "ec6a3d99-5e52-4c9b-98f2-fac7ef7e5ca4", + "c59684c5-e091-4a52-be05-325e89f3b055", + "d8efdf4d-dcef-4683-b8b2-8d9c1e7a1ccf", + "e9d68f1b-bf5a-4bae-a6a4-b1bf3568a107", + "850d525e-733c-4d62-8a4b-8c8cd26bc14b", + "70fad4ba-c381-4395-96bd-7b03ebf69a7a", + "3c3a1403-3a74-4cce-b960-e760a12e820a", + "9a6a16e1-c751-4dd1-b63e-c69e5c2a1b1b", + "c97e7e60-819f-4384-8b08-6d0f831b05a5", + 
"2938443d-d0f7-4919-9727-c77e3e61c073", + "24664712-b0c0-4c1b-8144-21bc62f68fbf", + "cbec280b-a711-48fb-a56b-d6acf7f4b841", + "d4068be8-f47a-4d70-a33c-53cc0240c94d", + "76dc17d0-0280-4035-96ae-89bffe32ce37", + "7c338479-256b-473b-9744-f5861a7430b1", + "4930da9e-ade0-4b05-ab71-995571510cb4", + "e7e0644d-04a2-4e04-9b22-de76b8d1f2b9", + "f7c84d8d-00ed-4d37-a93a-f8cfcb92405a", + "48d9df34-33cc-4e41-8db4-e6604137763e", + "ed52889e-637b-438a-bcfc-66e954cee0d7", + "0a579689-e225-4ea3-9f36-b283600f744a", + "0d30337f-a0dd-4ccd-a3a1-a06842af891c", + "37178f7f-8e06-4df6-8399-ad342f5cc95b", + "dde0551a-fe20-4e22-9e6d-124d6280247e", + "55e78786-64f5-48a0-b0c6-5b3822c187af", + "2b0f8c41-d671-46ef-bdeb-77260f2ea344", + "cec3e46a-c81a-4069-af63-b6ad3d1699b7", + "02ad7c88-6cb2-4001-907e-6e45f71885ed", + "89d357be-cb2f-40d1-b490-469024c6eb9d", + "abf7dc53-2df2-4d23-a9ea-e7b81930c01d", + "09700a3b-44f8-4a7a-a6c1-eccd4f0b2989", + "03c1ecde-e39f-4064-b907-21cfc738efba", + "8d63cda5-c646-4720-b6b0-9dbd98931686", + "3833f9ee-04fd-40a9-a481-e74a732c8bf1", + "f2741578-b285-43ee-852e-106c40f8a1b8", + "8804196c-5654-4861-af0e-23306acd56f6", + "2aeade58-34cb-47db-be3a-38b4651b2ad8", + "7a22e2af-6552-46a7-90c0-96701242a652", + "b15f3b9f-c7f0-452d-a984-279d2c276fe0", + "e35d805d-8358-4178-bb25-33bbe780fa65", + "ee88d5a6-2abd-46ed-9d2e-893266225318", + "001d8d5f-a499-4270-b17b-54832da40ab1", + "0b9c8f5b-9a83-4af9-8b2a-e9ae08d16924", + "9d43d0da-3859-4d40-8e4f-1f57672c6f16", + "c4facf2e-0438-498e-8fe6-d25569a1bf28", + "5dd72fa2-452f-4a4f-b77b-a3755e8c5d90", + "0f8ff0e2-62b2-4f8d-83ed-31189a7f4f2c", + "694d6020-2f5f-43fc-a199-cead670d255f", + "4b4a5114-de5d-4ce8-b8c3-815d1962897e", + "5c4cc729-7989-43bb-a765-ddabdd7f842e", + "12aaac9b-28bc-40a0-aa29-b3627c84711e", + "d7a814c8-9934-4d62-a260-3b856f392fa0", + "6a1294ed-47ac-42e0-993c-742f3d6f4997", + "86955fd9-6cd8-4716-abff-408fc39b5afd", + "050ebf52-04e2-4ecb-9737-de6eef2091e3", + "e14fb45f-fbce-40e8-a952-b40ca8ffd03d", + 
"b6e9a5ca-b913-45c6-82e5-897e0224f1aa", + "1b412925-b90c-42bf-a5d6-5dc10c777f3e", + "36dec511-d295-4463-a087-1cb7c8725b17", + "68bca740-b1d1-442c-8ec8-46df51b650a1", + "6eeac7f7-9163-40ed-9f64-d2399cd1e45a", + "d1288d1c-8833-457f-a563-8d30758d399e", + "ac078e8d-06a7-4817-b162-12cb7a6267d7", + "0b261dba-b4ba-45fe-afca-784c72ae420d", + "de58aff7-52f0-4dd8-b2ad-253e9e43dbc5", + "4af86e99-37ba-4d1f-a5ed-cdf3fc5597e5", + "3ff9c078-ccc8-4318-9af1-6887c974c0e8", + "7f8493cf-3fa6-41f2-902e-38b6cd8b07b9", + "2b3fe130-ea15-4fcf-89b6-b2ee04cd62bf", + "263e8fec-5d97-4224-a84f-715fe04b717f", + "ec3c344e-4444-49ba-959f-2e33471a4b7b", + "c505e474-f3ab-4650-81f9-5be3a392b66c", + "b75dc7a5-5400-46d8-ae6c-7577f8cdc3d2", + "0f516f04-9da4-4ccb-a399-b70ed0c51db0", + "bac2927e-bca1-4490-8acc-4e29a45aa33a", + "18f9d4f5-c973-4207-80db-affe728b40f3", + "70af2326-aac8-4487-a6c1-0f4e73067579", + "fb74722f-e387-4cc8-8a3f-10c372f0cf0b", + "74aa1c70-8117-41ee-a109-88719e6d3bd8", + "95681c8e-11ac-44f7-9f38-9bfb8cdcc120", + "7f2dd0d4-33e3-4386-b196-2ca045920bff", + "49fba371-92b1-420b-86e8-58695f1ca3f2", + "799a3c12-0e14-44f0-a6e0-88e3aef0cca2", + "e00a9048-88fc-48bf-98fa-0447421e2a7a", + "112bb56e-e331-46d2-9ac2-79c7121c3849", + "688bc4df-e100-45b1-947c-722dbf36cc14", + "9c6e8d5f-0720-4ac7-8f92-442411fe2582", + "92dc94e6-fb1f-4576-9a05-6f87d0764c5b", + "51fda582-0299-4689-8af2-906a6b9157e4", + "b2468f6a-8336-4f38-b8c1-271bce9f1d1e", + "87162f3d-547b-4303-87df-07d28cb5d188", + "9e152d18-c054-45b1-b9b9-231c32791bf7", + "981ffe22-b90a-4b65-94f2-42c0b596f65f", + "259152cb-10b3-456d-9527-a4e3f36f72a9", + "1426e7ff-6302-4ccb-8ab3-57e276121533", + "574862cb-c65e-4ad2-8d47-49b69f7a261b", + "5f09d24e-9cef-4198-a12f-c9c5e380560f", + "8a01c330-2ba4-4641-bb60-e173d3472bfc", + "97590d0a-d8e1-43fe-acd0-6844617e872d", + "cee94b1d-7af7-4de3-ae9e-5ff4a3d78ea0", + "b5029b1d-ee91-40a3-b893-4ea212dd96a8", + "6db4593d-f5f2-4634-90ac-565a7755e2ad", + "8a88035f-487e-413f-bf7c-7018f1e9d972", + 
"c9c06784-60d5-418d-9645-eb82a939f6af", + "2aa0fbed-84a4-49ab-ad15-6388244cfd22", + "9d7fe2dc-10fd-458a-80f7-41f730be9857", + "71f645c0-5d2f-4342-ab4f-346f58e563c7", + "be0816b7-d5f7-49f8-8a9c-309bf8b729d4", + "5f29d342-1dbc-492e-82f2-979912fd2073", + "c79738ec-d709-441a-aed0-74bee073a299", + "e57d8735-fc80-4353-b659-d2cb3db55784", + "729f2565-7f68-4d8e-a9d9-ec6208caea10", + "9ad068b3-4240-4aa9-ba09-3242f7cd9d51", + "9255c0ef-00f6-4f26-a728-21c0dbabcd0c", + "c7c0f4fa-4d25-4a96-9976-0508c87b0e0f", + "d82e9862-eda9-43d3-848d-b416bfe30613", + "3e871265-cf6b-46fc-8e6c-4d6c1b736556", + "ba4d4cb8-4c07-489e-967b-ddd59e7c5df0", + "e6498d58-1430-4d87-9386-46f6d05fa13e", + "060f1cb9-f0d0-40ce-b361-1a080c60c697", + "d5bc003b-2b75-4e39-940b-e893e256ea8c", + "3a7a1ca3-f79e-45c7-8bca-f49f734f3388", + "18474be8-e5f6-471b-8c45-dc69680b29b6", + "7310040f-fa3c-4c7b-adce-cc37f1c37545", + "f1e79a23-76fb-43ed-9ece-ad99214f8a76", + "540fcaab-63a9-41f1-a227-b0e8833c4dcc", + "5e2326ee-c95d-4fe4-af49-af83aae4302b", + "0d61b5b4-674f-4e35-bc2b-0a83882de047", + "f834de54-4720-43dd-8131-1b8c9731c628", + "493cb715-755b-411f-ab76-eb1ae539ae4f", + "ae9a003b-9358-4560-bad6-88d8e2d6e25f", + "ed3bb746-f078-4590-83c5-ec098c8cdf80", + "d2a9574f-8ad3-4f49-b76f-2eea48fac2c1", + "f54cc608-2d29-4c4b-b84b-e2e24c8ccbc9", + "d9505299-c696-4185-a99a-cc8e17b1fba7", + "0e3c5064-dbac-438e-afba-676461e8b509", + "a7744d98-697d-4684-bec8-d5dc777cfd8f", + "ca388cb4-eab2-4822-82a5-8e17a0077a00", + "36602499-321c-4a88-90da-216028b4860d", + "389bf31a-bdef-4ffd-852d-f4f1a4fe8387", + "dde1c9e5-271e-44b7-b2a5-5fd10affff24", + "d7de14ea-6eed-475a-a47a-348f5293ce04", + "631f37d2-f6ae-4caa-acd7-7b0d708fa862", + "4ea20287-6820-45bc-882a-1e81aa1f8dc8", + "a363a467-bd9e-42b9-a26c-be59b4881fa3", + "ca4ff23e-de3d-4298-8483-af19fc610f25", + "c8fd264d-71b3-42bf-906a-dc3ff2d0ecb3", + "4bb6b72a-61b1-4754-91c9-0d858f3d4bae", + "07baa46f-dcf7-40c5-921c-7f606d73d905", + "c85c67bf-9bac-404a-98ea-ccab4806a1ce", + 
"38f6be9b-55cf-49be-9bc5-3b752a306279", + "c293f9c9-78a7-43a0-a51b-e650f2dfa9a9", + "cd554f5f-0d8b-4ed7-a49f-e8d4382354ff", + "66f30ef6-7078-4f39-8ae0-a69dfc682cb2", + "eed87087-0d13-457f-b7cd-26144b7a3f6c", + "fcc1a789-3de6-4377-ae73-e76416a07e06", + "5d8cb917-21f0-4e83-9de4-ce739067cd51", + "c7ad3fe6-332a-44eb-9317-4be39d575dce", + "c6aecbdb-0c6e-4c73-926a-eb25728165f5", + "68ce135f-af03-4081-bce4-11afc3764776", + "e7936022-5b8f-4bb0-bd53-925eb3cc06d2", + "4e56af3c-9de2-449e-94bc-7173f9523529", + "2d0de407-8f73-4483-953b-78303b5ff08f", + "6651da79-c88c-4b18-ac60-4b6df5d46f8e", + "607b2dc7-152b-4196-9570-8be6290449b8", + "d88ef0cd-e622-4918-ba22-98223ec9fe70", + "6f200c8e-0f50-433e-910b-34b1896059fb", + "5b124a26-ec25-4094-beae-8c92a35fa50c", + "a7eb9077-db37-4528-9028-76c1be55a888", + "28273349-e6b6-494e-856c-a5df904f082a", + "7cf30483-1aff-42cc-89a7-2d4b04a0ea82", + "bc678e7b-551e-417f-9460-b46710a630d8", + "373aa2b8-244b-43c5-9384-d38364c08d2b", + "fb1cc448-0f3c-4b1c-b578-071ed3d08495", + "bb2648c3-7c8c-409b-9a35-ad0be6a9d7a3", + "9ddd92a8-3aea-4c3f-af73-a2859bd05d47", + "8b854f48-207e-4711-a95d-c3004dbfd20c", + "06988250-5f6d-4655-87df-cc46edb6dc82", + "cee4205b-64a6-44a3-925d-3a168b2dcf85", + "e3764474-fffb-4370-bb87-523bc91140b0", + "682da045-71c7-4493-b8eb-79c93fdc12b7", + "8dbe0779-f124-4bc9-8eca-19de6d0464c3", + "b0846205-0ba9-45ec-a60f-e2aab2d1f2bc", + "96d94112-7c7d-4c8f-aed0-d6aedc9c5083", + "6f16c274-ef9b-4071-badd-5cd9cff645f0", + "0fa6d0b4-78e2-42b2-b832-52d6edf77d6a", + "35d932a9-50d9-437e-9360-367185966c43", + "7548cb2b-9f7d-462a-a9e6-027c358da782", + "66207746-3a0d-49a6-befb-c7f13147d5ad", + "b2885099-9b03-479d-a599-2ea0cca7e199", + "59d48d3a-c866-4ec2-8984-f8b4ab4b845c", + "f6238791-2d99-42f9-b999-a159fb621cf9", + "fc89296b-1d2a-4438-a063-05c0cca22292", + "e5abb491-a13a-4a2c-9a47-617708769bc0", + "ff866c81-fd69-4516-bc1a-ad0e693d997d", + "aa581740-8267-4013-94e8-448a76d94cab", + "aa71842a-8781-4bf9-9307-c8f557542bb8", + 
"0311fa61-cc5e-40b9-957b-5bae7f321cb8", + "9adea155-f1a1-4097-817b-67af8d26a6a0", + "32653f52-5c2d-4cf4-9030-8d7aa6ffa87b", + "57483a2a-2c22-42e8-a529-e0786ab63559", + "5d46a9d2-f9ca-49ee-ab04-91c339116411", + "5211b14b-b772-4e49-bbde-3fde13927fdd", + "e5dccf5b-6838-4f72-b382-d4534b26545d", + "d19c0597-8ce6-4ad3-b766-edfd9bb64a5f", + "75fa6f4d-0591-441a-a3ac-aba3389bb8fc", + "3587f195-8d5d-485b-91e7-03f6597e9b24", + "5d141384-b9f4-43b1-8cbe-bc076e629256", + "243de966-f18b-4caa-8675-ee47257c9f60", + "7c767a4c-7217-42c8-91da-533e30a73d65", + "7f3c97c4-42f2-4309-b788-cdba0030fe9a", + "9c4d79f7-d194-4cf2-b651-a9c320bba136", + "39e51480-f538-4680-8b7d-cd5c7e523c81", + "e9424a62-8ed5-4a89-b628-17c279b1775a", + "89c3c3d0-04a1-40cb-986a-5490a78df3d4", + "4251d37f-f687-4793-813f-728002f6e485", + "adf77872-98f0-43ca-a7dc-fb1afda930b2", + "88cb0491-d22d-468f-be10-3e1f94928e16", + "5b2d08cd-02bb-4ae4-bb19-cc3fe998e493", + "de132aa8-c7dc-41c9-9a61-5ea85d9b13dd", + "47b9f549-cc46-4a97-a630-8cda6cbc2884", + "2ba6f927-dc16-4073-bcd8-bc2f10eabc46", + "b9137f82-1640-40c4-bf2e-0b1f32b030b0", + "56f8c8fb-f8dc-49e2-83bc-b4e061f14e20", + "43dc9958-af73-4504-8893-de99c339d978", + "102391db-f733-4e0c-ad02-4c6a291e1ff5", + "a972240a-3f30-4a95-86f3-0a809740f3ea", + "6a6b6342-9728-4721-a72d-2f5c35ebbbc2", + "9788fb26-1036-4232-bc36-d4026569fd79", + "fb69369d-e306-4d72-9240-b80a197a207d", + "5b49e3fc-cc09-4c59-b42b-36d2b088b561", + "cfcbd0e3-eaed-4848-b04b-8479cb0cb9ee", + "ecc59ddf-e5fb-4266-8723-e81ad59db2bb", + "450b27c0-27ba-473f-913d-544a7f2e4535", + "e1c3676e-d5e2-4e1b-ae46-c46810b7f616", + "7bf53791-e740-4cc6-8931-d6ce00bc359b", + "6baac305-48db-41ad-8efd-d749de279d82", + "1680854d-c89b-452c-ae8d-7f45f65fce6d", + "fb056a25-bba3-4608-abc4-b06229946abf", + "c108b47d-0cb3-4ff6-9e38-26b69c227a4f", + "f905afe3-b8ce-4f26-a99c-e09c60b0a9e5", + "57397174-5a82-4b71-a463-e4b03ecff8f9", + "33201983-afaa-4714-b1eb-d9e76f80116b", + "a4db125f-e5d8-4e23-9d0c-65e29f345e50", + 
"64c7e714-0787-49b5-bb74-c07b65ea1313", + "78a9b319-aefa-486d-8854-2bc9f403ba5a", + "27081fb0-a814-45f0-9b50-ad49911fb5de", + "e6f774bc-37fd-4a01-963e-b2bbc3fd00dc", + "d7d55dbb-b11b-425c-8091-186bc9343c3e", + "3daa7fb2-55d6-4c48-bc67-d49897e98525", + "4c51204c-3a82-4180-8484-991bc27e16eb", + "6db2aa0a-0af0-4d67-a085-af9545b1e2ae", + "6280677c-a211-47d4-b208-9d386126022f", + "f24786e5-197f-4244-b340-bc70ee7530b4", + "61a5f8d5-beb0-465d-b01d-d8bbd265082c", + "55463427-912d-424b-b7ac-b987ad755440", + "154b1912-923b-4d39-9347-dd82adddbab2", + "884b7b1f-c476-4187-8baa-4362544b2a1a", + "42dc64d5-30a0-407b-9481-ac8d5d262165", + "bc7b2875-a601-453b-a303-a3a4368dc488", + "b5e8cb9d-546a-4208-8dee-dce7b4f2f27b", + "725ee4fe-c59a-4f6f-98e4-a4030c494ea8", + "0f64e0c2-9768-423f-90d9-a6d347d9974a", + "88398849-e2c5-400b-8791-d71d79996b06", + "f0ab7732-6c20-44df-8d59-77088478a9b4", + "302f3955-bf98-44e2-af4d-c33eae59bfd9", + "f4df1dce-161b-4a48-a901-4ee0bac20d8f", + "10d9152c-4249-4768-b11c-d205b6dc167c", + "85234d52-9fa1-4e84-bc18-27c0a1cd7d84", + "f123ab19-3964-49ab-b2c1-fd8d95f181c8", + "8ea43fd6-132b-4323-8d9b-35e3de7e5fd7", + "df2cc30c-844e-4d7b-9fa5-ac66d48e0b4d", + "06639273-62a9-47f9-ba6f-44b63117a0d2", + "7a7adebc-3e5b-49f7-be8d-2d72580c711d", + "31fc582a-c3c1-43f3-9c9c-915e01e67fd2", + "95e33026-7178-4368-a7b1-a9e416b2b673", + "c79d4e4d-8e13-4881-b425-831180f548ab", + "c51a8e3e-2239-48bc-9b67-77b10c872393", + "49768aa2-77fd-4dd3-abf3-d8b33a3ba680", + "5b4d3e5b-3eeb-4d68-afd1-5ba170aca18d", + "19c71a42-c76e-4488-a391-eb4b85f81609", + "36576023-bc67-4dfc-8055-9d238dbd5dc8", + "47065237-634a-470f-a34f-cdd084df3622", + "962d4d4c-1218-4951-840b-cbab1b9b59cd", + "ec2c46b9-c46c-4281-9077-0cb812ceac9f", + "2906ad25-92d0-4258-83b3-e0a7f314a450", + "f93b0549-4d91-4f38-8057-d3026defae59", + "e84fea4d-bfea-4cc1-ac38-9fb872636c55", + "9c2aaf08-a16e-4cd5-b9c0-6d78b860714e", + "d0385df0-82b1-4bbf-9df9-4b857cb04c1a", + "dd8766a4-9e57-4821-849c-b9905dad6595", + 
"e026953a-f6c3-4cb6-92e6-f26a41746bf2", + "721c0e83-16ae-41cd-8237-6e0cc92e6b79", + "97faecd8-00c6-48bc-90b9-be7c8d8ce16f", + "4ccef8c3-fa66-4a51-b9cc-6809af24f3fb", + "6dd0a842-7642-4bf1-81d2-20bfc0699e3c", + "45fb7a3f-0a86-439c-b2ea-d6e6d77834a1", + "c18c0148-bb49-42f9-b580-5c62f63961ac", + "e21f56e2-a77e-4b7c-9941-39d7fae651da", + "ede11877-0568-4245-9274-cb955abeed38", + "f70785c7-96c2-4561-90d4-95690a929a53", + "ca18e9b7-ed20-4b5e-b614-5684656508d6", + "1abcf423-d530-462b-bfc7-ec96e04a7439", + "19038cab-9300-469f-afe1-860214e8051d", + "56b002a7-2ef0-42c4-8495-68acd46289e6", + "970483e5-185f-4da8-bb24-a7cf0e208ac7", + "db6f5f40-0be6-4e31-81f5-dba9d366ac1a", + "f4d3d0c6-1f6b-41ba-8da3-dfa7411deafc", + "6a979a64-c125-4cf7-9d4c-fe289f7dc4aa", + "e1a667b5-14f1-4e73-b17f-060d236512c8", + "cf048430-1d25-42f9-bf42-d6db1038bf8e", + "1f0e8ad1-c111-423c-b1b2-c3b28ce25812", + "e339a8ce-3a1b-4cf4-98f0-2e81aa26172b", + "870acb27-869d-4ff6-9e63-b8578fd76130", + "e8ff5b03-3892-4e84-af94-735db8a875dc", + "1f579554-031b-4c69-a4db-96681638bfce", + "79a7a49e-fb99-4bce-bd9b-20e6586d0d59", + "3190c4f0-af51-4d8f-8f49-be6eb117d801", + "9a7eae1a-1ebd-4cc3-af7c-228c0f662587", + "6117a629-5c4b-4eee-91c8-53e1e12c6259", + "3ed5aa44-7627-4258-9017-b7053576a336", + "3f8f1ae9-d915-4605-b0ee-560dc8ae9be4", + "7e0f51a4-3cf2-4b3c-b63d-aa27f5795adf", + "722cf48f-62a4-40d7-b7d3-647b60129676", + "8bdf5bda-e05f-4e6e-9fee-c58ae6be9a82", + "4713eaf4-b599-4bd2-88ba-27a04050ab59", + "60b40d04-f215-4063-afd6-cb5288cd1865", + "bfcc584f-6d07-4068-b613-5bf46d14f6cc", + "b237b05f-07a3-4a81-9158-defbdf7656b2", + "0c2b26a7-dffe-4f75-beef-e00c6a030f75", + "80c747ee-5466-4bb3-a815-ad07c5896178", + "5e370291-531e-4793-b0f6-4955ba0859a1", + "6b96fea5-49d4-45d9-bfb8-9c45e66465fb", + "f296bf22-9ae7-4a14-b9f6-3ce38b681e59", + "ebe445c6-64d1-48b7-88dc-f8c4cd8e075b", + "6c95c5d4-4a21-4cef-a8ab-2aab72dd9ea9", + "944f6b8b-f539-4a34-b707-68cf6790e874", + "87380e0b-ff06-450b-900d-69a674b3204e", + 
"58433c5c-84cd-4250-bc2b-8f812a22cba0", + "5dfb6ca9-ef56-4b82-9683-789fb9839444", + "1e76bb95-c168-412e-8223-35aab5d3c870", + "8eb903f8-bc5b-4e32-a933-743aec0269c8", + "183f46d4-4da4-4502-834c-01c5edd14b00", + "92ec0c2a-de72-412c-b377-c2a7ed79691c", + "38bacee9-5460-42ed-b093-df8a7ee9f286", + "3d390026-0846-4017-8e26-3bca15c8fb77", + "a76f67c8-9e55-45b5-ae18-f884ac6d01af", + "99941a28-e9ae-4f52-9420-58dbab841af7", + "577c7890-c2e5-48f0-bc36-21c1c7690ae9", + "e5929cf0-4432-4fcc-a7da-30b1f8554467", + "b452fc10-dcc3-4ec9-b881-ac20e2099050", + "48d07f8a-5bf4-439b-8e9b-ff38d4d85bd2", + "ce3faa7b-3080-4f6c-ba0e-ac50edff6f07", + "ca0ea708-658b-46e5-9f73-0bf99cc3cdbf", + "eb959b97-744f-4f97-99d6-ab04c8472797", + "71098e75-b046-44d5-8c7c-6ffb4b6dc732", + "6f61a583-4187-4a59-8e9c-95ed7dcf2453", + "ae5ac56d-268d-4bdd-928f-26e84eb39667", + "cda77e2f-6f1a-4dc0-8966-ac9dad941af5", + "d6651a77-5636-41d9-a73c-f2fab5103408", + "802b9008-f6af-4dda-a93c-72b56b994a91", + "1371eae2-3ec5-4481-b68e-8f626aaa248f", + "3c18ed56-4cff-4d12-bf99-e9955df306a5", + "adeccfc4-5fa6-41f8-8b92-9524c62554f7", + "1dcdbfda-508b-4353-9d73-0358c8b3f5d4", + "cd44d94f-fd05-4992-9982-5eb8b7d5acde", + "455bb157-278b-4270-a0be-fae96fe2d40c", + "2ff6030a-86e3-4a02-9828-c9cf666da2ef", + "b3691318-cc7f-4c45-9c32-b5b4be06064f", + "1a3256a7-b308-43fa-b817-4bc47fac3dba", + "03b547c9-5159-4021-b277-9a9a2afc74b3", + "7752ebb5-4517-4250-b641-acd16b0837f8", + "8ca4e7bc-b6b0-42f2-a9f4-07783a7e3d3a", + "f34d181f-a155-47d4-8a11-20c9e32bb0c7", + "176e43c6-55fe-4469-9ed7-9a249f871abe", + "b43f7ccd-cd77-4054-a130-0299e3b11a23", + "9a895d67-7d30-496e-9c83-23befd305d98", + "0d3e699e-2aae-49f4-bd68-835af86e7544", + "c9b00c5a-7586-4a70-be38-ac64f9f5ee0a", + "0b625e5a-e922-4017-a70b-ed023fff09fd", + "53dc5df1-2a7c-4945-96f9-cf4865bfea30", + "23864560-39b0-4eea-b2ca-3634ce181626", + "b23c97a1-0761-4e1a-a55c-e078eadac5e0", + "5f9348dc-0ada-4ea4-9288-30025928a615", + "5b945202-1031-4078-ad64-0f43b3d21500", + 
"5bf4c324-a372-4116-8e87-d66fbe23458e", + "b3ed9e4a-3dfe-46b1-b123-377448911514", + "dc9d6673-db8e-4861-a21c-d655552cf8fa", + "aa65cfb8-2503-4b36-ba2d-c1699baf8ad2", + "ffd1bf85-0a82-47f4-a75e-37493eb7769a", + "3854e730-dc24-4562-8c5e-d926232f5cea", + "d9ac44e6-d21a-41c9-8bf6-eb245c23212f", + "7d6a1d86-d9f8-4c8f-bbec-2c809f70e181", + "35701b17-a1da-4f34-acdd-8002f80f5a29", + "4fa14c50-6dca-4994-887f-9d15142e180d", + "41a0d2fc-80ac-4958-87d9-d65cccde1d49", + "55fffd92-25b9-4820-8f8f-8140487af83a", + "140513f4-68ac-4d93-b2e9-517765e09f46", + "f98a5b99-7dd5-40b7-a579-a81658fd17f4", + "50667c39-405b-4074-9f7f-930ef9ac4ff1", + "395824e8-b7e0-44c0-a9df-75781c7bddfc", + "80b9543a-7f72-49d5-868e-25c616b6fd9f", + "7f4ef000-dee1-474b-b0a5-765a8dcafb35", + "c96b3707-7537-4986-8d9e-4021f90800d6", + "05150508-789e-4000-914f-3dea0bbf5482", + "95bd480e-7883-4eef-aa32-3aa44132cfc6", + "a4218ca9-a4cf-4ad8-b4bf-63eaf3b87f9d", + "8c157d46-752f-4e76-8eb2-fb25ab70988c", + "0dd0018d-a9e2-4e11-878c-817291531226", + "3e4504ef-204f-41f6-a98f-466f00bb5167", + "e2ef50a6-3e5b-469d-ae67-94d155549a8d", + "2b921758-0c14-4aa1-99b7-e7d10129d297", + "0a6a5797-3dbc-4bbf-9d4c-f76eaf85cd71", + "44e98cd1-abc5-4034-8b8a-9293b1646832", + "7a840a58-df1c-4af2-9758-cc1251e94371", + "806ae71d-5a1d-4285-849a-145fde359b90", + "ded36f7c-b429-4f2a-b3d4-24690609f4bb", + "693f3342-474d-48cc-abe5-2acc881466b9", + "303f1e30-c786-445f-8933-306850323e46", + "06aa3100-d21e-40a6-b521-19e57c092212", + "415c9c3b-cf4f-4c31-a49b-a923deaa7248", + "9858ba75-0223-4e27-9962-3bf2ddaf9e92", + "fb768445-2c85-49a4-bf27-ba47a6856d2f", + "aeea6bac-8a5c-4366-9d1e-9f31852d816e", + "1e4c7123-97d7-4344-85a1-8331e8002868", + "486c9baf-e3d6-4547-bcbf-3383abda78f5", + "8a1add0e-6701-4270-83b1-16d5fe3af76b", + "5de3ea36-2b84-4d42-9e36-85ff044c31da", + "61b20ffd-1803-43d1-8f52-da1ada5a5d32", + "ad303aad-db90-425d-8c9c-2420b23a3345", + "26ef0462-8f73-40c4-99e7-458ceff6b3fe", + "5875a71e-e4ac-46d3-9af7-1dd03b6bb81c", + 
"22b82242-efc8-41f3-850e-0e05e6c0755f", + "788c047c-2456-4317-a3fd-c6fa79249b15", + "274d2388-266f-4120-9f79-03ea2e60c8fc", + "c553a086-fc3f-494e-a048-e1914f0fbf10", + "d41a2452-dc32-4b41-8660-4d457590b2bf", + "17dff76d-4d17-494b-a71c-10f800e7c11d", + "b8f9379f-2c13-44c1-a169-ae169f853182", + "da77ccb8-6854-4205-8112-4b9bd6d0e6e3", + "1be8a9a8-4817-427c-87c3-a7837dfa4e16", + "bf2d3cbf-1816-4afe-9005-5d1e90e16180", + "0a25911a-c18b-4efa-bffa-d765b3b0ee33", + "064f3d1d-4a60-486e-bff4-35691f642686", + "8996c0d4-276f-4494-96c9-e350afb3ed04", + "bca6612e-2387-442b-95f3-67c9955fa2ff", + "b954e8ca-6a18-4459-bd65-c23ccf64aefc", + "49dd5fd9-0547-4a7e-89ab-f7faf253fdd8", + "00d97821-e2cf-4516-8998-4e1cc474e7c5", + "1fa834f2-afc8-4bc1-bc7b-03bf91424705", + "73e9f50d-ba08-48a1-a9e9-b8d5ce0a1c93", + "0a47435f-4402-4f3b-a6f7-fc42d85a42b6", + "67d2cf4f-2d74-4b2f-acff-a66288efef3a", + "eef86d30-2b90-4eee-9e43-61de42e14eec", + "41424ee9-e6f0-49a0-87ee-9985a9e70bb8", + "416fa35c-d094-4305-98a9-1c0c8bcf6314", + "d6e9158c-bd22-4205-b453-8cff74decf16", + "cbffb23f-3c9d-4237-a871-7cb8759e44c2", + "39fdb905-1627-43d5-957f-4ed34d0f61eb", + "b9bb59cf-c362-4f4c-b134-69fc408fd572", + "62fe8fa5-80e0-4db0-936a-d3a005f82b3f", + "55a4ece9-e04a-41ee-a3d3-6f6053188b7f", + "ad5bf594-5393-4df5-a683-17112be043bf", + "b6576342-e90b-4ed7-a18a-80daec6c082f", + "50810385-6ec5-4546-a90c-49d0195c1ab0", + "bd2051ea-40d5-485d-b060-6d178560ec7d", + "da00bd5b-380e-4819-9c1d-ba4a2868dfa1", + "ec7572f1-1e53-4fba-9153-0adb343ac051", + "da98ee4c-bc6e-4bf1-9cd2-0a43ca51c57f", + "45a2c865-af2f-4d69-9a16-7c809c6cbc23", + "c933ed2e-4174-4282-a12d-f7ab60f30971", + "18b29bc1-ddf2-46c8-ada4-f8f4255ccabb", + "10478466-b6a5-42e6-a443-df7bf92a280f", + "47d4d49b-a5eb-4bc8-876a-81826a942ea4", + "0e58dbb7-9aa1-4cdf-840b-7df4ae3f4c9b", + "08b886a8-8095-404e-a439-94f2768727f0", + "4c554a8e-5451-43dd-9e51-7c92b8d113c3", + "b4a45ae1-fbfe-459e-aa58-d744b0fc1a80", + "dafd9f7d-4b69-4c90-817a-6160bf51892a", + 
"cef687aa-5df5-4ca6-a112-6a5128e5b0bf", + "b406bafb-f2bd-4288-9492-3c46128dbe6c", + "b49cf9ad-6c52-43c6-85d0-114d6fd911f7", + "7b536bc2-790a-41ef-8f80-85046b031ec2", + "de966b5a-23ed-4e2d-bbaf-05d73c3e33f8", + "48973120-61da-43a7-8a5e-9445044a2bc6", + "8f9c68c9-5208-451a-9416-5eb253100fac", + "7539eeae-945c-464b-86d6-9f6913784a18", + "733d2dff-72bf-41c3-8e35-a4baea4ac181", + "4b8fb9a4-43b7-49e1-97fa-dcb7fdf3b687", + "b6fb5ff6-09b9-49cc-9526-66d953a8cd3f", + "cd1c3f32-5a39-40f1-9457-41e87c02e651", + "33da3adc-14d9-4dd4-9087-ad473dc24e1d", + "71275179-f5dc-4b0b-8611-5c12343f9a15", + "1be7cbee-1c1b-4008-8f35-9a490f9592aa", + "8ca9fb97-df26-42b2-8232-896d7cfabb4b", + "bd2779be-9ebd-46bc-93c9-6e084f275de5", + "6e4aab93-d216-478c-9348-bdc68d430c7d", + "18d273fe-c17e-4490-872b-4d1f29d4c9d0", + "e585c419-eafd-46aa-ae22-eeda53dd8ab1", + "7d51f341-8338-4c10-85a5-5cd6fe12c56a", + "af022c68-24a3-46d7-a274-78327034a33b", + "d2472fd5-17df-43ac-ae63-69b6f3b03be3", + "39df9b7d-2d9b-4314-9435-ca2c3c0e9546", + "cc254979-ef2b-45ed-b50a-4fcd3b6b1efe", + "bd280ee2-13b4-4d75-ba0a-d9a3715b19f2", + "addcd364-7e45-494b-9af7-360ac8a42067", + "c2f0a349-e309-46ea-93f1-5a6ac4c8d2cd", + "db5d9d78-8a9e-41d7-9975-970a252a690d", + "cabbb66d-e7d3-4840-8eb7-580056917d33", + "aa1785fb-10a8-4a1d-acb0-f52a9bcb220f", + "3e1e00ed-f687-45ab-8960-cf95e742234e", + "05b4590c-25f3-43b6-b6dd-3c40e68aa872", + "6416e886-5633-4f58-af74-f82a8e40aeb4", + "6f7235fb-9030-463d-a398-a5118c539d91", + "457bd2f2-9c3b-488f-89cf-fd9288208fd1", + "d39350c9-11ee-4936-9bb1-ec597772d578", + "dc130880-5632-4597-9c91-db4b90fb4123", + "91f9d51a-fe77-461c-ad53-85b0b1504cf5", + "2891af75-e843-42dc-ba44-cd6d5b830032", + "3e84c126-118b-4a90-a43c-e226d16a63b0", + "d202db2b-24d3-43a3-9b3c-d45fe02a0ca9", + "0bfeb596-69e7-4341-9624-f69a83bae51a", + "cfbc9803-7870-4767-9329-df0ca96d377d", + "f6ef1f6f-0678-45a1-bf62-b06bbb30e775", + "02c37e51-2536-418f-bfb8-c0cc2b5f6706", + "2cf15a38-a4f1-442e-8bba-65307102c114", + 
"6a6fe218-d181-4f6a-bc7b-847e126dd3e8", + "83def0a1-2e0a-4e84-bdda-55d1254ab0f8", + "42d68baf-cc42-4685-98bc-b5b573952e50", + "777ed020-6c50-4ab6-9a4f-c7904b1d88d5", + "cd90e2bd-d3df-4faa-8859-0d7a1c8f0f12", + "67b9549c-a522-43da-80a8-4bf99e70fd32", + "2a559cff-89d8-4cbd-b8af-fa2e63a29c09", + "a62fa0f3-2b2a-4567-b110-6e50b1df20c9", + "fee7d7ad-28b4-4c88-afa5-fc615820963f", + "a5441077-c056-4e4b-9402-b775431b242c", + "bc081f5d-3733-4c48-8b95-b7f47c6328ac", + "ba7f3d9d-4fa1-45e6-bb12-2ab5b6d41000", + "0de09c14-fab9-487f-ad9e-6895956beec9", + "9bb42a29-5f41-42af-8a94-52810c1bda10", + "722347dd-835d-43d0-9123-0ea8d21b2fb6", + "0259e565-395d-4398-b843-09775108d488", + "0d953cdc-b177-40c5-952f-30507f17b57b", + "3e881f89-aeab-4a4f-9051-2d347c431aca", + "f19be77a-edb1-44a5-8d6f-bdf34f005d5c", + "b90fbe88-7d8f-461f-825e-4dab585a47de", + "2d6b36be-2a9b-4411-95a2-189896acf433", + "5953e7ef-d771-4da2-ab31-4a9f6ee7167a", + "038bcf72-4065-4098-ad81-3b07a1ee9804", + "063294d4-c8a4-4b8d-a1fa-f5e89b34a16f", + "05db8a94-8f73-450f-81de-647172b6b513", + "f6e18d74-2ada-4860-90d7-26e9d80e28e2", + "68aedaa5-9fb8-4648-9abc-a6f8cf2b5047", + "d904360a-a70a-498d-af20-353e9fa646d4", + "1f29738d-4f7a-4d58-a6e3-d8dee0910b65", + "fa288c65-0e50-4050-b085-383bf82bd607", + "cbd884ce-5f2f-4567-9298-ea4f95fbc5cb", + "f3bef807-670e-49c1-8b69-c9db2c13bdc4", + "fa9d7568-953c-42fa-90a8-ec0e0bef4595", + "11e4f6c4-5c3b-4f86-a52b-44eaf245e95d", + "fdd8811e-81a2-4013-9e1a-8a0906d1d68b", + "8b50cc41-2723-4311-8cf3-68c0a304fadd", + "305c1f45-417a-45f0-b187-08abb8974bad", + "5ba75f3f-e71b-4978-9949-6a012d22a1ac", + "7669a7b2-0826-490b-a30f-d434ff43dcbb", + "b4c56bfa-64a2-4351-8b18-02bd8ead6920", + "144f1de4-17df-4c5c-8633-a6002fa3f486", + "4acd0930-32c2-403b-917f-050f37a5c04c", + "d6bf044b-031c-4418-8ab5-b840d1f16d5f", + "8542c525-5998-4539-ab59-dade3e778fba", + "8d19dfed-5fca-420e-b427-801bf1a51bd5", + "64c864e5-8f80-44f2-af81-a730e568c6de", + "aaee1f0e-8ea8-4a33-9efc-57b93c00c6b3", + 
"bfc1727d-a639-43c3-8fd0-6514a7b79cef", + "c2c70097-7247-4ae2-8828-a59eba379cf1", + "550a9f00-e455-4de1-b810-4b49534d0097", + "9ced8b47-64dc-484c-b461-20ee252b0041", + "f707ad25-8a63-4037-a6b8-5075362673bd", + "65b203df-9b4c-4d00-9e2a-208848f80d03", + "72e14fac-37cf-4de3-b3d9-e2f535131c3f", + "bf4958fd-de52-48e4-8c73-4deadb521194", + "d08ec8ba-92c8-4ae8-96fa-7cded6336ff5", + "d84d9579-70f3-4636-b35e-81f2a1b0ece1", + "b57de5c9-fb55-4c8f-9ad1-064f9104fb8c", + "e5abff6d-12bf-42f3-9e2b-c160bd09ef80", + "738e4284-6178-4a18-8cf0-8101226de615", + "1f1f9e6f-251f-4acf-9a73-7666f882cd88", + "da1f6250-7852-4dd2-953c-e699b1c3d862", + "a6de9205-5466-4338-8dbf-cfe1e2068681", + "e3054986-1d33-4d03-a072-f193c8f6a579", + "fc840cf0-e291-4892-bcee-08e68d33d858", + "a4cc0d42-1677-4bec-9a3a-39a314941404", + "e5b24b13-b85a-4e9b-9d5a-5a5517695106", + "233aec07-2b19-49a5-85df-b2252289b814", + "289367d5-f046-44db-9d7b-a113781941f2", + "0d90a347-bdcb-4606-89b9-ceb20d37be32", + "6af0c989-5b5b-4a2a-9de1-00d69614d875", + "c771650b-65ba-454f-bd5e-58442b359291", + "1852549c-ecf4-400b-bc83-95361feb9ef5", + "d85e1245-0e77-4163-8d66-f011f8a75baa", + "e74a6155-cecc-4967-ad25-77c2b9f90d42", + "7c4d02b0-df7f-48c9-8e5e-a56a0dbbe375", + "36d5586d-217d-4d18-8cf7-440e4a6cc3b0", + "e39d8fa6-c10f-42c6-a86d-908b7d500856", + "25d873be-c760-4bec-bc36-bee953841699", + "465bbcec-85b7-4254-84c1-05683d86989a", + "7ca82faa-9fc1-49c4-9065-d938edf0bdc1", + "12777b90-2932-4265-974e-3fe9bb83602d", + "cf3e402c-8419-4b65-abc9-fc77be2821bd", + "0654f65c-90d6-4977-871e-48544b153eff", + "413ba511-6aa9-468f-8a00-adcad17454c9", + "b3b8a221-0a5f-46d0-8a80-45d27abf99e7", + "b3c3d1bc-c395-44a4-9b20-c4fb5ca468b1", + "7a1eade8-1781-42c7-84c8-7939d5f50d17", + "762f16f7-83c0-4f7b-a92f-80b16ac2c457", + "4ecfa531-d9ba-4b3c-b24f-eeb41bc3a9d9", + "b37a3563-8446-4b04-b757-5c399178a9a2", + "2e41a065-9736-411b-9255-39bb8fe03b64", + "f21c2d1d-92a9-408d-ad9b-76bbaceecb28", + "76c483d8-2faf-4e59-905b-f5135d2cca0e", + 
"cb3b7ed3-93f3-4398-8436-a3efad9b5644", + "d0dd5b73-4c5e-4872-baf5-864a0492f984", + "cbdda8fe-c1d3-4768-91ba-2cd8abcd25cf", + "747e0bbf-f060-482e-88ea-abd8fa388286", + "97ece05b-354d-4e97-aeab-550a700e7b31", + "486f7a58-4af7-438d-8937-5ba77b91646d", + "efaec765-a208-461d-a2bd-6e7fe26e1886", + "cc3775e3-13b7-4831-9016-664b3a8b72ab", + "9c07e523-3562-4ab2-a71c-cae841d0d2c1", + "4f386899-2d18-4bae-9fd7-8613b9eb2000", + "2c080e6a-c691-41dd-bcc1-0e1d6cbf0f88", + "ad2cc930-24e8-465f-bdc0-e30dfc1dcde2", + "e582bf53-e9a6-4bd9-8054-a03edd007a71", + "5ef3f5a5-a46e-42e2-9226-1efd00024dc9", + "853622ff-fd04-46f3-829b-560788041c9d", + "be6e12f5-ef4d-4950-8a90-92813b22bbc0", + "39ad912f-fff3-4d15-864c-44dc16f66f8f", + "fc39c712-2d22-4e4a-bebe-8c13d9b0c637", + "e496b23d-bb12-46ca-aee9-84bb4dc4e246", + "668196d8-1789-40b4-a470-d661e38e5f75", + "e0b06d65-f5d9-4158-b5b6-f14010c02d2f", + "203395ad-24e5-435d-b77f-063d6524ca6f", + "087c71b6-a2e2-49d4-af66-96c7ad3ad10e", + "f779ef4d-acb4-4b10-bfc4-eee7cf7fddae", + "c61bc39b-2916-4fa3-8d48-2daf76959977", + "2843a514-02b0-4d49-ada9-78fb5bde8439", + "9b05cf72-4a29-48dc-9079-8408616c2e49", + "904143f3-4ccf-4e82-8c50-1aa823074270", + "d40f32e5-246a-4667-9ea9-0ee040ed5303", + "d1401c48-1663-4496-af3c-90c0e9c114fd", + "0fcb13e7-caad-4d7d-b61d-3c5e03763cc6", + "4f7f2765-1c69-4353-8821-d0f891a06167", + "a6b0793a-fbd0-4ddb-992f-b29aec6eb0b8", + "045daa10-1b88-4591-9493-c03963cd1027", + "b98f0e46-557d-4f49-b679-b1260c03751c", + "baac843b-bce8-4cd8-9671-06f324cb5d82", + "01f25e27-decc-4bcf-9e0f-11705911710f", + "065f65e0-db76-4cf3-8b2c-5c95f2dda7b4", + "82b2d226-18a3-4268-b3d4-368d3966fad5", + "ecbe91eb-bb84-483e-aca3-9dd045df161e", + "6f465e22-1298-418b-94d2-ed3f4d9fc114", + "45346dd6-876d-4dbc-9b29-ddde4bacd064", + "a893331a-1ce7-451e-b3b2-6c707bf36950", + "53632c7e-c7c1-44e4-bab2-8e95fa2427b7", + "8cae7776-4826-4fc4-9f81-712ad46b4ccd", + "a52e9a31-c921-40a1-b2b7-559953204b7d", + "57955b05-9162-40c3-bf90-79cac6117ea1", + 
"965b03b4-47c5-4239-ac06-fbe566b64ab9", + "522275b1-ba10-4f17-a352-e0ba1cb1b5ef", + "0574a115-fcec-4df1-b7bd-3fb2f9be8483", + "1e976193-9383-4396-ba76-8e2d2ecbaa87", + "ef1274e4-71c6-4e43-b936-b22bfbddb1b5", + "97697381-7a28-434a-ab5a-a886d46f5a54", + "c65cf3f6-dd7b-4b53-8bff-f0bfaf2ec173", + "e35713fd-fc11-45f6-babd-8560ab46a47b", + "e08433ec-a39a-4964-888f-94f79f4d4fce", + "40954e0b-791a-475f-8acf-19604cf754c2", + "4ab285a4-0e07-4989-af89-ebbd3509534b", + "ba57e71b-a9f0-4afa-a6c9-391bc66a596c", + "d9b7eaf7-29b0-4e6a-9db9-8da95a25aec3", + "34488ffd-bd5d-4998-989e-74b96a0ccaab", + "afd449d4-cd62-4104-b22d-b543a69a302e", + "2b4e7335-4031-4e53-a14d-7250fc6ed49f", + "2a52e1c8-a37b-4cd5-a9ce-40efe6b5e9eb", + "d4bece25-dffa-41d0-b39e-21a1abaedacc", + "bf849774-ba04-49cc-b45f-8e7d7377d718", + "b529b449-e0ef-45e6-8c72-220329a10008", + "4839e877-46ac-41a4-a932-3582bb457e47", + "2a7f7b3f-334e-4e97-bcb9-79d8f223d353", + "038a4fba-4e5d-40fb-a2bb-92305541972b", + "8b0d6ec1-9df1-4b8f-b19c-16a38e39e4c0", + "3f0c617c-ebc1-4a29-a328-e02bc76bed1a", + "3fdccc3d-dbfe-4370-9e4b-a6367d998dcf", + "6a684683-1fd0-418f-b4c3-23006addde1e", + "033976e2-a119-4352-b35e-c58dcf7012c7", + "88fbc210-5114-4c7f-b136-1ff0d91d07b1", + "8cbcc9ea-57be-4670-95c0-628158d22979", + "83b7a173-76be-418c-b3fc-bb2dfda15d21", + "7e48c135-4f39-4b0c-80c0-413492cca274", + "491590bc-263b-4bd2-be80-612a2e4fcdec", + "8d863a75-1100-440f-83c6-bce724513d9f", + "6390ff3a-7c89-4105-b972-f58565272515", + "14bb9caf-11d1-4ee1-82f7-5a44f1efefe4", + "31a79535-8aec-4761-939b-52af04d07598", + "867e6a6b-dd0b-43fc-8fc7-9d564478c71e", + "1aab4c8a-62bc-423c-bb62-9ab3d0caf8f7", + "60c09782-0e9c-4345-af4e-ad28534235bc", + "799738ab-597e-4282-b9ce-09a595b73b1b", + "e114dc59-f948-4a68-a63f-5f78500f5358", + "747df04f-9109-489e-8958-b38d6c8d36ea", + "00481266-e8a1-41b0-bab3-67343f838fce", + "97c75e5d-0f76-4576-a539-19f60e1e43c5", + "cfbbcc1c-2817-41d0-bd7c-ccbda2adece6", + "0195ecbe-5c48-44c4-baca-1a01ef6163a8", + 
"506c0906-89fd-4a34-ba3a-aa5f6b4f5bf2", + "cd4ac99d-83df-4f1c-b5ff-66345a4c6de9", + "6702774f-7ead-4b9c-9035-ffe2c25726b2", + "f451265f-d7d6-49f2-b4e6-924833289872", + "a05116eb-7e3b-456a-8b20-4d28a1966b64", + "bf96c2a3-58cb-4bd1-960a-0f4947e90f47", + "6ed29fc2-93e8-40f7-a16a-052f6d8909b2", + "5daa3e32-6717-4ee9-ae50-1bafaa76dfe9", + "e3825d9c-37a6-4c9e-8c38-0e28a18ae8f1", + "563ac82c-ca53-41e9-a6d5-0976328ae8c5", + "efe43dc0-865b-4dd9-8e57-44451412107a", + "90fc644b-e48c-4160-8cf8-0dc78809e4bf", + "5cf8d924-d374-4ea2-8fd7-044bd38ffc4a", + "7d6f126d-e146-4d05-a67e-71a5bb7165aa", + "c34fa136-9cfc-4fc6-910d-5cccef6c231a", + "43e0e7b7-1f48-4762-9c98-80d23c5e7d54", + "b1d48d71-a655-446e-9e03-8d9cd22af909", + "4aacbd0c-0a4c-4998-a01b-c0e3bc0365e9", + "c8cd8fee-8006-469d-ad54-6fa94c10b9d3", + "792a7b0e-2abe-40bb-b04c-60f05785f6f9", + "556cb4af-a7d9-4227-ade7-543ab9ea7c5b", + "2b0040ff-2852-4050-a73c-f09de78a9903", + "a442d6b4-2117-4fcf-abf0-56b3d5d8a7b1", + "7636e4b6-0791-42cc-ae0e-0c13b9ff8f59", + "195ca53d-b147-42cb-9131-8e65b0e3f2c6", + "9829a855-ac1f-48e2-90c6-563e7e82cd70", + "796c7a1e-c24d-4664-853c-cb98d2819312", + "22de58fe-7fa8-4d0b-b697-62e27fc25a0d", + "7d5c170f-684c-4379-adf3-71c308564e8a", + "ed928be9-aada-4bc3-8093-607d82f75a2c", + "d5b57104-1950-400f-b5bc-8eb54ba3f2da", + "ed9f994f-d349-4678-b22f-7913b6c0f4a4", + "8e28b597-8503-4b0f-926b-01a71a08e5ec", + "f8928c30-e726-45b6-9d7e-cde9fad4994f", + "5ea199d2-71d2-47b1-926e-425bb76b7ff2", + "cc5d909f-f4d7-4135-89f1-95e6eb53240a", + "d7cbf3ef-8fae-472e-a31d-717133cf5ecb", + "433a58a7-1d7e-4269-a8db-c14b90992098", + "9fb39eec-8d5f-403b-b3a2-a8ca8223db1b", + "1ac973ca-9089-4472-be50-0df941c3a193", + "4dbe0b93-1373-4fcd-a49a-957291f0212a", + "270a4485-e7f6-4f51-8e31-aeceb92a3338", + "162402b7-e5a0-4246-b81e-e45be30682d6", + "7cc7bfa5-49f0-402d-8e18-fc337807a480", + "4f712c46-9544-421b-9680-86cf1b101144", + "12f9226a-bf61-4e1c-8020-24aec513ebb5", + "41ef8c2c-ab2d-4a9d-9d26-559b40f75f29", + 
"8d449be0-c3b4-4089-8390-8cc26a45f7e2", + "29a17202-04f6-4ddf-afdf-d3086f26fa31", + "557c55d2-eaa5-4b6f-a9d5-66ab8349c393", + "c744e125-3466-48dc-99ce-77e7b6c29254", + "e62bb1d5-00d3-4423-abe3-585661b8cc2b", + "027ecd7f-4750-4043-8467-d2f611f02876", + "005dc709-afcc-4947-9438-ca4c0ec5a415", + "e0100169-76fa-4f4e-982c-594599058f47", + "887c0bfc-33e4-4d5a-a47e-02ded7b4e748", + "7cd5b222-ac13-4ba3-ad87-6c9f6ce3c89e", + "94b67f3b-5548-48d2-9ac3-a60940aa2fa6", + "fad9281b-5c5e-4673-b9be-fa457afc897c", + "3b2c68ed-6099-4468-84e8-f11194bf1e7e", + "31658c5d-300c-4300-b5a2-45b1da79bf48", + "c052c05f-949c-4a54-86c5-c64e932f6469", + "99eca143-0ecd-4fb5-abc4-0247b4e22441", + "b322ef78-8e74-438e-bb74-26ccc0cf45c2", + "cb2849a0-96b5-4778-b2ed-9c95bf4601dd", + "e949003a-4dff-4768-83f5-a4ca16ec588a", + "73af3c57-7f3f-4009-9bf3-d13fbc5e572f", + "83279cbc-b817-4e3b-8649-c33443cc35bd", + "79cfa9fc-4280-414a-aae7-d2cfa039bd37", + "c1ae99d3-cb30-4c11-bf4f-132cd03b4e0e", + "f23c3f7b-111d-4214-a4a1-7473608b0db7", + "d2ed0815-e1d8-4915-9bcc-81781e86ffac", + "f06b691b-e099-4a21-8d2a-0856b619b029", + "a796c7b1-d4d8-4ae4-b55c-eab78e3d2195", + "66a62ac8-b917-4c3d-883c-0e1abaf7eb5f", + "8a3f02ad-10bd-45ff-802b-3c7f4b8a56f7", + "acd8b60c-c685-4c26-bd7b-db5abac71c00", + "98e9c0e8-d5d7-4e3d-ae4e-e0b9ba6cd097", + "5cc19b8a-dcf2-4929-945b-94585f103078", + "3b0166dc-22a2-4e4e-a800-960a7f1610c8", + "d267fa39-2683-4493-94c5-af19c7be7b29", + "414e9a07-a5c4-4576-97f1-7bf50b7c7c84", + "b1a7f46a-35bc-4bb4-93df-10591f5d1c41", + "07f880f2-1b09-4ca3-81c8-ff432ac90bed", + "1287d532-feed-4982-b6e2-98817a87b255", + "433fa4ad-a0fe-4d63-a211-279d97a715b4", + "6524cfbe-a899-4339-a6f3-6d91d9c0128c", + "bc629aa9-083f-41e7-ac00-9906cc2e3c63", + "f82150d7-b488-4a50-bd43-757f0f3fca3d", + "7c8dad43-54c3-47f9-a8e1-ec46f1100cc5", + "b9d0640c-8403-4cfb-8089-6b5a189f1469", + "86459178-d597-4ae8-9d2e-818f374b5000", + "24dc7328-9c60-46b8-9e17-65186f0227f7", + "28d7eebf-6121-4e62-93ef-d6fd20fafa61", + 
"8545e73c-9897-4ee0-8555-f7010b8cd265", + "e424f547-17df-4112-9a83-d0de2490ff59", + "cfd62511-7a52-4cd2-9b34-3ceb20b5678c", + "665f6c86-1bb1-4bba-8352-cce9665047d8", + "31574579-8ad7-4be5-a96e-237231fe0eda", + "4a340a55-ece2-498d-8ff2-d64a0539c60e", + "b94599d6-a150-4229-93eb-fc07e4e8a53a", + "724f1f50-6039-464e-8341-bb99897fa50d", + "63ee0790-5e08-494f-b8aa-4566fe9f98e7", + "c2cd6dc3-1b49-43da-962f-a72d613a146f", + "6b728d30-4e81-4923-a5a6-f1f991e97f3c", + "cc37da12-3fff-408e-9fc2-5635f7b8e6f2", + "c30681ce-94f0-45cd-a488-72c532513c8c", + "3aa98880-1a4f-4f84-8ee3-0a01e723add2", + "a557b574-e507-49e7-8793-89f577c12c45", + "8f81d4d1-04c3-40f1-8cb9-114438742a8b", + "177438f8-22fb-43b3-ac81-3dc31a24fbd5", + "ef45a1c9-2621-4de1-9780-a37274b19ff3", + "5e791578-ebda-4da5-be36-ab9fec07edc5", + "322e5057-f869-47b7-bd91-2000e2b4e2e1", + "7eb27746-8256-4028-9775-57548d2a9695", + "6a0bf66d-7f07-4cc7-932b-6fb7fdd8c498", + "09066edb-35ae-4ecc-a468-03fb8b2fea7b", + "2e2ecc20-1382-4b62-9726-74c2c4a84cea", + "fd0e13c2-404c-468d-9dba-8b17715ebd7d", + "1d33604a-9c5c-43ff-8db9-7c3e3effb57e", + "b8209fe2-35c0-4994-9638-6c6fdaa5dea5", + "94081005-e503-469f-aac2-438ec1951239", + "60478840-6d92-411e-84b7-532d918c2782", + "0f05c28c-e4b6-4952-99e2-4bcb7425f557", + "e11e09f7-5837-4dab-800d-955193d3d599", + "b0885d16-ab58-4712-a3b2-966c3672548b", + "2ac5e445-928d-4f5f-b03f-f3fe7518e581", + "caae4893-21c2-4e30-9c31-d17286010271", + "e21bdcea-d4ed-469d-b751-48f66d8985f4", + "c37a8034-62b3-4508-a57f-43d326df876f", + "b1f1a0e3-f154-40bb-8aa6-3da516a6b474", + "19e9e548-4893-420b-a77a-0499a6a792d3", + "ad45a98d-dc34-40fd-b123-a6f1429c1bd5", + "3401cf0f-674e-4c8c-a69b-1fa2459e97d7", + "deabff4c-6df1-4695-906b-2ed391822b0d", + "e3e74eed-932a-4d21-8e8c-c61295f68170", + "6bb3cb21-c7a8-4d5b-83a6-9dc084e8515a", + "6c630a7b-fd53-44d4-866b-a3e1f3b0ef5a", + "683aa595-c282-4163-ac12-c96b7831e22c", + "413ae105-5e63-4478-acff-d34d8c69bc93", + "f4be2918-5d6b-4eeb-9177-64655459228a", + 
"cd13750e-07fe-4453-b89b-04147f731a0c", + "bbc02399-ee03-4133-80d7-81ca50632490", + "6d77fe5d-871f-4e6f-884b-8cf4bd8343b2", + "5964fde5-e017-4064-b5a2-ad5fd4aec665", + "251fc55c-b2cf-4eac-9e1d-dcb237ad4ac3", + "8f078c66-7bd9-476c-aef4-b6b326d68851", + "d4d38907-8333-4474-b94d-fc246fe10272", + "dc4bf455-53ec-4ab4-9a0f-834096c8b401", + "ffff475c-41f4-42ca-bc57-a19bfdd66e27", + "52afa330-c1c8-4820-bf81-7191237f8e5d", + "248fa036-4645-49d8-aa2e-050f790add8f", + "e0c60b5e-bebf-450f-939e-a0ca89fefcad", + "156b2373-bedf-4658-a2d5-f083ec6bc5aa", + "8c61318c-c09e-4726-b94b-3cc2fa4c6485", + "668d3254-9e08-479c-813a-af42a891a408", + "5b14dcc5-06b2-47e2-8ee2-651795d9b039", + "81be191f-28fc-44a3-a684-35d3ca5a5bac", + "83df11fe-7515-4221-94d4-f4c12921b0cd", + "13619473-3322-4a15-91db-5988cdaf2216", + "d6e85647-fc32-401a-abef-7e2f3d770beb", + "c6952baf-4f21-4d97-adac-f9a3de948140", + "b5f512bb-92d6-4ddf-a698-a825d85e79e6", + "f2500c03-3422-4813-91a5-f0a717ed1f14", + "cc917a39-fe39-40b2-85ad-ff83ce976010", + "bcfb9aa3-947c-4e0c-9fe7-149d8587eef4", + "150a8971-85f9-4635-acaf-70ee8adec550", + "5e70f194-2e68-464e-8aad-4170d5f9a6c2", + "b0c635c6-dc0c-4804-8587-b198f1306245", + "4c299fda-73e4-407f-be39-24b81748f678", + "9565abea-0dae-4a31-99fd-7d9dd27f3b6e", + "4019cf95-019a-4395-a239-803b31c63494", + "977e4d09-d2a8-46f5-a736-f9d3e69e4f3d", + "f6e97ef4-46df-4c2d-a6e0-b4097f385352", + "7ab8b974-1d85-4711-80a2-03e3f649a2dc", + "254fdc7d-f0a3-4fa2-8918-5b593d2351b6", + "71c7a540-85c7-465a-87be-0ae50120ad05", + "7132f2c5-48f7-45a1-a561-a89770cf4d46", + "c923ce78-50ea-43df-bea0-80327dd5f8d6", + "22de8192-d93f-4d89-ac97-e202bc9d91d5", + "5f4eb6fe-c6b6-4418-9f73-fbe3fd475c18", + "4803047f-12c4-431f-815e-bc30ced0864d", + "dcc73d85-7694-4ac1-b198-cd90dcc24ff8", + "0f423bbc-8dc2-4744-961a-e9ca1c46aca3", + "c8ac5fa2-c0da-46c4-b7ca-35731bd1d3fd", + "60aebd5a-a188-4f8b-948a-ff25eaa78175", + "84ae0028-1ff2-4807-b424-2648c2740747", + "3b12115b-5f0d-4b6b-ba9c-76628d8ae7ab", + 
"da72f64b-68ca-45a5-aa69-febd3d34dc59", + "e4429aa7-e64b-44f1-ae9f-15d07ee8be7a", + "9c47d1e3-d34d-463f-a381-6f6c8b5e005c", + "ed962026-3504-4a23-996b-9151fa243b1a", + "2ab60538-ae94-4923-936b-75a5d9ec3a00", + "dd4c04a4-746e-4dad-8624-8947355db64f", + "4bf4b0de-cee6-401b-a3c4-af5b26c15a33", + "da81093a-1762-4fdb-89b3-71279cffc392", + "95a80bf6-93ce-47ff-89f2-957930843e84", + "3aae2ae5-b439-4342-9325-cf206a2d978e", + "3657c252-a240-41d5-86f0-717b088e1415", + "618e48c7-b0ae-48bd-9eae-7b821d8c61d1", + "594b15aa-cc94-451e-b53b-a44071673a9f", + "481385ea-0c27-4bd6-948f-e86bab7499c8", + "2ee3eca0-4de3-4fa6-a4cb-46bcca2d135f", + "9d38927a-b4fd-4270-9c09-526d1f9f5f6c", + "796c3fbb-bb03-434b-abb0-8f83a75bc062", + "c5b660df-6b91-4175-996b-d54f3698fc5a", + "868665e0-7824-426a-9e09-f5b572880007", + "d5a8a841-8295-4cc5-8a27-4b86aa11fab5", + "c60d1100-9811-4c20-b7fc-f07810ea127e", + "00b53944-ff7b-417b-ac60-01b1150d2e5f", + "82572fc6-bfae-4f13-98e6-8f27bfbefcfd", + "99cd4e93-1992-4552-8f39-fde7ec6727fa", + "c6a01422-1951-4118-a2dd-aa228fbe6c34", + "9f7e0758-5cf0-46da-a020-6c7c5dd3798a", + "dcb721b4-c2e4-4ba8-881d-7dfb52c05cfa", + "152b8dd0-c93b-4b40-a976-1a1737bedb2a", + "51c51af4-6e04-4f99-8112-236d332b69bf", + "ad72fc57-f226-4ccf-a183-336e16bce6cc", + "14cdfb54-d1b4-4e93-9a72-e4af286553ea", + "8c44019d-8a57-4382-8cc6-b20c6aae2225", + "7794b347-073f-4e63-8e37-96bf56cb6988", + "5dceed61-9b28-47eb-9bb4-d630d91adf71", + "74b6aee4-ac93-4bb2-a966-51a1f2f333ac", + "d2fb801c-0f30-4c01-8272-ab08c4dd6906", + "78efa71d-f83f-46ba-95db-71a712e3337f", + "af7319ef-059b-4995-8488-70273e8c92e9", + "70d7eee1-ed24-4901-892a-ace44966313c", + "be8f2d92-535f-42d1-a5d0-2f938c713a9f", + "71cbfae6-5d97-4b84-99cd-3f057b950dc7", + "2a6d8f86-73bc-4ece-9705-5d5355b8df2b", + "64999aea-5f8c-41cb-b4e4-c30ad0c6b613", + "40fa5866-90ce-43c8-8214-c97839e38d1b", + "31abb3c9-fc57-400b-b576-8d39cbaf516a", + "31935d3a-9bb9-4478-a5e3-9fb60fc9a84a", + "046a0989-9fd7-4147-8742-89be426ad334", + 
"0066acd3-980f-4935-af2d-9534bf2c5110", + "df636365-c252-48ba-8e84-228dc7411005", + "156aabae-b4e8-4789-9782-19b4d9b31b7e", + "379766b1-9708-4320-9e3c-f6650a9ed104", + "4a0eb61d-367d-4316-af3f-c5d6079a0e6a", + "1f2e3619-6067-448c-b608-440cbbc0ba03", + "646fdf44-c29b-4f6d-9e51-0e8c05e7bb6b", + "0191614c-5864-4dbd-8176-c110b9eefbc9", + "5efb0498-8177-4fa7-baea-0facb43df24f", + "a1aa3df9-c264-4659-b719-0f6a96a982d4", + "ff857438-653f-4958-a733-101506f109b8", + "de816283-cd40-432f-a7f0-588eba8b2ccf", + "ecc6feab-0d1b-42cd-9d5e-2f43d29ba0ce", + "a5c9d015-18c2-4384-9e8e-89550cdaa953", + "9868b870-4767-45cb-8235-e645d6d99cc2", + "9488c799-18eb-48ef-a6e5-2fad27cf6719", + "955876bb-795f-42d5-aeba-776e8cfbff70", + "5948bcb6-83a8-45b1-a525-1383fa869645", + "99edf642-d51c-41ce-bfe7-747c8600bc56", + "f7fb9c6f-8ed2-45df-9e72-c131c81153e1", + "196ed728-fb63-4619-9a0a-da6693575a8a", + "33a6c960-66a0-4400-afee-53850298d75d", + "7f28e549-0199-4579-994e-3a76810cd693", + "575e9543-e0f5-43a9-b957-56fe9addf0a9", + "48d51abb-b7fa-48e0-8bbc-3b721b05d952", + "bea1ee50-5c2c-495e-a6a2-0b13b949f0e3", + "9f2c9030-a85d-4501-826d-65c6b3c689b5", + "79ee7f1d-645b-466b-8440-01e679986b39", + "ace8a49c-346f-46b0-aba8-eb7739f8157d", + "37688904-bd5e-476d-aaac-1ea40966d7a0", + "e3422bd4-9a9c-4ef3-88c7-6bb67c928aa9", + "91ce4875-de94-4ed9-b7de-a4c3088a88ee", + "3808ed86-1e20-4bf0-8c6d-235e74b84a2c", + "1294ee89-bdc6-46cf-ab61-21b11f584e27", + "fb05d79f-ee1a-4fb8-8d05-a0b888746aad", + "99db0b9e-6585-4dac-9e2c-b5bf1de0fa53", + "e403cbba-108e-40c2-b6f8-d9c9933734e0", + "427d1948-b387-40dd-84f1-e10c72c4c718", + "edb7dc06-1048-4671-b149-771d0aba9a34", + "7a5982fa-b41a-4fc7-b7a9-aaa12da926d8", + "12f18f33-3bab-44de-bba6-3246ab951f8c", + "21ccb7d7-d15a-4f86-b0f6-68df038f9c10", + "4b82b430-25eb-4996-8e84-4aec08a80eda", + "1b7c200c-2162-4bfb-a073-f0aace5e5be1", + "350d3102-ec98-4449-99f6-d87f98d3b215", + "d0153dd8-3d28-4701-a620-b726b160da12", + "392ba522-d5ea-41a4-b1e5-932521b168c4", + 
"9e471216-63ac-46ad-9627-e89856e4c157", + "2bcc7195-8a2e-42c6-b306-fbec7e22f0f8", + "7da457c7-8813-4edd-bb7d-3ffd8898ca16", + "38a7e66e-940c-4a50-9e5e-c0e7a40a961c", + "a8196018-2243-485a-9217-7995308862ef", + "716c0bb9-0fb6-4a81-9978-eb742cd14341", + "2d5d8168-7d49-44e7-b6da-9593bcefa150", + "335f5c2c-28d9-4a96-9998-50dae1e80ce1", + "b5821155-220c-4ab5-9c77-1db2cf2c8e0e", + "01e7dbe6-0b65-4e3b-8bbf-5ea41795794d", + "342abd5c-581c-47fa-bc60-e829d84033ab", + "6ca7ecd4-f5cf-4cbc-abed-e9f62c650a8c", + "dd08645b-1133-45b3-9356-8d6fa2d130df", + "f112776c-164e-4bb3-af34-45ccb5dd938a", + "c785a3db-d694-4f52-9616-0cbbface9ed9", + "e73575c4-2a3f-42ae-b98e-87800d3c852c", + "af29ff56-5745-4beb-99ed-a4491316e75e", + "0fd8bd5a-c235-499a-95b1-fe6d534c5cdd", + "481895f8-47fa-44fb-b6ce-53129c718668", + "44c18205-9636-4769-bf1c-46edfe935b8d", + "d67eef2a-67bd-4096-a94e-a8a9dbecde8a", + "357670ea-8fff-4b1a-8e16-83673fce5137", + "4743b136-bee9-43c3-bdf0-f7ed7618c67d", + "2c382868-6b7e-45c8-bb40-be7e9307fdcb", + "64b3f867-8956-4f02-adef-3328c51d805f", + "58cf9095-3b19-45eb-9da4-978eb3b23b82", + "63b408f9-5697-44de-9362-eba6a2fc5a5a", + "48268524-9180-4984-98db-05b6ee8df956", + "f69e1d0c-a433-4f2f-b854-c7a1c3732c95", + "c2bee2f6-fdb2-4e80-bcfb-74c62daa694a", + "81e8fdea-ef04-47cf-9a29-7ee5b5874b3c", + "b730e2eb-ab66-4fa8-ab5a-ff65b724188d", + "c971d3da-7556-4067-889d-65e43d39dfe3", + "75697112-780a-40f2-ad6d-8f0d182edfdc", + "af9bb68c-7ede-45f5-8d43-6463f1c47a2e", + "cbd3de4b-fb41-4688-b7fe-c744e1097060", + "b39173c7-6bbd-482b-97c7-e57e1ac48081", + "e5133d5e-6789-4cf7-bd64-1c2ee15b4ed2", + "cbec2932-30e5-45eb-b383-ee2f52fe38d3", + "2bacfb72-549e-4627-8067-c461ceb997a7", + "1eafb9a4-dcba-4b5c-8b9c-2f8110744392", + "cbed0eab-63e0-48eb-a21d-6e64a12f2147", + "2df3b4ea-abbd-4e22-9155-1d1be3125ac0", + "45dcec1b-1912-46b8-a92f-c344808ba3de", + "2e54dd0c-2552-4899-bfdb-02ae2e5c3d92", + "e9c51e2e-df89-4f38-8e57-be80e2cdd6ca", + "339e3b1c-3fd7-4c8f-b794-3c869bc1655e", + 
"cd147691-6acc-4fc2-9618-6066fd716e53", + "bd6beeae-5d5e-4a23-8208-0057129df0ec", + "27d8f5c3-290c-4d1d-91b7-e46b7e08626f", + "09250392-d7cf-4dff-8375-c49d64e97bef", + "385dc11b-14d6-4ef1-8159-fc28a1b0ee69", + "10c0d652-870c-41b0-add4-bad8c53ee448", + "406b2f93-fdc7-421d-b1a6-ded3124ec470", + "509947a7-0664-4952-a6f0-38692cf80337", + "783e5553-f999-4e78-b70b-b71e52cab666", + "4ff85541-62a7-4fc5-9d51-ee5b292828ad", + "48988e5f-bca7-41e6-8a90-7a953586d46e", + "62bcef94-66f3-44c1-a94e-7f2921effca6", + "c9fbf407-b257-4854-a464-5f1880b2c12f", + "594b16cc-c392-4441-95a9-14b2615920a0", + "feb743f3-ba47-4bfb-9c05-ea9e71308120", + "79c3e93d-5443-4980-b9da-c5ec4927c45f", + "36fd38f7-7574-40a2-8b4d-8770bed3d57b", + "8b9aba3e-11b1-4c39-b8d4-e08d1630ce94", + "e15776a9-9d72-4fb5-a94a-a3f696392215", + "f278c194-9151-41fc-b585-5d315a07f68a", + "32a5a639-4ff6-47a2-85be-c2a8c0d62c08", + "8470ce3f-b499-44f7-b08e-f69913614953", + "730377c7-8330-4a1f-9dd6-5797fa50e82d", + "70fc0ef8-6987-4f00-bd93-42f4fc61d715", + "820b8262-fd88-4e6e-8b60-4e8fe88864ed", + "1b8c7af7-5db8-4965-ac8e-a5f0c05dec19", + "088900be-f240-40ef-ae46-b89ae284821e", + "72b48951-f339-48f9-acd5-bc6cb9f6f657", + "cfd322bf-80a2-4449-b189-cf5cf31e236c", + "4d6795a3-3e33-46f2-bcf7-ed728731ab08", + "6fb84e0e-4082-4d11-b5b5-2bd764219e24", + "649f1db9-7a04-4c14-8ddf-4d28a81d48b6", + "63e674dd-4827-4fd7-82a4-a667747d5759", + "536aa473-383a-445a-8af5-87ea0b7548eb", + "222d053a-42db-476b-a13f-798da6aaa433", + "4e1fbcd7-3e5d-4f6f-852a-922658fcac9f", + "247be671-f34b-4847-984e-8853d37ef6bf", + "b50d9085-c080-4ebe-bb24-809e8a6148ef", + "933daac4-52ec-467a-9628-cf0db5f7254d", + "9b95fc0c-e6cc-441c-809f-d74bdd55deed", + "f876cad2-a9e9-4af0-9303-4da5cdf36158", + "bcf0ea4b-455b-4bc5-ab05-b48e541c7585", + "1bb6c02d-8f67-4447-917b-02560a48665d", + "559dcc13-3ebf-4ce4-934d-0ccd1d7d6753", + "b2bbfbd8-e207-4647-b1ad-6c9db5b4d7e5", + "b5a36dcb-ca89-4039-a3f8-0e4d080d81e2", + "f469e76d-daf9-4799-9465-85effc4ab140", + 
"ebffd629-026d-4787-b3bc-e5cd199a6f72", + "ec6d6e79-1e2f-49d1-a430-5d6dc7039a52", + "f3ef836a-4d74-4c9b-9014-28326ce35eb9", + "be2d33b1-e43d-4f06-aef8-81189b4cb976", + "504c4b47-5603-48ae-84ef-57d7eb714514", + "ba205a27-34ca-42c9-93c5-29fcd2b5c91c", + "2677ab66-661f-4a9d-b9a7-374aa8d3b77b", + "c49d3683-5ac0-47e4-be20-7e5004c07fe6", + "6cbe6e00-6d12-48d5-b38f-f4ec3dec50ce", + "9168913d-8fa9-4456-a3e4-6b861bc5ab3a", + "602b9fda-a10b-432a-b12d-4b2766080a37", + "620ceb85-a803-4efc-b6d4-3cc560f5dc52", + "8354cbed-e501-4455-a5b1-1fb24b41ca86", + "2db0a8e8-6362-42e6-a642-1814b30ddca5", + "9b4b2ba6-1ace-4e30-b7e1-35607d2e380f", + "0a672d17-cdff-4737-8016-aecc92f88a96", + "79a9b49f-9ad0-4181-8150-8be262764b3e", + "6edd7a2b-dc35-4cb1-983f-ba110ca5ae7a", + "671a4852-2c14-403f-86b9-d7560188df3c", + "6e4ecb72-6443-4641-9b9d-a96d134484ab", + "06f8c051-7e04-43e5-bd9d-90481fa911f8", + "c4b923c3-a547-448d-88f9-e00c8732f4b9", + "e9ba9e4d-ebe2-4a82-ae94-58173493736d", + "e1056351-ca6b-466f-b5b6-3d076defc8b1", + "21b51ef6-50ff-4939-b131-d4ec28dd5cf6", + "871067c1-d9eb-497f-ad09-6669a08753e0", + "dd70f452-1cda-4671-95e0-98bbc07d7a49", + "e4e2d583-e46f-47f5-8f94-2b0bda36e4b1", + "8151dd1c-2ea9-491c-8e71-a5ba8af238f2", + "fcd033f2-8c0a-4fb5-85ff-9ec66c0bd1dc", + "23ed1cde-d370-440e-b6df-4e95b3382f0f", + "438e80f2-5654-4c1a-838e-6ce87b73a20f", + "c4e4d338-0295-4321-840c-0a70ce40baaa", + "889f965a-d6f6-476f-8047-6b3ff6be4a72", + "85a77d02-71db-4442-aa81-53b49e5cf163", + "f07af006-3bad-40c5-9c6b-b91ccce8afdb", + "d3f550bf-d807-458a-aa2f-49371fa00855", + "f0f4aa0b-84ae-463a-8f8b-537bc9dfd1c3", + "6079dd68-29fa-445d-b705-a7980ecdafb3", + "717a5981-f38e-4e17-a54c-4f98059f6393", + "89562759-68f1-43f8-9264-07a7caf4e380", + "9ebab63b-fab3-4eb1-84b3-8a4d28cb672a", + "e5193ad4-a9ae-4165-9a61-eb7834657b75", + "15892c73-b487-40ea-b9ea-19fee5ef779d", + "edafbc40-bf9b-4913-8c0f-bd831444db54", + "178e8600-f60c-496d-b8d9-1823d8304dba", + "dfe6b134-21e0-4d6f-811c-a44ae8db2ac7", + 
"035d53c4-34c0-4bd4-b0a3-163d931b04ee", + "49d0ded3-458d-44cf-a987-f70f4cb1ce8a", + "93912799-0f27-44e1-b776-1a9f8b620cd3", + "c397d83a-001d-4e48-a819-5381b6ccb14a", + "3860d69c-1e75-48e5-8a25-578ebce75fb9", + "32810697-3709-4c36-bf83-f9f4e566f7d0", + "d0469822-8f98-413f-ba2d-4b7b1a34db01", + "29d1c619-d2b2-4192-8573-59b6d9a7c8c7", + "f0e5d4eb-c09f-4661-9344-4cc7d49ed61b", + "18b30a34-6dfb-49bd-88dc-74d8446f0798", + "0dad46f4-5f59-4c17-b656-146dce2de372", + "4bdc9b6b-a833-4c42-8075-3e9a3bc43e0e", + "0e563220-0a76-4146-b87b-5c2638e46035", + "ed14cd2b-a6d0-4865-9ccd-95db621393f5", + "7764528e-8f1d-4883-ba86-59d190db8cf8", + "1ba6b56a-faec-40ef-945e-9504f9b97665", + "0be84f98-a2f5-44d7-a189-56c8ad80a253", + "a9494796-0bf6-4a94-9a77-f6a7a45b3bf4", + "15c16736-9cf5-4f85-a645-f064284de10a", + "c177f828-6a2a-4471-abff-09ce2e78c8d4", + "f93b422f-3fbc-43c7-ace9-75e3785c3f86", + "68a1e945-a160-4934-b41c-712c419e42b7", + "f1b5f4db-b42a-40a1-9370-24dd21bf16b4", + "0731b709-630c-400a-812b-fc091e069540", + "50c0eeae-b912-4283-85ab-9d718596df86", + "66d9054e-4c27-4426-8fb9-b3e1a39ce10e", + "01ac48d7-86fe-4816-9264-b05d99dbed46", + "ce0e320b-d1b4-46fe-8553-0399f081d96e", + "4ba52a51-7c01-4f99-b0b8-25de14d72b94", + "57d188c6-f554-4569-b065-9fa5523bc4c4", + "54945a01-34b9-4946-9dd6-49f9f7cc84b1", + "9e8153c9-d9e3-4a93-ae77-a86341aea1c8", + "39127e12-c181-4c0d-ace5-12d66350d2ef", + "7c496701-6b0f-4344-8408-3faa26fcd863", + "4bf73b26-a68b-4336-825c-1df9de14af1b", + "be9dfbef-4f61-460d-b09a-b2cae6cb50d9", + "e5fc8fa3-0fb4-4a45-861c-92a918b39e86", + "284d594f-3f46-4e82-975c-c0e6680920b3", + "91b725b6-dab3-4a6d-9082-35b5057c8e5a", + "3f1304c4-5de8-4d4e-8449-54788fb6fe39", + "1e15d45b-4311-42cf-9257-1af88100ca2b", + "e1237c10-0780-46a1-8276-911060dcc99f", + "1bd36063-14f9-42c2-a6c4-3832a66f8d5e", + "d37e0f52-6cd4-4b06-b831-387e249eb8af", + "60b466b8-ffe3-4e13-8fd7-2a540dc346ff", + "bda5a1b6-2f14-40ae-b2cf-05f62ccc4569", + "ea3369ea-8c9a-46bb-89f3-251971a0d115", + 
"8d2a1cc9-bb98-411d-af9a-ffc9321f9753", + "604624b2-511c-46df-8487-a2d1d577b292", + "e3125d48-3472-43b9-80ea-af740923933f", + "2b1da79d-098a-432c-8088-e743a8c8a2f8", + "58820367-aab5-41ba-841c-57f1553069de", + "c56f3e6c-7795-4dda-b592-9580bee3d56c", + "2410e041-7b01-47c5-a90f-965fd8c0fc4c", + "d3e4e94b-8c2c-4bad-923e-f69ec02a5f60", + "3e052b9e-c32a-4225-8a5c-2fbbe0c0ba2c", + "64833a3f-b75b-4772-90d3-a49641f45fa6", + "04b9599e-9db0-4c32-ba6c-bd327017b5ac", + "ee2353db-f1cd-4361-9e52-dac78a0702ee", + "74994ddc-ef0d-40a0-ba3a-8ea6b1b8202a", + "97bf20ad-8bd0-4ef1-9d06-d9c12979a710", + "ba278f57-0098-4fa3-a832-383228b0f095", + "f88a49d2-62a8-4009-8818-859e12a5b1c9", + "0b960df2-5f26-4a81-b33f-6cc297ed1dee", + "d839a3ba-154e-425b-b610-f27ed84f2f7f", + "48b59fc6-5cae-40e1-ab94-d4e96f0911ad", + "91d873ed-8745-45bd-977b-f1b4709f0821", + "fafddb7c-08fb-4489-99d3-3e12d21b9f7a", + "7e5de6fb-7ac9-42e6-8e57-d5ac6489791b", + "e940bf03-f447-449a-bc17-df121191e677", + "9d07c7e5-2d84-49aa-b7a8-07ebfc9bca66", + "470b1c78-82f7-4f82-9656-82cf855ec077", + "b7e54e47-79ce-4d83-b35f-46d3f9d57f78", + "2bc0f616-7026-4350-9e83-b8aaba19a61c", + "aec78894-8277-475c-9ab5-70558a4a4475", + "a07814d3-ff4d-4067-80b8-a85975d6ea48", + "d5259724-1db7-4aba-b7ce-faae27b65697", + "55124ec3-290a-4c47-a90e-100c2efb036f", + "76117480-f203-4afb-9b68-71cca5af9fad", + "192cfd6d-9a61-4cfe-8328-4130adcc8367", + "401c1737-0576-4459-98fe-0e637a29a672", + "964dfa2a-33fb-48be-85e7-33aabdce2697", + "2225d509-af34-4e0e-87d5-a36e272aebca", + "9d66238e-b2b7-41d0-8d52-24c9d0d28319", + "54374b1b-7e01-41d8-b8c4-1ad151f4b271", + "3a63545d-9d49-4c83-9133-7eee6b1d0518", + "e576063c-0eb6-4d7a-8220-3a031a146ea2", + "f898da86-414a-4a35-9c71-46b7d8f2d74e", + "5d29e83b-bd9c-4dbc-aa3f-ecca78922cd4", + "cffc4ea3-8316-4ce4-b9c1-c60ea41f5fba", + "473da9b8-68bb-4aba-9855-ac4eafb6c5cb", + "d0e5d3a2-e5c5-4105-8f90-92a3b5b858ca", + "c350a9dc-9488-4895-87c1-8d0de3a08b7f", + "c581f6ae-e56d-44c2-bbe0-5660aa21878f", + 
"b0f30a82-d6a8-455b-8620-bc5b9aeadabc", + "46ebbb4d-3228-4511-ad33-4cd6c26f872b", + "ba8549f6-6566-421f-b237-385546a18375", + "391b86b5-e12e-410d-bf62-a18aab92e28a", + "ac1fc559-4905-458d-9724-06ef09c16fd0", + "7e3e07c5-7bd7-416a-bb6f-d31085df312e", + "3ff04345-c35c-48b9-91cb-a18cb8f5a391", + "b232234a-ea08-4327-acb3-ad60f976d00f", + "f94b1c47-fb76-4ff6-a988-05cd16b4912e", + "c2664b42-43ae-4902-8a12-d122892b276f", + "17779fd6-67db-43ff-bbb4-e2e7975ea337", + "b12d7184-d369-4176-97f5-6f493a6a7aa2", + "3aa7e3f4-5bed-4826-aecd-dd7491f6bc1a", + "13122648-676a-4318-8ca4-eeb249ef7bda", + "750c645a-2d05-4148-a91a-1448b630a0f1", + "d1b43277-82cf-46c6-a870-2bc7582de00b", + "1c3e4bf3-10d9-4e01-b006-477ced50bd58", + "9227c7b4-8e18-4849-976f-5368f5460ddb", + "85c675dd-e946-4c3c-b7c2-ba79e9880f12", + "d68baf8a-8fb0-47a4-bdd0-db21f71e6d90", + "515ee204-4d88-4eb2-883b-82092ecae780", + "553a4608-47ad-4e8c-bad4-0fc402329cd3", + "0317fa99-6433-4737-adad-980001af0611", + "18b6ca04-b240-4871-b566-d59b48353b09", + "9032445c-1547-4f1f-9588-8044e3d8f112", + "a569d940-8f64-450c-9561-767432ee5a14", + "304a4514-be15-4977-9479-68a35c8381d5", + "c58523f5-fd6e-4352-9bec-d9a240a694fe", + "fa25456d-f9e7-41f7-9fb0-c9b926864482", + "2d2aff9f-20b1-43b5-adc9-049cee3c0322", + "039d83c7-cf46-4b2c-a77a-c064ad25aa55", + "b4f7228b-872c-4afa-a4c6-ee35a0db2027", + "0c8634cc-0fdd-4f23-91d2-f5b5c3321f87", + "937b64d2-ce0c-411a-8fef-16d4842cef91", + "672519b4-ff29-4dad-b25d-d9ca0d3c6fe2", + "c06ba75a-e9f8-4742-8713-f48c958e0b52", + "4b6ed23a-fb8b-4f5b-a4a1-6bc49878869d", + "10525dcd-dcd0-4d9a-ba37-5059c647a71d", + "b8f071e6-96be-4239-8ed5-94c8335551da", + "7507ff85-2afe-4d45-8d9f-ecc17fc7b5f8", + "dedf7940-9676-48a1-89e4-329e6e90f97b", + "7b25d112-3360-4187-a116-6d475e7221f4", + "10d606fd-8032-4816-81ae-ace18615bbd8", + "22da7177-39e7-4fde-b1c6-c9907040051a", + "e1d6a201-d484-48f2-9f26-98b642467740", + "4786c321-9a6b-4328-b8d0-812574843530", + "875a8fa1-33e3-40f6-86c3-4df9fd1046d2", + 
"3af4ccfe-d7ee-4fcb-a57d-28b66524ae99", + "dedf92d7-f491-4904-8386-9158af658b4f", + "fd66d6bc-0579-4dac-8b77-52cae1ae110f", + "25a8b977-dac1-463e-9baf-aea2b96839ee", + "990fa427-1f72-4ce4-89c9-96887e08acf7", + "631f6713-ddcd-421f-bce6-45c0f1ee51a9", + "760d3ffe-9735-43c2-9faf-8889d9395f42", + "6dd64db3-93c4-4c81-89dd-0c855203eaf8", + "6cddb8aa-f60c-43a8-85af-67d75412f3aa", + "21891b49-734e-4357-8af6-79b016326a88", + "eccb778e-d241-47c6-9e66-66a19cc29daf", + "149d437b-3a1d-4a63-9ca6-060942b8c3e9", + "daa667a4-26a2-4d12-b208-400878bbcec6", + "f6df48db-2e7d-4337-8270-9b83dd23b249", + "8c561050-d7c9-4cd4-8b05-924e50062ba0", + "01c5a19b-479b-4e3b-bd83-0d66bc4dfef6", + "04481036-e5db-4fff-ab0f-a1d8732f8990", + "148f1ff8-e17b-4429-a4c9-7b5f9e32f9f4", + "6be869e7-0106-41d5-a66a-56b8b3521693", + "562f0c80-bea1-41e8-9b8c-0819ef79863d", + "ece29f5a-d992-4e0a-b5fd-2685604c7dcc", + "5347378f-a0a4-4099-9672-ebe9e4da7786", + "e3712aa6-2fe5-40c7-82bc-91479bc67eac", + "f5e4cd7c-0aa2-4964-81d8-e28d5d50408a", + "1a73d867-8e2f-4830-aebd-5abf89ed4bbb", + "0f5d6635-e5b6-478e-b97f-28b91becc149", + "9ded8e51-af83-48ba-8bf8-35a2faa44a52", + "34493178-8b98-4a7f-a39f-c35e91434a93", + "11883c2f-4a89-4495-891b-4fd6aa22288a", + "1fe4c308-a59c-409f-8998-42d4bf7bf99d", + "94957004-fabd-4ab8-9baf-5182c2b61f3c", + "02d040ee-897b-409b-b044-d3a59ed20ae6", + "30c348de-b19e-4103-8e29-b2fef39e4278", + "e6b4c33b-c13a-4add-a406-30aa6db46d72", + "13271f45-682f-4a21-8fb0-8493ba41db09", + "39c5f0da-8761-4958-97f5-21b1e06d39c3", + "0a15ef71-2236-483b-b1e2-fb62f28df89d", + "da363ecf-4276-4673-928b-bb2ee5ed7d03", + "dd318d52-1005-4fe8-8d64-f1c5ee28ee63", + "5020091a-7aed-41da-80d4-fc378c027c4b", + "839976b1-ffbe-4627-9d3e-4abab2be9696", + "f7ddeb7e-fec0-440c-b388-b7034712d46d", + "a523aee4-016a-43b6-a0eb-5dff89d83721", + "3318460b-02b2-409f-a45c-57cb1d871fb7", + "267c9538-b4ab-46eb-80b8-b64d9600f84c", + "9e3b1ab4-7c99-4b02-8318-b5c12a2142b0", + "38f7e24d-890c-41a4-a8f2-1f11307f58a8", + 
"2561990b-1622-44eb-a08b-1c7c18cb7c9a", + "8c874871-de88-484a-ab73-23a1ef1004b8", + "4172b65a-8b4f-4175-94e4-00187e2d5f33", + "81c5a076-dc87-4a7a-8610-4daa7cea7264", + "3cc18252-4854-4059-82ce-36141afc0c9a", + "5633a578-b5d1-42fe-8216-f1325385e713", + "228adabc-9c86-46ab-81b2-abe81ffa1e68", + "867f7e51-78d7-4766-8ddf-3c69f934484b", + "483e3ac5-a6af-4640-a77d-77e053a31a8c", + "1b672cc3-3d64-4cff-a4fd-79cae602fd01", + "dcb88365-4ae5-40a6-aa42-f6d7f26da26b", + "4f123ad5-f689-43a1-9920-28b5211dedf1", + "e11464d2-a10a-438c-ba33-a5e04cd05d76", + "fd214d4c-4c91-4f7a-b5b7-71c34180fff8", + "7cb7723f-4fab-4abf-ae43-8b538f1144b8", + "544de055-6868-455d-89ec-bb3cc1f7b184", + "75e1b3f4-ce4c-4144-9b2c-55ab407ddfd5", + "8fa97e76-7a2a-4919-b81a-c7074522a93e", + "2cca742c-8dff-42e6-98e6-2f884017c5ef", + "a4454951-e0ae-410c-b3b0-ca891d76f38d", + "6c14f973-4411-4d00-a42a-ce1343068298", + "406fa860-509f-4045-b107-e58c76816755", + "43eadd9f-c3c7-42f4-a383-74018ff5cc71", + "3f7e93a3-f55b-4ed6-8e65-e820b5ce4b0f", + "2dc42ae1-14d8-4c76-b38f-fe0a8bda5de7", + "439e22a5-d938-4b10-95ba-c8bb18cafdf1", + "d4a4031c-4ffe-4fd7-86be-f835e5b0bd88", + "f0c3ad1c-18f3-4006-beb6-50318df59521", + "ad9ff6ce-0531-46c3-9ac9-320959629b25", + "a40d5b0a-df4d-4304-8b92-59967f57cdcd", + "30490c33-b57c-4aa4-99c4-75a9db87eff1", + "e42386ac-08b1-44d7-9b97-556f7f838b1b", + "cd019e79-5138-453a-9e1a-391182e47d60", + "1355cb5d-bf92-49cc-afdf-38dd5f119696", + "8f88ce26-3669-4633-9c15-c470b3861415", + "d655d2a3-cf02-47e4-a2ec-4226df5d55b6", + "327ac342-dda1-4d8a-81a9-07b8a7514bf7", + "be0e1d08-cad1-4153-b99b-a68bacffc048", + "dd1b0bf6-5d8f-4e85-b886-3e294c0848e4", + "5417412f-ccbb-4112-a858-8815b92e9278", + "01e9ed68-b2d2-4619-a9e9-1b8883390c5e", + "0d50a519-5ee1-4c06-97fa-267ac23cfc2f", + "c07f2b0d-b945-45af-8cdb-fa4876bd492c", + "ed67de5e-b241-457c-87fe-7665d0182ee2", + "19e5ea53-3352-4f5d-b78c-d6e97d53098b", + "605bbef2-38ab-47b7-b6b7-ecd573150a40", + "c3218f60-9b63-4482-ad59-c05e988aae4a", + 
"631c021a-e93f-4d36-927a-49e2ac1f8bbe", + "3ff9b270-9aa2-441b-8a21-4b25ac2e0608", + "34f1f818-561d-4f5e-b6bf-a236b01f5761", + "235e3ab2-8656-4426-b8aa-0b7ef71af19b", + "88cf8d44-6e53-4b17-86bc-4eb799f5ae9c", + "1a6d5e62-b435-4d89-9c72-e56eb13caf09", + "410863c5-98ac-47cf-8aad-c80a42173078", + "a7fd355e-be03-476d-b2a8-7ee45aef81e8", + "a067c957-7dee-440f-8561-e29bc6a74223", + "3182d762-8dda-4a6b-bc2b-648761392637", + "3868d965-f9b9-4594-9bc7-eb5fe785723b", + "18f29782-6f27-4936-967f-e428c4d57819", + "0121e062-fbc3-4319-9f71-e3d4cb21cbbb", + "832ad1ca-c01d-4b98-8976-f3d51707dfcc", + "b6753187-002f-4e3e-b579-f0e1cc51922b", + "d8c4605b-3876-4b26-9bf3-1ba846fcb33a", + "06aa44ed-b1d4-4204-9866-dfa5e6376c13", + "50d36b7d-e03e-4aa7-a855-b420a029086f", + "e3ac44a9-0bf0-4cd7-a22d-a31c3881c6a1", + "d77f5d43-cc62-4d05-aaf1-829d4d16bea2", + "35385643-b948-454b-8aa0-3bc255f25ca7", + "acb75940-7603-4011-9591-987dcdc686f1", + "d4730797-f51e-4975-a9ce-e50e55242523", + "1783cef2-7ad0-4d69-bb2c-ac5f74e8b081", + "c9c8280d-295b-434b-82f0-e67dd7cb0488", + "10f564c9-bbd8-4e80-bd84-381ae2a52438", + "df4eaf8c-a49c-49cb-afe2-93a981c8a1fd", + "45061810-4868-4727-a9ec-6a939d8a1d37", + "0cb1dcf4-29db-4214-ad98-5bd6b7f0b756", + "2ee8803a-f629-4f9c-90b6-759854f92d6f", + "b59bea62-c5f3-478a-b2eb-92257729244b", + "64ab26ee-d333-4fb5-8c08-44f375646ae7", + "c1f94676-880b-4180-958f-02d0de31b5bb", + "1571d658-4f65-4295-9201-6fe7eec11213", + "822f81e9-4726-41fd-bd99-341f76398ac3", + "412b01d5-7593-4d45-a470-a679a6e7c99b", + "99d2404c-7881-4768-925c-987f031d8ab3", + "f8a88baf-3e7b-4096-a63e-741cc9b6b956", + "1b565c9b-a4ef-428b-afa4-58dc5226fbd2", + "59b27b2c-4c6d-483a-b11f-e8dce12f4dee", + "5e3293c3-e2e3-44cd-88e1-3fb41571add1", + "ef82370e-2d17-48ea-863e-89df89f25f51", + "4490bc92-a6d6-4391-9528-1ef18bfe9454", + "eb4b5577-e57f-4531-8f43-7bbff8a6d434", + "cfce1c16-6752-4b25-8ee9-154b14cb7e10", + "a1d50a61-ae52-46fd-ba01-4553e9cb8894", + "e23556ba-3b29-492f-914a-6fc5ea73cda8", + 
"d0498bec-5c92-4201-a340-7c2403832ba7", + "870070e2-ac89-4cfa-9246-cf53e218f0a3", + "a00c066c-938f-4119-ab93-53db46986942", + "e40d8d60-cdc4-4016-b58e-9b5bc414a92e", + "5d1e8c50-9691-4c6b-8556-3226f5527d8d", + "c6f43959-c6d9-4d99-9f34-2fea82ff71ba", + "aa584c59-44eb-42b4-9b3b-776193b3a4d7", + "f88a2a2a-db26-4c40-926f-0e38928d30fe", + "cb4c5168-d3d3-4505-a0a2-a94a902e6c59", + "8e7d8407-ce15-4203-bb89-ce6f8a3eb265", + "c83d9714-3d3e-4200-a351-7ab7cec8e340", + "64aa9c56-5fbc-4500-9f3a-d237841c17a2", + "e0e399cc-a9a1-4065-8534-fa8806769de5", + "c8613925-a42b-472b-9cac-777bf73900f3", + "ec2dd752-df34-4eca-bcc6-304cd9b69815", + "ac6f148f-fffb-40b3-bd9c-9f92e8e2175c", + "6d8bc377-aa7b-485e-8e4a-054a536f5ef4", + "6919854c-6cf9-45fe-9ef1-8ce106eaa826", + "25c521a6-b8f1-4c62-88dc-b84704a2d36b", + "3091a682-df2f-4511-9e3d-693a18bdf76b", + "694978b6-01b0-41ec-a500-47880fc29ebd", + "6faadbe2-4476-4e8c-9bd0-427050dc4cd1", + "7b85ee34-f8be-41a8-9db7-f4efe4bdc2a0", + "264de26f-a6a8-417f-8c7d-2fa4cc857b52", + "a9500e65-d19d-4f0a-a2a5-7bbb7bb7d77b", + "08076197-f623-452a-b023-7a140f613f34", + "030ad351-e335-4d09-bd3a-c988f9c002ec", + "6df9fb9a-4ba6-4224-95a4-bf69c2b5bbd3", + "94a0892c-f550-440c-b2f7-fde2e331d453", + "664303c2-1395-4a7f-af74-95f81fdabe85", + "43336998-53ba-4472-88a0-5e93cfe19928", + "44125dcb-8642-4f7b-a6fb-43ae4e5b7f0d", + "3d32c92f-603a-4b92-91da-352a5809ac67", + "a3035eec-fd26-4132-b0c6-9c0093c58640", + "fda20c72-bdbb-48f4-b87a-4541342ecbab", + "7ecc06c0-eea2-4796-81e2-16985ad8c75d", + "374f250e-8339-417b-8f05-ff1d4f477e41", + "170f8665-91bf-46cb-a800-0e7fe706f770", + "fbe4d16d-d588-4fc4-9841-d393834394f9", + "ece396e8-5926-447a-8c8a-da9e8b8ec747", + "35f7b629-b7d0-46e4-8807-c30a8f23e017", + "97c19ba8-0522-4855-a602-95425ac387d8", + "898f4511-9a2c-447f-9f23-0aebd03805c3", + "bbc315f0-d67e-4502-b7ee-12c985cb2fd1", + "60c9cb40-8544-4187-b565-9cc97982ee69", + "0c36573c-be2b-4ea6-be9b-07e73e43aaf0", + "954f5288-897b-4477-baa6-9166fb8c6729", + 
"f382e846-a335-4887-82a5-9989aa6b3fab", + "35108758-2337-41e4-a4d3-d38c296e0652", + "06fc1126-3b99-4a12-9484-71cf3e80672f", + "11919040-d4b8-4f07-a153-cabd79ab70ec", + "d7ce4028-9f6c-4aea-bd79-cc52b7431741", + "5b991baa-5893-4ee1-8dfa-abd0ce3a7351", + "47c1c9e7-d6bd-4597-973a-5a360d91f1f3", + "4a276816-12f8-42c2-a6a1-1fe472f3137b", + "a34172c4-ec47-4b4e-b2e3-038bf4fe27aa", + "7a962137-692c-49c0-99f9-bc1457416b2b", + "fc7e8a75-4d25-490e-a53e-1abfd6127576", + "bca92772-0dcf-43b4-9138-9bda08d5eaa0", + "2f80bdd6-8475-4788-94b9-8016bf3ce5f0", + "5bb8268b-ee3f-4838-b5b5-07521168a979", + "f00f3e95-f475-44a9-8e43-d274f86cc771", + "e46327ee-dcc6-4433-9973-c961164f5af9", + "9ff4a351-76ad-4025-9622-cb8aea3053d4", + "d661aa1a-c82e-437f-9140-493902a24780", + "e4899772-4b81-463a-9fdc-63651e56ddbf", + "3c0ee401-c1b8-4801-af6f-aed0308dc3dc", + "9b542e3c-6d6a-482f-afce-49a3262cfe60", + "e01b674a-7c2f-49fc-8f25-e05703bfb90c", + "51f63c58-69e5-424e-8f22-9f8322c33700", + "0a5183cb-8ef3-4455-af83-45848559ca31", + "3eefde2b-79f5-4cf8-8225-b5c57d7f8825", + "d0b9c392-4961-4a70-84b8-87877089b2a5", + "01c6f750-372d-40f4-8e3e-d4bf8acd56b1", + "da871b6d-873e-4328-9b5a-9eabb8ef3426", + "80d8176a-8cc8-47b4-8899-3f67d36953c2", + "92f91282-27f8-4565-bd18-adf5951bcf49", + "27291c50-8755-47b6-b5d5-85976bc94551", + "73c9f545-0f5e-4fa0-9a8d-37c6f33212aa", + "f3a3a234-4871-40b9-8e6e-f64a673bcf01", + "9eff4f22-f1f6-4ba0-8249-231fef8750c0", + "9819e57a-c0b0-4b97-ac20-5d1ffe8dc050", + "1655b8a4-0ba8-40af-9b40-578705876224", + "37529648-be4f-4175-b38f-1a66ee2ddf2b", + "37fa627f-bbea-44ee-a927-ca7451ea4068", + "82bc90d5-a20d-4bd9-9db4-960b7d2508a9", + "3e58ee7b-27a1-4284-af3a-591145e28d21", + "9a773047-140d-44e8-a4e2-76000660caec", + "f041bcd3-c39b-4dde-9662-46090202f08f", + "e83b5aec-679b-4669-b613-ba2c168a3088", + "15521a4e-23d5-443c-b988-e064f69eafe3", + "23caf1e4-15ae-4766-bf4b-2300486a7f74", + "0f121f37-9aee-4a7a-b033-eb97aabcb43c", + "2de2a5d5-ff7b-4a7f-9d0f-321f591ba550", + 
"417af16d-dd68-48cc-aa94-dd66490609fd", + "ba648edb-ee20-4a1a-8f39-59667d75de0f", + "49c603a7-6769-4a22-8d48-fb5fbb473bea", + "d492227b-605d-451c-9a47-e1f23c973513", + "17742e2d-3587-44fe-876d-4eae3ce9be5f", + "994aa948-72af-4fbc-b895-254673a34c1b", + "0086428b-787e-4d2a-be23-4ba0eb5df38a", + "7e4c0b4d-ece0-45dc-9313-a0a5d2cc542a", + "9b2bb018-26ae-49c1-b8d8-b52e3e7d5b2d", + "80701dea-501f-4a40-8a2b-adfa8ac92904", + "05291730-f3ef-4c82-a6e9-88ef066db33f", + "42bf4f57-d7d8-4e01-b82e-50576348d04f", + "33c6bc7e-e81c-4e18-81c6-9b6ee9649e78", + "f36d1441-6d81-4994-a0b1-acc78db16f17", + "f789000a-e7ba-4110-8d07-3c2cc2f5bf90", + "abb39cff-a313-42e1-a447-2886f3660133", + "5db97ef8-ca98-46ff-858f-95ea4d8b38a2", + "424cbfd4-6618-4a48-8638-771f626ed776", + "23e5fbb1-bdb0-4c8c-b7d7-967013241ae3", + "7cedfa27-7240-4e8f-b783-35ff073b86b2", + "d86cffe7-b72c-4d64-9a0c-52a14521fa55", + "9b9931e1-fdb8-41fb-bf90-3535e0d9081a", + "62f497b3-d75c-46f4-8a99-8a9935ab5d15", + "6512a78f-5902-46bd-b3eb-cb8068867fb9", + "06bd4b9c-3cf9-48e2-a58f-9585e6883c5e", + "c652403b-6682-419c-81e9-5a77185c508e", + "19767713-554d-4ab9-9ce1-f52dadc8216d", + "7fde1919-1404-43d8-b086-08638562f237", + "e169ee23-820f-4bf5-b914-4e9386454aab", + "f905fa7e-54e9-40f4-b0cb-52c2c42315d0", + "190be128-88a1-4146-8177-33c4f925e90e", + "84d34d37-6b3d-497f-87b4-3beb35e67dee", + "001903d0-f1d5-4df4-bdb1-5a8272897a3d", + "3761e278-d8bd-4402-abeb-e04e369e9e0e", + "93db7dd7-c80c-49a0-bfd6-cf10ec459452", + "6b2c5290-7c15-4be9-b8db-4938eb94598b", + "aa143e99-6653-4a96-94ce-e8a35c3f6173", + "ae9f9493-b41a-4830-a774-51f5e301e9fb", + "57b161b8-80f0-4aa9-aa06-bb5652ed1f59", + "742417d1-e12b-4e78-a22a-b968913f42b7", + "bad5343a-a27f-4157-8942-0194261e44e5", + "19fc71f7-28b2-4a01-9922-38b661b62f33", + "5978d624-ff3d-44ba-8460-11d0019b5fa9", + "3640bff9-b21a-4973-97da-352b88e3f609", + "6a959c7e-ae56-41ba-ad23-1c6d9ac5bbe6", + "f571edb8-c1f1-402d-8263-a31f78cddfce", + "38c98f20-54e9-41dc-8c48-d36f5a3bdfd4", + 
"f806bbdb-c2e4-4e41-9be9-2eac7866ed39", + "f13d6ce5-577a-4153-874a-40990eda2b06", + "5fae3980-35b1-43de-a7df-30c338e944b0", + "f92dbca4-6c7e-45b6-98f0-915ec02380b3", + "a87cffab-94a0-4306-850a-ec3c68a4b184", + "8c660209-a2a7-4d5f-89bf-a070d7c39974", + "fd0eee0d-0dd0-4e70-b661-f2b63f0a2ce7", + "5b671edf-07a1-41f0-ae7d-9ef55ffa0a6b", + "09e5eb9c-16fc-4374-a028-8a1c04d9845e", + "c0a3ab19-b97a-4231-98a6-072d03c6661a", + "b56d0ec4-1bea-4241-b018-dc7d96e38a51", + "045db697-40a8-4e8d-a4c1-00ec3e04d8c7", + "9dc3e70d-8068-44ed-bb65-51518decf24f", + "5b48682b-ceab-4495-9169-59a870c23e35", + "59fd5767-c95c-41fb-86b5-dd3b58cf7feb", + "d40895d4-1e6c-4d35-a5e7-5839a7a94902", + "c8d1ed87-bb5c-4122-a0bf-09f1838e8f38", + "1ef9cc23-5b6a-4f03-b7dd-9b7d8ceb1721", + "015c3fa8-6831-4bf8-8e27-77d07bc5c03a", + "2bf3454e-d07c-4d03-8798-c028dfdde734", + "b025c03c-2812-4b07-9d8d-c4b0339121a5", + "ba91200b-05fb-49aa-94dc-1bbdcad90a89", + "6563caf8-bd27-4a72-9224-81108124f138", + "c4c7a18f-c87d-4fdd-b1c1-f6b397bb9e33", + "3400e7ba-cada-416a-8616-a45dae3b9602", + "892a94b7-6526-4115-a746-0fea0445a0ed", + "0440f024-1750-4ef8-af02-d538227580e5", + "02618103-578b-46b7-bb2c-5c5c74c6dfbd", + "687ff629-39cc-4d3f-9163-1d722f662b30", + "8171121a-401b-4ad8-bfbf-0e0f89cad9b2", + "33e73a2e-128f-48a9-b24e-9075ba778273", + "4268a796-6e23-4a91-ac04-d577ee861889", + "e2dc50b5-fcac-41a0-b6a7-8246ad3c8843", + "dbda9273-17b6-40c0-b7f0-2f3ccee680f5", + "6c4e7113-6aa2-45af-911a-832d49fedef7", + "eea74d38-5db9-49e0-953b-485eda4375f8", + "a0456cd9-83d5-4d12-a42c-2d7e91c55dff", + "ae0d4324-7d0c-46b7-9a31-19f7f00897fc", + "c97fcdc3-aa66-456f-b059-b5ba056c6e43", + "e0a7a52a-88ba-44e3-a6c3-09c73b59403a", + "f53ea673-a0d4-4036-86c2-828e48247955", + "3551b40b-75ed-4759-b134-b8ee3a56cdba", + "150e5798-56ec-4e8a-ac38-5b66ac451c4d", + "9da174d3-6349-496c-8e63-21c15184c110", + "c8877c90-2ff6-4939-bdd8-95010343f7cf", + "8727031f-13f1-4776-a31c-17a3bf4f0eb1", + "e33bc582-54c0-465e-a9c0-c648480cd97d", + 
"2325b807-6fd3-4f78-a622-0261aac1eace", + "40d7648a-c7da-48f1-8420-cf05b53023ed", + "5d36b747-1d14-4c8a-b877-4f2043adbe0b", + "9b4b9587-b100-4dc0-a3f4-4f0ab11e5542", + "3cad65e9-fa26-4faa-8c1e-e844f0a349d5", + "8aedffa6-c24b-46cc-ac2d-cd56bbc5bdcf", + "fe9f3598-daf7-4a47-923b-48a1137f30fe", + "634a5b66-6465-4153-bfb6-b4e4815cda84", + "c63e850f-31bd-4157-abc4-cb28f540098b", + "510f68a7-bb20-4c64-b8b9-13586eb4e14f", + "b1cc4159-f638-40f3-a73d-9ad1f677231e", + "dcee340f-08da-462d-a413-bf378ca8efae", + "ae13174e-1d0f-4321-8ed8-3377d68d9545", + "a2b80ed3-f951-47be-8581-aac243fdc44f", + "6152dbe1-66c2-4d66-8993-35ded08b05e6", + "eb96a4ac-4d8f-4cf8-8e38-db5bde45c0db", + "bf55214f-d8dc-4c1a-8182-5ed5fbbddc3d", + "67d46f55-3017-48d6-b1b6-4d13928d102c", + "4bf392d1-0b4b-4195-9ba0-0469d130bccd", + "49931ab2-efda-48fa-bfac-3e772c6fa4ce", + "f3bcac47-09dd-4637-a79c-bc7fd7f9c778", + "16a4b07a-7238-4441-814c-eb6e38e038ae", + "9952a516-62eb-436b-8d58-0308e57d2355", + "73a37b38-db1e-4b3a-9d4a-fda5c9aee6f9", + "cf398d33-4c8e-4bd9-80df-b817198f0c73", + "67aa332a-9904-49d3-b870-12cec7b9b878", + "af508123-f929-4803-af17-c4d0f1a1ba0a", + "7e885495-61eb-48e5-8251-1d03ae11fd52", + "8bfa73c3-8451-4de1-8d42-71b97c6265b3", + "5f04f502-28a7-4ee6-9c72-9d5be5f9559e", + "5f3dc0cc-7e5d-45fd-a35b-d154ffcb8920", + "8842fe64-a855-4552-84e1-ea6c1f675bee", + "ebebbe4e-ef5e-4eea-984e-c6584352afb6", + "17c74efc-b27e-4e21-af41-9e06f67532ed", + "a2058916-b26e-4673-94fd-48706f420e88", + "3cc1f8af-4210-49e7-9d71-125245a42bf4", + "f7057b86-28d9-4523-a64f-5fc1b34f8994", + "8c612f0a-9ee8-46b6-bb91-d230d3dbfca1", + "fa3e4a6e-9eda-4738-8278-d7e89cf408f9", + "c1a8c70b-c0cb-4615-b574-f1503afe2a76", + "c8745d3a-106b-4697-a2d8-6cb3ea409bac", + "942901be-2342-4ac9-9a3a-a91ef225bcdf", + "c16ba447-b108-4f07-9cde-3054aabb4c37", + "cf6b0b76-4144-4b64-9ac1-7ada893fb0f7", + "ebc82dfc-9d3b-4de9-a4e5-d8323fd359f6", + "f0b5922a-da99-4f83-940f-7a1d2e4bb238", + "1e945057-c1f6-49d5-bef5-a0a732ccbd1f", + 
"7f64a562-8d8f-474e-b479-93e44c6db381", + "8e3240bd-9568-49c3-b579-4e71afa1ebc0", + "646c347d-f400-4c12-a020-280084bf78f3", + "4469e0b8-f7d3-4e91-b9a8-3c75f01f4f6a", + "274303ce-a344-431a-965b-667ae809635b", + "d9c174b5-5538-4b54-9d06-50e4cb68bd1d", + "5439617d-e4bf-4c61-a5fb-79f5339cac04", + "3467274b-0b3f-43e7-aca4-807892807b19", + "89d6f413-2199-440c-af9f-fb30cafbeaba", + "47751f0a-19d7-4864-8bf9-3c4b2b74984b", + "463f5a74-4146-4983-91c7-16a7e58c8293", + "6681545c-a1da-400a-8454-ee636a560f4a", + "28f02ce5-f4f4-41d0-8431-4e79cb5c21a3", + "b273535f-79c5-4983-9a2a-636709689281", + "6e508f8c-fe95-4d8f-b688-ac2d4cb115fc", + "db5b8070-2b59-4566-b883-a3cbdf5d8db0", + "36378806-66f1-4039-871d-45b0e721a3d1", + "5a63a46d-e8f7-4e65-9c6f-edbfe0e59c6d", + "7559a109-a08b-4269-ae0f-ce15c313e4ca", + "eb757762-cd66-4454-b78b-1b7c3332abd9", + "7693454f-6c6f-42c5-9c22-3b8cff655d0b", + "b9ffd974-ac97-4e28-926a-d9984a28f512", + "bef1a127-a587-4f09-80eb-8d0f6c21311a", + "04d688a8-3307-4091-be68-88e5cd809629", + "95975f43-913b-4816-bc8e-19fec1f95611", + "971e0d5e-58a4-4d41-9fd6-445a4d9712c7", + "5c4126be-7bb6-47a8-8bd8-6085e94fc3cb", + "1da6556c-4a7b-4aca-b65e-27846dcf840e", + "13e9550a-3c0c-4585-831c-f21a417723b9", + "1951ae58-ed02-465b-a9c9-039ed37170bc", + "e3def82f-8937-4b3c-b461-e7f093b0633c", + "d6badf8b-5d3a-47a8-90cc-2ed3bd64ea3d", + "bde54b2f-ad7e-4114-9279-857937535971", + "da1ddad2-2ede-4803-aa94-e6f8757b37b8", + "2b5f6e45-9e55-451f-a3f5-ea1eb264ef07", + "14dceb62-a6a3-4680-b564-30991b3b21f5", + "768d7d09-fea6-47a3-a47b-0f341f8446a0", + "af025608-d26a-4714-b5b3-639d42dc0516", + "b6d4cacf-f63c-4d07-b058-7cecc01859e7", + "4f1e72df-c411-459f-826b-9a2afdc9e374", + "e30a0057-5403-426a-9df0-4f9e009c5c50", + "41bf5b9d-6145-434f-8baf-fe1c9ca48fdd", + "a64a8f48-7a57-492b-a2b9-d49dbfe7c3e9", + "a093ae11-6250-491a-a631-af497f9f4e1b", + "08ae6e6f-ad7d-4ba1-bc2b-61b2b3fb6a39", + "1d5e886d-b3a3-43ef-bb0f-f7d0adb70336", + "f766baae-a769-48e4-9694-d1b4c0c26f4d", + 
"6d8255a1-b6cd-4ecb-a48b-7f51f3a6296c", + "91be2cee-ceb9-4fb4-9707-e825a7c1c1f0", + "2193c709-2e9e-4802-9b05-341cda1ec8a5", + "9c8440ea-be19-464d-9474-ac27f65b0102", + "ba79f412-db15-47ce-8ca2-e5eb8388752e", + "89048f19-449f-4802-ae24-bc85067b8385", + "771515a3-c1f2-4a34-91c3-bf91b155cf6a", + "6ecd782c-1b87-41fa-be19-bcfc15ed9e8a", + "598d52e4-d54d-41d2-93e7-2d66b7e80707", + "d4f1d09a-10c8-46d1-8a5b-c5d3d9fb31eb", + "00aaaeb6-59be-45f4-b22e-81d1e1420dc1", + "5d5eebfc-d71e-4b1d-98aa-ae2ecd07edd9", + "9d798a5a-592e-40d2-be71-fd084651778f", + "fda74860-21f3-4715-a33a-14edaaad08d5", + "42b2a787-b856-43eb-b8cf-3163e71e1e30", + "72c1765f-a3b8-4f64-aae3-cfe843079d62", + "6e220e42-cf4d-46ad-9de0-6d272e7c73dc", + "bda3dc95-f4b8-41a3-9d3d-09cfb3801045", + "cdfa1d78-82f5-4921-bf87-e6e3fd4a6843", + "9746309d-a896-4c53-8efb-ed1a1849a9b1", + "6042a2a2-9775-4ba6-bcdd-32224cdd4bce", + "8dd6b17c-ab70-49a7-bd85-9ba829e4ac90", + "219b6d45-6474-4a96-a88a-c815be657367", + "a800702d-a31b-4290-bffb-29c8dbfd5f57", + "dcdcce95-102a-46fd-854c-c42405b40b9e", + "e828dae1-442a-41fa-a006-8ffda501a3c1", + "650d114c-a5d0-4eab-81de-0711069b6666", + "3be99fcf-2e99-40c8-935d-80ee020fe5a1", + "ac70dee2-6ad5-45fa-a0ab-e3bd8b5e53ca", + "fb83695c-7b5b-4200-b08f-600837107071", + "5e42623e-cc9f-49a8-83de-46fee8063618", + "d76a9d93-439f-467d-808b-bd022eb19373", + "0c8f06af-17d9-4834-ba1f-da381f307c6e", + "e4ea6e68-9690-4931-a3ad-f58822f24366", + "ae32eba2-cf14-44cb-b688-34d497396029", + "d69c3e8b-75ce-4cdf-83bd-eefad5c362ef", + "1398375f-f8bb-4917-a95b-61542354e91e", + "18d02bc0-be62-4156-98ee-de0adf7b16f1", + "8685eccd-dd7e-43ff-b9a9-d0975a805730", + "43886b1f-787f-4b7d-8bd6-a7349dee7b3f", + "25c296e2-fe70-4f73-90ff-7cd24c5cd7e4", + "2f79f291-882d-403c-ab00-ca5b0008d7ea", + "73beacf1-cb91-42d9-9e5f-60b10968b55f", + "75f9f61a-a88c-499b-ac4e-34663a5fdd87", + "79b1b841-06d7-4ba7-a141-c4ee4af060d0", + "99548118-7a86-4bc6-a4af-4fec03d2df81", + "68a96307-17fa-43f6-97c9-391107a3f252", + 
"3520822c-ed96-4029-9662-c3823c3a1d68", + "bc8eb355-5b6f-452e-b732-bf5c7c5fe3e3", + "0ca2c38e-67bb-4faf-a343-094b08c05ed4", + "a49238cb-73c0-4e11-be0a-e4f0b239a55b", + "0fe47970-8edb-4889-bcbf-845ee959a39c", + "89ee3262-11df-4ffa-a5d2-dbbe39c0107f", + "bafb55cd-2825-42ed-ad43-29b0a1aed6c7", + "150e7071-1df6-4507-99cc-4b2d13eb1ca4", + "62b276de-56a1-4d93-83b2-aa6b7e1751d3", + "3ce7c41a-4869-40f9-b56b-c29210cd6ab9", + "f4c824bc-bbcb-4ad0-a9cd-a11656c922ee", + "f483aeba-4016-41e8-adb5-717dff724e68", + "6d649d30-f575-4c00-b9d4-9c08ace1aa6a", + "19c1ec46-0344-453a-9ff2-9c91e4483083", + "ce65036a-244b-45d0-a87c-f97b1aafa32b", + "e1a0e09a-e6ce-4e5e-9f57-c31f87c3ec3d", + "f9e63df9-4d87-439c-be21-e9e4d0d6f45c", + "09758423-34ae-41e7-a9e9-25455266dd79", + "83ed45a0-2cd0-4c41-8c85-7df6d7a0dff2", + "cceeb3ac-3373-4a83-9f3b-d0109d8afcb4", + "8acfc3d1-1e87-4d0d-845b-e5535ee34463", + "1fb49d67-ae8f-44ad-9d24-603114e1e2bb", + "3e5b97fe-63b8-4544-9a1f-7cbc64755cad", + "db488cbc-9685-45fe-b950-43f2abb08ae8", + "04f200b5-7937-416c-806d-e57f732affd1", + "53380a8b-d69f-4ed2-86de-e7387ff6bd6d", + "c66c4f8e-d8fd-49d4-8606-4cdc6f249f60", + "dc4985d9-9a48-4ea8-b507-c2bafed149f6", + "a228f18d-2843-45c7-8289-68bc62a4598c", + "2d3edc5d-66c9-45f6-90ee-a3fcdbafa130", + "d7e9c2a8-0116-459b-9c41-46c0ae80506b", + "96102d62-27c1-4847-bc24-1d9e81094cf1", + "bdf430ee-add4-45f5-a924-689305850297", + "b9cf8cbf-aa8c-42cd-a482-1dba9e1925ad", + "4f85509e-bcef-4ad8-994d-3ab2921ccee3", + "b9c43c11-650e-47a6-be8d-d7d07e1df885", + "4f8d8cf0-d6f5-4a80-86bb-2f08b2e5c63c", + "0dc2995a-3c56-49c9-9741-b3326149c714", + "16e0401d-1c82-422c-8ef5-5cd8ffe31dde", + "f05e973d-e88d-4858-b250-45e184055aa7", + "5146029c-c93d-4665-be97-c97a9c35e0f7", + "443f3faf-b7df-4a09-b850-f7e0e725675b", + "914f1d24-b179-46aa-b0d6-567ea107a850", + "662d3c2c-b730-4173-a967-17937ecd9791", + "ac0a2980-7d0a-419d-bcb3-c85901a9cb9d", + "8786abd7-2a33-4a40-82d4-8ca21c8e8781", + "d8471224-6dc7-4497-9423-aff19a5de232", + 
"25a5eef9-8c63-487b-9e09-61e809e2ca02", + "eaf9a497-4480-4ac8-b84c-bf977b0a7dbe", + "24114521-a7f2-465b-97e8-072551251b8c", + "de86ce47-4e4d-44b7-aae8-b218af26065d", + "f365401f-fe68-45b0-978f-45ead4bc52cf", + "720823af-d397-4fef-85cd-582192d06737", + "4e6e5b07-bbca-44db-bd6c-cff11d4958f8", + "499acd50-e292-4419-b1ed-41be4eab25c6", + "65a938de-6837-4ebb-a358-7e7c93223290", + "953b252d-0345-467e-a343-5ff2ff388029", + "29aa7e00-7ea1-4deb-a449-ed4044ce3e74", + "135ea2df-a1ee-455d-9d8d-d1ab86e29953", + "0e68bea9-fa1f-4d4d-8666-9e4e55759066", + "0bb0d4e3-8078-4cb9-81c5-4d161070a3f8", + "039fb1fd-eefe-451b-9365-4810a0c6e8f3", + "83f35f5f-9e9a-4d5c-bcdc-fa396c5135a9", + "9f1d7fcb-bed6-485a-9bce-ecdb2db896f4", + "92c30981-ae87-44df-b392-ab5d5023d4f3", + "e3c8d574-bec1-4e13-8327-568cb697093a", + "4c8ac4ab-1539-42fb-89f4-91ebc97b8e09", + "aa9100e6-3c93-4fd0-90af-9af455c6b9a8", + "9d3a6b67-54dd-44f7-95c1-2bd170ce6d4c", + "05a25554-d080-467f-b312-64f39d87569c", + "5de8272e-007f-4a3e-abb9-9d10402c4b8a", + "d6f96d6c-8dc1-43cc-b135-6ccd03cfc441", + "bae3274d-dc15-4f29-b79a-f121a93f2b9b", + "a2ce2505-5347-4815-8066-6a2aa4f4074f", + "10c44218-c4a9-4162-8639-f57a36856462", + "1c168d66-adc6-40da-ba02-f351312e4ad7", + "a4f20cf3-9965-4cbf-80fe-1ad9efaf61dd", + "d62e640d-349d-4990-95f4-e47acb33d6f6", + "b79591f7-11f0-47b8-9ee3-1447b16410f0", + "28e34ce1-5c3c-4f04-80ed-98823d19392c", + "1f54e2e2-4410-4e2a-8c96-50428b265382", + "eb44d841-3ddc-441f-851e-d0c1a56921d0", + "5d943391-8d4d-4df8-af95-a9f8a8d2be4d", + "e504c66f-0fc3-42f4-b04a-e0cd7d2411e1", + "a0527a40-30c7-4c14-bc85-76d5da491e30", + "4cbf6ae4-5fc5-4425-b5f4-14f4518bc73b", + "d1823203-9336-4ffa-afa8-12b647330108", + "4ff56cbb-7d84-4689-8d62-1a02288e0655", + "f282ed17-7150-47b5-887c-bf85f2fc1d4d", + "cf815ad5-e04d-4e26-9447-83678bd5c109", + "3fa7e701-6123-42d0-94ad-6acd80ef1eef", + "7ec3cdde-1ac6-479d-9d4e-22a9fc7a822b", + "2ddacd5d-114d-4be2-9dc7-eb8a8f8af3b6", + "534bbee4-84d0-4f25-9c34-28f09f0bb23b", + 
"51c0828c-059f-4de9-bd3b-894476dbd205", + "6155b840-45b7-4724-9276-55f2ebd4f0a7", + "a9e33374-a10e-4871-bd2c-4119aea0c9bd", + "57e6f802-d52d-4617-a266-6f9c1cef81b5", + "34277fff-e238-4abf-94e5-a6bf3b117fb2", + "de4c1b25-68b3-4f32-95b8-3fee92e41b8a", + "a73fbab3-bd6d-4419-925f-3bb9aca70a52", + "a113a4de-0853-44be-901d-6b5e43cd252c", + "33745ffe-2c85-472b-a408-a294ea7414cb", + "69e16c9a-bd59-428f-969b-1db931953294", + "c43c1473-5db2-42fd-a027-9ec06a02a4ae", + "5cbbcabe-bfde-47d5-b4ce-0d8c732c1c10", + "030f7a81-35dc-4901-85d6-a5535fe7e6b6", + "fe859508-372c-4a3a-bf0d-778a709133ed", + "e857fe2d-58f2-4889-8da3-45713b080bbd", + "39c63943-0dcd-47bb-acaa-7ba5389be2fe", + "7365c9de-d067-4510-bc9b-74f574ab53b5", + "85386bb0-0d3d-4a12-a6a1-7d79e67a8258", + "aa43b33f-23f1-4926-af14-02e825e8875f", + "155cc293-6b65-4419-ab7d-16df5f65ab24", + "f6d21924-48fc-49b2-8b4a-911227e16bcf", + "188a9e7e-a257-4b2d-bdb3-fbdda36bc0d5", + "6fa23c9b-2b2e-46da-8616-250c380e16c3", + "da8806ff-eef4-4865-b68b-964312da05a2", + "af7db4a5-2e7d-4b16-a4bf-43d1967fc364", + "bef7db04-6b2d-4302-8b6f-9effc9beb9df", + "91d61deb-691d-476c-a172-db9ef1a14d1e", + "4436be7a-e011-43a1-ac0f-0e170cfbb898", + "4c190a53-ec0a-4d38-8c5d-a56917296378", + "e4830c81-e978-4d35-9f85-686517d29854", + "ea984a11-42b8-4b35-9689-d38048c17ccd", + "1b8e4535-1606-41c3-8c93-6c374041ed3c", + "379f30b7-fba0-4bb7-83e1-75a77d135227", + "11821886-463f-4656-a439-d5c095ffde2c", + "a9582286-17dd-4319-b081-bcf467c33dee", + "b758f9d7-a8ee-4bed-8019-edb49ace9724", + "45916038-f6bc-4600-9e8c-2ab3d4bd7574", + "3945841a-f24d-4a3b-b876-07779dde997b", + "0e1b59ac-61e2-4837-ba78-d378634383ea", + "99754693-7f12-4499-9fcd-e28102b1c480", + "1b0d1f98-b36c-41bf-9641-1288987efb8f", + "e4c47f3a-d483-48af-ac03-05b6c1da5a16", + "8cfa1657-fdb5-4d47-ae60-eeae13952e80", + "a330cdee-4899-4e1d-b791-bb701f91b6f9", + "c00cc210-b0eb-42d9-a221-986c694abfa8", + "59d89525-c285-4239-a018-1732313797ab", + "c7a46692-5017-4f50-876e-b619650b3d74", + 
"fdccde8e-8353-4aea-8115-aa61b8886765", + "6c94cb26-d141-4b45-b124-babc0434e50c", + "e52dfde0-d9eb-4088-960d-8e72efd9ec10", + "179755d4-777b-480a-bc01-625d7de030ff", + "3fa561f1-7ddd-4fa4-abc2-6ecf25bed8cf", + "86294e42-1bb7-4921-9fa6-ef86973085db", + "b7a8d110-2d3f-4366-b893-8b3c0ad47b55", + "9748a03f-ea81-46a8-ae80-8ce6a5556a03", + "033cfe0e-d643-4024-84a4-69699dfcb375", + "6c8a0e71-7d6d-41c6-928b-14d0528fb00f", + "89aa225a-c4bd-4fd9-acc3-1813a2eb6261", + "6e62ce94-8011-4157-b476-1060223143f9", + "4cd39c35-7082-4802-a028-348a5cc1309c", + "79a7c477-29ae-4aab-82aa-fc047fde1503", + "2f575583-8f5e-4198-9dc7-51dfe32920f4", + "69cc174a-9254-43a9-8043-0406a18c8fd0", + "f3eba694-1f4a-482b-b4b2-3519dd5462e9", + "86fc0aa8-6f08-401e-8044-d9e6ae172674", + "1fb331ed-4c54-4da5-9374-d7025397856d", + "6681560d-0f7c-43fd-bf1c-b02dad7b5d39", + "d09e1290-8041-41c9-96ea-182d88c0b9f1", + "6cc67799-1539-4859-a0d6-d41fe019c058", + "1c212034-2598-4d92-a069-3f0f4f8a66a3", + "d6f534b7-0af1-4ab3-a3fb-b208ff24afd8", + "19b47aaf-f5c5-4ceb-9e2b-27d72dcb4458", + "19d89031-2b00-40ee-8ec7-247758829e76", + "9c25d4ba-c9c2-450c-918f-82915c878478", + "78750bc7-75ca-4c4b-b910-eacae0dc57c3", + "2c96376f-01d7-4e81-b0d9-de689e83741e", + "221696d3-7448-4adc-83c1-89e347a79a71", + "887f86ce-0299-4c30-817b-e065509de907", + "1008bcd1-01e4-45e0-ab90-7094084b0abf", + "e8762a2b-5549-437d-b6aa-78e0fd44a5cd", + "20fb05bc-5f16-471d-ba72-f69c99652518", + "23d827f7-fd3a-4540-9803-b499586bb77a", + "0d5193b9-e727-49e4-909c-ccfdd0cc37a4", + "ec59886e-fd60-4886-80f2-e599e0e2df27", + "5c6d81d1-971a-42ba-b351-5ff130b2c8bf", + "23daa4bd-6d3b-405a-8a4f-4b64f90f80f6", + "069d9360-d894-4712-b13d-df7990edcb38", + "43123c9e-730c-4e1f-a843-a66fee78f9a0", + "e1f60541-ee1f-4bb6-92d1-86ae048112a7", + "b0d64bdf-070d-4d4e-9836-97ca64a14c76", + "24c623ea-3ef6-4f3c-9735-d3997bb0b2c9", + "8c602f66-c1f9-42ce-a410-356e602305d5", + "f5f5e4b8-16fa-4f3c-8d7d-f8dd01e4203b", + "3f57f611-367b-4775-811c-e5a524663806", + 
"d4d575de-4a2c-4658-aafa-e788bb679711", + "3125ccfb-bc4d-47d8-acf3-d353dc63f3ca", + "d9ef4070-594d-4b2d-881e-1f9474be5815", + "a75afc83-9260-47cd-8362-ce9efadf281a", + "d2c799eb-fb45-4bd7-9f98-cdc7faefc401", + "5942bcaa-07fc-4b86-9d58-4c9b8a96b48d", + "473e5606-5961-4a4a-98f7-6ec44cec6d3b", + "f396b9f7-f156-4db8-9167-a74021ccfcdc", + "4902af06-be46-4779-8156-571ff8e00c0f", + "43cc8606-5b4f-4685-a266-79d32fca3a1d", + "0b6d531a-d8e5-4576-84a6-dd9311ed3693", + "37fc4b40-e16c-4636-8732-834b03f98a2d", + "6a45c5b6-d752-46da-92cb-05877b457452", + "3cd61c1b-732b-441d-ab91-dd2469680342", + "e989c02d-d677-49e8-a211-cbf24a4a490e", + "c353b32f-27ef-49a9-ab5b-406a069a45b0", + "f1350459-1b38-4c4f-9385-967da4e95733", + "0cb9c18b-3ff2-4b06-b251-0beeea271ea9", + "3b5192d9-b859-4ed4-a01e-83a85a49bb6c", + "0b4bbdb0-183b-41bd-bb51-22ade9cbf873", + "f5d41bfb-ec2d-4d4b-b10c-d0dabfcb79d5", + "9f52cf1b-16e6-4d3c-a3db-6195cefb25b6", + "4ef8d119-edc8-4d69-87c1-449752fda426", + "1d931845-975a-476c-8f9e-e91de90bea42", + "752e8fea-23cf-4d64-a297-2b9282a0c008", + "c3336d1f-fb86-4d7a-9e34-0b3a2a3931a8", + "1805ed42-baaf-45c3-a54e-6de03812b9fc", + "c45d7434-289a-42f2-9460-cdc3408a3ded", + "18801c9e-631a-4f33-ab4f-aeca06388738", + "fcb7c4c3-b7fe-495a-9857-861e74b60143", + "fc5cd9c8-7c91-4a4d-9938-4ee49964c7d3", + "f4df7d53-3e72-4cf0-8293-423047d0508a", + "734bf1c8-a7bf-47e5-9027-e4bbc3713827", + "0c71e527-eb88-4019-8fc2-14dfebd53bfb", + "8483bc50-8a3d-483a-a5b7-ad3454f3d469", + "6ebe16db-3f43-468e-8b0a-947d690cf9c1", + "dae311d2-64d8-40a9-857f-b9bcd283af5a", + "b53bdbd9-eb02-4f6c-a223-17f110e5b39b", + "8c68d09a-2320-4a2c-80d1-2ceb36506085", + "df8eb1c5-7dc8-4555-8b2f-079d6dec8ff1", + "624362ae-136e-4a07-9fee-69ad20de7daf", + "cc254a79-222c-4cd5-941c-f22fc1de6024", + "f87a5392-1ab7-4b3a-a398-bbd4e43ab088", + "ac3a35bc-c914-47a7-809d-136bf39cda61", + "9602d472-a237-4b4e-b8a2-9955f28748b6", + "4452158e-3044-4e0a-b8cb-619b54e2c240", + "109ad519-676a-4183-b83a-651b6bd3ab69", + 
"4e4d29af-71df-4986-aca0-0cc609f322b9", + "29070ac2-182b-4a1d-b554-4ea773443a44", + "de91aa45-bca1-4683-895a-c8a392d80f53", + "9a903616-4fd0-49e6-b721-09df7371f028", + "5dce2363-6b7d-462e-adaa-41b2479ecab2", + "4d1a4bed-8973-467a-800a-f08a8f5e2166", + "54242437-6b3a-465c-a50f-27a1f8962df0", + "4ad18a5a-0b1d-4c62-b9e6-1fc5657f8ff4", + "4819416c-be71-40be-9230-a400a9c633a0", + "1adc5a89-fb0f-498c-a901-24b09ce6a36d", + "eb23cc96-3534-4492-9eec-6bf1d4811512", + "1d56650e-fc64-484d-9668-0b7e40d01b2d", + "531d90f7-2a8f-4c5d-9553-4446d8050d10", + "8bad8289-4c6e-4f40-95e8-4d3ff326d154", + "b2da97e8-3b50-4585-bfab-5de34b2ae07e", + "8bde6ee5-5533-4910-ac7e-b0cd98316c3f", + "f697eae8-504a-4227-b6e7-3511d1c69732", + "813e8d3a-a699-4758-b639-59e00b9260bd", + "c95293fb-cbb2-423c-8bf9-b640ab2a22bb", + "25479e98-5ac7-40bb-b139-be6fb38dc2f0", + "97e90d23-6432-4760-b4e4-d53a30daa36d", + "0541bc1e-85dd-416c-b338-7b436d21dc6c", + "4dcbda76-baca-41fa-95d4-5bd8fce7c221", + "355f1062-dd50-4f74-80ff-b57319330922", + "3ee9288d-6736-4db3-b53b-715d1ebdbce8", + "cb42722d-0b4c-41b1-81be-590b165417d2", + "a51bbb79-216e-4f17-b187-072830b14e44", + "5e6ccf64-3fe4-4650-8ecc-f3fb85cd2b70", + "c3b9ab9b-d06b-4dd6-be5d-614088d91a9b", + "7eef7499-e55d-44f5-a5d8-bfdaadac4459", + "7b1aa67a-a2a5-4810-af45-882b0404279f", + "63d4ab10-6c6e-4e0a-b7fb-c3b86fccebd3", + "1474a155-961e-4390-a240-49b0304fec09", + "4a1368a0-d53e-4fbf-a69e-1408b8481a4c", + "2f70889a-5c5d-4d94-b9f4-6e10b27454a5", + "173de038-88d2-4741-b9ec-1788ec5150f5", + "2d21bf27-262c-42ff-8aaa-6bfda3148577", + "624e60d1-4448-4aab-907a-cfc17474bae9", + "21fb21df-4067-4039-959e-69e400738e35", + "9ddf2539-6dff-4784-ae13-824afdf936e2", + "4d52c4c0-c6a7-4860-8a95-a1f1d8be1b59", + "20bcb22d-1ad3-41e0-bc55-4dafa184a9d2", + "26cf6da9-6cd9-4861-8f58-62fc5e59b8b0", + "531c17dc-59fb-4c8f-8c0d-6266d32baafc", + "56fde1a6-b7e3-4c19-8af6-0306aca24144", + "8919aa29-a79a-48e0-97a0-ce97dfac232e", + "a26f30ff-0143-4901-9f85-e343991aba13", + 
"6603d890-8309-4c93-919c-019b217c421b", + "f7491953-f59d-4dba-bfc0-584dd8b235b0", + "b563d2bd-498a-47ea-bc6a-7715c64e1c7c", + "67f7dd86-0e6d-498b-8e8d-1af4311ef968", + "08e7a242-9286-496c-be70-02d071c51ada", + "309238ef-5213-4287-a8f7-3ab0dd52736b", + "02f378ae-dc53-4e87-8765-fd8097d629cc", + "763384ae-04bc-465a-b3ed-67691d88cb35", + "e90ee444-1043-4fb7-95af-96c04ca235e0", + "ab692d98-5c80-461c-9395-d799f1f7ef23", + "d9c797d9-9368-4e7f-aae8-fd70474286d9", + "b0657a1f-535e-4202-b81f-3d8a6ebd9988", + "65c02cd9-83d2-4087-92f1-36664c0bcf69", + "6d0be91f-cf5a-44ea-aac2-8bb04c338e16", + "248ce4be-c861-4bde-9c5c-a6e652c050d9", + "0955d808-f3a6-43b3-a2eb-09c2b14369b0", + "6254b10d-26f3-4e42-9595-d246a5d21bc9", + "22b3790e-27a5-4a70-a2ba-a1004ce8b4ef", + "9a272a1c-50a0-4e54-a052-e52fafa9b0fd", + "1220095a-055f-408d-9f53-d66f54929739", + "61fb0169-e601-4126-a8dc-a370e5a9cfea", + "d570b9d4-00e8-4f13-bd5b-39d15d7f0738", + "397c6811-596a-42c7-880f-2d2a9a1440cc", + "3de83734-a4e2-425c-aec0-28185e088600", + "122b7156-25cc-41d9-81e9-758d404b6d10", + "5d3dd6ee-8767-4431-a7f1-56f598e65d60", + "a3754e50-2335-4a2d-b987-8d5d21cea905", + "861a13f4-16bd-4c2f-89bd-8da98f9b5804", + "3b0380d1-e202-4d8f-9d6b-dab3b38035a0", + "2c59e9c2-466e-4d61-8af1-488380dd2b33", + "aa41795e-9027-481c-adf3-a1264eb3af3e", + "880581df-f506-4531-9020-074924637c9c", + "a899598c-c243-4906-81e0-22f0dc2be9df", + "3ed9495b-a47d-44e7-95e0-0a69d47838fb", + "88ff9cbd-f1f0-4ef0-8b6c-dc5ae9800369", + "dbac67c7-694d-4cca-aca6-d13e0a6aeac0", + "9cf2562b-2f8f-4c92-a8e3-a9dd18b53754", + "9912560c-30f0-47be-add1-aa2445009c89", + "cdcdcec3-2c60-4452-82f5-d786657212f9", + "851a222b-ad56-4f02-864f-b3a75ca86c71", + "c7b6717f-acf7-4ea2-9fdf-bb2dd72c6932", + "c904a46c-872c-45b9-82cc-f8bd32aa8dcb", + "5990a66d-b1a4-4be4-b629-29ed8ca644eb", + "d9e6de20-af39-47e7-b0f2-ac3e8700d999", + "81ab55c3-54ae-4992-8359-51c7fe342784", + "19bb3210-4747-4575-80d6-cf665344224a", + "706cda03-8dc5-4ecf-9b19-38836d622d89", + 
"a2a19be9-61ef-4b61-9074-8aad49431d8d", + "3027fb92-c85f-43d1-977a-5cfe4130cfd5", + "fe02cf64-a065-483a-9531-86d7c2d1c8eb", + "41e6efbf-f55a-4a13-83a1-9da7bcb39470", + "79d1ebe6-5555-435a-90ab-a05fe1916fbb", + "d93be80c-8553-47dc-a8e7-bb3714a99905", + "a5a49af0-eada-4cad-8d89-a065787a1e49", + "5e8bddee-652a-46c4-979f-7bc5a9a4b04c", + "366b6b1f-d2dd-41af-83e7-ecc755df60e7", + "0b66efe2-54bd-4713-8eae-fb976f95e73a", + "d89655a3-6fbf-48a5-86e8-98c6a0fcf0f7", + "ddbfc22f-5607-4ffd-b0fc-75a9dff0ccf8", + "6d8f5811-a21f-4714-85bc-24e3011889ca", + "7619ece3-6d27-4e5e-b32a-58ce39343bdf", + "d07e79d2-9edc-42a4-a619-9b449213cf99", + "37ebaa05-3c41-477a-9b87-f76dab4d0a77", + "9418b4cf-dabc-42f5-9c4f-a86568197684", + "e4fedc74-accd-4b20-9289-6b9cd5ddc2c2", + "7d2981e0-1068-462a-acbe-32f5edf74896", + "745f5730-042b-4ebd-8223-baf1301ccc98", + "b589f98d-6221-4021-a373-d54d668932e9", + "da12f4df-c18b-4b60-9d22-7c6e8743e608", + "66db2f31-71e9-49ee-b330-2b6cb56472d3", + "a231f839-236e-452f-a49d-4d261916746e", + "10ea0b3e-26f0-4b91-9fcd-ef429e7f3b3a", + "793edbe3-8e2c-4d62-a1ea-0c2136d691e6", + "df6d1c62-65b3-4116-a2fb-a708b9aea02b", + "b4cfa3c2-f151-4cf7-bf97-b27026185644", + "cbc613e3-53a2-4128-a6d3-c2a78d7256ef", + "bc39b7ed-e597-4b9e-bbbf-5081d8b14f8b", + "aa04832f-09ed-4e5f-a000-d20147399f4e", + "349da8d8-0124-4a4b-b02d-9e913d97a739", + "53570856-d00b-4e75-b95f-59d4cf0cadf3", + "1a2bc97b-c542-4f76-ab34-a8bcb59c76dd", + "d3f03721-0800-4c57-a42d-f88b76bf0ec9", + "ff104c11-e28e-44fc-afc9-03a03938eeba", + "65e6c4f9-1264-48aa-89a8-2a00df3bd77e", + "39ced82d-6975-4b3f-9b87-041e38d24deb", + "29ca43b8-900d-4aa7-8367-32584a24b0c3", + "3a72484d-1071-4be0-a1d4-2e7de043b7f2", + "01681e24-f5f3-48bd-86c4-91ecbcd26585", + "0a464f3e-97ed-41ab-931a-1720c098a5da", + "20432b90-ccfd-44a3-a0ac-da6758c3ae45", + "5843d223-7be6-4206-8fc3-e0a22363f51f", + "a49826ba-a660-4605-98f1-b6a35a986ee7", + "faa799da-6247-428b-b518-49f286ee19ea", + "3c58139f-dd7e-4f9f-8a8f-2ebcb3726e34", + 
"2b0caf4c-492b-4a1c-b499-444f4a178da0", + "68c198a1-8cc8-4dc0-a62a-8eb60b101dc1", + "f11816b8-8adb-41b3-8a2b-0ef976d8af29", + "aa8f9c60-7613-44b9-b020-c44c1e5464fe", + "24a917df-4ccd-4899-861b-5334ad2fe73d", + "136dc01b-ecf1-4dc0-ba20-53a6451fb977", + "a0b3b305-aacd-4c0c-ac39-16ad7e561bea", + "d6f7b8f1-03d2-4454-85f7-1ad3c31a4e48", + "b80d9232-b4e6-4bc5-b64a-55d3547ff360", + "a8fe4907-8aae-448f-bd49-9eee6d7d1902", + "bd2c3cc2-8c12-4d25-a48a-1b754591ebf3", + "d07f3aaf-6953-43ba-a82e-e34be61353f8", + "955ab73a-fb50-4f50-8a45-474f16d2bfcb", + "2eb5485b-27fe-4f57-b9c4-efe93289a97b", + "14fd10b4-a11d-4c44-9a30-d3f472725eb5", + "00eed0a3-af84-4cb4-a085-0a6b54e670c9", + "c66c0668-4254-432a-ad3e-f91e01306749", + "5ebec6dd-efde-4311-a27d-26bd7f8665c3", + "7b68c33c-fc33-4688-99fa-09ef769c16d8", + "28e5c322-ea5b-4ceb-a854-7bddcb127217", + "2cd7af34-dbb0-43ba-9ece-f207894caedb", + "278e1720-f1a1-4587-af96-06aa82621db4", + "bf225733-de28-41e2-b327-9fb171847619", + "e7c33244-fdba-4390-a4a1-086775550edf", + "70cae94a-97b7-4581-a9b8-e84a9fc03c62", + "9cdd12f0-02f6-44ab-b9b4-9d15be5d243a", + "ab5a8a3b-1e8a-43d5-9255-d100b481d846", + "a9a37371-8122-4de3-9e8a-8805aa4861b7", + "9152f4c3-b5dc-4646-9996-ba37926960cc", + "97d85edf-0215-4c2c-b278-03d2673d5f85", + "0ee71a38-258e-4adc-8531-3b09fa6db9ae", + "fba68cdd-3d59-44e0-b5e3-5a677abccc4e", + "4299b8de-5579-4c11-810b-78d588e160d1", + "7c373b2a-3dbd-4c97-8627-1a270150d488", + "53ee856c-49e1-4063-a6ab-642b4c89127b", + "ee022616-c223-4cde-8d43-f5060343b162", + "0733c837-35da-40a4-9dbf-39f95610044a", + "0e478964-e9ca-4217-b2a9-438dec182325", + "0146cd23-2421-4fd2-924b-c0da26104b6d", + "d98777d7-9e41-4ac6-a095-64921f7675dd", + "0fd82fd4-e7cd-4a0c-9a41-8abcf842636a", + "c9659153-7e25-4272-955f-842c53b26b7c", + "6588f75c-4f7c-4635-a57d-909115c83a70", + "ac4b4d85-e382-4894-8d77-dec3b56f0296", + "e1e773c4-a90b-4b3e-8c6e-164b12d45cb2", + "c9872da9-7c14-45c4-b282-393837a329e9", + "24fb8585-84a2-40c6-a19e-b64a4202fc4c", + 
"6d97e0d9-18ba-49c7-9924-3d0712a6f082", + "c187faf2-a35f-4141-b70e-bffaef8a490d", + "a93aff98-342d-4b46-99a2-8861eee4384b", + "2ad09b80-6a12-4825-a501-7eebddeabc97", + "434125bc-4fbb-4246-8b14-b4be998712f7", + "86084d15-1bae-48ea-bd79-30ab393e1f26", + "29dd2c92-0c6c-4d88-a644-0094873e58ba", + "29914fde-7737-4fcd-83a5-e3180a052278", + "fb23fa9b-440c-4696-b055-f49c6bc5b60c", + "549dce00-3c1d-450a-9a6a-16cae5f462cf", + "05215690-3479-40cc-ae3b-a665e9c4ff72", + "854ee1ac-bb49-433c-8637-f90d4c017691", + "ab1e5a20-bd22-4269-97f4-c870dc742675", + "a59e09ce-db90-49a8-ad7a-65ea561ca2bd", + "7e0fd8bd-dd66-4436-b5b3-76b68711d301", + "02291acd-c9e2-43f1-8f35-abbce4b7fbc0", + "1847ff56-ca1a-488d-929c-52e86a119af6", + "0a120b09-8304-4810-b9c1-0b6fdde6616d", + "0e6c1471-f11b-4a19-9f64-abf9aa877d15", + "48b77983-9beb-449b-93b9-642554188b26", + "bcbd0826-5379-4da7-beee-15b86fc526ee", + "0ca49c31-213f-42a5-a24b-7671b0e93a5d", + "b7d61e05-de79-415f-893e-cf3f7f9b10a5", + "471415f4-43f8-438f-a7ad-ebc2390bcd95", + "74f90639-7cce-4646-a83d-0716540a0bbf", + "1f093fce-7a73-4c16-8c14-4a2311be3bdb", + "df8a7e0c-ff3f-4d44-8976-9b0a3c1e3b25", + "9a563512-cce7-4779-93dc-cd02ec266d7e", + "167bb224-3529-4dad-9a66-2055b7cfcec8", + "c405aff9-d064-4c5f-8efe-2dbd5c5090bc", + "bee820b7-14d6-4de1-b461-3947ad1ef6ac", + "6337c8fc-0856-44ff-b6d6-297608c4e1f8", + "4e9ffa16-641a-466c-9216-2b9548644e42", + "cc100969-799a-4394-831e-52ae4af45a04", + "2131d333-84df-4654-b686-db1e31143e29", + "130fc001-6382-41f0-bd40-013724eac6df", + "ce38ec2d-3547-4c96-a6e0-061d051bf58b", + "3b220a92-eafb-4c31-a704-1d100d31536e", + "802ed04c-11e6-4ba6-be8b-47e4e0404d0d", + "025dd206-d8db-4ecf-b7c4-3e3865afd30f", + "3e1b1740-2d17-4bf2-845c-6f5aff1a6da8", + "6766fd19-c9a6-4664-9bea-1eb329bad346", + "4c851e0d-993f-44bf-b8f2-5c929ea66b59", + "d779375d-684b-47aa-bcb2-5878087326b1", + "82305ea9-4546-47a0-9623-6484cd9104a7", + "9b210361-f80f-48a0-910a-7a63122646d3", + "e252f840-876e-42a8-9158-20aa4cda7ad1", + 
"06378b6b-201d-4cc3-9f9c-1e0e2accec77", + "b255446a-bde6-446c-b112-d97ec28ee711", + "820daeef-4161-4e30-925f-2d62e64a7ba1", + "a2016d07-d7e3-43a3-96a4-eda78305e54c", + "e2c207e0-2eca-44f0-b35a-5b48840fa607", + "2ab2c3f0-0fb0-4110-a936-7d45f2a293be", + "26b0a14e-e974-499c-8755-940a32182c9a", + "b4cbdecf-a41d-4b3e-9b05-93968d53e331", + "7cde7449-dace-4f36-99da-5448bd3fb5e3", + "34aafff3-622d-4800-92f9-4daebd8964a1", + "dd6c06f6-bf50-4c32-a493-9405361b08b2", + "01dee10f-9cfe-4d81-8120-7e67ffeede53", + "c80e0951-c4dc-4b62-9baa-92795bdc5e08", + "5b590f59-eb5a-45fe-b5e7-a7c9f7241c19", + "0a790ba6-ef6f-4a7a-874a-8a58d4393223", + "25902a9b-c5d8-46f6-bc3c-ec95805703ee", + "cb48c0b0-3fe7-4342-904d-79ff964d63a9", + "dfa30c0c-930d-4f64-bfc8-3318698a2aed", + "90d19693-6e90-4b38-b1a5-890cfc3a71ef", + "eea56151-300e-407e-af34-1af05b88ef2a", + "930424ea-3100-409c-b7b4-99042f880c7b", + "10fe0d71-a41a-4e4d-b3f6-74ff2dace684", + "50dd6a36-6079-4c00-9d69-867ef8d4ed15", + "759c4696-8784-4dd6-9b0c-7818b62576be", + "ea8658ac-b7a0-4d40-a255-1efc6f8dbc9c", + "a0f5ee2c-f6e0-450b-a0f3-82ebc74933aa", + "ae109ccf-086b-4e0c-b7a6-9bdf2e317c8a", + "b4d1edba-9653-4b49-adb0-f23311dfbbe6", + "f30afeba-cabc-48df-9b8d-26b09a57cccc", + "86087995-4d16-473f-933c-756289f48c0d", + "d9f67ed7-5110-4c5d-881f-55404e2c5a9c", + "28e8475c-d25c-4f1c-99c8-d139e68bf0c1", + "3f96173f-c490-4fce-8aa3-714193498301", + "8631c2ba-2dff-4702-8c13-28d90c9c804f", + "eb585aa7-4490-49ce-83e9-9564912d3c6b", + "723f59bc-4d5f-4539-a633-fd00bf6c6ac2", + "510c8398-82ee-4aba-a28a-19c5428516f9", + "d0759921-1f95-4645-a80e-5536d66b5fe3", + "b6d3b3dd-43d2-4151-8529-461bde30df7a", + "552d411c-2550-4fe7-a30c-4c25c3ddae94", + "a5364e3d-8851-495d-a826-e8ea49025c88", + "77842e3e-bf27-492a-87a8-dba270b75921", + "0b6e44cd-f567-4287-923c-3b5ba30d4bde", + "9ab42cd4-43e9-45eb-a0ad-723cfb8361f5", + "488161f1-b3d8-4c57-82e1-f351d3ec1391", + "45f991f4-d7fa-47c2-b8f4-1114c99a6d30", + "d7c724fa-9e30-4606-a78f-7490683d682a", + 
"85af6e86-8a69-4dd7-8534-f39b5b4d3739", + "ea4089df-12ae-48d1-8997-c21e0d32a888", + "fb90746a-9586-4316-a401-cb1c7e16ed70", + "9a259f7b-d382-4441-8a34-e4f8a682983d", + "9aa97fd1-c2a6-4aff-8092-ca6de7497916", + "a44819f8-766b-4870-858d-572afb21b1d5", + "82e46a84-f1ed-404b-9416-9e847db67cb3", + "1f0aaff1-fb4b-4ecb-9740-922e3ab40fa5", + "ec85c16b-554b-424c-8d4c-f17abb888ce4", + "823cc86b-f721-458c-a22a-8081dbf6c4c9", + "4b7ffb9e-9404-4c67-8caa-dfef0849ed1e", + "6948aa75-0fb8-4f19-8234-03b95f566051", + "097837a7-2460-4fda-890c-658b5a17ff6f", + "a10dd09c-bf99-4d7d-8b7d-8df90090778a", + "a7c9c53c-43d6-4b07-b1d1-9c4cefa92209", + "3d406eb8-5f7b-4d93-b2b8-65df30ee5f34", + "4fd88c5e-dd31-4f70-abeb-c8db7909e231", + "622279d8-7a9f-4391-b5fa-b98175780979", + "ec06e5d1-0b55-416d-9c8c-6db65fbf318a", + "c3342c44-9622-403e-83ae-77c685b0bec0", + "2b026319-c2ea-48c2-946f-65396b2af10f", + "63ed37f0-3c06-48c6-8767-b8891895e937", + "1b5fe6a1-06c8-437c-81ea-bc6329b5b89d", + "12958ad4-e0e4-4b7c-923a-8885d981bbc8", + "58b84402-70d3-4e4c-ad07-2aedebb646a9", + "567a02d9-b4a1-412d-b008-9bba912ac592", + "5d1cb9ba-4096-4e95-80f4-a544ada77ebc", + "08f9fbb9-2f28-4387-8f32-721b0513e773", + "76fe209e-5fb1-42e3-9ad4-22e2a8bae00e", + "e9ffbd0e-dcc1-4d96-afab-3ba6ea7b7a1b", + "c3662627-e507-4218-92df-063039f8a01e", + "f2b4bf56-1454-4753-a3dd-67d41524e8ef", + "df5daf31-5d07-4ad4-a98d-c6ebfb1b05bd", + "98437b15-fb9d-4205-a0a3-5dee0b5894e0", + "f9db51ac-63b0-4a3e-8263-b51d566b2cec", + "08ff46c3-ae44-4fd3-aabc-04b78047b2d5", + "0af42fb3-2b3f-46fb-8f81-7384bed2308d", + "c5302063-6604-4a50-b888-94be3950851c", + "8133c392-6eb4-483c-a051-976be86f60e4", + "7ab2bb78-bfec-451b-b2ae-8878eb005d25", + "7b21ba54-a68a-4ac7-bb51-1b32ac718282", + "41decfe9-f512-441c-9a4b-84a48295d58a", + "ad3e880d-75bf-4c02-be42-a3274ba90116", + "26d29be1-d5b6-49db-a2c7-f06225e85f87", + "40de2404-5bd5-4512-a6f2-4866ff40b9cb", + "dc0e67a7-cf2a-4689-a4f1-d324143a3b05", + "58223b20-d3a9-444e-95aa-51fb191ee262", + 
"cb1a53ab-e5f1-4180-a15f-dd13b959e5b6", + "90735255-d5ca-47ad-931d-cb28d3c31859", + "9e9f8b6d-2b71-40d1-a194-f2be847c0d43", + "83f6ac29-389e-4843-95ac-953f6eeddc6b", + "9f2408e5-78e9-4b22-bc96-22458bb77aba", + "bc07fc23-63ea-4db0-87a3-4fd11316e5da", + "032906d6-21f9-4273-9248-16a648934531", + "35c9fd7d-3b12-4638-abda-9efd42bac8df", + "161dd8a7-764c-4f37-9f8d-d2f51aef9a29", + "f44323c7-ecbf-4b47-b59d-4f4ff6ed6279", + "2a768bd5-806c-4be0-9571-6c339567e83a", + "c1dd8345-1795-4b20-a03d-4c897178c10c", + "7275ccf7-e550-4c5e-8b9d-ffc2c1756f9d", + "adb86d93-8e78-4ab3-855b-c02eef8fdadd", + "829a3df8-cf5a-479f-8d2a-7b2e792122d6", + "f78e36ef-7eb4-4be1-9d2d-f492068127e8", + "755fbcbe-76c0-41d3-b115-9ccab1ae7236", + "080478c4-7d39-4faa-883b-5c4e64457202", + "fe97ce37-f259-4eaf-883a-6e01f720bb4f", + "cb7c0ea8-cadd-4de8-9ca9-4fb9c6b5b125", + "b0826bfa-99e5-4cd8-8588-5b1464cb6103", + "30482128-a86b-4ca2-b6d6-d4f78c8e0734", + "fd7c8ef1-903f-452c-956c-57593809e220", + "6ee692de-9f6c-4e3e-9d18-5d9d4f788c75", + "9e577b79-6b89-4d2f-a7ce-c8be652ffa2f", + "b16a6c33-eff5-4bee-898c-6ff6b5ae8b5c", + "dd5dfa42-e977-44d7-8c8d-9255a37b9c3c", + "0470a3e0-e516-4076-b54d-e1dbb1d118d6", + "8648d6de-8eb4-4184-bad9-2b21c6b1d064", + "45447f26-6daa-4663-8ec4-deaaef2d1b0a", + "b29e842f-6441-4c5c-8a10-79102a28f5cc", + "41edcc74-f056-4c56-814c-51705ccd3020", + "0e9c12ad-0076-423a-b9ef-49b07c63eedc", + "653d39a0-0519-4c68-a530-a84dd1c249c7", + "95385c1e-8b92-4664-998c-187ed5ab4c11", + "72383cb8-c4ed-4001-ba29-d3c348b85034", + "ec026c52-fee6-4dc1-91e9-f5998ef7a874", + "50f9ba28-29aa-4c27-8c9b-064b881c6713", + "02a0b9b5-1d29-4db0-bfc3-8b3188e4a028", + "e66fc9a7-1cac-4264-a4cf-35b7f96dc33f", + "31ac0057-5939-4399-b034-64ad4b1d16ee", + "99e5ced0-882e-42d4-9421-03b6be389d53", + "bcfdb48d-8a37-4f9c-84b8-8c63297f25c5", + "aa18680a-9001-447a-8886-bc9c5c666e47", + "08a4b24e-627e-494e-a3a4-218e67f088a3", + "bdc06ebb-a6a7-4e15-99e2-f7db8e0ab70c", + "65e82559-f1b7-4db7-8c42-f43f27280cc0", + 
"e697ff4e-ac51-4c00-b86f-d43039cecb30", + "219dc347-aef8-4a83-b27e-202c357d67e5", + "4c31cd00-150c-4451-9e82-94390a7397fb", + "afff21b0-ac0b-4793-85c2-03700056b7ed", + "c999af11-f096-4821-9d22-63b130dbbd37", + "d7bc1c8d-49e1-4838-8bce-854737fa9d36", + "d6943af1-85e7-4cb7-99e0-45cea9c0bb9f", + "c58690be-972f-4384-8b94-bba60a27d859", + "5b723b48-2a2c-4915-a4ab-1aec4463e010", + "dc0abd3c-c347-452d-98ab-276a897c6c3b", + "c6a9fcd8-82d2-45c8-b613-cbb22f4e4240", + "714ac606-7c3e-4c85-a3cd-6e2c5f8afc71", + "769bb68b-9940-4ba4-a1dc-c5a06b90de65", + "ed4f5f8a-8fc6-4752-a6d5-4da9a5634fa2", + "d5a96e07-2fba-4551-91dc-99310bbceab2", + "a069e883-fcc3-4cc8-8133-35bd0096e1cd", + "10bf620d-123a-4b9c-b668-94a10393e51b", + "e11a6f5c-3b26-4827-9743-056e90b834ba", + "2acf967f-568d-4bf2-a7f3-f1515bcda328", + "249ee5a6-eb0b-4a59-a411-0fc2ca5b534c", + "aa464d89-c9e3-4c1a-a50e-fecc1bd58280", + "92e00c14-4b6a-4d3b-ab2e-fe754c1e3c1d", + "ade57347-2a2d-44bc-b4fd-e05c41bebab6", + "4300c10c-5566-4588-a476-879efcb7413b", + "c83a1905-7de7-4311-854f-b1e5c4727c1d", + "843daa46-0512-4745-a27f-fc593ac15430", + "6c761350-f14e-4aa7-a836-371d396b83ad", + "d6272601-4da8-4bf3-baf5-38141aadf567", + "e5c4698b-4527-443e-87ef-ee38a6e3f632", + "e0d31625-75e0-49cb-812b-4b2ef90ff069", + "eec9ecdc-ea79-4b64-a4e3-42f1c547dda9", + "f2216e74-96d7-45af-804a-ec564992968b", + "fac6f36a-581e-48c1-9a4b-dcb3e9121b4b", + "045c551b-f9ff-4e70-9801-4c0f791145aa", + "143cf1f2-8d16-4dbc-8c8a-4acff7d47688", + "3b412257-c3a7-48ab-b301-7c3ab48ad52c", + "8e7fd428-0e3b-412b-b172-af5137ab5cd0", + "c70666ec-deed-4bcc-9b49-9d7c55a4eb53", + "df3d493b-510d-4128-b682-c9b5afb0ac00", + "12533554-676a-400d-baed-d1a563db54bc", + "43e10727-23c1-49c9-a6bb-4fcb8f7f2074", + "6c5e99dc-0324-463b-8e6c-1e1d201a8acc", + "cd990e54-60d5-4959-91b1-805ed328d24c", + "4b4e185b-0874-4445-9025-cd7466b3da4a", + "3eb3c2b2-c1f2-4ea7-af32-15d18c7c9857", + "5c5fb79b-9534-41e9-9ebd-9d673e7b8035", + "ac197361-0f5b-46a9-a7be-76f782cb945a", + 
"83d22577-3942-4d28-9c8b-8963b675cca0", + "bd7783e9-a1e0-494a-8c84-b481eb312dad", + "94acad55-06bb-44a5-9166-2b8997036ec6", + "b00b208c-d0f9-4745-bf6c-42f7c1845caf", + "5fe57a38-2f15-490f-bda5-a60288efe824", + "95ba49bd-57c2-4838-b028-d6653db91901", + "3af72b51-130a-4570-9151-d0922813c52b", + "611121d1-7f67-4aa6-a82d-d5b5e661c97d", + "19839a96-6034-4e2f-a8b2-4ab4e626ee1d", + "dcf2c806-a2e1-44f5-a6f9-08b199e0346f", + "e1e1db61-4c82-41e0-8459-bee6f3bbb509", + "e8422b74-937e-4269-a358-29481d545a8d", + "8e0f642d-43ba-40cb-9cac-b29c58556461", + "5c927fc8-c34a-4470-8f46-704500376cff", + "89d52f51-89e7-4bc4-8818-0af90a082905", + "cb53ddc3-c218-40c0-8277-f28494e6783c", + "45db9a2c-8e9b-473d-88ab-0210d00d134d", + "229c08da-c730-48f5-93ae-bd610dfb83d3", + "76f9c915-7b3c-4709-901e-7886933e1830", + "5ca72228-de70-410a-9853-8ca25ba67c13", + "e6a5114e-7c7d-4462-8aa6-3ab38271d9e5", + "666e19ae-88e5-421b-a009-e5b803655118", + "737549d9-0973-4f91-bbb5-aab602cba16f", + "77607464-0392-4312-9539-1aeb14245a76", + "d2af9e56-2a8a-4ff3-b794-15c3d88dc0f4", + "c6325b4e-f3c1-46ee-979c-cbbe4ff689b6", + "9f65f611-3bb4-4a68-be5c-5faf97d6fcdd", + "62a9d2a6-99dc-4e13-aa44-e1d0a0a4e4a2", + "0fd29e60-07d4-4101-90c8-9a88959ba39d", + "c64f14c7-287d-4834-b6ad-94096a39def6", + "92ef70d2-feff-42de-8f0d-11ffa0092744", + "b8fc3655-0f53-416e-ab7e-41d18f58eed2", + "51c3cb64-c4e1-40e1-ad46-67960be9b735", + "dbb368ad-56b3-4507-8c73-855b11fda922", + "989d6651-a023-4809-82a5-9414ae12d0e2", + "7bc6904d-cafc-46b2-a985-ee485cd86c8c", + "a28ff756-c110-495f-983d-fa521eb244e7", + "98413a2c-fcfb-40e2-9c00-e75843ea31cb", + "39d93d8f-81ac-4b41-8b48-0412c0cc2399", + "8e12dce8-ff6c-4414-b14b-cccda0e5dbf6", + "9bc3882f-8a42-409b-bec4-06dc256b2639", + "814fe19b-7e00-4892-9155-a2697324e036", + "ed9e9449-1c57-4625-85aa-15f7af9458fe", + "1445bb42-305e-4280-8ea9-da570f42b33a", + "69754cce-e0a8-45b2-8c59-f5d4b80848be", + "e040579f-b003-4cef-8340-be8233da1dcf", + "259a1a48-a926-4f00-81f3-439939593a40", + 
"a77a951c-476c-4ac0-8401-f1a64c1ca78e", + "4bf4d8dc-89e6-43df-8db1-dd6c315fdeae", + "6a99c9f8-b8be-47eb-90ff-3835c69efa2c", + "52904133-73d9-48d9-b668-d1084e41b9b0", + "90d167d6-1274-49e9-8662-de1d46dd96ce", + "5a90491b-e399-410c-a39e-314ad5ef4e8d", + "ffaed21f-a41b-49ac-887c-91970aac6388", + "7da07e1f-9d21-4a86-bb04-342a2881dbdc", + "7220db39-e32f-4ef7-b852-1c075e1043ea", + "5034184f-bcf9-4f7f-9ef3-f3e3d7491122", + "8f86d13a-436f-4c90-8322-0b73d283dc35", + "28e8f6f5-0eb8-4a65-ad5f-dee4927f17a3", + "0bbdc9c1-62b0-4cb6-b3eb-40427d7b1ee3", + "4885423d-f149-4e2e-817f-11457a156f02", + "5e711c56-639a-4222-9cb1-2fa34907ba36", + "4f17075d-7267-4c55-bff3-0d528524abac", + "b461790d-9c26-47fb-aa71-11c9c6809b90", + "069fb103-7505-4510-90e2-56baff2027df", + "ad419938-d12c-47e7-b0f8-7ea98f209acf", + "a9b8ec81-e6ff-4fc5-8bb2-299566dd938e", + "6157d645-1bc4-457d-b426-4caae5d88858", + "5fbc4325-23e7-44c9-85de-a3a2e37e8889", + "5fa46a3a-c7f0-4b17-a7b1-c57410dfc277", + "c345fd3d-b819-48d6-9943-bcda442d8dea", + "2cf81b66-c5f6-4a9d-8ed7-83dc7c0e619a", + "3f68a5a8-2c6b-40c1-b76a-da38546ab818", + "6f5751e4-3f07-43fe-940f-1049f43ef78a", + "7b439e40-6798-4687-91a5-c7f1a3466a5e", + "132c1d30-2665-4976-b1fe-6e49949d3a96", + "d7b890f1-f283-4f32-98ac-b1425523d040", + "bdb184a1-704f-4d93-af99-2112652424c2", + "52fd3092-dcea-4128-907e-77224d027284", + "5f96b0bb-ada3-42e2-b00a-74571323f64a", + "4c7cb818-2d77-4f0c-8c78-e4b6469f3ec9", + "83146695-5443-4df0-8d13-3faed27d968b", + "05be1544-1d19-4a4c-beca-e53f8b5566b9", + "4c99fd2d-e8eb-4b27-a4d6-69c9231fd664", + "67af7dca-a1b2-4f60-ae86-8b488746cc1c", + "e7bc3994-b1ee-454e-9810-3fe943e8ec39", + "e0bf474c-4341-40a6-a9de-7c4c7e13aae8", + "2462b064-7c0e-40dc-b1d0-4f1c8754a6ea", + "37bebac8-69e9-406b-a651-2bfb4cdb3b4e", + "39d2c2a8-3982-4f16-abdd-4f87714b8346", + "3a2aa515-5d13-4668-a552-7e79da905967", + "f692dcc1-b21d-4887-9ca6-b52f90f063d2", + "f597a5e8-7ce2-4833-b74c-f58cd4f0f605", + "34d22011-ff7b-44ea-af7c-84fd02387abd", + 
"9ad6619b-a7a3-4033-b958-dc0c4b093a04", + "2f5e4be7-b654-4ae3-91e5-82298ca8a451", + "d501f809-bd01-433e-96b7-574b0fb3bb34", + "188b4c75-62d6-45d6-b6b0-4ecf2c3c6b58", + "9ff6e046-73ea-4d57-ba88-220008506862", + "9c4b70bd-fd7d-4ec6-89f4-93576f3b28be", + "1648d52b-b4c7-4e3e-a2ad-0d5242e202f6", + "e4b0f667-e845-4279-af4c-fc3476afbebc", + "09090609-6f4f-47a4-976f-35441428b183", + "47ca9d0c-2c6d-4e9d-877b-5c4e79bae5b7", + "bc1a27ab-e798-4d4e-a749-af9e4d78585d", + "e299fa00-fc14-4be1-a005-c3c735705ecd", + "e27dce41-b875-4887-a225-8a8e61f4c97a", + "3b9814a1-c932-4b94-a48f-a6327b3884c1", + "7c6e7dce-1eed-46fa-b9a6-0781941cdb5c", + "1538d57d-df6d-468b-9552-c06eb5913e47", + "bdc595e5-c4df-43f1-a0f1-c522086e1817", + "9b25918c-1e5a-4aa7-8938-c9e84a39cb95", + "4cd7ef99-dd38-42ee-8081-97c1925273e3", + "d9766084-5514-4ad2-8cd1-4792ef0f3018", + "62e2a5cf-2939-4a1d-adca-9f273f9c4a21", + "7b8f3635-ef3f-44fd-8819-6a25930e01a5", + "f976ba37-1269-4c17-a20d-b567cecf5158", + "b6612623-2b6f-4e0f-8a09-2a6f242220dc", + "1c66aa90-8246-4cfe-92fc-b3bdcb2bd839", + "806912a0-7d4e-47df-86d4-51e6136df7f9", + "6dbad783-ee49-4513-b4c6-98ffa79fe125", + "9caf478a-7c14-4f43-a895-9482e3fd87d9", + "bf7d66d9-a96e-46bc-b7ea-25feaa2bd7c1", + "d9a20af0-066b-41c7-9650-a705d1ca145a", + "0fa94d12-07d8-4a26-99ad-0df1cfaecc50", + "d1c5ce32-4c70-418f-8bda-e0e5817ccb00", + "6610114d-ce75-41b6-b229-8fde9b050be3", + "2f4d420f-e195-4919-ac2d-74ea92f3b767", + "c5e179f3-8607-423c-953c-9c8d791f3b77", + "3b9c0e62-2ffa-406f-90c9-0a9a0daa2cd5", + "7a3276ea-9265-4a7d-b849-ba4d335d8776", + "ba00582a-1302-4cc3-8080-e2a9565b06f7", + "834c8d39-bfe0-4ed3-886c-2433cdf8dd2e", + "3d452ea1-d6d7-4441-be2b-4d39c7b66f48", + "241deb20-3dd3-4539-b4bb-44bb4e40d701", + "c3daf795-fe91-47b7-84e2-8100cea91203", + "ff918a29-aa32-4665-8edb-9c0b9052742d", + "c65c37fa-6735-47ed-bf8c-b976068ceb03", + "dc5708fb-d5b9-4625-afbb-e0b23abf1a0f", + "88eecea3-3856-4ae0-a58f-b67740b91246", + "544355db-e76f-4f97-bd0a-1a13cee7af1d", + 
"dfd91846-6dd6-465e-be23-4d15787e4a33", + "00fa3a54-f228-4937-b4da-7532a28dae66", + "e56ba28c-20f5-47e4-b464-bdc709a5200d", + "a27603cc-30fc-4270-879c-4941498f054c", + "0aab24b6-3fbb-4fae-842b-945d88de2131", + "568be1d1-3f5d-427b-8b16-1b13328f9061", + "162092e4-2d86-4c2b-b4dc-d8823bb81eba", + "30f5379d-b3fb-4b82-8e99-64357b730118", + "b1c5af1b-7754-4040-b18a-a5656a61c1d7", + "cd719ede-08d2-4e1c-a410-bf257bff894b", + "733cb3bb-edd6-4987-ae27-1f7d4fa80fa1", + "62e694ff-44b2-46eb-b2d1-694882270e04", + "271fb0a2-dd37-432d-a6a1-5824ded51ee1", + "8e0282a1-8c13-49ac-8f86-171750e56929", + "61b392bc-1cf3-4be9-9fd9-1690ddf426fc", + "d96739bd-826a-4b45-8fe5-3dc72f74e592", + "90f4df3f-1871-440a-96a3-5dc18b244c64", + "f08dd324-b30d-432f-89e6-5779370c5e60", + "a5598b89-2912-4b32-ad96-419e0c4ae6c7", + "f3c3eb99-89c5-4eda-916a-49de3c783b27", + "e796d78a-4a57-4c79-b9b5-bb1aa20048c2", + "3b86cb06-1f43-4873-8d25-816f0845f231", + "769810b1-e61f-4207-9328-2097a7840477", + "112a3151-efff-49dd-8d36-646e9a94df27", + "c52c4cf6-11f9-4d87-b6c1-1670f69798ba", + "93981383-9a56-4504-b381-a82611c926b1", + "7c619f4b-59bd-47a0-9e5d-a4c0e852facc", + "61f715c2-f728-4a1b-befe-0f23cb806642", + "16ee32f9-9c99-4107-b5e9-51f5bc23fcde", + "3c2fb716-58e6-4fd6-afe5-ccfeda1d106f", + "feddc82a-9f4d-4220-8381-45ea73f6a382", + "f0595bd0-e742-46d1-8eae-920aaf253f22", + "b53eb9bf-1a4a-47b9-9297-4e1b3700c92f", + "14f9fc33-e671-420e-b137-f6a0591a4e95", + "67711850-c5e4-45be-aa8b-6d3906780a02", + "20be685f-42e3-4f3c-aab8-5bd75b755407", + "9462e5da-9ed3-4c42-9970-67959751182a", + "8edc2b33-d937-4bd1-baea-da690106310b", + "c7908ad5-f026-4850-ad06-a3d1cb48b0e8", + "5ab98669-f193-424d-b536-9adb6b7b4710", + "ddbbcd1f-4cd2-4dee-8575-c9e67f87da55", + "6c3f734a-7b8e-4d76-86fa-5fd7d351b036", + "43a22dcb-4ac1-471e-9cf8-37c41d9f4cb5", + "a73644f5-9d02-480a-a5ed-814efbbd213c", + "d9c7e74a-9188-4f39-940c-2783228ec4a5", + "d0e4aab0-28f5-4947-b4a3-d3a1eb334ec4", + "9039cdc2-5fc7-4348-899c-3f70adec2956", + 
"7d3bbb2d-afe1-4faf-9696-6bf6cc921540", + "1c628f9e-65a7-4df2-88e8-be6ec37875ae", + "f4672b90-8466-4deb-bd22-d4394eaef946", + "1f1800d4-fe08-4849-b047-0890920b7ce2", + "6789679b-f63b-4e1a-9695-c2db4bec423d", + "6be39a55-28d3-4c5c-a799-be55acf9b433", + "e0e6b455-0f0e-4829-9d8c-92429bb6d262", + "a928b311-a886-4017-9788-3ad78e350b06", + "e7900edf-f0fc-4e00-9bb1-3dc5865f91c1", + "41936e51-b2b2-48ce-a0d3-bd9609d332f7", + "4db44737-bf43-4492-aae4-0abb9f4bc368", + "651ade49-738b-463b-a246-8122c83630c8", + "d4b5c6c5-d5ce-4273-bc99-f5cec7b96ecc", + "00f2bf86-5d62-4368-9479-ff86f73f3054", + "0b73f03f-8d9f-46f9-bfe2-518cb705d364", + "124a8aa7-e5ef-4361-98ee-c64173424ce9", + "13b25d48-863d-4868-a876-0262a92f8d23", + "af72f78a-2a36-4622-9e4a-3ba5c7300c2c", + "417b5104-049d-46a7-9b18-4b1e8b166b99", + "012dae3e-a876-4cf5-8588-6d118bdde189", + "498277f3-50d3-4b74-bbd5-d06596998a33", + "5224a83b-236e-4636-b7c5-32535226ebe0", + "466b7bb9-e4be-4413-8b97-6308974ceb42", + "9c26922b-4dac-4c3e-9d6c-5c71b316c0fd", + "23659a75-282f-4dec-a638-56f49d389e69", + "7049d0ad-61e9-45a2-b778-289d1a183ada", + "999adcb2-07b2-4290-b4a2-b83b86262f68", + "cc11b87f-afa8-4d02-987f-1fe8243eca1c", + "0adbfd60-2f32-4b20-a70b-f5fe35cd815b", + "91fe8509-8f4c-4399-8b0e-9191b649fd84", + "46414a3a-9592-4e0f-99d9-b8074ed05844", + "0f5990d1-58d6-410c-a3f4-2ba94991885e", + "2ed32a3f-3e38-4f28-8551-2e455c09b1a0", + "364b980c-58aa-4ded-bb0d-64919074a6cb", + "e3a6e8fb-1a13-4a55-9b9e-2368e24fab76", + "de70171e-19ae-4a40-ae4b-12d1ef5a6fb5", + "2c6ef655-6ce3-4246-a34e-b2035c16a516", + "84e3e095-dd0f-4b8c-a5d3-92582572bd5b", + "61930703-8b4e-40c7-a806-18f77c36029f", + "702e2367-b9bf-4eec-8b76-13461df0bbd4", + "f888bf29-461e-4e19-875b-5603b0f6660d", + "49e1a391-a208-418d-aa4c-603603a3e7c9", + "2108efe8-0a60-4f49-a5e4-82b5369d5ce2", + "c5b75089-7fcb-45e0-aeef-d293a8f73763", + "6dc450c4-05be-4cfe-8763-a834ac35516a", + "051a0664-32c5-434b-8ab6-02305f380c9f", + "ab58a800-9880-409c-a386-266d693ef302", + 
"b0729081-d1da-434d-b82e-c70c54c6c58b", + "d93ce27c-35e3-43ce-a9b2-47f98d624bf1", + "f560281d-44dc-464f-975f-1e0a2b6b4d78", + "3bac3e6f-38e5-4658-a0ed-1d7f56c3261f", + "ccd5250f-3434-4937-a837-987705fd0539", + "5f0552a3-0c6f-485c-a2af-e40f5a184540", + "7b5a4294-e9bd-47f0-bc76-c1e76370f75b", + "22e018d1-b392-4389-95c4-b0e577076aa4", + "aa01f10f-47f7-49b4-8d26-7b39c4563202", + "f0d865f4-5162-4089-9ac6-c9137b6e211e", + "c6a222eb-b980-482b-9e02-1fe6874fe4c6", + "e660530e-ce94-4fd8-b39f-2d32896e55e9", + "2a31d69e-20aa-437b-9480-0d099f3b818d", + "e5710f2c-fe3a-4040-9b12-c5bf22a60351", + "30a0bb41-4b90-4096-b8c7-9eff885bbac8", + "d8edec1c-7756-4728-b719-4373c926249e", + "8db2ca5c-3099-44e6-a184-e5eb14316587", + "e423e6a7-3b25-49e5-9276-bc10fe3bbb46", + "2753f8fc-8e0b-4f39-89d4-d1c8893447f1", + "e8db996a-3ee7-4426-8526-dfdfd91f4a01", + "50a2ee4f-55db-4737-8c4f-2f5eb7dd21b8", + "8afac747-2586-4855-a3ef-261c2e86ff5c", + "c99d1280-4290-4e5c-9e7c-6a44349002b8", + "8a9f19bc-232e-4795-a4f6-2871377b6579", + "1e198ae7-0aad-4a7f-8ed1-642c0b4c4089", + "3da8bc3a-29d8-4b89-a9e1-978655b791e7", + "60129ffa-c4b1-459d-9cc8-246999b948ff", + "604007ee-2e30-4e23-a17e-2de9f08d7075", + "a1ace2ad-50ab-40a3-86f9-06b2db8566bb", + "96465b04-0a5f-490a-b670-eef9c7e3d2bb", + "fd39ae3f-9783-49c2-9d31-8d8c23467233", + "fe61ae29-b005-474e-90da-cfb0bdf44a69", + "1a745387-c3da-459c-8854-def86291c4e4", + "25bc2216-fc99-4e13-858d-b9ac237485be", + "f47f1600-0fd3-4996-acbb-5ee4273219ce", + "c4598efb-8807-4187-9957-d8a74da295ed", + "d2c10ddd-92ea-41b7-bf4b-d08d065c5c20", + "974ba1f7-1316-4019-94fb-c9a374b5cbd6", + "6607f6d3-d4c0-43cc-be62-c3113bec2c84", + "a86ad435-895a-4557-b986-620ad654fc21", + "a577541e-0162-49c4-b8fb-7f528a0193d1", + "67d6e450-224c-4a5a-9147-d3048bec5346", + "a8513d96-0cb7-47de-bbf8-1011f79c2d59", + "f107b4e8-2b31-4e5e-9f38-2adff4dc0ec9", + "a1d32758-a546-4bca-a124-a719b0ebdfab", + "545f66f7-f6f6-4709-99db-40bc8c41f9de", + "4cf530d5-6601-47e3-8ff4-077f79c60a6a", + 
"2479025c-aec7-49fa-af18-1c211064002a", + "5cce7a8d-f77e-4dfd-949e-6976cb0edea2", + "654937be-2d9b-4349-8a1f-3a7a727e597c", + "fc876a92-095b-4cb4-b37c-524315d37913", + "9e418203-254b-4117-a176-5d102aa14f6b", + "140eb5b3-2a2e-4916-8e53-58d69a5febc3", + "d5f9bd8f-03c1-4406-a584-0e3261d5d307", + "adf6c558-2cf8-4413-ba6e-5188eb0dc164", + "5b6a2a9f-bda3-43c7-af94-e4360ed4d73d", + "1c9fde03-cf3e-4781-be5a-a5fc12862a24", + "722c2c63-1069-4105-bd53-00ece59e8e37", + "a3fd5444-c1f9-416f-a8f5-7e873d6a7116", + "a4903d45-fea5-4460-8e23-afce6d3b49a6", + "781f3c0b-70b5-47f8-b737-7a4d5a2b3a44", + "8ed444c5-d97a-48bb-a8ad-22dfb9b71b6a", + "cff6626c-fd73-4f14-946b-3580f4a19e3a", + "6b868e75-9b7c-45c5-8e23-5b458173b228", + "4af75eb4-384c-412f-be9c-5b85f485265a", + "0d6ad052-7241-4da7-b2fb-7a608ff7dd03", + "daaa8120-ecde-446a-85ed-b97964610b60", + "f3b0b502-8d31-4bf0-8797-828565b8b7c9", + "9b21220b-246f-4155-93a9-7b6ca4d44486", + "42fd227a-b8e1-408e-9c33-32ed862cfb5a", + "49570a00-9ff6-4f38-8eab-42894e91c46e", + "6c30e37b-f333-4064-be4a-6bb287eded5f", + "27f4d252-1963-4233-8430-a2aeb978eac0", + "f5114088-6f07-40cd-bd2d-53b6912b0fc0", + "28cdee05-1bf6-47ba-a585-0789cbc118ed", + "946b6f4f-6a8e-4f45-99ca-06ae19e3f421", + "4fe6dc6c-5808-4db9-9f8a-4cba3ecf21b3", + "3622687d-1a86-483d-8488-87efee27778f", + "361341cc-52e7-4a55-895b-8ca5ba2b683d", + "5b9b0924-52cd-4710-a0e8-bf60aaf9152c", + "f04b77e4-f61d-4275-8f98-d47161e5d73c", + "cb8828df-58d6-494e-b57c-92dadc0b2012", + "088e4bd5-9cab-4346-9cb7-ae7bfbb2f9af", + "94f63c62-9f34-4a8e-a4b2-b7dca5997bc5", + "6ed11685-a287-44da-b5bd-8849b1eac3f8", + "26682c2f-26e0-419f-b450-3c02bb6a6b2d", + "d1578612-b5c9-453b-83df-da0ab2256440", + "7eca5661-db2b-462d-a63b-962edeb0d774", + "7f90dd54-96f2-4d4a-b192-274a442205da", + "ac59a609-2209-455c-b5f3-d24ddd64625c", + "4132e9f8-ffb1-4136-8750-18ac4acb2bd8", + "b977197b-2e5a-4827-ad52-d6f4e23f88a5", + "73896103-6ac2-467f-93dd-c53978ad5caa", + "08856008-3834-4879-aec3-6f026d74d42b", + 
"dfc95b96-679e-4e87-ba32-390a5237461d", + "a918141d-0d8d-4c0a-b87f-0aa7e64e307c", + "8dc8be53-08e7-43b2-ab85-29b3ee7f821c", + "49196d10-fc79-4a4e-baea-a91e4800b615", + "5350b93d-7d2c-4b4f-9fe8-4ab528dfcef5", + "f1b3f629-9a02-46af-a752-0e91ea1e5d0c", + "2786a6d6-489b-4853-a6aa-fbf9fe687091", + "ac6d6650-c8f3-4643-9050-0a49d70e6084", + "f9c7d1b4-c9a6-4be0-b328-324e29cbfaa2", + "f4d27cdc-1999-4fe9-a9a3-1dd211fca51d", + "6bfe3335-693b-4be9-a2d6-1a2a4ea14279", + "9d8b3cc6-f02c-4589-850f-7c9bcddd68f5", + "c5270e3d-0f98-42c9-8e0b-277c848dd890", + "d5d79d0c-ac9b-4218-8a1a-dfcc4c7a0038", + "e867d985-044a-4d97-8c3e-abe9f6049587", + "842ee2ea-05fa-4c43-81c7-43fd385bbdce", + "863adcb1-3f44-4d2e-a5c3-5763d8127a1a", + "b2718e5c-e080-4b44-901c-bd3267ef6bfa", + "7358ab8f-1246-4974-8bf6-40da17ee3112", + "9e6a9aa6-768a-4d10-8a0c-d4cb5a9ce96f", + "9c57a368-85cf-46f6-ba0e-46b80d7aa3f7", + "3e8fbc23-7b11-4c60-9ba3-4c4c1768f117", + "8834b665-89a6-4fe8-a3dd-a1d9900386ca", + "08ca706f-63ad-47ac-8f92-cd7bbd90cf4e", + "370a5aa6-3b54-4bc2-8ca7-e94c36aaa7ac", + "e7a0f62c-85ed-4d9c-afd4-77fbb6bd31c3", + "280ac06c-54ee-41b3-9e58-84801d2f8216", + "552cc675-fc94-4ff1-a241-4af9822b5ab7", + "e1331f8e-e2e5-4c09-b13f-cb9be33e8a8b", + "0876562f-6ce6-4d9e-8695-75ae7cb5648c", + "397b0637-17a8-4aa6-a8d7-0d389ce44c76", + "772668d7-32f4-4044-bc81-0b4dbfa5a77d", + "d68ab6a6-e6ba-4bf2-8680-2ad4dee8d147", + "7818d36d-46fc-442e-a6e6-bde6a3e90ef4", + "c0557eed-ffe3-4221-8655-cd4798b9df39", + "4557cba9-0345-494a-a5d4-c8614109a08a", + "6d896b88-571e-4901-bc8f-5095fc8a70aa", + "9ac3c1ed-18d5-48dc-8abc-be8fff30474b", + "be791930-51d2-4cba-b967-d2e143d18018", + "9c5fdd8c-39dd-4f45-8f86-1264ac6ea532", + "62ee1d3b-44c2-4a33-b910-c56733ac4506", + "33763742-349f-470c-8603-6c2e9efd90f6", + "c5461265-84c5-4980-9b2a-14a8ed07c94b", + "78eda089-3a6f-4e30-a6b6-574471b2b54e", + "e657e87e-0805-4c34-9eb9-6a19c43c29ef", + "83e37c28-3ea1-45f7-8daf-2cafbfca0fb0", + "b864dc08-a3d4-4830-a299-36f4404d333b", + 
"16e82380-0e00-4f0f-a613-3a172e9ac9cd", + "99dce10f-bf33-4044-81a1-2ac3aee27cba", + "dfc3923f-e54f-4b5c-9067-bc5a9025fd79", + "6055dabf-062c-409f-a1bc-b7e935a9e1f6", + "e0e54b74-78ca-4285-a103-56fa823e3920", + "752ab9e8-70e6-41a0-840a-28c66ae40028", + "d5358a00-b4c2-43a3-b5d0-ada94f88b0c9", + "c977aad7-5e6c-4c42-a45c-09f3fad16251", + "cb90aad8-62ba-444f-83da-04ae1e773b35", + "4c3ee7f2-b929-437c-ac39-b553c971b139", + "4ce02a1a-d3c2-4758-a919-f6455aaf6797", + "0b83b7a9-e0f2-4770-90b2-f50cb8a3388d", + "8d1cc688-accc-4473-947e-d478100db620", + "d0c93583-64a9-41a7-87db-2310b6f7a883", + "84044477-7455-4537-be7a-d842eef7db5d", + "632a222c-9737-4972-8d5b-01ef3fa3a977", + "49bdbff5-91e5-422a-80be-cc52f14893ab", + "2efb0c37-9ccd-48ec-90bf-e8b90a2beece", + "905dedc4-7d66-470e-9c2a-a7a8ea5d883b", + "42febf2f-d04c-4268-976d-7666f8d91746", + "f43738bb-665b-43c6-93d6-3268c7e50075", + "2189a3e2-0b36-4dd8-9443-d19e47ae1bb7", + "73e61f4f-f0d7-4707-b04b-ae0111fe39b9", + "fdf7b4a7-0ae2-460a-9a11-5a1be11d0232", + "e7b9224f-375e-4fca-8ed7-61247509110a", + "1a91ed13-58c1-4e58-a189-8e18a8b8329d", + "cca48f60-7892-4752-8f4d-0617be6e42cb", + "4f6f4202-da87-4791-801d-3793cbe41616", + "8a294bc1-b504-40f3-8e13-31b8c60c4485", + "45835dd6-397e-47a6-a860-3fdbdfa37b35", + "d650deef-3430-47b2-bd40-22ef1de7bbd8", + "7e0eb262-bac2-4425-93c3-ee1979cfe6fc", + "721bd6ea-dbe4-4377-8c07-663a53778632", + "989d1a03-3aa4-48cf-8edb-bf6c7cc60df9", + "cfa55027-3c4d-4a16-8079-859dc8a465d4", + "43143907-ad1f-4be3-8be7-6954244f7fe7", + "688d387f-ade4-4dd2-97f7-9e1de48ccdab", + "9f32bdc1-f17c-449f-a4cd-77d36fd69627", + "3653fe6f-af07-430f-a474-a35d69e79b34", + "a176f456-6926-4c20-a3c5-2b63b46ce81d", + "36eadd80-f9d9-4b1a-b5df-d516f2f6f52c", + "74da1579-ba3d-4291-b723-32bfebe4a5a9", + "5d695c3b-451e-4914-a8ee-5de398a5e67d", + "a6da6581-5c61-4b15-af17-ea8ccdd6eb04", + "901f59b3-367e-439f-8af5-7168ecbfa863", + "51e4d999-7af6-405b-bfc9-9a0ba516f6d0", + "bf3bc9a7-d11d-4a9a-9e4b-16ac2169e9fd", + 
"9a4c9439-61ef-4c84-899d-f39bc950c0c5", + "4c603eba-a6e7-4754-a119-e3b3fecb0821", + "0615873c-66ff-46cc-896c-025ce028414b", + "814f6676-648e-4d65-9430-d0dd80d9af1e", + "c2d2a846-ac62-4ead-9b49-165a8150a067", + "1cecd4ce-131a-4bff-9e4c-044eb496760c", + "ec895832-84c5-47cf-aef9-11436fe3051f", + "f7e862ec-f549-4718-a56b-d1b7170ad668", + "2f99081b-df73-4974-89a5-d98e33246e42", + "a850a7eb-fa27-45bf-bc9c-67737bb0c117", + "a10597ae-5d13-47b4-b211-811bf76c41b3", + "aff50426-203c-4b16-b97c-f9c1b5aad3f9", + "9b877f26-fa71-4f9a-8531-eda199c88802", + "3dc61022-0209-49c5-9b77-3f40cd043ff5", + "1f88d82e-b471-445c-9b16-b9b8e58fbea2", + "515232b8-45e0-403f-9ba7-df27ba19b4fd", + "c655f6a7-7b07-4cf3-b674-ec72529943ca", + "5ecc3c9d-c033-4d70-bee2-bd8594c83662", + "ccfb339c-9e0f-4e34-ae99-baf740fd0215", + "07ea9119-f28a-4447-bb38-5f85409dc67e", + "ca102f6b-73cb-40e4-8a0d-422168b5ff76", + "8a032d8a-e1cf-4434-a1c0-be4204372485", + "6d637352-3548-42cc-ac43-5e15312543e1", + "77939425-996f-40fa-8a4d-9abbfad76a78", + "58aa885a-b9d3-44ae-a4b4-6d963b0e087d", + "7898938e-73fc-488e-b350-623b69cd6da7", + "d9bbaebb-65f9-43cd-ba09-d5f33c50c825", + "d60b5b72-ea31-42c9-b49c-011cd737e1eb", + "9f772f61-db17-4106-8855-dc3039c3e2f5", + "21ab0e35-f223-4177-a552-3f8d68750044", + "db280918-0812-466c-9370-c5b1ad6a41cf", + "04ba3efc-e6a2-4d8e-9b64-16262e0a75a7", + "7988e08c-fccd-4f5d-98e0-564bca76284e", + "0ac2a2fa-c048-4cde-b453-9a6d456085a1", + "8bf47143-ae0d-4bb7-a14d-c89665ffa942", + "3efdda7b-adb6-46a1-addf-2959fecea0f3", + "fea95179-a8b0-4780-a4a4-28e2f0cb159d", + "d20e93c8-29a3-49c7-a98d-433c355d3951", + "21a2daf3-9a2c-48df-8f87-3531ca4d49c7", + "ccbfc8f0-e2b9-47c9-8893-e34d78a79006", + "a90f9c51-eb0b-41fa-bd18-5a414dd8091c", + "1ca963e8-7f3e-40ca-af14-717511560ba1", + "9b96a8a6-553b-4eec-b153-705e6813ff92", + "0014aa65-7b48-4a13-b640-45e9049d0b2f", + "2ebb6bcb-e241-4979-8825-df0b3feb0307", + "2061da62-728f-4fab-9401-77c52eb302d0", + "733f4cbe-af39-4ccc-8805-432818ca5c39", + 
"8664fe78-be22-469a-97f1-f5538c3de865", + "8ed8e2d6-d81b-476e-a294-2aeea1214d8c", + "e3332c74-7a1c-4d98-9f67-d83b61704e6c", + "91ee624a-602f-4b49-9052-d8d0bb4865c3", + "fe9b73ca-20c8-429e-bcb1-1db4e6dec7c5", + "719f1f5d-e3d4-401b-9562-d3b5678c1f3c", + "fc293b71-4291-449e-8dfa-44a4f36aafc9", + "4a614cdb-8c62-433a-b1c8-dcfc3df1f834", + "3835f842-0273-4e54-ae48-4ce55fd09613", + "4ccf39f1-abe8-42eb-9174-8808cc0da53b", + "c1b4a5b3-f8af-45cf-8a39-5aeb5f7854cd", + "262bd1d8-2643-4abd-a5ac-3d1432b59cf9", + "ef44f569-c74a-40fb-aff5-13dfba1517bb", + "c1055ecf-6637-44d1-a45c-025dbc60f364", + "21dcf033-4ed5-4633-9d35-f469452cfdcf", + "e916f785-4722-454a-8efb-37efaa743261", + "7a30e544-2180-43b7-b625-1970b897d930", + "11d62fd3-3e91-4a0c-91e9-91f04fe30738", + "9f97d795-7a11-412f-884b-3bedf3da7936", + "79fa283e-3b1c-4882-81c4-50791b0eb8fe", + "ad24de25-6285-487b-82f3-1dea37dfd494", + "30f42a00-ac1c-4324-be37-5d32f2284625", + "783b6bf2-0f00-4a26-909a-923f06f83536", + "072d0c62-2763-4b9a-bda9-4df80b7ef699", + "ce789c1a-73c3-4ab5-89e9-05bd32426437", + "bd39d8fb-a94b-400f-b817-ac4f8ed776b5", + "2f1c0759-f8d7-4173-b1b9-aadb85bc3fd2", + "2eba10c0-9b82-4d53-addb-d3ca66db27da", + "b91022cf-a588-42d8-bb85-c530ffd45073", + "b4d28c02-6b83-41c3-ab74-9bb2f271c293", + "36cf5f8e-40cf-4493-9b83-09c273890b35", + "89d0f118-eec7-4de4-97a5-a07550694ce8", + "2dc482ed-59c9-424d-9d57-921f1b73f3df", + "446bd2b9-713e-4c46-b3a3-980f017178c6", + "75e742ae-7955-4d17-8306-7f3e6ac7ad66", + "3191e0c1-b8f1-4a32-bdae-29df1e9f5891", + "6ea9f9ed-acac-4c93-8fba-9caa6dfd4f31", + "d963a6a6-8134-475f-9562-866fd2cb801d", + "6eefca3f-4e20-442a-9d80-420d8525f642", + "eb41275b-1ec6-49d1-92fd-93e7cdda3fd2", + "fd423291-1476-463a-ae80-4a0038a98386", + "0c95f564-0d25-412a-a6a4-aa01cfd3e921", + "bea96a57-c54c-4edb-af97-9d4b48100e23", + "43d24143-3995-472c-bd87-752d4d580f25", + "f0f7014b-d2a7-4059-856e-0047e2c112a3", + "96330043-1b37-4720-8000-fca84d0e01b7", + "725d6145-4893-4def-8028-9616acecdf76", + 
"9a099683-5fcb-46e2-9da5-dc367d5641e6", + "c5f2852c-3a75-4e39-9c89-f6383f521492", + "0cbbefd6-4e2e-468b-ad05-eb0751645211", + "45ddbc38-617b-49f6-b3f6-0e86657b8ac8", + "221b4ea5-9e99-4486-a8bd-ae6cf7335aa6", + "e6036532-05ea-47bd-9545-3884879869ab", + "f6ff0708-d86d-4b13-97a8-f32abf4a451c", + "c2d9e823-e74b-4fa5-8524-283c942e9b11", + "51b4635b-a8ab-406d-8249-7c25057eb9ae", + "bd5316e7-69e9-469b-a66d-8f8aa570db0c", + "7df4ee6c-44fa-4e4b-8099-85e86fa29057", + "12fa585c-3953-4717-8689-f98f182fcabe", + "1e01f4e6-b4d7-4c71-8e80-b6fe9e45869d", + "b1ab56ae-8796-4429-aa6d-85de55aa44ee", + "459b21f3-3ba3-4ac9-8fef-5c1c196d6964", + "352dd9d0-3023-4a71-858a-44eb664d4d47", + "31624ea9-3162-44ca-b828-7da5cfddcf1e", + "36e3e6c9-df07-410d-85c2-77bb03541702", + "a92b9a4f-0cdd-4f9a-a3ce-c64d8b595205", + "9032d440-09e8-480d-b7f6-3b440a5af4ff", + "91322d7d-d5f2-4f8e-987f-ba410adb447a", + "8654b74c-3f4a-4390-87bb-212eece9f8db", + "4afd9f00-eb86-4711-b88c-2bbff4b8f2c6", + "5b178740-d04c-42cc-bcc6-8dfd3737c081", + "259bc9cc-b043-4747-a2b2-6af324493d59", + "2b1c1835-d13a-4861-8a85-ef1928e2d491", + "481181a0-e0a1-48b0-8c0f-04766eba0c28", + "f8e3046e-e930-4f20-9c8b-b1ba4b60c1f4", + "4d7644a3-2abc-4ab8-a59c-5645a7a019fe", + "7b1a58a6-bd67-40ed-a959-bb4bab3bb90d", + "cf029dda-bf35-4254-b199-5f4623047990", + "02d4537e-8b8a-4e28-afc4-242b39e1dbcf", + "6d613d0d-db36-44b4-94eb-393a781eea20", + "9bd16669-0939-4c24-bc8d-79c1c10fc5f2", + "6fad0986-dd47-403b-a83c-d305ae946f77", + "191d603d-80fe-46db-81dc-9955a1c2f050", + "adc7bb35-fbaf-4be7-971e-abb2b44a5545", + "e03909f2-14cf-4ba0-a920-4aa15ddcb252", + "67083172-6691-40ef-b1ba-423693d86917", + "28bb05db-f3a2-4d83-94df-84682e5d1def", + "b1387d8b-a26c-47d3-a496-ffc1d1ddec52", + "055d5727-ebf4-4c3a-b61b-3b2899215e8a", + "c3e6f83e-7e88-42f0-8732-14175fdfddc7", + "c4bd1b14-8b71-4ed7-b63e-e9965db01071", + "73735987-1d3b-4f7d-b8c2-b53396963b6b", + "6c28c86c-e6cf-4216-bfcb-a38992d8d753", + "9ce604bb-5a84-487c-8174-ac8b54fc97cf", + 
"18d4d8b8-f6aa-406d-b87d-28aa822b6bd6", + "623ffd2e-69a4-454f-b3f8-0c4e92560c1a", + "47366fc0-d68d-4e06-ac73-af3a4a378f5b", + "051d7ff4-2d49-47fc-b2ca-0ed73d3de605", + "109fb00c-af6c-4027-9328-bae38f7b60aa", + "2a1593e1-1d76-4813-83e1-a5189175a2c0", + "dac1c209-84ab-43d4-8ad6-50bc4b0d7002", + "43a15e58-a77c-408d-95ad-4abe4facb849", + "baf606ac-35b0-498e-9408-56f3bd2a68a4", + "694d477a-3bdd-453d-910f-f2689ab4d31e", + "b2b120ee-a49d-4f3c-a878-b4590d3b3fbf", + "7a830d39-30a7-41da-9e3f-e927dd88eae4", + "a1afdb34-71ca-44c1-88c3-93153a33fdc5", + "8fa04e9c-50c0-405a-bec4-f5bbaf7c53d4", + "8c3c422e-f329-45a4-9295-8432c70bd857", + "bd24abc9-66c4-4f28-838a-6757a3d11b3e", + "17dcae3f-1a1f-4aa2-8755-8d73df02470f", + "3221b184-0672-403e-b6e9-06b7828d996d", + "eb9d56bc-8258-4991-98ed-5dd1a9915dd9", + "a3e40f5d-6716-4692-a1dd-a4d434cad0f7", + "f9e25c1b-4ab0-4916-9523-7eaf80e043b3", + "b6107150-114c-430c-bf5a-349d7e51c605", + "4055c4c1-21cc-494a-9d96-41f07a3a1465", + "de0d1b34-f1aa-4d7f-adfb-5c90f98d2d56", + "1736485e-9a62-47c8-a8e6-ddebc8fd00b1", + "c466e88a-838b-4e11-90ea-ad6c66ca534b", + "57659813-661f-46a7-8f33-7f9742f5294a", + "74177e93-756c-4d8b-8d9f-f16a53c57f0c", + "40acd034-4cbe-4a68-9dbc-f29efd9fcf35", + "b9e08133-6883-4c7c-9c4e-ab3b750a75ab", + "28488728-c10d-4bbc-9601-962e0ca8eaef", + "89022505-4a75-4c16-915c-34f529adcf18", + "a9e8e875-4b3e-497f-b615-e7c1714e188b", + "1373c9d2-830d-4f2d-8066-772a5d36557f", + "9ea9bec0-7395-4bf4-97c1-94869432732f", + "9a86df28-4d9f-4c08-a9ad-42c5f12d5542", + "35a02bae-654e-43dd-a58d-dd06c002a94e", + "d6292bfd-ab4d-4bf2-b434-62901ac45779", + "a5bf8376-58e5-45eb-ab22-a147b96cccb9", + "85803be1-d861-4945-acd3-4855f687f573", + "5bbc001a-e384-4e5e-a351-23ca2b6a8ace", + "c60e43a9-2b69-4bd5-91d1-1eee13a21258", + "e25bcea2-866a-4848-9d97-436fb35d6dc4", + "214804ef-95dc-49cc-84dc-0f3fb45d7945", + "313782ee-d490-4774-b5b0-b8d551b80f3a", + "742759e0-eae4-4b3c-8b8e-e68c54228eaa", + "178eefa9-ec47-4c68-892c-0cacf47c44b6", + 
"4d67039a-ac87-483b-9082-cbba9c3effea", + "1bee9feb-c324-4202-bac3-c9b2d443f2ee", + "008fe623-6fe3-4f78-afa9-375f15e1d80f", + "59880e07-bf1d-4dfe-81bf-cbef23582243", + "4d07dd57-722c-4b58-9cb6-c0c79f87c94b", + "5cdb5ab8-ef0b-4f52-a7f5-4cb9be41d1fb", + "d04a78e9-7698-4de0-b430-ed22c2d18724", + "5834dc18-9d0f-42ff-9646-75480f79bb4d", + "0bdf910b-fc36-4dd9-9e10-ebeedcceb401", + "e69e734b-b056-4b40-b199-eb002fbd08b7", + "534e715b-8689-44b3-afc4-f818b7857e30", + "9f99ae67-5af5-4427-bb16-7f894603443f", + "55993fb9-54d7-4e2b-87b4-290dbd421626", + "73ad1775-99e2-4edf-bb99-e8b9abfa0fa4", + "ae8f83b8-1aa6-4989-b9a3-d51685c2ae45", + "bb96091f-39f1-42e9-8cec-9f622884038e", + "772f46db-31a4-4f45-b98a-1bae116127b9", + "c3c61cb0-cc23-4315-801b-c0335563e051", + "0cd38987-3526-4d8e-a10b-f4bff00b8d1b", + "90ca72f3-37be-4542-9c9e-edaa2b9ee4cf", + "91fabe49-fa18-4a4b-9d03-4fcb06977d90", + "499b2589-0da9-44a1-86f5-981acb764ce7", + "eeb771bc-f79a-4972-9ddf-781f7c74d333", + "886ee490-354d-479d-9de0-9442402d0442", + "39f64337-d77d-4282-a17e-7ab468bbb543", + "93ec56eb-e104-4716-97a1-a1d2cf2ce0cf", + "8ca184a6-0712-4a28-a760-6ab7607c60e3", + "59ca22e7-1ec2-427c-9e49-6a267da35f18", + "b8bf7004-26f2-4888-9c86-6ff8aaefd250", + "feff209d-2962-4d9b-adb7-aecd524b5a5c", + "6bee0169-cebc-45e3-bed6-3ea69e238508", + "e823c039-66c1-47a7-9689-09d2a4708209", + "d0abdb2b-353b-4676-8a6b-f55301dd9440", + "2ab95d9a-2920-4353-994a-9695f5ebfe4e", + "56406a5a-b6a6-43ef-8c15-b3cf84faa90d", + "59c0cd7f-cc10-4a38-be5f-548c5ad12e0d", + "cc8ce7bf-1b3d-4fc1-b097-9bf557b1ce0c", + "99a67cd5-0a19-4de9-b6d5-2f7a9aabff3d", + "219d5b8e-5db8-410d-acba-e1233aa2fc85", + "da02b471-2f06-45d8-a77a-87f777312ac6", + "e14e2f7a-61d4-405b-bc1e-039b78d1b7b9", + "2991f12a-d418-444a-9cce-b218ca80fe78", + "a8f677c1-7a43-4e7b-9ca9-c1805dd77c9f", + "d3a4b496-4570-4947-bf50-219b2667369c", + "98903d03-7bc6-4809-82a5-a4c1d1b49c7e", + "5fbb7f40-ee87-45a9-8735-97396c3655e5", + "6670b72a-71d3-45ff-97a6-0835d354eb41", + 
"db49ce69-01f5-4bb2-96c7-7107243d6c84", + "996e0beb-4095-4b3e-9044-697b6e6194a0", + "fccb0d96-8d85-42c3-a11e-e44a7a77289c", + "59268dc6-1cbb-44d0-b9bb-d6fe64eae90b", + "1954eb04-50c2-49a3-a490-e9009a57e0bf", + "32677126-141c-429c-8806-878f1d348c6c", + "77d06f8b-3f2b-4d8c-afa9-5139271283f8", + "73f4ea24-9e67-473a-b540-45083d68f8b9", + "15c535a4-e459-4d1e-a8a0-5cbf82da2d07", + "b37464a1-9912-47ba-8234-a1f9383dd040", + "22d8176e-a14b-4043-8cc4-4b4822f31baa", + "bb974da3-9c63-4885-a1b0-4fdc9b94f388", + "bbc04318-33bd-4050-a8f8-6b264d5a150c", + "e73caef7-7ba5-49db-84b8-7f4c3b16c6e6", + "b4d14055-4a8b-4b89-b2c7-c36e176931af", + "eca1d085-285a-40d6-b89e-95d771d86dc9", + "3c7e9981-07bb-4dea-820b-c66ca55e677f", + "839f85fd-1ac7-40f1-9814-c31ca6c71ad5", + "566e82a0-f891-45ab-8a3b-3775e8da84d4", + "5d56bbff-092a-43ad-a46d-fcf5728319a8", + "f21956e5-ee1d-448f-aa57-72619b994404", + "a7149386-3fbc-4c33-90a6-083acd2c88c5", + "ab579538-17c3-4bea-9ff1-9f1663077d8f", + "36828845-fb5a-421c-bb6e-b316b751c173", + "8505949e-594d-4145-a249-4af1811cb19b", + "edf2f738-d42c-45c8-bc51-7a123af0416f", + "b3e81396-5336-4e92-960b-fe797980f504", + "d1b0bf5a-9741-45d4-9fb8-614e5bd77611", + "3fa40600-4a11-4324-893d-8b1b4ccaa533", + "afa6b99f-40ed-4100-a464-c65b1d19d6b5", + "3cc9285f-979a-4a40-94f5-3ed86f258a72", + "58460990-bbae-4343-b7a2-0bdde48f7602", + "2aea985c-5151-4223-908a-3109d5eb9ac2", + "142ac751-95b8-4929-9b39-4ca0242524ed", + "1f12dc9c-0a2a-4cf5-b247-3af1503b1ef3", + "c13d4951-45ea-4fbc-ba38-21d21c5b5e7b", + "d948766e-f013-4a97-8c3b-9e7b6081c157", + "e56c21e0-0f4d-4af6-8dbc-8d30b93c0eeb", + "0a6b2abe-2f06-4b15-9b83-45e2b662ebe1", + "96a9b7bd-b49b-4db6-be38-b5fc4d4f1f07", + "bce702ee-799d-4669-9574-526255a6de90", + "25c09a93-abc8-4cfa-aa59-a8f3e948c7a2", + "bae35e52-a666-466b-8388-0b8192875a3a", + "c1810a43-6440-4d17-9b59-5cba648c388b", + "60902c4e-45c0-4064-b91c-a06c34b451e5", + "09a38330-ca4e-4252-b74c-4ef8895c91db", + "381b0c62-e8c7-498e-871d-292e29ffdb65", + 
"a604d5e2-4b1c-45a2-8f1c-46079858dbf8", + "ec865a0c-ae18-4bb6-9cf3-1904d1f21650", + "f4aab81f-4c4d-40ac-985c-7c597f0d5b43", + "5d9c96ba-0700-4d80-b61d-ffbf5969e038", + "9c8b7bbc-b3ee-4957-836e-ccfa6aa5ee02", + "89017e9f-ae27-4c63-aa77-4bd88cdb6ea1", + "36baf83c-3ef4-4eca-89c6-1e367d1d2f6b", + "b83c9b92-99f9-423d-88bf-27395f841423", + "36e013ed-3361-43c5-abd7-32f9b382701c", + "bf528d9d-0c0f-4db9-a99d-c13fe8c62046", + "4d7808c3-e33b-4790-98d9-e5a50fa8249d", + "b64cdb47-ef30-4a43-87f0-3e9c09c28ae3", + "32c3f945-f8db-4a1f-9b4f-4bd7113ff910", + "c00e2754-bbc6-49f5-8f8b-a4ed7c80ea72", + "d92a36cb-633f-44b2-a93f-f9afd756b7f2", + "b75b42b4-5ae6-4494-bf21-d0a29e1e5e0f", + "fc970642-c526-40f7-b470-f0b864dcc898", + "a962e8e4-59cc-4c68-9c74-0d6a96869bd6", + "f0536d17-a7f0-419b-a188-566093215618", + "faab74a8-130d-454e-b007-67bef2bd3734", + "c7a622a8-e748-4f7f-a52a-4ca277d1beff", + "bc5bbb24-bd01-498e-a626-01ff910fa4fa", + "821a96ae-8e7c-470a-ad04-d293596d71a4", + "89624801-fcad-494d-94d7-7bb379998702", + "f0a0430f-d7c7-4a5c-a970-9de4986a5830", + "216531ff-6b71-41db-9bec-84c5e3cfdf81", + "0dd11648-c5e3-4651-80fb-fb62ce0239d9", + "e262fe34-8adf-47fd-b551-e123f84ed0fd", + "6adc1aa8-980e-4eb4-a4bb-2ee0d6073865", + "1fa10d29-83b8-4af1-98d2-30100d02e0a9", + "22211e41-bf65-41c4-b4a0-6b6f5a7cfe1d", + "6406c82b-cd6c-42f7-af93-34b87b77210b", + "39382e2c-96dc-4b9c-8df3-01878495e3e3", + "86a20876-7f5f-45b6-9d40-62eb3edfff6e", + "676d2324-3f68-4718-89a8-3822acadea94", + "26c6bde8-0437-49bf-ada6-a774c28cf886", + "87dc28f9-1e49-438d-bc87-8b2c918bd270", + "2bc92600-500d-4a04-8ddd-67b51e0ac080", + "e82f01c3-b4a7-4aae-8986-8a61aedf9b22", + "09f95c5e-fa2c-4b33-a05f-c7c6eefb4a67", + "9ba939f4-d977-4d01-ba6d-921be2504855", + "dd1fb4d3-4057-470c-90c2-a16fe5982d01", + "2317b2c0-49d1-4e36-b371-0574233ff3b5", + "5e98621d-669c-4384-8826-880f67fbf8b0", + "7c848c8d-2ed5-4b17-a014-95e1018d4646", + "42a08f6b-2b17-451b-bec1-28c7508c654a", + "7607b332-e175-40ed-81ab-297aea2876a0", + 
"4d978a22-6d98-43ce-a6c0-a6bc506c87fb", + "68e9b5f4-74d5-4958-98d2-9fef7a6a7556", + "ed823f8b-c186-4056-b70a-5a421a737087", + "f597f48a-742e-48c1-b604-9a6780b6b50f", + "243e30d8-324a-44ce-ac00-97d3e1f08968", + "1acae5a9-ee7e-4fe2-8f6e-914cbe0cbaad", + "f8dbff8d-07a8-4ee9-a892-c1e1bbac5639", + "4b60aa82-57c1-435f-b77c-d8807c2e611c", + "d608f5c5-132e-42cd-b732-a73ff2b1e71e", + "9d18c527-0e74-41c5-9abe-e0311f7324af", + "fd8f0ba1-ceae-4fbb-9bee-ccee2a5ef46e", + "8ad461ef-443c-42fd-83c1-4c20d0cade73", + "516cc29e-e529-4759-b6ab-4cc6cab61eac", + "34a6d138-ebce-4865-8de3-63c3d7fc0ecd", + "79b4c6cd-d839-435a-ab31-f2a14356163f", + "f5f41a55-e50c-4272-ad13-ff65150e8efe", + "c5228ff2-955c-420c-85f4-729dbce02893", + "a8e2ca19-a248-4db9-b8ea-18ccbcdfe6dc", + "a1b87055-611f-4ba0-b4ea-eeff301cc616", + "7e4fc071-1888-4453-a6d3-9e2f9a63aa17", + "a80b04a3-e194-4970-9a0f-1e21b082424a", + "4bdaf4f3-1738-4974-97e4-77416f028701", + "2481b03b-4279-4348-a04a-5bc9a40326c0", + "8a69e274-75b7-4439-a57a-aab0f8d62b0c", + "e39c0c92-f311-45a4-ad13-4ac277b67ae0", + "c10a4466-913a-4f57-9786-ccc6844d5290", + "f12bd229-c6c7-4626-b137-818498bb7329", + "d13ad293-753c-48f8-8ded-4cae238361ac", + "89e48316-04d7-4050-bf49-02e4a3473b90", + "6fad1874-32f9-48a7-9bef-8404fb7206e3", + "198e64bc-f494-4aeb-b61a-126433905d49", + "5f973f4a-0020-4fc0-99e3-3110c0735b7b", + "5fe69813-4648-4a5b-b48d-4352b366660b", + "c3b165e1-92ab-4dcf-a966-4927cb313c53", + "1350f1ac-1af1-469f-80be-805bb546a667", + "3677c5f8-f92b-4f41-88ec-85ff2b2a84e9", + "9a5f5da0-5a6e-46c8-9bbb-b542fde462d0", + "b00f3ffa-ef7b-41c2-863f-628eef8d77e9", + "9594d310-6f79-47e7-9d36-886cc75793be", + "945af6e2-47d1-4110-9a0d-ae32336dba1c", + "ce28dcd5-e1ee-4c1b-a0d5-f3f3c877e492", + "c41e066c-70a7-4ff9-b3ce-4da89af76d1c", + "245c6d07-27f5-47ef-88af-8514129f5134", + "c298bf44-a6c8-4780-ba32-f562da1dccfc", + "4bfba513-d82e-4b5a-8d5c-3ad224a6692c", + "b9fdf5f9-44c7-4a79-b176-6e31a41c2a9f", + "de051cd9-d7bb-4785-9956-2ab97768d9b8", + 
"cc02912a-456b-48ad-8535-77a94ce218a4", + "46e6b4c5-a412-45bc-b27f-6453362d8015", + "abaacd8b-c7f7-47f8-815d-68d4ea0eab10", + "7e10bcd9-3af4-4c42-914e-700b1a1f0d75", + "c0260131-bdc8-47bf-827d-bd0796b8edc1", + "91cc990c-d680-44ca-863c-b5fe5b73e366", + "551a4a24-1507-4338-9f46-1fb171c9526a", + "16c07384-c0b7-487b-a5e3-e67030353eb1", + "37ba6871-5dcd-46ff-b449-c8e05d58976c", + "82e6ef63-a9f5-473a-8138-5fdcb8eb10b0", + "cdebda5d-d41b-4983-9822-3ffff133e9cb", + "17edc103-a2f3-436f-9a70-6b235f629817", + "68e50956-abdd-45bd-a504-1c1619b349bc", + "626890c9-fab8-4925-8be0-a037e33130c0", + "a9f69f17-69c9-42e4-b33f-463e67467e73", + "c57ca5e4-bcd3-4db7-be17-ba226465907a", + "2fcca3bc-ce33-4640-81d1-0901bdb2d985", + "6bbed77d-5098-41c0-8d52-2ad8f5cbd01b", + "c935d70c-f38a-4c8c-b45f-fd98706601e7", + "9ecd7886-3f19-45a1-b129-039892f1ac06", + "61a1f12b-bbcd-475c-8520-faaff11da601", + "470e9427-3328-4034-8495-5b4fcd4ea778", + "d31a1890-92dd-47fd-9187-b313c27e4ee7", + "08e0c2d3-4c7d-4aa8-b439-4ee752ffe045", + "0664886f-2b9f-4287-b4b6-b7f242a80321", + "87764c1d-9f1d-4f4c-a75b-dd81763302db", + "f4986ff8-6575-4f72-8434-d5ace8be953b", + "641b2f61-8d7a-4430-9b40-7d9fc5dd0fad", + "684aa3df-c121-4cfd-b803-77f3425cd851", + "b3888a8c-ab96-4fa0-95dc-0f803cdfcb6c", + "4acaf601-8b27-4d95-be66-06944e8e7ae0", + "d98d3e16-af8f-472a-a1a2-5014724e2b97", + "c00e5086-0192-4434-8f3f-88fd6e321bf6", + "d8f3c168-3ef6-4d0f-b683-9366314b9ff0", + "19ac992c-a219-4e7f-8f05-ba1f5daaed7b", + "b16498c3-ed97-4f90-947b-ad5c37675fb2", + "769c7d5e-0331-4f65-a3d2-2db3d29bb89f", + "f18d778e-1a9b-4f34-8663-a68be55aa65a", + "8eea4c6a-3a39-48a7-bb9f-898e37505aa6", + "9de4ffc4-a4dd-4fd7-9743-d15b7257166e", + "1fdefd05-e86d-4c8f-9c44-6bfa395dce94", + "6f112a35-fd7c-4bab-af7f-9e8d24cdf53f", + "5075468b-8a48-4e29-98f3-729a2a544084", + "0fe556d2-a0c2-4aab-a29f-2eff2a2d80ed", + "3c9798e8-18b7-48e8-89fd-a302c7cb1617", + "21da31f1-1f16-4f36-b26d-aecfd48095c3", + "14a1ff92-1f50-4925-8aec-4cf256af491f", + 
"ceb5fc80-b154-4199-907a-ce667efea85c", + "ec707a8e-cee5-4751-86e3-1165495d4e62", + "7c0b97c2-708b-4cf1-bd70-01354aac8243", + "5a5451ad-2da9-4193-82b3-d7b35036674c", + "8529aec9-a99c-4a7e-aab1-d150a694b83a", + "632dedd2-d16c-4995-b8f2-5f67495e82e1", + "5b9bd3c9-c71a-42bd-9155-0d4b618be49c", + "e193d643-abbb-420e-90f9-fb0b13f68811", + "d8489b00-c71a-4fae-a3d7-97ae432cc948", + "0ded4f4d-87b8-4c5c-971c-40dd90c22d2d", + "3ef9c947-cb95-4817-be7f-436dccd3db47", + "37b2a20b-cc24-4ad0-80e0-c7f0d76fb99a", + "c5a34de3-3f23-477e-bb5c-14134be2be7c", + "c5185848-5864-4330-abf2-a7908e19f10d", + "987f6437-1278-48f2-89bc-e298b9d235fc", + "37dbf1ad-b4a1-4a8e-bd50-ca7fa38fecb9", + "fb3944af-4db5-4ad4-839c-dbc57aaeca76", + "67669a39-dd8e-4bf5-b7e7-ac8bf8d80613", + "ed84aa15-1e49-48a5-b677-b734a139214e", + "7aa1a6d6-85bb-47cf-a531-9d047a79404c", + "3bf403b9-a7d9-413f-9baa-c768b881b589", + "d05bf6d9-2747-4ac2-b449-31083b5c0439", + "c6523f61-1718-445b-85ef-f00abc60a873", + "262b007b-e65b-46b1-a18b-13a9bcb9a63c", + "3c50ab6f-cca4-4809-8b6a-82ae1cf5438a", + "3453a9a6-72ab-4ac1-8471-a9634138c243", + "15d63b2b-74aa-4d1a-b4e7-20707319f618", + "7038c83e-d521-41b3-9efd-b9f706e47048", + "03d42620-511f-4909-8432-17444ab73bfa", + "dc069e7c-d145-4f8b-a33a-355e6b945534", + "40272703-214b-48da-854b-f09eb28e4bee", + "0dd06aa3-555f-4186-86c0-bd74f03838f3", + "7b500f2e-96e5-4fc4-b858-28ed97aeb805", + "0e3607b9-d879-445e-ae35-1134f254e703", + "6581392c-b829-4212-91d2-06ada75f7355", + "b33911b8-1d30-4423-9a5f-5b947f1ad661", + "1367c291-3657-4f76-a52f-30eea16a6ac4", + "5de060d4-45cb-4d53-8ee6-b1bd3fa7f5f1", + "77f98200-7ead-4bab-add0-b7636446794c", + "d8943ec1-cc23-4a8e-9f6a-f2c1626353b4", + "6a7ea4a5-890f-419b-a965-51bbc1a205e7", + "9376143c-cc64-4c57-8855-787209bf7039", + "3956dd56-a195-40ab-b946-9f601f35b142", + "496f5ab8-ce25-499e-91a8-312ef393a2ec", + "fbb43205-3357-4c33-8489-839f378f2345", + "6f748d35-78fb-4f6f-b497-054af6148981", + "e3440020-0abf-4b3a-898a-4ddba33bcd3d", + 
"a0572d32-5bd5-46a8-92f5-b372f29c8ae0", + "710ea5ee-24c7-451e-a2e7-8b50fb520f62", + "411a88f1-e21e-4028-acdb-daf7062d2883", + "b9ba619f-599c-4f38-a492-9092d2f2c1d2", + "9e51b014-2b4b-40c0-a512-76a125cc1669", + "d20bf2c3-4484-4b7f-9a20-299fba159e6c", + "53b48b76-353f-42ed-8372-a8b99ba9e307", + "770add49-1909-4a0b-a13e-2f664dc7a9b9", + "92dd80a3-5899-494e-a765-970cf7bba96e", + "d9c10e74-5257-4196-ab9f-ce8df0b1bda8", + "ddd66ef9-9695-444f-923a-b8b2cc55dba6", + "f3623651-6fb3-46f1-bc4c-b93ab05ae6f8", + "2ac08338-20d1-41b3-9386-cdc23920de36", + "26397127-df6d-407a-8e42-2b38df2b5f43", + "834df16c-7392-46b3-b015-2531da0c8782", + "7a9e8493-d317-44dd-9a7d-4da8b6e612b6", + "d1358979-54e8-4149-9d77-f2585e0fd578", + "4e39088e-54aa-4709-a383-5d9044222cec", + "66f92af9-7ce9-4547-8d89-d63ca8d035a7", + "c572f356-f203-4392-a195-2f9e64a27240", + "c3337342-e659-4bfa-8fe7-a26cd4abb671", + "2e093aa8-530e-4208-be17-9ac368ec7c29", + "4feb0460-cfdc-431d-9363-c5d4262c834f", + "2027de77-bab0-4c85-82ab-5d19c48fc85f", + "9eabf4dd-9941-4a39-b750-17cf35a2895a", + "d6808101-1cb3-4efb-895c-6d3f5c3545c6", + "3eaafbbc-3613-4bb6-bad0-26434e220265", + "26e13cad-6b91-49ae-84d4-0716a4b9d3f2", + "dbfdd960-66dc-4954-9805-7449a25dd899", + "32a8b533-801f-4056-9b5e-f125db9d79dc", + "df58b9ad-458e-4c10-9678-cef4170e6686", + "a9e5839b-e5aa-4a62-ad9d-f264af8f7f96", + "af7db5e3-7f23-417f-9dd6-6a61c680f827", + "9449cdaa-fd92-4557-9d15-ab544904b092", + "1435c5d1-d11a-429e-83a5-21695c856da6", + "a630ea73-f667-495c-a939-8ec027d22776", + "b9e11947-b783-435f-8b75-5cba55d335c6", + "3ccd912d-b471-492e-8087-5e305ab0c942", + "e14662d1-390e-44e2-b11e-ea46d45472d6", + "3a9909f4-3300-4063-84f4-6bbc4bf00fc0", + "6a83099f-5106-400d-a432-76a27effe4c6", + "d0249ae1-2b4c-4e15-8a4e-57d17498d6f1", + "a877be04-a422-457c-89c8-e7a9c7470100", + "b7e45571-7083-4039-9955-01b3272937c8", + "3e59369c-a3cb-430e-bd35-4a993fdab6f6", + "749d2094-14f4-4e73-bccd-80a640196c49", + "2d18c07a-7672-479a-800d-547d5ef9cff1", + 
"91509adc-e5fc-444a-9068-b8bec4a33845", + "7accbfd5-565c-403b-92bd-2df00505fb33", + "78c1764f-1961-4f65-affe-6c48c7d0d723", + "a6fa015b-432e-46f3-a269-9cecc07e9095", + "c297cbd4-e635-4c5f-992c-e6455fb13480", + "174520e9-8de1-4b73-b3be-0defca19cf5f", + "94719484-31bb-49a2-8409-26c0dfeeb495", + "299ee32f-c9ec-412c-b1af-e1dc3baaabf8", + "58e0d77f-933f-4932-a334-20113d0e51d4", + "643d1a45-7f94-4d73-bc19-f2f421bcaaa2", + "288bf621-905a-41c8-9cad-a93153e4870f", + "1c07e864-11a4-4e89-b2f5-539faca08025", + "472c661a-2226-4051-96cb-62a6f726dac9", + "7765fdd0-ace7-433c-ac4d-3a71e48a8980", + "2ff4c806-d9a7-481a-ad30-5a90c27ea438", + "aba95267-b99a-42db-bb19-7f7bfdcd6815", + "d561e420-a38c-48cd-9efd-90ead0c3c9f8", + "8981802b-db94-4d00-8af4-2f93757689d2", + "c656b350-1617-4141-8f02-f50eadb492ba", + "965cf242-63ad-4312-a93b-519b16d641d8", + "1fdb6fbe-895f-4b97-a0c3-c526b58e7782", + "ab63da20-c3c1-488c-b202-064c4061add4", + "87128eb0-b69c-4655-8f23-f8b0b9a8f8ee", + "a56403f5-13dc-4325-a7fb-4fe69cf67549", + "ce2e4c19-1323-44f3-868d-0f8e99afc82c", + "41ab2f12-067d-47e1-a484-85be80e8c151", + "4fdf575f-3e7d-4e16-a6f1-104d1312123b", + "989f4572-9a5d-4380-967c-f1d53f6205e5", + "33539712-677a-4430-b2f6-a39e97dde0ad", + "09b43108-2336-437e-b609-80c299bdc2fe", + "52f575c0-38f5-4b3b-9824-45cc4dfccbc9", + "85767927-9cbf-42b2-9afb-e95dc3438994", + "29400d7f-e0bc-4540-b3b8-ec667839f115", + "a25520db-910b-4bc1-b704-006c29c19885", + "85ffb3d9-da85-4a0b-8ad7-bfe177b54478", + "fbae7b41-7983-43f0-8684-4718d9e6fd18", + "5fd2c60e-9b4a-436e-a2fb-5b03e9b85d57", + "e9c65967-8bd1-4ade-8da7-7b551ae70b70", + "2132b66e-0e40-44f6-abb2-1a19c2ad8615", + "3afb7575-0548-4c6b-a0d5-737efcaee222", + "1097bdfa-198b-4ada-b062-616958b670b2", + "8c771d42-c838-489b-bd9e-841403961c75", + "886b49c9-6255-4284-a1db-10b673a339df", + "0e83aa3c-76d3-4a17-a752-48896b34cec5", + "c5e5376b-3b19-4719-80ba-861c6ec121c1", + "58c47cce-c0be-4c81-854f-6a4caf9bfcec", + "5a1c0f3b-ccc8-43fc-b016-51b14c28731b", + 
"e01455df-c2c4-4df8-b235-2ea623104f71", + "dc5c50ac-3e88-448e-bceb-cd929785bcba", + "ade907ac-e4d7-4571-8ff6-e7792b4dd351", + "435ac79b-2415-4409-94e1-466129d02a9e", + "ed07e31f-0ecc-4c52-ac8d-d7a57cea1f48", + "9eb1bc9f-fb6c-4add-9015-2b29ff482eb0", + "ab2ee6bc-e372-43f3-89e0-4becde4f1d5c", + "6ae83cb5-7298-4eb3-ba23-58f1b92ac574", + "0976991a-1a81-4cf8-a7e0-9ae2a97d0089", + "bc3e56e5-9293-4c8d-8683-0adddba21661", + "ffdf5432-1893-4511-9622-300826e3660e", + "bdf2ebd0-9d75-4d19-8acc-3f22fe79627d", + "423d21fd-1686-43b7-92b3-afcfdbc217d0", + "862cdeb1-811a-42b8-94da-96252452eed1", + "1cecbc9f-9f9d-424b-808c-7df9192378dc", + "fa7e9c87-56f6-42b8-96e7-1d1908ed9435", + "6c7063d4-460a-4ea7-9c07-d5d26a9c821b", + "b719f7a3-5e96-480a-aaa3-8255a8788270", + "5f7cdbb5-ce87-4f46-95da-8ef26a9d73b3", + "ad5f4752-14a2-4099-9329-68b10f775370", + "816d8437-1f08-42df-a976-94d17059f215", + "469adaa6-a0b2-4972-95c7-e4d40e656cec", + "d29cdce0-4a31-44be-8b6d-c752604b1842", + "8a51b23e-0748-4354-a47d-8aa8a032cba3", + "08a41317-3d33-4fef-b22b-677f74c8afea", + "5f4a5f3f-492a-45cb-93a0-6a5c2aae49df", + "f7532b63-a301-4d00-be36-dbe780ff55d4", + "c4398ffd-1648-48ab-819f-2011ff862d56", + "698add28-ac1f-4de3-8166-91d76a6f4b24", + "92bc3966-20e7-44d8-b4ca-2657423dbc47", + "fe087937-5cd5-47d7-b4b9-ea7971a23b42", + "fdd1c526-b74a-4848-b168-09f05a21c96d", + "cb38c536-034e-435e-9ea3-f8810a093417", + "40628cae-5460-45fe-b743-9ccad27e7c3a", + "d388ef6a-780d-42a4-a919-6474fccfe0b5", + "11362481-1d78-403a-8728-e7f2e728839e", + "25a1eab5-200e-4d9e-b19c-36e43afb6518", + "6a41c15b-e300-472e-944b-76fe0974a2e7", + "a79fd15b-8ee9-475a-968e-c31695f78880", + "e56fba06-9f67-47de-8b17-21502e40271d", + "2e6ae463-196c-4918-9924-be460d0b8032", + "39b8540f-a663-4782-9d90-ef0b9f206a69", + "6cd03384-fe83-4c8a-9904-ce29a45f0b41", + "472dab1a-a227-4d35-ae67-de4e85a7524a", + "c56cfd0d-24da-4b37-8026-0ad9eb902c8f", + "38b09c9f-20fc-47ae-8ffa-6fa9124585c8", + "c7b85eeb-7b85-491e-a0b7-34d2f2164e50", + 
"6fd15005-a0d6-4a04-b187-71e017e282cb", + "1fbd5fea-a4d1-4b34-a819-c0c6161c0b33", + "71ca5532-1a73-4611-a3b0-fcdd4f6f68f7", + "525e1ddd-6753-4c63-b537-9711775978d0", + "93cfbccf-419d-4402-a7f9-072adbdd70ae", + "27fbcba5-e2af-4fe6-9e48-b057bb59b7f8", + "3ccd6863-5433-487e-8dae-9764a3ab5e6c", + "c42dac59-7aab-4068-b494-eb09cd999a7d", + "ac53d9cd-94ad-4792-bf01-8e55ea953dc7", + "040452a9-3bf3-4c73-ba03-353cca8f0080", + "c4a97084-cdc6-4dc6-af9a-84b00599e2ea", + "f20f1a88-c922-4541-8a11-e8f103facd8e", + "c7e2e196-8b3f-47b2-86f0-5a8661d3af80", + "ba30d870-607e-4b3b-80e0-bcdaf377ec70", + "2376d8e3-89fd-47cd-b33e-d4d17b06642e", + "bd3a3f90-9eeb-4225-8eae-98bca5db7da6", + "65c4c01c-3263-4ea0-9cff-817c3185a0f0", + "d00370ba-3480-40aa-8da3-faa99785241a", + "4d0788bd-5844-4019-b550-a4ec9a90f627", + "f637e092-efe0-4ee0-9693-ed354ef47e42", + "4fb661d1-7fe9-4e4e-b0ef-2803c50e4808", + "27bccfde-0892-41c3-810a-47fcdd48f525", + "084996db-3f52-4b86-a41d-981ecd2abe85", + "6788f4a5-cf0e-4015-8342-fb874483f80f", + "a8e87d37-6387-48de-8b1b-7d73594d6225", + "97e187d9-8b9b-4ac0-acdc-946f67b8c47a", + "f6911a79-6ef6-4bf3-a501-34baf531ddc2", + "a50894c6-ad39-4d6b-9df4-ebbfc5ec31bb", + "cb38ee76-d165-4550-ae9a-fda431203a62", + "ee2e0ddc-2b9b-419a-8b97-2d0ecc739afa", + "97f90789-8f8d-4b5d-bba3-82fd83e03c49", + "8d20e51d-9db7-447a-8d55-f87a73956fb9", + "23303ec9-3a24-4386-9c78-5940b4f1103b", + "e2dc9a97-d961-40fa-bb9a-fe4b9f031fa0", + "58b75077-9c08-47db-a49e-78a820c23fb2", + "6f50c05b-1d31-47d5-a27f-61248d678a56", + "81fb45c5-10d0-4321-a901-29fd7862761b", + "2e911453-17cc-4f5f-b299-d3bd39b153f0", + "7cc5c86e-392a-4a75-afa2-253e6d19c946", + "b1c180e8-eeb4-45f4-8c15-19333095aded", + "4a9070db-f0a4-4eeb-be05-41de11ab9907", + "77088400-b27b-4232-b46e-59e24458fd88", + "5e076cb2-216d-436e-9cac-87858696f80f", + "11f0e807-9628-4d2a-9016-bd79b30e5d19", + "2bf98b69-ba89-426b-b403-8209af68701c", + "c806d123-0d81-42e1-b568-b74f6f80fe16", + "8dabbdfb-fd7d-4df4-ab74-db81d88e4eaa", + 
"7008cfaf-2cd3-4f6d-a735-ec5442eaa785", + "d828ac73-eeec-4afb-9ddb-8763cba15a36", + "ae52d15f-b8e4-4d32-bd0b-312235d5920b", + "2400a133-c24e-4f5d-add4-6353e007ea4d", + "dde88def-bb86-4f7a-b586-5fee894fe1af", + "d9e42a8c-5851-40d0-b3a6-d46754665967", + "09b44cac-4486-469d-8fad-dd78ffbb678c", + "af48a69d-3786-46d7-9183-c6f27891a4ce", + "84371aad-c679-493d-8895-60f61d7690a7", + "99cd342a-39ea-4f9a-a4f8-362cfdb0ea57", + "f6e3e056-ce7a-49ce-9a37-6264300ac91a", + "9fb10fe3-5004-431e-bb4a-54c68df0543b", + "d6376843-2bf6-404d-8d88-bd9f9e010d5c", + "d9fa1f67-1159-4a7d-9a0f-bf109dcc8fa6", + "9c18e71a-49f6-4cb4-a3e9-b9dc436ff1c7", + "dde992e2-4229-41d4-bfc2-0c83ea8cd21a", + "a70079c6-07df-4182-b80c-66f7e6f2bb6a", + "b0cdfd33-b3db-4c83-bc3a-5b4809e6adeb", + "8caa324e-6769-4859-ae8f-6a688f1ca6c7", + "f88f0424-835c-4670-95b9-ce9a2d74f0be", + "524080d2-ac49-43e1-af39-9dc6e1a82768", + "0a92d04b-899d-42c2-bed0-7c7a652fb9c3", + "ef75ae6f-803c-4ad7-92f9-6f0136934fe5", + "e74a2d84-c14b-47cf-9734-d4eddf5ab7ac", + "76baf3d7-e76c-4289-a669-992adb2ea599", + "ad1bea9b-68ca-4027-acff-ff1bc62fc41d", + "fc60c1d7-e421-4a63-a590-48ed619ebf47", + "c52df4f0-a336-433d-94a3-288758704203", + "63f78108-031b-4d73-8755-c364a9834211", + "fab6499c-776e-49f9-8036-4b7a668abffe", + "089cd0f0-69b4-43fd-ba89-a51fe70e9c79", + "a2a20496-0f56-43dd-9536-ac6604f8dd35", + "5a5d1531-5862-48ba-998d-62856c4726b9", + "2b56a104-706c-490e-af43-e36f10d42642", + "3ed2f285-3a14-4838-b3af-bda843bbbdd9", + "3cc64b2a-ca75-4798-a0ea-14e66acaf8bc", + "e2b267f7-f865-44ec-9e8a-58d3b98ff9b7", + "0c8470d3-62ba-4a3a-92cd-e3a3a981e5c1", + "450a6a3c-4786-44a8-8266-29e6ec1b3b2d", + "3af3a41c-bb32-4ec5-b294-02de79ea086d", + "6aa0121b-0a0c-4fb1-8784-404abb8c5e5a", + "4554374b-df72-4446-a172-37361b17adb4", + "a1ae1316-73a2-4841-8959-34003a7ff8d8", + "a2857474-d4cb-418e-925b-cebe73442d3e", + "4755a361-6a98-4985-a3e2-4751c4627798", + "3cd652e7-93d2-47b2-8a72-6cf2eba15bb6", + "a2b9ef6c-1d3c-4c12-8cd4-7d9b7e0c9851", + 
"852d5aeb-fd46-4124-b458-63528fac1f5d", + "a85f45eb-8c7f-4897-af0a-8fdb2ebcf8d7", + "7781c566-5698-473f-b100-6771829f3bf0", + "1337202f-38b0-4a66-ae48-948890e60c64", + "3d6e939a-64fe-4bae-93d8-9df7750dc5e1", + "5395884d-5afa-47bd-a245-7b54f0f16f3d", + "b1c16ca2-9707-4893-b417-2038f5a10ce6", + "d7f7749b-137e-41e7-ad43-324dcc995dc2", + "448d3b07-3c6a-4510-ab0b-ebf13c085348", + "5891cb60-f601-4a04-89ce-e3928989d93f", + "e227ab96-7b58-4d2a-aa77-3cd7b50314a5", + "21f1a62a-75c6-4e35-aa1a-6f98a2f1a1db", + "fa55416f-84d6-480d-85e4-482292f296dc", + "dae3aa4f-2a5f-43f1-a37f-b0f2603740c5", + "dabae23d-56d1-45f7-a15b-cd63c75398f7", + "4e3af79a-460f-46da-a4e0-2a9bbd3863d2", + "ca587050-81e5-4073-ad0c-6431acb64c89", + "a26365db-344e-4bd0-9146-b1e3b589b407", + "0855ad4b-ac0f-466a-9a9a-dc9ed74379c7", + "d133fe71-2e18-41b5-b6a5-e7805dd97acd", + "29266a7b-f349-46d9-873e-e957ad3bdb9f", + "35499b84-c83e-4269-bcfe-bf49dd049812", + "06f626e8-8f8b-4947-92c8-38038e477f7e", + "23456952-d8de-468d-9c84-39c344c207b4", + "b5cbfa57-ab49-4075-9e8f-ead076ffaba0", + "40535c58-d818-4757-9ea4-62d0e28f1837", + "2a76aec3-83b9-4ef2-9582-82da7ead0259", + "0ff2b04d-c498-48e4-b428-9eafd2f8948f", + "67d9c38c-a572-4c3e-a9f4-2fe25cc235fe", + "7d702f17-108f-461f-9978-c89a7a4a9649", + "921b8050-7350-42a8-b4cd-82e89eeb4cf0", + "8655e08e-2efc-47ae-926d-4464afb877f2", + "49ac8487-920d-4d2e-a33c-95c6e6f03b63", + "9c7fc7fb-a9a0-4af5-ace8-11526223ac47", + "25890354-2444-46d3-aab2-ff5cdb7ffbd2", + "5aa7f474-1f7e-402e-9654-117d867efeba", + "ad7d41cd-bfbe-4190-9ca2-2026a8faa7cb", + "4f51a998-a235-4a77-87ec-4bcb1de187a7", + "ed9dfcad-e331-43bb-987d-1da15e81bda4", + "3adf5c3e-b776-44ed-960a-7ebf26386b49", + "69b3614a-1044-4972-913c-a2751f584faf", + "89d6f8b5-ef5d-495d-8443-26561be6d5af", + "bcdf61f7-a8c9-45bb-8706-96cd2a71a125", + "9666e792-d330-44d9-8599-ad4b8225552b", + "6c7230cd-4acc-4a7b-8824-83e815fa0654", + "c8812e14-3955-4724-a019-2d0e7946de48", + "69c6e2d7-487b-4508-b4f3-6bc8a5e9b424", + 
"d31e13de-417f-42e7-b713-1c2438679f63", + "ce90f24f-4c3f-49d2-af40-fcf420ef5a36", + "ad8df83f-68a2-4ccb-b977-85afdec2e02c", + "287c5061-661e-4314-85cc-36c5a0e719d4", + "baa7f208-cf20-4d18-8823-328dba20c447", + "3e6afea7-ee10-4b76-b1cc-115cd004e7da", + "7c5d2d4c-594a-4615-b7db-52d22aa01e43", + "7f918ad3-6b76-4779-9e96-d1c2e51c6576", + "20b7a3ec-5688-4646-aae8-2257923485f7", + "1bc7c8a5-1226-4e2a-8125-8d52a73d41bc", + "07f67154-084b-4c4e-9235-69aba073fea2", + "c8219a6d-f0d2-4f2c-ba9a-382ad797698a", + "4c44033f-ed35-42cd-af26-221ad98e141b", + "31f8ea5b-c6a8-46e8-9c53-22f08e1902f7", + "755d35c9-8350-4dd7-8096-a036081019d4", + "5aff702d-8cbe-40a9-b8ad-4381f3325695", + "efd18e07-8ac1-4af0-98f2-e2077cd29d1c", + "a4afb75d-418d-4c14-9d21-238812e809cc", + "8d9cff6d-ece4-4942-ad0a-b57a4217aa8c", + "ea643e73-d8e1-4e46-810c-86da73e9849d", + "261cdc5a-c372-4adb-82dd-281a1a203d13", + "aa64d417-e23f-4979-bcd0-1dab5caee86e", + "49c3699d-2fea-40e8-838f-0095d6df057b", + "d2709430-cd7c-4145-afdf-96055a7e50e1", + "0ea84993-e009-4fa3-ae13-4be87d66a1f5", + "6ee57583-5271-43e7-9151-cb9341a642a5", + "1efcc962-00ef-4803-be75-1d90e82ffcaa", + "d4a36f2c-73fa-4031-a87c-5ae91c50cf8c", + "e2ea700b-23fd-4a7c-80bd-978d8042d14f", + "4dfb5656-ebd7-4f07-95fc-1fdc41ad0b93", + "6c8c05d5-0a7f-4843-b93e-dcf644fb220a", + "8875ffaf-72a6-4e06-bcc0-3b342e28740b", + "23f35240-f086-4107-be44-e35042d3cd54", + "752dde64-3c54-434f-88f1-83fbd3c00a77", + "f271aa7d-32c8-4302-bcb6-4fc6f1813bac", + "fa0b200a-f43e-4d02-9835-72d4b0ceba32", + "649518f4-9836-48b7-9764-90a3f34512ae", + "a2d60544-0dde-4d2f-9ba3-609997a101a5", + "00ab1fd3-9fe5-4375-af1b-e5c1a4347055", + "3ac266ec-8997-4591-a875-d38f7669a288", + "3cc93bbf-1dbc-4f76-9a3a-2f7c02c2fd35", + "9d3025fd-78de-45b4-a971-7ea6e23d2426", + "16c9f99a-075a-44a0-b93a-4b1b888ed918", + "27ecbce2-3e30-4f0b-95f1-028795e39a6a", + "46c8004b-d851-44ff-8299-a4aa1173cebe", + "5b1a5ecd-f59d-477f-81d6-6e3380b26651", + "bea6097a-a214-4def-baad-bbfcc33c76b2", + 
"df3e6d5d-6c9f-46a8-a2bf-a5849fd78d79", + "2aaffdd9-33c5-4dab-b63f-e239b926ed6f", + "91f1aab6-4363-4713-b906-5adc74d466bd", + "a5eecaab-79fd-4009-813e-6c90e85b825a", + "9339e15e-5cd0-4b06-933e-1a6b38c84e9d", + "bf197310-458f-47d3-8736-6db6aaaa79a3", + "61491a7d-6eb1-4cb9-b090-fe292e7240c4", + "173744e8-d656-4a60-ba05-79359ced2eab", + "68fe3ab9-4f8c-4cf2-ba44-8d1a360ef4ba", + "be1a0058-6ab3-4b20-8a29-3634bda5833d", + "cd742d1a-5ffa-4af0-bc8b-d605ff3fd78b", + "1783ca2b-29e0-4e66-8681-3d7850b63ce1", + "969605b1-88bc-477e-a28f-c25b7081ed74", + "b2566609-aca6-4239-ae43-894cd06e730c", + "1ed74ea5-5baf-490e-8bde-7345915f2efd", + "e0fd2f7e-8709-4050-83af-b7aac555c6c5", + "e81d821b-9d34-4954-83e2-60d9829ae399", + "e45505e0-b338-4fd8-b4fd-b6456806a0cd", + "fa6aef1e-9464-4c17-bbf6-9238dc81939c", + "cafe226b-428e-4b07-96b2-5b418a30ac64", + "921a41bc-f0fd-4125-8c5b-01e8ea37bf65", + "0ef65c74-d771-4ae2-b8eb-6c6de3043529", + "67119cbe-1f8b-4046-9b6e-d535d81d667a", + "feb0ae86-65c0-45c6-9f96-1a2691106071", + "1b8ea336-e9e2-45ee-9526-51396c44ff5b", + "0e9402ed-ff6b-4267-8ead-0c49cdc2ecbb", + "cb7654c8-fecf-4bbe-a5e1-ad2622d84f8a", + "f33fbddc-327d-4738-b3ef-72f2d4f48bf1", + "d3d40910-c794-430e-8474-9053e430200c", + "e886852d-8c13-46a1-9e0f-d979ff42c66f", + "744ded0d-25d7-49bb-b45a-f4bc71689422", + "8ebf391a-4ce2-4b7e-840c-b1c357908fe7", + "930caee4-1be9-4b9d-b929-ea97f4c06cd5", + "3f46d984-9583-4c94-a706-b52364471a58", + "b3d204b1-d4ca-426d-8ce4-b8a339a93f81", + "77ea6972-d7c3-4e35-a8fd-cd0f7ebae987", + "c320ddce-8bf6-4da6-99e9-0fcf490cd3b6", + "444ad11e-b098-446c-b92a-353d53e335ee", + "25bcbaaa-f9d7-470e-8ba3-4382387da483", + "fd28501e-b3cf-40f7-b135-30551a322f8a", + "3d2878ee-1e7b-42ed-bc57-57305a5f56ad", + "4fa2da4c-cfad-43a5-aa58-dbf336f81dc9", + "dbe3e112-52c5-4a6c-af4c-e2c485786730", + "3f8377bf-dad0-4203-875e-76cf6086f7ca", + "9e055015-ef0e-472b-9fdc-5ecab72b3c32", + "d820c8fd-a9c1-49f1-b296-c3bcbe56573f", + "b83a50eb-d72a-4f4c-96aa-b6268f4b5f07", + 
"c7e20f20-7462-4554-827d-63f399770d73", + "0434b6af-efd0-402f-88f6-c0198df93c5e", + "efa8a1a4-85ad-4f03-bfe8-e5811f307a9b", + "b2a2a54d-adc6-446e-ae4c-241b10d22d79", + "43f93773-6380-4cd8-a7a7-57c83c62b6c5", + "20ad5299-2670-4acf-b3bd-ec55ab5a29f7", + "9c81b15d-03e2-4e81-8375-2106c472dedb", + "52bd63a6-82d6-4cee-9fe9-16c1aff189ff", + "daea8424-ab36-4daf-b99b-4a2711d1dafb", + "b0fcd366-b373-47d0-9a43-053f8b26deb4", + "0632ea46-2be8-4fc7-b830-0f2d1d989f87", + "d5edb31a-9c93-430f-a5d5-0f682eec9bcb", + "891ea66e-3424-41a9-ab82-8b4fb7e189c7", + "2536a86f-3dfd-44a0-af25-104027d0c2cc", + "465d8419-8524-46da-8dc1-88d45423e2a2", + "e39392c9-c8ca-4df8-9a3e-1bee2e667ed2", + "5144bd3b-c03f-44bd-99b8-27cc0585129f", + "c30fbafe-30ec-4f05-bac7-9532ddc8f726", + "d6f7de4c-463f-420f-8cc5-a62ab872ef28", + "7b389052-f090-4bec-9831-2e1f4c2caae3", + "f73bc7d4-6d58-4680-932b-dac0d65f5530", + "a1a05770-7fca-4693-8800-0602dfe8bd19", + "50297c99-f944-4620-a49d-c56960af573a", + "cbc5f960-da0e-4e69-83d6-361d783493e4", + "6c8065aa-6fe6-4781-afd2-c7ca8b358cca", + "cae09f0f-d8be-42b7-a5b6-3aa5cefd8831", + "ce977175-9151-46f3-bd14-26b4f5e94f28", + "20b9fc67-605c-4bc7-bc05-abcde7babfb3", + "44239fc2-2e34-4811-8675-9aa59600345c", + "ee8c0629-f44a-4804-87b8-f2de4d349d91", + "1c598a47-7585-4e39-a34f-0eea381fbb65", + "e12f42be-e016-4c15-a85d-7b0e0bef446e", + "81ddc249-0742-4db2-90c2-0855d4867d21", + "c507d7bc-b737-4ca5-ad6b-4210d962a198", + "40d64387-f7c3-49f2-9177-cff5697e822f", + "0feee956-d49b-4da3-85eb-0a3fd3057d47", + "87932f65-c86c-4781-8e8e-961f5a2b8cca", + "695571b0-dffe-4c75-8438-4efc804b4c12", + "6b032f1b-4934-4bb8-91bb-088356a50045", + "84b70808-7374-4298-84d8-5180dfff83ed", + "e54cad82-3274-4a7f-aae8-830acb4bb168", + "6698718f-b01c-4f5c-8331-2971e87232fe", + "f0d757ff-8068-4c7c-b57e-a4f2fbc909de", + "1fa319b7-6ad5-4963-ba4c-2932574915c7", + "320700de-4035-4925-8028-c6931ca67a8d", + "e3dae78f-b029-442f-85c0-1e8d9c3fe79e", + "a525e5ff-08b9-4a39-9f6f-40e2cebf33e5", + 
"4486bbc9-4749-434e-af12-776f70e58cde", + "9c1c3ac9-514d-4b1c-ad6f-95c9d3490740", + "b6e38d15-b442-47a8-8e4b-fbf35a7b693a", + "0cf72ee4-3e65-44b6-9c30-32be22c3531a", + "904d68d4-61d4-4bf2-8833-a987eabcb6c1", + "9915637e-23f5-4a7a-8b75-4bc6eaeabcb2", + "e2442e93-23ae-48bf-a1b5-f9ed765939f5", + "71306c02-bd0d-48f3-befa-5c62bb907ffb", + "6dae84f3-77b2-41e0-b1bc-36e9bb3914fc", + "e92e543a-b372-4f81-a305-6616f61eb3a5", + "dc5ce520-22f0-41ad-878d-d3fbc53eddc7", + "cf149a8c-8bde-4a6e-b977-52b69ddef04a", + "4263e99c-d1bc-4167-8a8b-0056b7367d80", + "fd9021e6-ccc8-4b7d-a784-12d697f18eb5", + "a72030f1-8da8-49e9-8ed1-a5f67c3ea58d", + "7ddac1d5-7619-4f80-bd1f-5321e56c9a2f", + "df384cd0-da17-4822-a691-62e37baab69a", + "865bac6d-c5c0-45e8-b1da-9a10691a229f", + "0e829e71-9da3-4124-8ab5-704f287dd886", + "68540166-8aa2-4cc7-8a0a-03bbf510642d", + "5fb4cec7-c1e5-4be8-9eff-43a3e247f77f", + "b6e4e25d-a7af-45d2-a0c9-eeb96edd871a", + "46b4fbd2-31d3-4702-85cc-9e13a5485e43", + "90309b93-939a-466c-9cc3-246c17f68f3f", + "b2f59ba9-bbd5-4ce9-9e76-d492fff526cf", + "20cfc8a6-4f58-429d-b2e5-33b6129a74e1", + "0c99fb76-c615-442d-a7df-507303ea2a23", + "d1fdab32-9f2b-47c1-aca9-51daab842091", + "c2892571-ba77-481e-9065-78e21f6f30c4", + "4a07ce68-8db7-4c24-b89c-da646cc612cb", + "35f180d4-e905-4a06-9b24-3ad26cf2fc06", + "fec0d830-fd78-45de-8e0c-b17fb1103c09", + "47c9f848-c697-4d15-b899-4a64a987cf53", + "c3de3773-0401-4fc5-98e1-56ca0eaf7a2c", + "57948df7-d34f-4f1c-864f-1c6035bdd98e", + "42d47760-b9a0-4181-9900-ce045f53ea8f", + "e7607d7b-9ace-4675-928a-a689d9079bdf", + "b48eb4e2-c155-442e-a232-07183f2c79ba", + "2b101c32-9ebf-4b48-9bee-39420f0e28d1", + "f7612015-bebe-47bf-998e-d66723745ca5", + "84129357-0dfc-4e1e-b53e-be06084e9537", + "6b4ef3c7-f456-4874-b6c0-5598e9f5f6f0", + "dc2b0a43-a742-4eb6-b7fe-a6aa8f36535f", + "f24baf96-476a-40f8-8919-2a1a222e6c71", + "0f8bce5b-c5d0-4deb-947e-0d0faef38aca", + "067d79fb-72ad-48a9-a8b9-45e14897bf61", + "b167613b-0a1d-4be8-ba0d-a163579b470e", + 
"5b2b55ff-2bcf-4174-bc19-4d02aa1f03a7", + "b9ff087b-ba99-4eb0-8578-7b431dbf25e9", + "98e7ab98-3a96-487b-b3f1-0131386afd47", + "62f34c81-8e02-4d91-8974-c6c689c0af5a", + "6485c99a-bc70-4257-942a-35003b245a7e", + "6dff98fe-5f9c-4849-81b4-b7b4d3538e85", + "c3485d9f-5e92-423a-b901-f64815b1b958", + "6eeb5082-77d3-45b1-a1d8-9c689365a611", + "2cf04fc3-471a-4bd7-a15f-b0c388c5be41", + "e872ceae-88fb-4914-b711-5e417fc3fb17", + "c80d1746-3fa9-4151-ba28-c9b6f5ce06e0", + "9b20c8cd-2ddf-48d2-8490-589e4f88e3ba", + "0b08a83f-af95-4d05-95f2-077dd0632aec", + "6a1e1a34-7f42-4863-8f5d-89128465d149", + "b6c3ff3c-f056-41aa-92f9-f409ecdfbc00", + "ec0a83e6-192a-4739-b20c-7125d23d7393", + "9b60a75e-1e99-401a-83cc-ea95eb30b47e", + "19c93e76-027d-4d5c-9f28-e4baa483b660", + "96267248-2cd7-43b2-b1cf-81a31fd1ed11", + "96c8b452-ae77-4827-8365-bd5e0ce68f4a", + "4754953a-43db-4a13-a276-e63e1459c8f0", + "bc813815-59d1-4ec7-afd5-74dd805099f2", + "6a650705-512c-4d4e-bd44-b2bc14df9293", + "b8f9efc2-f1f0-4e2d-8dfa-aa486ef83a1b", + "178bd8a2-6d11-4966-883c-a822da02b69f", + "e1a832a6-2dcf-4fa9-a931-dcd806fbe416", + "665f9a34-89fd-494d-8c79-be882128fe5a", + "f24dacbb-7166-461a-a298-be71a00b4986", + "2b24fe19-c1b4-46e0-b71f-9a003b1c0728", + "171bcbaa-3b8f-43f3-8bd3-119dd159803a", + "726869e0-5f5b-4a68-9b00-8ab832a38b23", + "bc8185cd-1fdd-479d-9dd4-785bc5d6336a", + "473055a4-46e3-4e7e-bf06-11739a04f37e", + "2ea79779-504f-4747-adca-f6e2acfdd964", + "fe149954-71fe-4d81-a6c8-d01734cf78c9", + "2c60dc93-67fa-426b-95e1-b0bcc1e6acf0", + "8d389d35-744d-4a10-8896-777246ea8e32", + "05fa4d8b-f0e1-4a22-8ebb-b80224cf0565", + "91cc7d33-8e91-4c24-a5dd-8d0483819d32", + "16ec23ab-2e2f-44b1-ad91-22ffdc21108b", + "a2c2cd7d-4adc-4fe5-9e93-d9d76a00c247", + "7a38c326-0d6b-4ee3-955b-db7522a9b529", + "b8f739be-b348-4db3-b11c-555346550d89", + "6220490e-6520-44e8-a90f-99a1bc1b8dc7", + "10b1ba0e-9184-45e9-874b-82493ec487fb", + "302e9af0-bb94-41fc-8608-40320c188ce1", + "904b6fb9-b8e2-4ee2-9635-ab806af64af4", + 
"fd777d59-59e7-4f75-ac3b-a921ba5db5fb", + "3bbe6b4e-8e51-48cf-9dff-543c8656271a", + "e2194762-ee43-48da-af5c-d35fdb13b100", + "b978afc1-3847-4369-a545-c8b6f9636289", + "c066d6fe-e070-4c4b-88db-46e626341beb", + "37252c7c-f34a-4237-b91c-003122ee6d02", + "f3c12097-64ff-4574-95e0-299be1e33d2c", + "1aa7d9c0-598d-473e-bb45-dc1995fffaa3", + "75d36a6a-83f3-4a9f-8258-c041b382133a", + "554d3553-e687-43a5-be8a-ce6471f8dba0", + "8bfec9c1-264e-4789-9f7b-91394cb863d3", + "caeda205-9082-46ab-9deb-b22dab4c47e1", + "c6cc4137-fd5b-453d-bc3a-b39f3f6a240f", + "d215f28f-90a8-40e8-a1df-f4f936362fb9", + "86619459-52fb-485d-939f-31aa50a2d932", + "363aa74e-ac7e-43b8-8e95-a0b93afc154f", + "8e1a98e1-a4b0-43f2-bc4b-efb6a926a43a", + "310eb8c9-2995-48bf-9d42-c1e114262461", + "59a0db53-c028-42b9-ae81-bef3f3f1c251", + "db31253e-44ab-4f8b-8982-8e5737e28991", + "6fd204fb-ea90-4d25-add8-f239bc7edc1b", + "72891ace-cc31-4f4a-8af7-61fff36ccd34", + "a754f201-226f-4019-aec9-6a9a08c0c9c7", + "00dfb1da-18d4-40c1-bdbd-cbeb06d782a7", + "1f9717c3-75fb-4d3c-8012-191a69515f38", + "c107d46d-7f69-4249-9cc3-daaf195f6b68", + "a9018cb6-a0f3-4f96-90f0-0f1ce68d2967", + "3b36c713-450b-45b6-9196-745b453621e1", + "aab7d992-f1c4-4a5f-a41a-68fc7f3cd215", + "e8ad8e92-4f89-4b2e-9552-eea27e61d936", + "6dfd4a54-61c6-4ad2-bc47-0f69350f44c0", + "45dc56ef-48b8-459b-9b85-1260719bba62", + "5f248577-7b32-4aef-8ad1-f0117b7ef120", + "9052dca7-6317-498b-87d8-2d867a13522b", + "22b493d8-242e-4497-9524-0f89cf5a1cdd", + "1d1fbc16-ba1f-4c40-9a40-b72abaa9fff9", + "1d732947-e6f4-4330-9bb4-4fd50665e3f6", + "46cc9fd0-ae2d-40a7-a305-5304b1640d82", + "50b89c53-03e8-49cb-95b4-073c450a2695", + "66d2158d-2e8e-4e66-89b4-3470d87ecfd3", + "ee6c90b4-22a9-491b-b229-b4a8faa6186c", + "5c2f22fe-3b4c-456f-a7ff-ada7ee00a4b0", + "6decd800-eda8-424e-bac9-adcc118d2a89", + "7935b05e-c4f5-4976-9aa4-1d1d445855c3", + "660d318f-f26c-4b41-b980-2c4c65ec7a92", + "aba060a9-af40-4ed8-bd2c-ec7358a7a276", + "c68f1b0c-6d80-49c8-a239-8488bf4d9d5c", + 
"aa423556-1e9b-41f9-9864-5dcc1d386e9a", + "43e98c3d-c56f-4ed9-9f6f-31dc95cd3a80", + "d88f5943-0701-4b0a-839f-c384c9b30b4c", + "51c5fbb4-d51c-445d-9dda-640d3a770743", + "7f202c48-8843-4bc8-a049-01949843acca", + "1f787efc-924b-4bee-8b9b-eef0a0b24d85", + "8fcf52ad-c2e4-470b-9f67-ed11d871dca4", + "8f6c1aab-4857-4a7e-9e67-97cb0b00e5bb", + "afe7c288-5a1d-451b-ac47-77a12d6685d6", + "4598a0a3-29c5-4956-ab05-14191ab69d9f", + "58048754-c667-436d-826a-599d8cd9841b", + "77c502f1-f987-43c7-9184-4a1b0afe1f46", + "8fa57fae-aab2-4c39-9567-f7ee9d4b1db2", + "d716599d-2bba-46fe-96cf-96374ac00fcd", + "049b2872-0c33-484a-88fd-15b33f85e2fa", + "bf8368b3-f4c3-4d98-83e3-e977a9afd0eb", + "b66ef1d1-92cd-4e56-ab43-6c99e961b0a3", + "fb26d708-2b67-48f6-b6ab-f93be2edc4d6", + "79f26d7a-09bc-4219-ae63-c30627dc8bac", + "9b55a536-a056-44ed-a90f-62da454983ab", + "8868a7cd-36b2-4446-a83d-551a9fe74a38", + "89f4c8d1-d065-44bb-85ba-ab24a8bd21f9", + "5ba19e9c-71dc-4f1f-a009-3d16086c67c9", + "f5f0ba81-6d62-4498-9803-5f8acd2ed37d", + "a4393618-80cd-4c50-8f76-65b0df39aa74", + "32839b52-d616-46c8-b1e5-de45e13ffb8b", + "59e62b41-413c-4c2a-81b5-b7e0f46d93ae", + "c52525bc-5818-49a8-91ae-ca58a0b417b5", + "21768c0b-271c-4337-9a89-10b8cd9071a3", + "5271c9c2-1552-4e79-be7e-b97e689eaa40", + "30519f34-2e8f-4523-a124-760b4554233a", + "0d5571df-c709-4137-a6cb-aca3cd6fbed7", + "be3c3e3e-0ed0-40aa-858e-6e10e4bddc39", + "74a6ba02-9b97-436a-a862-a9c126266462", + "60e56659-661d-45d6-93e3-28b2a16e1e29", + "92d18c33-5fe6-4a3b-a7a7-675aaa661a49", + "804c44a1-5a78-4876-8f97-02da9b2cc36a", + "c69febf4-7686-44e5-bbfe-b3664af8ed4e", + "66f7097a-2f96-467f-b4cc-06a7b995d6f3", + "4dc4ab60-6b94-43a1-86a4-1aa856356736", + "56c9e362-bb96-451a-afe6-aab09b15016e", + "ebc8266b-05bb-48af-9c91-29df7dbedbfe", + "858b44c3-b031-4b24-99e0-18f6f2f04c9e", + "88c080a4-acc7-4152-b311-7b3acb87265a", + "a9e489e9-bae0-4a79-9db6-820a222810dc", + "02e6a845-04ce-488a-b83b-1a69869890c2", + "5c8c9a06-5cfd-4f92-a0ea-b827ffaa649d", + 
"520afa30-41c9-4f68-8a8b-abb61544b922", + "a014a59d-d7e4-4ce6-8fa8-abfe319e9fc3", + "8b98032a-fefa-4249-b718-37b273c5dfef", + "f1ccdcb3-ed06-42ee-979e-311376d3eaf8", + "16b061ef-1fbc-40b7-936c-584c52366d3a", + "3f4d6dd3-c3a4-4828-ae54-b208952aa9d4", + "e7ce03bb-d986-4326-ab18-b9b92f3ab790", + "765e4e87-c76e-4405-ae71-2ab01d4b53c4", + "f778c421-eef6-4ba1-9e71-878a09787500", + "3581e24c-b3fc-4d47-b9d5-cb36f48d649a", + "8f53a0e6-59db-4254-8be4-c6b199987d41", + "90befaef-697e-45b2-a45d-a654fa835541", + "fc6f4f25-ac2b-45e7-ab25-a4628c32f2d4", + "ead596ed-396c-4901-8d3f-0501686aeadc", + "fbdd9ecc-b5ba-4b90-9797-20752013480b", + "39088300-d78d-4a67-8b1c-5abe13cf1ac8", + "73575333-b861-4b55-a6d4-85ca97f7c98f", + "d8e2f864-5712-4a98-b35c-48ad0af01cdc", + "1dbe4bd5-7bc5-4aa0-9202-2df4723543d3", + "68490961-e34c-4dd5-9008-343b60230e35", + "1c8d1e42-c935-4843-9020-afbb1a1097a8", + "31769a68-88ab-47d2-b5cb-0a8069f6e8c2", + "0f51653a-1016-447f-a30c-8063137a1415", + "c3da8b05-7067-4cbf-b078-9a3fe8990f7b", + "25734045-24e8-4fc1-ad6e-97168f2d6238", + "5dcaa41f-412c-4560-bfee-350f71a67a0f", + "885a0b95-70f0-4193-aefd-9be2d36dc308", + "7ec9396a-781f-46d3-b1bf-d230d7699978", + "4c315727-2638-45c7-8500-13e0adfe135f", + "709bf983-0e65-42e2-97a7-611d001eb5ae", + "581e3e58-f11e-4aaf-8906-2d3c0aa06e93", + "872f7f07-3668-43c5-9fda-5cd2cc4dd967", + "f981a5aa-f53d-4a73-ac2c-c834520d4378", + "c15d0e52-44a7-45ed-9ed7-9054657966a2", + "c5a92059-9a20-4ca2-90d6-cd8a374d702a", + "aa02ce63-2933-4d53-b453-82b2cda42840", + "986942b4-3a42-4785-adff-e5e33a284073", + "cfc6b18b-f08b-4895-b18a-aa773c347ca4", + "1980ad96-41da-4a39-bfc9-4abe0e591cd9", + "54601236-dff7-4a93-a5ca-0347ac8f0512", + "dbc0d9f9-b2da-4525-9322-cedcd5843e8b", + "a7097d80-16b1-494b-8868-ec2cd9a5b5d7", + "5b8d48d3-3494-4112-b06e-d43f82d83b07", + "c8f9f8a9-3eaf-48d4-8173-0758a638709d", + "72192970-4fc9-49f0-9ca5-72805d05e8a6", + "b4ae655b-6c32-46a4-83d5-427a8d5ad426", + "af35b9ea-cfb9-4214-804d-69ccc2669e3a", + 
"5bb97771-ba69-410d-8d8a-cd3df3bf1b15", + "c945b842-2fc0-45cf-82ad-f4273d60e370", + "7a46dbfd-693b-444a-bea8-6c925d8ad587", + "1a473170-9596-4ff1-9758-be479ad4819e", + "27b9dc8a-216c-44e0-bbcd-1d17d339bef8", + "70cf948b-bfdd-4267-a784-40105865e050", + "15b8c253-a759-4fe7-85ab-60c6a9e47954", + "ec92bbf5-ea3f-4782-85b0-f12d7cf1c839", + "57693a4d-ebf7-4c16-adf2-062b900323f7", + "33976ed4-9e93-4ebd-89ad-62aaa8156a81", + "afb9abd4-bde0-41fd-9e9c-df1ea49e9008", + "de43081a-a90e-4f36-a542-1419177435d0", + "b0593900-1d57-46f7-890b-7840f353f5cc", + "1047af99-01be-4570-9c61-1b77880df2b3", + "1c0bf0d4-a936-4be1-b3cf-da48bc56f13a", + "62b49681-dff5-4f6a-9351-d92874ad7cd6", + "aaefc544-1e5a-406d-ad97-c85cd2c4c6fd", + "014fac68-9e9d-4721-ab60-febbbc8fc9e7", + "a10547e6-bf87-44b6-a4e2-0f8892dbee8d", + "322ef58e-31ef-4396-b4af-9267598a4a73", + "bdf2b7d1-4f03-4648-80ab-2829c65e7515", + "e294487a-dc06-408c-9af6-e82dfddc6619", + "4f6d89fe-a6d0-425d-80fa-91b35885f61e", + "eebbca36-f032-4370-9d79-30fff6889a45", + "1ec376d9-aa57-423c-aedb-b1a41a8ddf08", + "bcf95a4c-21e8-4e92-867c-54dc43c43d1e", + "bc8dbbdb-3f90-489e-82b3-1964d9d28409", + "5faa91ef-44e7-464a-8164-67a270126dc8", + "21095074-24bc-4388-9a2f-9f2087eb8f67", + "68e19029-09e3-4a29-8cb8-42af87562d00", + "4adb3236-38f5-4bb6-81e9-2f96cd96a35c", + "47412bf6-0c38-4f8e-9294-0ea659931cbc", + "26f5207d-efd6-401b-aba5-926a5f3d0d3c", + "dd2d3b76-4566-45ce-af92-534ac2381959", + "32b448cb-f530-4ae6-91c5-c8ad2496eafc", + "d78c6756-16b4-472c-992a-47bfebae4118", + "d1f44913-c032-4d8f-9ed9-82009d7828d1", + "faa81a51-2137-4761-805d-e9b7724fb5b1", + "6c129d5b-8513-4734-b814-db6b2f4518ae", + "92f98a47-634e-4b47-b483-e1ccc435f55d", + "dc6f43ab-1e14-4135-b960-19e84dbdb168", + "8cae7ef6-6fcd-407c-a1f4-be54a8fb3611", + "c135bf7d-6cc9-43cd-8c5d-c4c3c1ae8ec0", + "1fed5395-804c-41c1-8ef9-eda01c22e10a", + "06b46bb6-1032-4adc-b789-0fe928468d96", + "88dd2dd0-e3f2-4c28-b7ec-b02477dd5f05", + "b0075cb0-f0bd-4cce-9797-ff21981670a3", + 
"2048907d-ff7e-470b-a4f7-40964d7a1818", + "460fef12-25be-4284-a0c0-8a8ca1b6c574", + "4fbd76fd-7e82-4088-a4ec-bf6ae33c1b24", + "1d668352-7ee0-4e51-962e-717814e8610c", + "fc3e41a4-cfcc-4593-b95b-875f067300f5", + "56c7478d-7614-47cd-9557-e2dabc581ba3", + "0388e93c-aa21-4a95-a614-745d26e83274", + "85aa1e4c-53f0-450e-b743-e340f09ffb9d", + "e654ec70-50d8-44fc-88e8-fc51af51fb25", + "3fe52d7b-09f7-49d3-b21c-f8c5a040e07e", + "5e115500-f1f7-4774-a3be-886ffaa3ccda", + "e4190ec9-3397-4303-81e5-3cb0f33bdd11", + "02d92a54-ebde-4f3a-8e86-5ff744f5cb57", + "3b6cf22b-5402-4aa4-a87b-52147a930800", + "c3d62970-87d5-49b5-a72e-500398bd4529", + "5e65121c-aa6e-4058-b5e9-c9f69a06fdb9", + "cb4e67f3-b333-4831-81b8-52f5f6c56f88", + "71906aae-1ec2-4680-b23c-6ace494cd910", + "8694a847-5137-459a-95b1-e82a2c3512d1", + "c4aa5c14-0b15-42d7-a386-364a6e409c3a", + "b9dffb84-1894-4969-80cc-97813078654a", + "3a3fc420-ae78-44b7-8242-5f22ed56a0b9", + "43b1378b-9bbf-4f1e-8147-15c96746472e", + "75c6bcfa-783f-417d-907d-5f97fe9c0213", + "828b760c-abcd-4767-8158-1f0f8edb0bcb", + "1756d55f-604f-430b-ab09-3386a89bc6e0", + "517cff1a-bcc0-462a-930c-69fdd3b1bf08", + "61b35159-8d99-44fe-93df-a6fda1db4300", + "5983a4e6-01a1-4bbb-9756-5cb872e510ce", + "1e79c3f9-c15d-49d5-8e29-0a5f397be062", + "e731da3c-8658-4169-8cf7-2392b6176bc1", + "e105bffc-94f5-44d2-a395-2cf0e186d9f2", + "8284558a-fc34-4c42-91b5-26e79a8f36b2", + "db284842-0135-4baf-900e-03ef5b279f94", + "7f5b57ec-6816-46ab-9126-ea5447651ac3", + "e5174b8d-65c5-42e8-b8a7-ec09fd22206f", + "56eefdbd-3b6e-4b5d-95b4-a749a2109692", + "bf2f2010-279e-4704-954d-6e895f64ec2a", + "6056675d-11ac-4225-95c3-99621c9dac71", + "9b10fb49-b75c-41b0-a9dd-3614f1a82508", + "ff1fc2d3-3810-4e48-8203-01fe50b6d598", + "2d30ded0-a1b1-4a59-b2d0-071f105461e9", + "3e41ca37-7671-4f6b-a4e0-f2410b43e5d4", + "8a78e76c-f114-4c2e-9436-49861ec0d83f", + "88b263df-d704-4661-9dde-7fd67cc17d1e", + "e7a902cb-efb1-4bcc-a51a-93afb6a59bc1", + "c70a09d4-f255-4b2b-b7c8-e0617e336e76", + 
"7d73a9c6-2232-40c3-9856-6dbc2ba9ca38", + "3c69dec4-eb46-4efa-b8d8-f63bb30d2487", + "9e754886-7c28-43d3-a08a-a3bc5ee901a3", + "7514030b-5615-4bb3-826c-ba9c32aebe59", + "2c1dfd36-f273-47e1-bb0e-1eea02028bba", + "b1c6032f-48a4-4c14-8c96-143795880eac", + "7bc66706-47b3-401b-8377-2c34463a13d5", + "fb9bd131-2e1a-4785-9e0a-8fe0f2929bd4", + "42430bf7-a809-49ac-9abd-2afa3a3a3b13", + "e12a7df8-52f8-4155-8acf-3f4cfeb5e1bd", + "8b161951-80b7-444b-a67b-a7ace27b71cd", + "0b474ea1-5af2-47e8-8c48-b2fb0a8207ec", + "fc73b78f-4dc3-49d6-a086-0293a49e43f9", + "bea317ae-5649-469e-816e-a38f3585dbd0", + "fbb3b0fa-fb69-4d0e-ba14-e4dcaeed385e", + "6fe1871d-f37f-4e2a-92c7-659bdb03d6be", + "ec6a34f7-9bcf-451a-95aa-6778cb3a7c37", + "27b3d586-671e-4c37-84fe-a75858f52599", + "c0d560fb-3184-44b4-9524-708d6243848c", + "b6650ef7-944d-4361-85cf-2d7665d24391", + "82e986f2-dd4c-4cc9-b543-21c9c990cc81", + "697af90b-b233-4b31-ba5f-2944e40ac807", + "c519017b-2ad0-4dfa-a4c3-cbeeb290748d", + "60fbb721-e529-4b55-8495-34be7a2ce3d8", + "324299cc-0841-4273-ba9d-bd1c194e89f2", + "0bb812c0-ed78-49bc-ab56-1891ace247c9", + "42a3be3b-5c38-4ccb-b42b-9cbb7495f871", + "99d07bb8-c462-420d-a774-88b4cbb28aee", + "164ad154-abb0-4225-bdc4-8a5f5f28bdff", + "d7e11abf-d9ff-4775-85e6-08e5405f3a92", + "69fa58fb-5a5c-47c5-9c14-9843a1408c90", + "b5ace4d7-39a3-4977-80a1-9b936e3bcd02", + "27b47af1-6fcc-4680-9bb4-de45d5c618bc", + "9807aa13-0499-462e-abc4-ff07b581d077", + "971c359c-a7a9-4e32-8f0c-8ad1cf3efe9a", + "5f2fb69b-1ca5-4711-a776-bf76f81fa596", + "301f594a-2bb4-4f87-869e-6b2b799cb4fe", + "92df7d52-4422-4f94-87d7-3eb537922053", + "07fc68be-26c4-4c34-904e-28d04b93b047", + "9400323b-e3e8-4fb6-8fec-5476688436b6", + "56dd0739-7d3c-4081-a305-c5e548f8ae47", + "43d90676-29c6-4d5d-8de0-dee5e5e18c82", + "c4d813e1-be27-457d-b068-ca6981553ff1", + "16808efd-a4c0-4702-ac01-16d570658a24", + "970bd367-79c1-4870-bf3d-192c2bc7681f", + "61f7252c-c2d1-496a-b462-1b9cdd2d1a5a", + "cd22afc8-75b4-4596-ab4d-f5cae3f7f893", + 
"059c06e4-ba7d-48a2-ae4b-a7387b26a4e6", + "a5e37208-d3a8-4ba7-8820-c3c01ff806f9", + "da064108-f4ea-45df-a4b3-e5f497f108d7", + "29c8a538-ab93-47a0-8278-4b3770fdaa9c", + "0e480b22-63a6-4aad-97fe-a86b60141555", + "727eb715-9766-493a-8263-d516dafb2fe0", + "07f457c1-0af7-4431-b831-f96f63360066", + "402e4ce8-b1ee-48db-9570-b6bbbf4d3f89", + "5739f2b7-2267-4d03-a922-af84559a75a2", + "ce95fc19-326e-4da4-8a43-3c76500758a4", + "a1c0196a-9725-4da0-8e54-e58cf05ca000", + "d9cc60d7-126f-4804-99df-29be3bcaaeaf", + "e9e611f6-e9fe-46d3-aafd-323160ab8074", + "988ca70c-5107-4af0-9c6e-1640b3927d7e", + "a1040055-ccd5-43ab-b30b-2ba2c16ba62f", + "d90f250e-7cb4-4c0c-894f-835a0b4f27cc", + "19da72f3-e3bb-416e-b7ce-5abd145a174d", + "7f13156f-8441-4702-8146-906bbd7c8c8b", + "acb281a6-2918-4255-9009-d7db8314d91f", + "8a8bb8ed-7c4c-492c-bcc8-f5865a160c5b", + "ddec58a8-f282-4fa0-ae07-96bc71550102", + "711fc1ff-5f42-4ede-abe0-ef74fdc4a9f0", + "e5e649a3-ab26-400d-985a-213e49ecb383", + "04448d9f-552c-454e-995a-3cdbc3d16792", + "76b8eaad-3754-4afc-a027-718793e9d953", + "cd3ab708-5bf4-4d60-8352-17c26b538a3d", + "321b4517-cdc9-4bae-a5b4-2fe9e34debd4", + "47e4aadd-f0be-4204-b581-2db67a045ba2", + "b07fe222-4a40-461b-9b93-bbce1d008964", + "69bcc302-ae3c-44c0-bc96-221d3efff1ef", + "9f81696b-78d1-4cf5-a275-079ff74a257d", + "b3d2bd0f-861a-43a2-8ccd-6a534a26396e", + "9a8b1b4a-d5e0-4ced-8ceb-d27fd3778422", + "5662ba40-0dea-42b9-9b72-1aa1f488412b", + "62e944e7-e3df-4327-bbe0-c121d37d88f4", + "6818a0b6-26ff-4efd-accf-42efa3f2ddf4", + "0377203a-155c-436c-852d-b3c19ddcf5a7", + "394358ad-c4b3-4328-8b9c-a8dd03211960", + "75068610-4261-4448-8d73-ac3e1fffa540", + "f973c80b-ea93-4248-8013-bd6401c2b9c8", + "f135205c-3e73-44f4-a678-ef0ee3a2b732", + "fc51956f-0079-4eb2-891f-a704ea554514", + "e49f967a-192d-476a-a678-a6d991da9dd0", + "df436954-b60e-479e-ab15-32d9e6f98c7a", + "901d0943-958e-414f-bb7b-9a72d6dfcdeb", + "2d4d72c6-8887-4044-b25c-d1ff68a433aa", + "cb18052e-4db4-4f6e-9e71-2b3d1037e3d9", + 
"cac4d934-916f-4ac5-a86d-4c68472bf90b", + "6645eecb-9f61-4447-a992-01bca2a5d42e", + "482a91f8-4d9e-47f6-8ffd-5e3b7d7a0f69", + "1d82e8b6-83f0-4d99-9c2f-975324df0695", + "fbee6bec-7be4-4380-b0ac-9e08272ae95e", + "6a6c28a0-2832-476a-93a6-97885db75834", + "5aa65d99-7ba1-48af-96fb-2ffbb03be3a9", + "811fcac9-f801-4caa-bdc1-77cd553e01fc", + "c54eeffd-68f7-4ec7-bd8f-13be3f097eaa", + "759ff98c-e888-480c-9f26-abec2a4d09b4", + "61e1bc04-310b-4e83-9b67-916968ba955b", + "db561887-a46d-4699-9e00-07f01a0e249a", + "e7055baf-256d-4df3-a289-f51aec4bcb32", + "78c0694f-5df9-40dc-a5fb-7a0c88158e99", + "1c805136-4f25-4d07-9a98-4b11b844bc4e", + "95a63ad4-3a5a-4131-bac5-591803ea5b98", + "aca36f4c-d42f-4f25-8275-14d6cf8c0e05", + "c44ab277-a990-44fa-be42-9770df33a5aa", + "afa49295-938b-4cde-9f99-2d4c7567b3e8", + "62a9ee4b-8d96-451d-9c92-3c612309d06f", + "e34bf2e5-50cd-4e12-be40-7c01695fee7d", + "6915e2c1-16ff-43ba-9310-4753be0bf6f9", + "5ae4e068-a12c-47c4-ba55-e63d1811fb9d", + "661ccd23-0bd6-4178-9afc-35e3b1b7b508", + "774dcff4-8d8c-4479-b220-2b4e31b452ed", + "aba6541f-e5f0-4b21-b86e-8af44738a7f1", + "a6e710ec-66dd-4e84-b3d8-de3cdc74a5f9", + "f5bc834c-dd12-4651-95f8-a01b2600b2f8", + "f2f9a651-ec49-41d0-91aa-6392e081415e", + "294a2152-9e18-4521-a764-f1f65d9134a9", + "d6053c8a-8b3a-4f13-82e1-40a97032480c", + "03e3fa3b-bd6d-45d3-94d6-3cfed08b679a", + "550b2dc0-9697-43d0-8e2f-0dd508dbdd8f", + "8276284c-bb35-4ab6-9ae2-fb5ca9f74019", + "ab68f4d6-ba65-44fc-accd-2451cf0072f5", + "56512960-9937-4183-9085-12c88c9911ca", + "9193965c-b1ad-4046-90fc-c40cf793c873", + "f50732ee-fc78-4319-8fdd-d7797273fae0", + "1387c9f8-534e-42bf-8215-ec248dad5926", + "35543f48-b8dc-402b-9ce7-55965bfda94f", + "95b547b5-c122-4c87-9344-7925422edf1f", + "1a62da0b-bb24-442c-9a7c-860ffdab54e0", + "98ac8e43-8477-40c4-8da3-f434bed63e8f", + "455f7b02-dd6f-4851-8ba6-45c918b1d57d", + "5a4648dd-3b36-4971-8260-b3b0993ae79c", + "c38313ee-ec67-4886-a9dc-5ed5a4e0f9f8", + "2b4047fc-1d77-4d9e-8aaa-cd836ae51d4e", + 
"31e156b2-0ed6-4d4b-b03c-ecc46625bc6f", + "1702d1d2-41c8-4c31-9f38-148dde884337", + "25f9ae4e-1350-4d65-93d0-88853e8856a6", + "14ec5938-7e4d-496b-9a3a-12f8651412e3", + "7c98591f-d258-4aa1-9fae-dca309e41414", + "c894c733-0a61-4a68-9535-1ab2221c8f6b", + "91c73a0b-c7b3-4855-970d-8e619611586b", + "19c1b916-29e0-4590-a23d-7a429afc7b45", + "3e8acb62-4d7e-4c5f-ac8f-6b1ec0e5e6f3", + "5eab7fd4-2a95-414b-bc09-2baa925e0d69", + "8f6a4244-6530-40e0-9858-b91fe1146d43", + "3d6f226c-1980-4dbe-afd9-8fe9b0498904", + "5efc0b76-0a44-4d0e-87bf-bfc64c1d9b2a", + "9c72a9d9-8265-48a0-b0fd-69db6d7ef039", + "3c503259-ed8f-485c-a1dc-c4c80be9dc9e", + "098df64f-13de-49bc-bb31-689ad146e2f3", + "d18dd16e-ab82-4ca3-9cf3-cd2da1257d83", + "ac311e4f-27cb-452e-86fe-8f59c4c20a74", + "fe970a60-cffc-4b22-9c75-e78320bc6432", + "cfcd52a3-5cd5-44b9-8fd4-4ff590099e17", + "c38d671d-ee2c-4ae8-9064-4fa0401fb321", + "95af39e2-7f1e-4061-bd24-07c5f71ed85d", + "73b0ba05-8491-4d4c-a799-689d8291250c", + "a7f84f48-6bf9-49de-8615-183b3baf2ee1", + "ea4b4a2e-5378-48d4-b66a-4f98f0c922b7", + "ff2102e6-6b24-41a6-bc11-867b8d3f1aeb", + "e88f83ce-c51f-4990-92ab-a46a757a7f57", + "28fe7111-b292-42be-be31-c71ff0d1b0dd", + "309e55da-a44c-4371-a591-b8e7a8180006", + "11ddc7c3-b569-44ea-93a7-ce22e8fc07ad", + "b392b595-f161-44dc-a3ff-41899f9f1bb1", + "0288ab83-52ef-4272-abef-d721dc186fcc", + "bf080e1e-9fd5-45ae-a4c6-7f4f92e68ad0", + "2212c5c8-4ab5-425c-b313-667e86bd97af", + "7cec4b8a-6df8-483f-a24e-9f331e38ed7d", + "da2c07be-010b-4c6f-93ff-9ef65af85818", + "91129e85-f0a2-45d4-b003-efc3258cc0de", + "7e3c08da-b73a-454a-a12e-dd633a90ba7f", + "7b9b17d4-3c86-4e7f-921f-a1271381a45c", + "db63b3c5-543c-4f98-9ea9-aa39053e5c19", + "c0eb6132-4f44-4296-9274-91626738371d", + "bf6fa836-2c94-4c7e-9a11-b5d7a05a45a1", + "a52eb093-9723-4525-964e-b55e59a47347", + "bc16538e-b0d2-4ce2-bf57-bedc4bcc43cc", + "a6fa20d3-6025-4123-8727-579c2052df62", + "265432c5-8e99-4884-9ee1-0c2b8479cc4f", + "60ffe295-e57c-4b69-b700-8c11812b8e0f", + 
"3b01e182-c29f-4ed1-8443-33e44431376f", + "070adf6a-91cc-4599-9611-39858a965fdd", + "ed322189-800e-482a-ad2a-5e6600c951b9", + "202e083c-2f4e-4d5a-8edd-b7ab9425bae6", + "f1e3b160-b0e2-4266-9077-18bf9e717420", + "cf5d7303-92a4-40b8-9dc7-d80cb68938f7", + "ff837b70-f6c6-43c1-9444-234f68f5fada", + "33f21b30-aae2-4ef0-9049-f65b7ce030b3", + "75f89d25-e92b-4cec-bdb3-4147be18d0e3", + "e4498ad0-a7fe-455d-8cbd-5a3a23ee7b58", + "4ab75314-00b8-4f34-bd58-09246a4c5d54", + "b1c9f0ee-1f15-42f0-acb7-ace2819d7878", + "994e6a9f-4485-41f7-8707-b881bc5eb244", + "e2335685-7827-46da-b4fe-fe44b80a1029", + "156985f3-092e-419b-a324-673b56ac4cf7", + "a5c14012-3178-468d-8664-0ab1e6bf4946", + "1213243b-3d1c-485f-9e89-f8dfdf027343", + "86f311d8-14e9-4c0e-99f4-dcf1b2cf0896", + "9001f61d-016e-4c44-98cc-45713df527d4", + "fd452307-f06b-4c0d-9c8d-e3bc10d016e3", + "1aee0ec2-9563-416c-b0b0-4bb1293aad73", + "7de7e4ad-d09b-4b7d-bc7d-61efe8b3d2c4", + "30e83455-a5b8-412f-88c3-27abf94f2ab7", + "481a7642-9e5e-4ac6-b2a1-b984f6ff824f", + "55353e2d-00d4-4944-8c59-31f96aef6911", + "26e667bd-3ffa-4d8c-b01a-39d9803e4f00", + "11d9d897-90cd-4a50-a2bc-fceafe8fc21e", + "8bba0cb2-6a16-45ee-904a-23734ce0a4e4", + "33e56d17-4c07-4570-9a27-439122b85268", + "899e8248-ded1-4ad5-8fb4-f2c09e31741c", + "de2eeea0-b9e6-421c-8653-d529ce89d125", + "7bd6b906-d64a-44ef-9c85-fc300c2ce28b", + "c96203ab-d703-4aa5-a6a6-b69c5f47898f", + "fee538b9-56d3-4346-8d37-7b5cbb17aaa5", + "14d02dd8-3086-4568-9457-d3844582beae", + "7fa63e9d-8262-4788-a486-b932323c406f", + "aedef1b6-5ca0-4dde-bfbf-4ea95e61f68e", + "2f27e9f4-c1ea-4e7b-a536-89ee57c13f50", + "be0b660e-9cdc-4fd3-9b22-91f9285f41f7", + "e868da4d-6096-42af-947a-2126a13fe25a", + "67fa367d-870f-4f9f-9287-22c051f649bc", + "f591e2f5-dbdf-40a5-8191-481ea53c8baa", + "5f5ffc75-c3e4-4b0d-a6c9-ff993242ce45", + "7dc135d9-076a-4295-993b-49f607b184e3", + "9ea7c425-ef7f-409f-a8ed-951a01af96b6", + "2ec9b107-c0e5-4f9f-9188-2b4298e35249", + "fbdc87ea-3c72-4346-99c9-f3c7120bcec9", + 
"ee1ce3c9-ca50-4ee5-b422-395c8def4b2e", + "85069b09-9570-415d-81cc-214db533af99", + "364ee35d-9ace-4311-a6c1-816bfeb0ad31", + "81c1cfda-e095-4878-822a-f48a2c949aef", + "0ba20109-635c-4be3-a7d1-99fe22af40ed", + "8b064e25-0416-44b3-a995-97bd7fc4d21a", + "e4e9e606-62f3-4676-af85-a6f38673b3e9", + "ce8c164e-236d-44d8-a69d-3227a2913076", + "01c378f7-9805-4a1f-b3a3-8a9d05fcf37b", + "56121476-5ab4-4676-a1b6-cce7b0758539", + "b98bc6a4-e6a9-4f73-a424-d19f72814670", + "badfcfac-a0c9-4f3b-9f0d-8a79d1117fd9", + "2130bf3d-c4ba-4c79-bb1c-75fb3eaf9121", + "3f6a22aa-4f12-48e7-aa1e-ccbc16be743b", + "b72b1948-c511-40a4-b5b5-466e9777d2c1", + "cb057874-b9a4-4ae9-ac94-23e5fde437ff", + "178eaf54-ac08-4a83-bbed-1aa0c1294a11", + "1e5d6af4-1af9-4705-b4e1-d583d582d721", + "fb9d5048-7078-48f4-9a30-cbdc9b82246c", + "9971bf8e-324e-47ec-a1cf-a3a00b5e6a2d", + "ba6eb4ee-fe33-4e15-a3a3-95f5c93bba01", + "7010d08b-fcb0-4bfc-8cd4-02c873e3d65b", + "5ae19a85-5cee-4c0a-a586-7178021d8225", + "9a8a971d-fa12-4aff-8089-1d4422d7f9a2", + "e2c9e189-dce3-4b21-8710-c85604b178f3", + "cea957fe-6533-47c7-9faf-580be8783f03", + "d98e4da2-89f1-481c-ba48-99ef7eb182be", + "e489eb9a-436d-4250-bb2a-f988508b35df", + "95b0ed7e-0a6e-4648-91e4-491111f8ed92", + "3c3f47c3-c0de-41a7-b90a-64292887a8ab", + "bbbceb11-3d25-4647-a93e-5c9ced0929a9", + "a1d1c5e7-1f09-409a-981c-d396e03dc86a", + "a0a99f45-6a00-4660-9765-21efae0fc424", + "16cbf6cd-7e9c-49e9-9bc6-f475f35a1ee1", + "df897688-51d7-40a9-a5a4-e6dcf9cc9d74", + "0a0ace18-f772-41b0-9b28-543707e97c40", + "99da342b-2d81-4c28-b072-c67f328bb976", + "55d3efe9-945e-40c4-99e7-e5aeb0657e5e", + "6ba70482-c7fe-4d90-9737-e010785154bf", + "74175d53-5072-4c91-b0f7-c7d1dca3cdc2", + "ea1a7442-fbe8-478a-8e33-f5dc83305a8e", + "154a8475-bf62-43bc-b064-5d4bfac2594a", + "b49e8323-5c36-4d47-a719-0f60d7f1818d", + "61cd9b49-cd4a-4e6a-b1ca-b530d5fb6914", + "0fe0a6f7-2568-4abb-921e-484cb228c2c7", + "7becf14e-b191-451c-b666-6e4667f6fff7", + "8f6f6265-a942-478a-b923-29007e56a29c", + 
"a39cfa86-42ba-4409-9775-7d100e17e549", + "eff29983-ba84-4615-8370-2b160ac35cd9", + "f31597fd-b5c5-4311-9bc5-d7295ad8c5cd", + "d65f953d-1429-4676-8997-dd1755a18c69", + "7e553a5e-14e5-4ee2-b28f-aa83df8dc286", + "6b99b14b-2b7c-4ae5-a895-4d28c0ed4203", + "a1fd11d0-8779-4a92-9944-a49c85ae14c9", + "6ddbf371-80bc-4917-b1da-feaa0c5f7f40", + "ecced50f-0362-46fa-92c6-55139f5224f5", + "f57202ce-ea3b-40c5-9a96-2f9c69c7611c", + "35e22317-83a5-428e-8611-9266c193694e", + "3c140582-23ef-4131-99bf-ca95c472db37", + "1242aa07-2ec1-4c13-8674-c2d0197bae1a", + "cc00caf5-5434-46b2-9cfe-a5593f174201", + "c2734086-ee82-4468-bd05-46dab640a79d", + "cd3a551d-fb7d-4392-9356-72cb8a9c09f4", + "22bb1090-8203-409d-bfef-d1ca0d937c18", + "20d92525-0b3d-4f0b-a18c-3157bf687699", + "d625abb1-bd2a-4246-8070-7c200168de0f", + "81e345b4-cf99-4fdb-a1d6-e106d92fc0f6", + "5285f8e1-a3b1-4ae6-ac73-ee7ec17e5a35", + "ce2d77c8-10d5-4180-b099-d932fa83e769", + "ee91715a-8947-42f5-b512-3c531f7498fd", + "d712c2f5-ce4e-495b-af3d-856246c94ebe", + "5d2b90c3-9ffb-4c9c-8707-2f770a2ae17c", + "7ef4f6e4-052c-4da1-a9a3-4937487deb7e", + "f592d7b8-fabe-4b78-8e3f-c4ae22f1f7ec", + "248ec979-e2f3-4586-aa27-1398d06d3327", + "0ddf31b5-144a-4493-b5fb-c905309a1e75", + "28f556e9-f78f-4c5e-9b36-c08433512409", + "f2eacff2-61e9-437d-ac7d-619ff0a621f7", + "bbec7220-7738-4ab3-9f6a-475d7d06cff1", + "4d695bf4-6929-469a-9bbe-45d20be38246", + "c9869335-1652-43ed-8704-6ab144595fc2", + "e0ba115c-788e-46b3-b7b0-b622ae2a147a", + "49b88807-1e37-4d8f-aa2f-3327a2e45974", + "65189622-12a4-4655-adc0-202d4c59d1a0", + "3daec6fc-788b-49c1-a534-adf82b4942e5", + "726a4143-6464-4638-93f3-74022d3e3d94", + "bd632c9c-b173-46c4-acd4-e6e4e6cfa720", + "12c5538c-4461-4ab8-acbe-74f2d489861f", + "cc5ddba8-724a-4f94-9aaf-e0c5b9b24b4f", + "93435aea-940d-4e30-858e-386c793f848a", + "14413776-7f1a-4b6c-87a4-865cc492036f", + "b9c7e007-862a-4a4d-8294-f8dbe1b63c9e", + "104209c1-edb5-47c9-8499-c427dddee458", + "89d93c19-5599-403a-bf83-7883422c756a", + 
"86c37132-1be7-498f-a66e-8a875d89ef6e", + "65d9b7db-550c-40fa-8202-422b2ea7742a", + "0af37f43-5c5f-4dbd-9a75-5ebcbdd2ba70", + "793c3d85-efd2-4eeb-8334-a60fcc40648b", + "88bcefb3-9ebb-416c-b5b8-27afcac0a75d", + "b36bd431-226f-41c0-baf4-41551a4874f9", + "e7e0efc9-a5a5-4c21-a7c3-6dbdcf1765ec", + "986c6cec-39a9-40bc-8689-cf73643cf11a", + "a0be5872-f8e9-4e8b-b241-be71b805f43d", + "04530ead-3e04-4f77-99a0-ad9848730f0d", + "87f7ba0a-e7c8-4b57-bd2d-5a79b4c6f9fe", + "971e12ee-168c-48cc-a3ea-8411681cf074", + "bd47a555-9148-4054-96ad-13312d7a0f7c", + "5a2d1ee9-9d57-465c-9a37-9a83a5e33cfb", + "fe15f6de-5c17-4a2c-8e5a-19ddaa02af37", + "65ad5933-457c-40b6-9bd5-fcc4afb02fc9", + "cc14f898-c516-449f-b8a7-3620077e2167", + "c1c4e61a-0050-4f19-ac1a-87903f2cdf3a", + "016ebc47-e675-463e-938f-d50517cd25bc", + "db5f3460-c9a4-468a-88b4-e14f2716d452", + "22ecc415-db14-42f5-be62-875d4de8dad8", + "1e282ac4-efdb-4dcb-8af1-f5c5c6308b87", + "6a7b9f47-e063-4437-94aa-dd83e81afc94", + "58b36f9b-9c5b-44e8-bd4e-11dbbc2f3c46", + "fdbb2bae-dd99-4d5c-9098-1884a11ea0cc", + "01731ec1-1bc6-4c52-b1e0-dbfdcf987609", + "3477bb5a-999a-45d1-aa27-d7598f85c89c", + "06040e9f-da19-4ed6-8d03-a30228daaff2", + "aa335299-b539-4726-9812-7946254e265e", + "bda3ef1c-9146-4af5-8f06-1ca39f768c15", + "5e102ca8-9ecd-4a0c-b1a1-6845c0fdb4bd", + "b82dfd25-3d72-4c5b-8bcb-086df4688049", + "361a1d44-aaee-4ab2-9cba-fe3975e08b18", + "9845c929-34de-48ce-8e07-a913ac65483c", + "e2d4544d-8e08-4912-91aa-78ad08449b14", + "61c80c08-b4af-48f7-a388-1ae81d8295ef", + "ade4ee5b-3ade-4bcf-8318-6deac09872e1", + "a37fbc05-d8e3-4f72-9426-550ff4b2df8a", + "8b199bff-be2c-4da2-a47f-3c6e4931eb43", + "cc5d21a1-3b61-4bf1-bc4a-3186dfb0bd51", + "e1c5c28c-1a7b-4c79-a266-13c44765d2af", + "dda727f8-39d4-42a3-b96f-57bae700f8b3", + "e44cf62d-d5a4-4a08-9056-e3d41f2a47ab", + "1a5aac6f-7472-40e3-a1d1-f5a3f26b37c3", + "37626739-c60a-447f-93e7-faa5bcd80c2d", + "3ba664a7-7fc7-43af-bc34-c2d83f4b9bc6", + "f546b943-6f24-449f-9858-9671324331d0", + 
"3db02d3e-83fa-4de6-a9e5-e9b0d4c1bfc8", + "e71097dd-99ae-49fb-ba4c-3509151889fa", + "0f51b627-fdb4-437e-9779-34915f55988f", + "9990556d-710f-4dd4-9d50-920ec0a9ce54", + "09b5d4d4-da2b-41d3-9cde-c758817bf717", + "81892797-c75a-4c81-8f7d-896a380bb51f", + "10617df0-a931-4bc4-adfd-51716cb8d19c", + "f790bff2-8756-4734-b69f-2e80d529e1dc", + "2714fd73-436e-4d8c-8f14-de0d8f68fe9e", + "81c6b1df-a1af-408d-87a6-9ad868ff195d", + "ef4c799f-3617-42ae-a6c1-a1e9df32d490", + "cdb1decf-afe2-4e8c-b4eb-4ce8f532f60c", + "5033c170-cceb-49f2-9ca7-d1b893c78178", + "b99737a0-8d86-4576-89d7-3cff6e8cfcc2", + "1173f083-aed4-40b2-a545-ba24e2025f03", + "8f93113b-d1d2-48cb-8004-420b1ac533f7", + "4f17458f-1cdc-49db-adf2-7cb7df374a18", + "217b9b1d-7c9c-4cea-a562-7896f8c45b67", + "f8927cc3-b3b8-46a5-b406-b238ab2283f1", + "6ec8f545-4180-4028-be30-6d9162fddceb", + "1681636e-4269-4b9f-b5f1-a86ae831c216", + "2bc0b663-dfef-4608-ac4c-e7a05d069ece", + "5f45faa4-cb90-4845-88a0-47b226b3488e", + "91b0a745-c6cb-4840-81b8-518aada52e50", + "dce646ff-1347-4e41-80ef-fb3dca7b43d2", + "2a1b84f5-ed63-41ac-9f17-ceedb8aa2531", + "413d6ba9-8685-4220-a7e7-6cdcbfe8ad00", + "f47bfa1c-5515-4e9c-8990-b0fa659e930b", + "460cd85d-844a-47d9-9f19-d6c190b59f6c", + "6a615fb6-898d-4d83-b77c-56a6fdcd7b07", + "3bb9392a-20c1-4eb9-a813-39d0c212e5c6", + "a26c13ae-ed00-42eb-aad4-1958e95eb45a", + "bfa25743-e2f1-4508-bf9c-b377dfce1980", + "15b20cee-a830-4501-9ae5-5ba1090c7452", + "468f9467-ed59-4bee-bf40-004438f0e62c", + "84ccb1db-2339-46bf-a0a3-3693a5680b07", + "1d794a83-c276-4307-b1ca-17b9b23d396b", + "8f4fcfdb-9525-4e89-9775-16ed47b13e75", + "5b6b75b2-ba14-44db-93f0-6388e3a11bec", + "40012426-4c69-4337-902e-56ee27ab8bdf", + "4173e4d6-c8ab-428f-a8c0-57ff245799f5", + "a2123dc4-46be-472e-a6c9-74a8a974da2d", + "475295e7-666f-4648-bc70-91396fef453a", + "4f1dd8ca-0d02-4550-9f18-9ee42d7f5b41", + "7ef5601d-60a4-4587-abb0-3502faf576c7", + "8d297e70-700c-495f-bebe-adf70903f1ba", + "28d5008c-d5bd-4774-b588-a0d99423a2dd", + 
"4381ecdd-f95c-4855-967b-9037fe7a2a09", + "ad6ecba8-9dbe-4c31-94f1-8fd8c9dd928f", + "72b2bad8-5db2-4d48-88c6-ba224a95dcac", + "4cd7607b-84b7-421c-a470-5dc5dfa62899", + "503119f7-89cb-4009-af7f-2716c0dd2768", + "97bd4ca6-7c49-471d-b93a-41d457e17e4d", + "8e66c455-049c-47af-90c7-6e51e7e7f9a5", + "5b364ea5-6cdd-4f23-a633-9d9ff9fcc459", + "6471dfed-e08f-4d7a-89e9-ef5028b4e244", + "6f7f36a6-b2e5-4e13-b159-9060db6d9658", + "2ee45240-4c3f-49f1-b925-dfb0e5cc035b", + "93d3b4bb-6ecb-4a71-bf6e-c11de4fe6405", + "04e1ff59-2f6e-4a6d-bb1d-e21b6f902844", + "707f7cc1-77a3-46f0-9adf-8af5ba10d303", + "357f093e-cd1a-4f08-9508-cf2e12b71515", + "5be5d3f3-0c07-47db-b7d7-e24eab4bf11f", + "0ca6bb53-5569-4690-9304-5bbba596f11e", + "d2456eb5-6d26-4b8c-be89-08bc2050568f", + "8d95940f-9683-43ee-a766-4bbf4a9e32f4", + "782055bf-f122-439d-8f53-06904b6dfe5a", + "634d3376-b690-4902-8961-dbd9583988c8", + "e81851ec-30b7-433a-9dea-a0af7a2f2fae", + "1a56e6d3-ad31-49a7-b7d6-21ea87dc822f", + "dd541fb9-a7d1-46f8-8017-4a8b1dda542e", + "4720722a-309d-4615-b8a0-d9d29da4846e", + "8564bc1a-fc2b-49ef-b48c-3c9d9da67ccc", + "35664e24-3947-4408-8bc8-ecccb75c00ae", + "76faaa5c-312b-4405-886b-09b1c6ebbf0c", + "b559e1e4-b80d-426f-8d8f-b6eabd980f8a", + "ad460d71-5aeb-4ab2-a3d2-caacec2ab967", + "063bc977-e3e4-4264-89d7-8313b61466c6", + "8bc1ed71-892d-4b88-ba96-3c339b14d464", + "ed130301-7a77-464e-b194-f33375c5b1ae", + "aec4893a-4d6e-4354-8ae5-5af19ab4f04e", + "ef01355d-4187-4f00-a851-b17aad62b0b4", + "95fe428d-79fe-4b77-b18f-f29a975cd715", + "d6f33648-2230-472f-873a-d11c113f6124", + "23eca350-060d-4d98-adee-de1990b9cc8b", + "2dccdbbe-eab9-4617-8353-44f89a234593", + "a0525d2c-fe27-4c86-9352-19a0efe7a69a", + "3dff9f70-6b09-4e9b-8f3d-e733d6d40d25", + "0fe37a11-f8de-4df4-8f36-7136f85e71d0", + "f1c4a284-dd00-4466-baed-8b4574bcd503", + "b4f3d724-78cf-405d-86e0-68cfa4da1cde", + "7ef07b56-d75b-4446-8234-32c4363ac99b", + "e55b1965-affe-4770-a488-d7fd43ad6385", + "361311af-e66f-47ce-b845-c6b50c38d24f", + 
"92182535-5540-46eb-8ce8-3a9a5ef9b9ed", + "8bb7b070-a36c-4ce5-bde3-c81cde5bfe11", + "c3738bba-75f4-4d0b-9799-e4dafa33633d", + "6ed32a4c-987c-444c-8c70-4abe385b507c", + "1d94311c-5f55-40f6-8c58-8da4c6b571bf", + "05397641-65c2-466a-a0eb-e56d7813f83d", + "578376d2-43d3-4182-b81e-c8264d8faa3c", + "ad25b7e5-8351-4d3c-9cd5-cc1b9e6a4860", + "2e436015-53f6-4eb8-bef5-0f9a084d446b", + "a43e7472-2c5e-450f-8df9-443277c634ca", + "c14d2863-be67-4992-a568-eff91489d553", + "d54cc780-1b9c-4536-8be9-e40db15c97a9", + "2d687099-545a-41c8-83de-14399a7195d4", + "cc33c7cc-2199-40ba-a41e-4b3daa5dccaf", + "fd97a450-7eb5-4df3-8b6d-4a424f3fe43b", + "ace77bf6-2584-444b-835b-090924c16b28", + "815efbd2-5f2c-492b-9a9e-776e7d2cecbc", + "73896c08-369a-48c2-aec0-5c8348eb437d", + "307464ec-bd5d-4c8b-ac7a-58e230e61ec1", + "b9641283-6ee9-444f-bddb-79896b36ff79", + "a05aef7a-a940-490b-bd26-6bb9e54bfba2", + "5b2471e8-b01e-4e2b-bca2-12926aeb8284", + "3e4b2566-4a5b-4c67-b49a-a5a0f896c6c5", + "81ab2134-265d-434f-9720-d0627e768893", + "60a532e2-197a-4578-acce-5b92aa121c1c", + "4e5d24b0-1c28-4f7d-84cc-a0700c765138", + "91bc813c-31f4-4ada-8bf1-7830db8da03c", + "a18a3e63-aaf6-4e4e-b625-380ab7e23186", + "1a04e0ce-32b1-4f9b-84a2-74d439287f3c", + "37da6de8-5133-4715-b19d-7c8c62652b9b", + "777b2db9-5700-4aef-b59a-f5c9adf281b5", + "34762842-06ee-4a04-8b0d-e694a72d66fd", + "a587f401-e31d-4c73-b18a-e2a18ee97edf", + "93f076af-aed8-46b9-b29d-76458b117ff9", + "f2d3304c-89e6-45fa-9194-5f5614c39a80", + "ff1f0132-44af-44ff-98a2-55b2d983550b", + "7ce2f90f-48d7-4183-80ce-0907483b2745", + "fc1a279f-8402-4068-b9c0-88c5a0f096fc", + "51825a98-af23-4b42-aa09-b1441178d2ca", + "7f8414cf-e268-48b6-b9a7-8300dff5f594", + "d60dbd40-d35c-4de6-ad4f-310b5f7b5769", + "23bbd846-0c58-414b-b968-c301c6ef2e51", + "6b1fbabd-3a37-4140-a3ad-44401a93cd6d", + "7f1f1277-eecc-49d5-80d1-dd861498921c", + "5e38ac08-5897-465d-bfc0-89bdaf239667", + "6347d6c4-18c5-443b-8804-2481e80f83c0", + "bdb76390-3e40-4614-b6bf-ec24a824e15c", + 
"e8fa2223-d646-4059-93de-544617b53fcb", + "0152f746-61df-4e18-9218-854a2b85101e", + "89e8c997-ba93-4b52-8976-b6be4fba732e", + "271a6bf4-5985-4e1a-8e5d-63d15a964c8c", + "c430b4e5-2fc3-4b8a-a4d3-257cecae99ae", + "db47e04d-7399-428b-8a13-225a78048c7d", + "dcdf5f65-f034-4ebd-918a-d00e0c7af33e", + "86c4c620-2d88-482e-a87d-2c9dd363d506", + "4832c59f-f4e5-4b3d-b99d-7ab3780e814b", + "e8934389-e9e3-488a-9b70-407e05fca233", + "d099109b-ba87-489f-b165-a259821eee53", + "2b54134f-bf6e-498d-b1b2-2d550b5f5442", + "c0927789-fa1b-4a1f-ab75-430ffffa8f8e", + "b94d9cbc-83ee-4afd-ac26-fbd7554630fd", + "a8eb6012-089f-4d5f-ab80-0129c8edc01a", + "3976ef98-83c6-45f7-8959-1c91f731f74b", + "df7f5892-0d3d-4527-b420-5bc549dfa604", + "4f7984ed-17f4-4adf-878c-7cbb65579b1e", + "c0194f69-1f52-4d0b-aafb-5684ac301c1b", + "40b3b4de-9bd8-4ed6-a572-2838a1082cd1", + "21e5a347-493c-4da7-918c-46f940702e3e", + "bc9ea921-9fca-40ae-b8c8-48458d43b233", + "88e6f5db-2fbd-4e90-9688-6952ea297698", + "4aee95c7-7caf-4f0d-8c21-88239197cc86", + "2d47d31c-173f-4d2b-bab5-2a69e8a86c70", + "84eba708-1197-4a26-b9b7-46fc9966cdc5", + "2859c052-448f-4e3d-8c77-164a49d15d50", + "c60a46a9-c3f0-4df5-a9a8-16825d5dcbe3", + "0cac3611-6756-4a7c-b5be-3eceec1a36f5", + "d7858ff9-3e83-424d-8794-9070cf721693", + "59a78356-616f-4432-a75b-8a6a6a8891d6", + "44776c4e-128f-49ad-a267-61fd8b8be346", + "a3b88288-e4c8-428f-8924-fbda9d6b5e1e", + "2c4506fc-6f4d-497a-bee0-ffbfb80a4826", + "685f55fa-597e-4804-bb27-6b1e75c112c0", + "f6342fd6-c151-4333-9392-6742e5a72655", + "a5d0621f-60bc-4513-93eb-839e3022ecb0", + "ce283a46-6b7a-494e-b63d-cd6940ffbb35", + "1b31dc5f-0e73-49ba-807f-0f5cb4c168ea", + "6f593978-c3d3-48c7-8e0b-1f028c1ddf74", + "ebca33a4-15b4-4cc7-8a7b-303e18126fc8", + "622a2c4b-ac31-4676-8648-ae80277bc7e6", + "19ec23fc-1bdb-403d-b6de-f70bc6769684", + "28579139-c6d5-42e2-a3c1-ae1b346eea09", + "abc91a44-04e4-4de2-ac17-920c1cd59e1c", + "18256be2-a627-442d-80d6-0be9d1d80610", + "b182e185-a8e6-430f-a186-1752f769055f", + 
"f38cbd2f-9a1d-420f-aad2-eb04ec285ea1", + "a930b68d-b9cb-488e-8384-a2ebbd900b24", + "f17b9caa-7a42-474a-ae32-c7b4aa7b336f", + "10bac215-2727-4552-90bb-c80a63df9ae9", + "a3e03029-7002-4807-8366-b69321f1cf5b", + "4b89067d-4278-4ecd-8f08-87fdf7e4b5c1", + "aebcf577-bbee-4852-8ed7-bc2720b2f2d8", + "5e343b83-509a-475a-991e-bbbd220f8840", + "10ada54e-43e2-44dc-963c-7b806c07562f", + "5d05677d-42e4-4c20-913d-909f59abbf85", + "5616e2f6-7596-4214-b3b7-0a099029cf10", + "bd02a110-7a43-4235-9533-5c53af8f85b1", + "488304d2-c6fd-4d3e-a823-390a6a55615f", + "deeae5e4-9fe5-4903-a182-a7e95fd4c4db", + "0a790f56-239f-4e4e-849a-7644743dbe61", + "ed5d45c3-216f-431c-b392-f357f76f2060", + "42795abd-4c0a-42ec-97cc-c122621dfc97", + "4dfaa848-4ccc-4f82-9bd9-4be909e09b71", + "6eebd3b2-2ec1-4f62-8b00-25d90eeec1f5", + "d6e46d4e-c442-40e6-9629-43eb8816d3a9", + "1c8fd3d7-0212-4d47-8dca-19b6fd94c5cd", + "9d573930-b9dc-4862-9f68-eb7e0cc0424e", + "62bac275-335b-47f3-9ec2-a39c63464ebd", + "e12ed3ba-0ebe-4490-b6b7-01c41fa76437", + "e7935489-2ddf-4564-8abe-7e049e2d2470", + "19781dad-94c7-43bf-b6c2-bcabeeb53035", + "11181cbb-0be2-4039-b198-12faa4525467", + "4102d2ad-fc25-464e-bded-bfbc83f567cc", + "5ef7c7f2-2257-4e0a-b0f8-8b9fdd20466e", + "98a1c21c-3238-4f37-b659-b38c0f966fef", + "f471c291-a562-4a44-a410-ad8881b4c730", + "eaef7b42-c9bb-4ae1-8b43-8b22d1b60ad4", + "d7a99bb9-ab3a-4f2a-8aae-d17e6abf69f0", + "f03ba8f7-ad92-4462-b693-c5b3d1eb5404", + "f26571f7-f385-477d-ab82-ba03580fb0b7", + "07725d17-4c11-47cd-a08c-9a9bd2da59c5", + "fb52fc15-e698-4cec-91a7-aa3918bebf87", + "25fe3778-1fab-42db-8f8f-7a5c3c7ad181", + "e9304bf3-8b7b-41a3-b935-8ce2d6e841c5", + "fbe62b0e-cdc0-4af0-bea4-71836357b4e8", + "5fc26692-f48f-485d-a660-339edb2cc823", + "84267f8f-b07e-494f-899f-15ece1dddfc1", + "68672eea-d9e5-42f9-a74d-534d81a769b7", + "75d8ec82-30c5-4760-b74e-b8f479c9bc9c", + "581ff3b1-dc5c-4a60-86a4-848d6de30f66", + "a62ba0fb-f1c8-4698-af7a-04e0ea84bd51", + "f0d937a8-60a0-4fad-bed1-f0af906092a6", + 
"398d6804-cc01-4191-a3c9-3d0916365e86", + "999201ff-47b3-4669-8fe6-8c4563d33fab", + "a13b587a-0ea0-478c-9c6d-65c05ac3efab", + "3ef7d6e8-c00f-4dac-bbf0-345c9b166fdc", + "bad6706f-ea17-432b-854b-7cc23d079bfe", + "ccab3a5d-c012-4dd0-8233-680f4946ba49", + "76379fdd-7ecd-4639-ada4-d0a48a8bd290", + "12c22398-0167-4350-b242-d0db4b69db9d", + "9425a975-388b-45dd-922e-f261b30372a1", + "88c317bf-c7bc-4d7a-a6be-71c7a6ce7214", + "90508480-7df0-450f-bb1e-80462661cd7b", + "3366d3e7-4953-4071-a381-7c92f295e73a", + "05cb44ec-e18a-45b2-a5bd-b37cea5d5044", + "cd50448c-2629-44a0-8a4f-648d9b5c1ba9", + "fad15200-842a-413f-947a-65629ec1c0f2", + "223678a1-161d-4433-947e-78c3e91643e4", + "7904037d-47a7-4b1c-bfd6-6567b8fdc78e", + "214ab4b4-de47-48f6-878b-0f48766ec69a", + "c9a78717-1659-42f3-9e7c-3a0ab344a2b7", + "bf629e96-fde0-4b49-9856-47acac31c006", + "a467a932-4f53-4a5d-83f3-05275ebc118f", + "8c52e741-d18b-4c79-bfd1-b2d1d70d7131", + "89e66678-0003-4f2c-ad99-02218aafe75a", + "5245b74d-2eca-4cdd-b371-5b9bb745b19e", + "f3dc6b63-dc61-46fb-9a81-758a2fa11d12", + "9b264e32-3aae-4906-8c65-e59288c9dc02", + "04d332a7-776a-4334-83c1-c83cf8ba02a1", + "2bf5e486-a2f1-4292-a4d2-1cf0a7f6fbb4", + "c6d7a707-89a2-42bf-bc3e-a819eb96e4f7", + "89a77614-8bce-4cea-b0fb-ba9014c40f95", + "6d6c5b04-f20c-4944-8a4d-c7006499a2fc", + "49a43be2-c6af-4c04-9313-89743442d4ac", + "1a3c6105-3713-4490-ab2a-e99626e18265", + "1bf62b84-d2bd-4bed-9caf-e743cbf5ca4b", + "28b8668f-47fd-4b37-b88e-a5e295b3ba69", + "e4acd8d9-e189-4cbb-8229-7215e4edbcd3", + "20416b6b-a26f-4709-94c0-06fe7ed1bfc1", + "28dc9d07-6588-479b-8e8b-3a2f0b50e5d6", + "a91926c5-037a-4f2d-9c4a-4fdfa05a5b24", + "24261a37-f071-4c44-9ca2-5d97ffff85ae", + "bc0153d6-5168-4f29-b711-8013a1349f3a", + "43d2c541-7de5-4668-9934-de649ba9d768", + "5500e040-a248-4e4a-af41-0ad4212f50b2", + "1833dc92-cce8-4052-9684-c8f92a95d34e", + "cac2d9c2-a777-4b34-9e0a-bd25957a8ac5", + "c6b50a94-f72e-4171-9516-7578ceaa4e55", + "f99730bf-6b8d-47f3-a6b5-3ddd75585618", + 
"0400fcdc-9764-4985-bd1b-6b985449a392", + "2a9c0d7d-0526-437d-b2fd-b46bb38a3af4", + "6060a1ff-fbf7-4c66-8dc1-12c1133c242e", + "c17438eb-a416-4717-9fa6-5d3af2fe59f7", + "4d5e6c8f-a5e0-44fa-b63f-b397dd919502", + "0240e8e8-4afb-46c4-90b9-85bcafb53ed6", + "501cf5e4-4d30-40b9-a45e-419928c51003", + "a8900b71-fa54-4e13-8905-5e41c0c5ac76", + "38d5e157-6479-4843-9e9d-70bd73cf6d90", + "6d5e3007-3878-4e58-9fe2-bf86b7bd79e3", + "b04ab495-28c4-450c-b9fe-7247dfdfa87a", + "190a9fbf-085e-4a7e-9260-e24e27622248", + "dd693c34-78f7-47c2-a22c-feffeb7da401", + "997c5c18-b5aa-4559-9290-aacee8671f28", + "ee6c3b5d-8585-4193-8c34-54ca2be04159", + "ba0b4541-5a5a-4b4a-8ad7-234cbb7d018c", + "9d8547b6-f5dd-4348-9d43-7666a01a1ad0", + "b39e2951-c0d0-4eea-b488-b6425cce2bdd", + "88a0f0f0-0ba7-44d3-abde-101d63620c4d", + "2d07d5f6-49d1-41a4-855b-82875b7cedb3", + "e932944d-f68f-4a16-9c6d-63c406080428", + "87ca82f9-563c-41d1-99fa-c65637ead500", + "9774e474-5fe3-4c30-874d-f060f67813ed", + "474b4bf4-b62e-450c-abaa-1e2314af89b5", + "ea4915f1-967e-40b3-88a9-61e8ab90e809", + "322cd848-f04a-43f7-af48-791080537c11", + "05b79e81-d99b-4fc9-bf33-87392bc9ecf8", + "7d98544b-b0bb-4c4a-94a0-5623661adcb7", + "6be342fa-82ae-4017-8c6d-292b38e4bb2b", + "c5cf0c8a-21f3-474e-8965-fbd275d7be5f", + "4de09339-50d5-420b-a155-f14dea2d905a", + "6b58279d-a9a1-45d5-86f6-9c461424057f", + "f5991e34-bba9-487d-90d1-00ea79818eed", + "c58ac7f4-08ea-451e-bffa-c6cfce996a29", + "06b336ab-df55-479c-b188-d088e7ddc270", + "4a15756e-96ce-47d5-8924-378ff74ddda9", + "56515fad-97cb-4930-9300-563e663887a2", + "07e4c7a2-6ebc-4ef7-96c3-de3f7409c7c2", + "27648305-0030-430f-8586-0c5856d77b17", + "6e46f459-eb47-4e09-b325-82553a1e2279", + "b5ada1f9-16eb-4ef7-8ad8-d015ac7801a9", + "d8477f73-55ae-4a02-8983-ef8cc75e618c", + "f1df5ea9-d143-4941-9898-2f262bffc4bb", + "cda24c1e-7cca-4113-b429-01c302ca657b", + "7b32a720-e098-4eeb-bd8e-17691ad6f799", + "8229d30c-4168-447f-a31f-17d29bac3ae2", + "f99e034f-c9ff-410d-8a0d-6f6eef86e930", + 
"eab90516-0b5e-4893-8738-99ea7a591f30", + "5f93ad33-e798-4ad5-96b9-8ac5a811ade9", + "f884facf-81f8-4d2c-8f0c-3ce1099d8239", + "04d45bda-34d6-483a-af34-ddc8259370f6", + "7084f99d-170e-4832-b0f6-563d36c1494e", + "e42678a7-94d9-4598-b00a-396eebf5aa48", + "6ef9486c-ffa0-4f11-a64a-9ca47a509e28", + "23697cd7-4b4e-417e-9537-dbe3e6380f64", + "ba741e3f-d369-4d5e-9c95-61076f3742e4", + "b71ce081-bffa-4567-8d5a-ec40fe08316f", + "761d50b2-7aaf-4552-b711-652a5809ddea", + "c528b8ab-ab5b-4e57-b5e5-a4087fcf8c64", + "013eae33-7c61-4459-ae3f-6c11de06d74a", + "a70ddf1f-dbfa-4fa1-bbc8-2ac3ab18b0c7", + "b42fc73a-87de-4c4e-86b6-2a286192bc85", + "4da890f1-f9ed-4323-bb0d-81d7184597b5", + "3fb3eb1b-a9a5-4002-828a-cbc90b7982d5", + "c24634db-160c-4502-9f13-9aeb99927c69", + "0eff2b09-1658-4c3e-8d3f-58a41c379ede", + "6d4fd019-7978-4792-880b-a255cfe42b6b", + "538ec3b1-c1e1-46ee-91cd-f7da0e48b5f7", + "f789f805-3202-4f7f-93b2-0e0739947347", + "72e5846f-3d42-40e4-bcf7-f6059a620187", + "7302951a-d626-4a7b-9208-8ab2840337c5", + "34a5119b-fa1a-487d-af0c-083708009b91", + "3ebf7f24-88f4-4d75-9864-bc597d5140a9", + "98d6a279-c538-4286-92a2-0809a8e8e66c", + "db51afde-0a57-4d26-af93-79eed794c69c", + "c7905b1f-5022-4881-a0b0-e110138c80c9", + "1d069a47-54ea-4e09-8cf4-16cc82f9b17f", + "de44de3a-eab5-4694-8e45-badc1d802050", + "5d44dc57-b27a-4e40-aaa9-dbc86db5ee4f", + "343afc86-5923-4f3f-9016-c30f8c35bea8", + "74a91ca0-f06d-4b9a-aa74-9b4a6d4b020e", + "82f9e99a-dfac-430e-9154-1379cf639605", + "6d7cc20f-e56e-4843-807d-889ffcd26881", + "8da733a9-a48c-46bf-8046-90ef8e33fcb5", + "61e42083-878c-4bbb-a2c9-2aba4e7be191", + "7cc62aac-a94a-4d62-ad3b-c7075fe792b0", + "c173d907-6260-4ec2-89ee-73a00f8795c3", + "3e0ff61d-f463-4b8e-84c1-651914a92970", + "cd923dc5-5ee5-40d7-9e69-fb9398340411", + "5874b23a-c286-40c5-866f-9e4dc1d1987d", + "a599c540-03a0-4d6c-a014-98002e8e178f", + "93facd96-e81a-4fdd-a6a5-aee04ed1b666", + "0bcd86b8-cf87-4ff7-a586-360f784a6185", + "918b1ecb-73ff-4477-9909-0d9a66d0a89e", + 
"9eb7457a-3574-4e6d-8d3a-e459cd58db2d", + "0800f130-2041-4d76-b98d-619caac2dbfc", + "83e7ca89-2f1a-4a10-af7b-0a28cba9206f", + "edebd754-6949-48d4-9882-cfa96221c7d2", + "ee5599db-a5f8-4e2f-b805-11a327f2ac3f", + "6dca3707-2550-41a0-929a-6c29e572cca2", + "71cc69e7-fe32-4a99-b738-6dfbd9742a56", + "c9a5f234-b4d0-46ca-9ae7-b1b9efca5aa2", + "7e14bb0c-0b50-428c-a812-4e79a44028f3", + "77148020-f9a3-441b-a08b-968e92f2613b", + "f738f5ac-2287-4545-bf20-aeb733a65a6f", + "5b8ef711-2293-4b8c-a84a-3c661c297887", + "be6cf848-015f-4f35-810a-662d3dbde254", + "5a0b12ac-3832-4f4d-b436-d65d0f314eb6", + "ebe4ec3f-0ce9-45e9-a15a-911edad74bca", + "983e738e-f06e-46e3-9244-fe65548b6e10", + "687542c6-879a-4958-9d0a-63582508f47a", + "0637b426-ac1e-4775-a070-266ca9c73d51", + "d193a609-86ed-4a7f-87c9-ab840912d6b4", + "92294ef9-63fb-461c-9013-5fb77f6230d5", + "710f390b-3e50-4c30-b682-86fa348c8310", + "f417d8e5-892c-463b-8168-5f57deaa8af5", + "e466df93-2971-4105-821e-1d583405afaf", + "03c2144b-a92b-40d0-a6ab-5423b839f516", + "e7f23650-64f1-4476-a22d-a6c3c81e6921", + "989b0d12-23fb-4f25-ad47-9b895e69b4d4", + "9f146436-ec77-4569-be2e-85805a438573", + "c94aa51a-31d1-432f-95c6-c69b98afb523", + "fbd24006-d894-4fee-be1a-bea9974cc5ab", + "4f155adb-c7b2-4181-b8c4-90d085577a8d", + "e3ca70fd-f594-4bc8-8952-5fd7095c3b99", + "18bf909d-33c6-4107-9c69-05a3f8d9871e", + "9a71c81f-b02b-40a6-b8a3-77cbc13b0c7f", + "e3630f42-b550-43c4-b75c-230eef739162", + "19686843-af9c-46fa-89a9-18bbe5ed6da0", + "7a800315-f8fe-461b-ab9d-17c8e0d74a81", + "2eb01197-705e-47f1-852e-fd3815bc4cfc", + "5c46ab18-4d6b-47d0-8dd6-d3bed18faadf", + "8bd953fb-ec23-4699-96f5-9422e5742245", + "5d7dc542-ebb6-49bc-8f7d-4fe14f9dbf94", + "1c393269-7db4-4c0d-bcbe-9eaa65b50bda", + "9ac6a483-74d4-4df8-ba11-579f1ad40783", + "63b602a9-704d-4bf7-9477-98100bc81cdf", + "8eaaa26b-0467-4d6a-9a06-f5f0487926c2", + "69543226-6314-47f1-b5f2-61c2c15bbf00", + "804a397a-d183-4275-a869-a7c1f4a0dbd7", + "83dbf129-067e-435d-88cb-4bb54e418e81", + 
"f91b6703-f92f-4201-88db-4978fa886c79", + "19c01f92-8c25-4454-a271-aa5641cf9488", + "7bbeaf24-566c-4862-af0a-591d5589c07d", + "4762276a-f682-45f9-ab7a-eeae7557d9c1", + "3b93832b-56b0-44fa-9fce-5699cbc96177", + "112fecbc-b6f0-4a29-9185-cf78732e738d", + "e031d2c0-21ec-40be-bf8f-2ce046898421", + "89676c6b-1e92-4202-9f8f-93ef1c2d9bdb", + "b5e2a955-79ff-4ff2-bfe7-64c6119ea752", + "9547b8d2-12a1-4341-9c3d-bb3e3c129bde", + "1ceb794f-f0eb-4368-93e8-7550fb7f2352", + "46cf4681-05ee-4ad5-b71e-8b8459b8d919", + "8bf436b6-35e8-42de-8cf7-2dc2e5c3089b", + "97cf3a3f-b5e7-49e1-9b68-007748fcb5fe", + "33e08159-e431-4ae8-a906-7c6bd1ee5e45", + "5ec20ca4-16a7-4877-90fc-970f41afa675", + "ad1cfb7c-dd75-46a6-9f17-7b21262f1eb6", + "3c37d739-c221-45e7-9040-69b3ba44f3bb", + "37f8a5b7-262e-419a-b0c7-bb83c359334d", + "d0fb35f0-feb7-4ff2-a171-cdc7212bd863", + "b1760b94-34ec-43eb-ae5b-71846e4efb16", + "c3a46255-fcf7-4bda-8568-f37c34578cb8", + "0b1894d8-a07a-434e-9c26-d73bc97ac825", + "ce7328de-63ec-4ed8-9418-88ac1a58248e", + "f0ea56c2-b4bf-4534-a92a-803c3fec70b5", + "9b848c89-dbf6-43a4-8d67-54c68d8767b8", + "ae77e88b-24f2-48d6-8e46-a1333c7a4303", + "9ceb00d6-b2ac-45e2-9dd7-7d0f68a23dc2", + "b6d9de54-45ac-4430-84d0-72d5b47ddf9b", + "59f12a21-d2c1-4584-bf09-d3b44e5e583c", + "a7ca2a4c-0a7a-4168-85bd-98108491db85", + "0f7472fe-49e8-4f1e-b5bb-733230a1ff6d", + "0febcb25-927f-4032-91ce-894cf942fcd9", + "5f573e63-6808-4693-8247-774ba08cd403", + "4b1ed080-cd95-476c-a50c-11a524c319fc", + "e281f990-13ee-4833-8c5d-64faec231f76", + "b3cdbf05-d219-4cef-a14a-97a56e5ac25a", + "e725e363-b51b-4d2f-8d15-521d7aa8330c", + "5196f8ee-1b62-4735-8fc0-a83d66c3473d", + "7a9b9026-c2e8-45c4-89f5-afda9689dba7", + "f88aef03-10fa-4203-80d3-5e10b09cf8af", + "d285967c-8770-4781-a044-19f6c075fb5a", + "85249ab3-63cc-4f91-bf36-2d8b40c2dfb7", + "a4223af3-0ff5-4676-b2fa-a18a0ff7c152", + "9f8cbfdd-ba6c-435f-9cd3-f7b073fe5447", + "b6e73c06-9a82-4cd2-95da-7f53f0ea31e0", + "29ba17df-9f7a-41d4-8d7a-b7530444c558", + 
"11f8aca9-da34-4e15-a59f-627a6ef5fdd7", + "0c140f39-d3e3-4676-b1f1-f65f02281e17", + "a9b3c930-7200-4cfc-9587-0916022864b4", + "a89fe334-0b7f-49cd-b2d8-c907ae450c4d", + "b5b7d3f9-44ca-4c05-a768-fe1b779d3114", + "7a420e51-d9fc-4428-823e-6ca38ffc4117", + "c1d1681e-99cb-419b-84f6-4740c06cff79", + "ca7f8dae-4b1e-453f-9307-80c91684e95d", + "6b172d3f-0e31-4fca-a033-f0d9666a71ad", + "a945d646-1ba0-4c6a-a852-081160c62cb4", + "1ac29ccf-bed8-495d-ad2a-8c4c011ec6b6", + "a9f8f01f-b8d0-4b4a-af7f-20e692fe9b1a", + "c7b1e098-3e4c-4d79-bac9-e7a4e88b572e", + "034770ab-5090-4ab5-a922-97856d5bcd83", + "ddbc0c52-a8cc-4cfc-9301-cd0498ec26d4", + "a228162b-fe80-4fc7-94e7-b034ec348136", + "dbd06115-dc4a-45d9-a34a-8d72855c1d3c", + "13d8f74c-ff12-4457-b356-dbeb079f6ac0", + "609ff374-c6be-438f-8ca6-8411f2f7e17a", + "c7c41173-ae31-4b07-94bd-138f1f3671c0", + "8f63b866-fb3c-4d7f-834b-3a84c220f5e3", + "432a3704-fb03-4e17-993a-3de4b6032bf1", + "3ef4afc4-3081-4e7f-be54-67a7d216745c", + "4753a5c3-4b6f-45aa-906d-bc18261a58c4", + "05b50ff1-c33d-4207-b51c-45da044e111f", + "76796921-4665-48ab-ba5c-786a39eb03e9", + "327260ee-784f-4b63-b218-15fa3fc3cca2", + "7b9377b1-4bb8-4a06-b51c-273bd1e0af2a", + "6f4ad13d-1ffd-4086-9431-855458b2366a", + "57cf4670-a94c-47a7-af68-4e9c61e88f15", + "7711c02f-8cdd-4a35-9b35-aeb7bdc1447c", + "799b5fd0-412b-4724-a4dc-15402ab91dfe", + "762612e0-bb25-4c85-949d-78d5d0f59d70", + "0f2c35d8-8ccb-4c1c-ac12-2d37cb024d11", + "02291ac0-d3a6-4724-b209-8d80f6db1839", + "8a1409f3-7012-470b-8074-867dd6919d1c", + "3bcedda2-4372-4ecf-820e-c65bcc7bb7e1", + "0045baa3-dce2-4ea7-bb5f-8329beb47399", + "a3fe9591-87bf-4974-851b-61b9618167cf", + "e63b0a86-7712-4b89-b3be-2ccb7afb9766", + "ffad57df-ec61-4a5d-bcd9-cf9edb544224", + "dd373001-e8a5-4e8e-8e5e-d6f26ae45e31", + "97a32378-fa06-40e1-a55f-db16a287e3d6", + "fefd792e-19a7-4c6c-87a4-276cb78b0a95", + "b0ccbf8b-129f-4a8b-811a-0c70a2898070", + "b4bf9a34-9e53-4dc0-a873-b8b9534ca651", + "ce6a60ba-91a5-46fa-a9f3-eea6c69f71d7", + 
"c9655338-8798-4bdc-a4f5-660ee0d09d14", + "837b778d-8734-4493-89bc-7a6840bcb91c", + "90341457-60c9-4c6a-96a7-0a400b6e0724", + "84fab343-4954-4f91-8a8a-ee1a34915941", + "12fa4c15-53f7-4c5c-a5eb-88c44ef2a536", + "3aa088ec-1910-442d-8972-09180586c2d6", + "e89600c7-b44e-4c47-a10d-d3ba594dda58", + "a8838ac1-dcbc-4191-8d8b-c24e2ef05e3e", + "737eecee-5216-4711-9a6e-5cf7f9c795d0", + "f18ce612-040c-4643-a292-73b88c1cceee", + "cd0d366d-4f81-4539-b36f-18a0121bbcbf", + "9216815c-7f88-4c2d-b67c-7420a57c00fa", + "004f9bb3-f559-4f99-9bef-596f28f42c33", + "e9f15de5-948a-49d0-83ce-8ca415cd1edb", + "c9515f46-72aa-4d63-b731-43c558afe3eb", + "de22ed6a-b7d5-4707-87d9-89bd9f406d0c", + "359ed35a-713c-4e34-84d9-0f799eeef167", + "f3021265-9dd7-4d8a-919d-60fd7f05b0f5", + "899bcedd-8338-4754-aa48-fce9498305de", + "2441c31f-d226-469b-9c8f-864c3cb42c24", + "3184bdba-eac6-4ad6-9bc0-5746589d4835", + "5d55a875-8640-4b4b-9bca-5ba3f79764c4", + "259dc27a-75ec-46c8-85a6-02c2a59e56cb", + "4cd5108f-fd91-4f50-b4ad-f91c81a6a8d8", + "d5a396d1-4567-4106-a439-320dab64d76b", + "2cb9a821-0f84-4131-9291-06134cd3ccd8", + "a86c1383-d9f1-4d34-8dd4-8d675267b9f6", + "5ece011b-cd63-4787-9889-4521a35c8a6b", + "2b52ec56-69c9-4cde-a271-175eb475678d", + "cbe98192-8579-4c66-9b0c-8d3fbaac9372", + "9116857d-0b8a-44f1-93c0-a751449c28cb", + "6fe5ec3b-8262-4069-b0f3-e8407fb9ef63", + "fdc7db87-1673-44ef-b397-013266905c17", + "e9394e0c-7609-4d92-a198-36b21f8e97a7", + "813a6d20-4541-434f-aecc-2a4b0b5c6c13", + "7b449fa3-34b4-4927-a69c-5e76258aa84b", + "0ab976a1-b780-42e0-a571-eb5cde086e8b", + "fe3bbda9-8963-436c-9dbb-e6914680d652", + "46af1e18-70f2-4914-baaf-7cb91965e4a8", + "d6bfa921-7403-4e35-a014-14a7fdd55ff6", + "0f8f6ca2-8a99-43b1-9530-3f94be7be9b1", + "97fec0cf-5b01-4b56-819c-34e7d321820e", + "f9e4a349-4f80-4010-b724-f5bc1dfff8de", + "b77750b7-9a2e-42d0-ab3d-9ae1205c9212", + "d6d34032-7a97-4a42-9811-fcd1b26fb62b", + "481f31a4-c9fb-44b0-8bc3-9e7033256e31", + "674348eb-71e8-4ebb-a77a-ae74f250e5e0", + 
"4e570a56-dda2-4ef9-ae5d-5f40e42e210a", + "92c1ef9f-8ec8-4a82-bc63-e3ad2946b450", + "3298fb6b-4fc6-446d-94b3-8d1fd4d96080", + "4ff9c628-1085-4340-b4b2-9ee127e5b4f4", + "042e8db4-0214-4560-b473-e89f74f9a46f", + "0427b289-edad-4895-b886-d122253ccc62", + "9657bf28-8491-42f8-bf3f-74199dadfe0d", + "8ea48f03-c9bb-4c7c-96e5-a7d40ee6cca4", + "cb550d26-25af-40f0-bf41-52699a4fe8bc", + "6fd30214-39d3-4b2f-a35f-e2d7dbeffda9", + "90ba02c2-1f05-40c8-ba98-64f0e7265501", + "e151b065-e37b-4aec-9856-745ffe713ca2", + "15682d82-8a7d-454c-aef3-41b24bf8ac07", + "d28db00c-1abf-478b-afc7-d54f37e9cafd", + "07667361-2964-42c1-8ccd-7f2aa39ff33c", + "3324a2b7-7025-4d8c-b8f0-2a87dc9aa49f", + "43825525-a51f-4719-a2d1-540bf7802666", + "379a380c-dc17-4e23-89d7-447a64725424", + "916ef4d6-4789-4c15-a974-e3f2c76f6b8c", + "eb16e73d-271b-44a9-8c35-693826dae2d9", + "88df3405-021a-4720-90f9-8904589b45ff", + "4b56530b-a00c-44c8-934e-34486c284fc8", + "408f2e0d-f796-48c2-9643-acba11fa7c70", + "9e67e2f5-f630-461d-9efc-d59773289608", + "ab68d126-af08-4bd8-982f-8476766604e8", + "b6114cd6-e5f2-460e-a741-6fe2f7b4dde4", + "a8be2d5e-a617-40bb-98cb-ee0b8618be1a", + "de07520f-7bb5-49e2-b9a7-44e62e4f508f", + "ae04d88b-5c6c-4612-a18f-e7e07474a970", + "7d343b4c-70bb-40c5-8913-362ed87eacdb", + "43f6a7e9-5bea-4062-8129-01c99840f990", + "fc3910c2-7df6-480f-a0c9-8668ac7830e8", + "eadee4a9-ee73-4dbe-8f9f-37fdcd50d0d7", + "42e50606-0522-42b6-b19c-8ff897640c72", + "cd3fcd38-100d-4c08-8e8f-d813bb28e3eb", + "adf94b30-d97b-4140-adc1-1d37bca74c74", + "cbd66bf9-f152-4d81-9ba9-43b797cb460f", + "5a45e913-0570-4cbf-83d2-7c50adbb725e", + "fde9c75e-a45d-46c4-96a6-e6acf2824ca5", + "5a72f659-23ab-4c29-b180-f3461b536ac2", + "3dd75591-2c52-41e4-9b24-647cf1d97285", + "fe50faaf-d9ad-4cfa-a7c5-ea2bb7357565", + "7fc4dd7f-fa1d-4c24-bbc3-6592fd0ae328", + "7d38fa5f-313a-4e72-9e3b-a1816f3051b2", + "7f892b9e-e11e-4071-99fd-bdf0bb4b87f8", + "97ed1d13-6dc8-4ed4-b484-b276859ed8d2", + "c29b0e0e-5b2d-473c-8e87-38b876faad7c", + 
"99104815-632f-41c4-895d-e6f32a9c34be", + "6faffdc6-875b-45a1-85de-bce69f611f1f", + "ecf22e14-bfbd-4439-a799-132ee6d724fa", + "da287dc6-b15b-44a4-a276-1573f986713b", + "d4c9a0dc-3f80-47fe-a70c-8ed1de76c967", + "c8dbf647-f8ce-46fb-b1d5-c2fa8ba18a1f", + "6a3b7c16-dc62-4ff7-b0e2-bcffde6bacbd", + "28eafbfb-4a3d-4317-9e07-60f285e0e30c", + "32604f56-23e9-4c50-a41d-30f5548e007b", + "8ffebe7c-62dd-4964-81e0-b8f45d7ef932", + "15c366f4-a96e-44a4-833f-a7e028118fbd", + "13b606a9-e4c1-48b4-b20b-e277d1dafac9", + "fea8d9cb-6f1c-4838-8de9-4595a814ef83", + "05138407-6e93-49ea-b66d-ec7f76fa6f62", + "bef11234-7f4c-4a2e-b86a-dba422501a54", + "8ef8b36c-a1d8-4dc7-8eba-5eaa8c762149", + "95628740-7a10-4ca3-b402-994f6cd66b4f", + "8e3b9a34-bd42-483f-9156-ec99e6d57f78", + "c5bcac02-5abe-44e7-baf9-455fbe6006c1", + "6797372e-4bbf-49b3-9a10-d5e4d6615dcc", + "332803e2-0917-4602-896b-d73cb34cec3e", + "d0a275f1-8233-4cdd-a500-2ff1db3b63da", + "bf0fb499-790e-471f-8251-465d7c58fb46", + "e44b5531-8b79-43e9-873c-8dcdf5be0c99", + "8af81a23-5693-4951-bb24-fa1765b5b051", + "9bc0df7c-693e-4a44-b0f2-87f115e078c4", + "a7154c04-8021-4063-bf79-7c410d5043db", + "894bcc22-02fc-4696-8a72-c9d8f72b4711", + "02d139b3-0f76-44fb-b639-eae02cecdab0", + "ae52dd11-5c7c-4749-9bab-45127bee7686", + "7074f3b4-00c5-4b90-b847-1631a7c10337", + "5b23f498-a756-411c-bb58-b113aafa9680", + "82dc7503-abac-4198-b5f8-b63e9da33f72", + "458f2b34-c2a7-4513-b28a-0f0bb48ccb4f", + "8beca55f-8868-4a7d-87e9-e55e6deef059", + "189d1a69-d643-4a46-b771-ebdcbcda0cea", + "0a1bc5b5-9d42-44f2-80d0-98497c2c8c75", + "d591fd3a-1eeb-4b0f-96da-fac9a89c0a08", + "ac8098a9-9f05-4502-8af9-3be926c42dae", + "300855cf-257d-47e4-b8c6-560fd8b5f11a", + "b578826e-0a22-4285-b57f-b96dc939d274", + "3d42e507-238d-4f97-a5c7-b8f1d751c35d", + "fa390862-21e2-4dd3-8575-36a093afd2e6", + "393076c3-9645-4ab1-b2a3-227bdbd061e0", + "b5616328-6e19-4d05-bfde-78a424951aaf", + "fb8b7920-cf35-43b4-8bc1-3b89ce1661f5", + "01626e2a-8090-4d6b-8764-4d75bb680b66", + 
"bfef17fa-b6cc-4888-a46a-6cadba9a295c", + "d9e7424d-872d-4e89-ab36-0d5420610829", + "b20097c1-79c1-4493-b777-fb5c74e38e2b", + "7108d64d-5604-4370-9302-36cc191a490e", + "5a0cc6c2-d0ce-4d90-b951-c8fd71c6ec37", + "48be93e1-fd7f-445c-8e8f-f53663430bc8", + "ee683a0b-85d0-4472-abd3-416f344070e1", + "ef189eb0-9d87-46b7-96c1-43d3af47a59f", + "2479a204-d828-4c18-b09c-a6191474f3c2", + "308c8e4b-606b-425d-a88e-29c6b0c92c8b", + "a0002b0b-bfb8-44dc-8753-ea3cb90501da", + "11f00727-c52f-4cc7-a493-4549a9a2949c", + "91d2c960-d9d0-4d81-8000-ac10cf49c460", + "d9ce0b43-07a4-4417-9659-aba77672b04d", + "199c3c18-a736-4fda-ac3a-e21eb6d6dfdd", + "7db6ab4a-bff3-4229-bc10-6c1047b0d29b", + "2636d842-61dd-4cac-8675-dc724dab81eb", + "c7d50d63-0f6a-4ddb-ab1a-373444b52c3a", + "ea8a4a93-9f5a-416c-9b30-448ae2bce09f", + "d96fae36-9faa-45a3-856c-dbcf76e03072", + "e923e144-386e-4401-8337-cfac0ac06843", + "f38f2421-20e0-4c5a-8ab7-668dafae50fe", + "8a9b3913-f108-4038-a507-cedaeae82cc1", + "7e21bf00-da27-479c-82b9-164b28d66f81", + "1dd163f2-f3a1-426f-a862-7bc6de68d524", + "19a1006e-9990-4063-a90d-c8346e34d198", + "eaba592c-7617-47dd-abff-50a931058016", + "df2117be-020c-425d-bfd5-2bc4cfc88e3d", + "d3f12bef-ef52-4767-a52e-19bc32493902", + "3f34c0dd-88d7-4ba6-8802-f7c53970f694", + "d69641d1-6937-4d3f-9e1e-1f666ebf2a02", + "fde523d4-ffae-40a6-a07e-06bbabdf3256", + "946c70a2-d865-43dc-8473-261213632e20", + "01dfbb4d-1afe-4c2e-a440-96802eb43c8f", + "be4faff4-0295-4ce0-b15d-0a362597afb2", + "7667f871-5ec8-49e5-b868-1b7984168280", + "e889c91a-d19d-46e5-a1fc-f5105aae5a04", + "8acce272-4d3a-4a96-a830-22a384dd0822", + "38325bee-4ce6-419f-be60-81f92ee0030b", + "c6b6125d-075e-4052-b6af-80318d6b1c21", + "0e4a114a-af2e-42e1-927f-12c6a250709b", + "1b326942-e3e6-452c-b788-6585112e24ba", + "eb037800-7f11-4395-bbec-d22890c75455", + "ae1361b0-b310-4ed2-a2ae-3a85e990b8d2", + "0e505114-f66b-4314-b2d1-877b85868c6d", + "32279957-e242-4b76-bae1-91cdfb84261c", + "d46a2712-df22-48bf-a8f2-d1244dbb95b5", + 
"5b904932-3136-43cb-8018-f2f4a8574e63", + "1a5105f1-e77a-456a-90b0-bea3adce9e76", + "eb2a3b78-32f1-4c03-8204-7cb96d83b885", + "eb29758a-dbee-4a81-91c9-68572b616df5", + "fab8a6b5-82ec-4cf1-a7ca-4625e7d5d4e5", + "2be43a96-fbe2-4766-9fe3-0f5e293e29fe", + "72fd268e-4a0d-4c24-853f-12641e20631b", + "f0212196-cf16-480f-bd03-37463e77ac9a", + "0d098f9f-1ff1-40aa-bc8a-0598522f9fe8", + "eb31dced-cae5-41f1-b53f-531491db35d5", + "13e7caaa-8489-4d92-9b84-0ab12a7c327b", + "60611605-6a00-4ab4-aa31-3fd50833ff31", + "eb07dae6-c665-42e1-914b-e0a419d2c1f8", + "b449200e-ae72-437d-af3b-2a6136c71f87", + "28c19bac-90f4-4259-a1b5-49cedd4bc903", + "042e8b89-baae-4a09-a22d-2edf60088641", + "01caefd8-3b0f-4972-9555-86d4ec063b00", + "54b928f3-7754-4d56-933e-184c9ebcdbc4", + "89bda8f1-479a-4e69-99e6-2deee57830d6", + "3f8acdee-aa3e-4f99-83a9-1c4409d2b337", + "06304bbb-f940-4ca5-9e36-e4259c1d0d2d", + "418b1211-4238-4a17-9ac2-eed83e02fdf4", + "4ac8f020-b418-4685-98bd-34e3e75851aa", + "19ceb0a6-4f72-45c4-a485-f4483b68027b", + "35880852-37b6-4888-84e5-8c8bd313ae7b", + "d0ca62a5-75bf-49bb-a85c-9e35776c346e", + "e7c19b4d-5353-43eb-b406-123187095cbb", + "34730f30-239d-4e07-9e3b-219120c17d9e", + "ad90584c-0dbf-4605-9e2b-be5f196b1eea", + "29aba3ef-fd26-44df-8886-2940dda2d45a", + "1356e98c-3748-43d0-bc7c-af11a8269afd", + "b9f2417f-01ae-42d9-808e-20f015fd43e6", + "5bd5a2d7-6121-473c-ab06-8631a6f981f7", + "e8d25a12-5d53-468d-a26a-0bde9008479c", + "acca25f3-659e-4ba7-bdeb-43b2ae23391f", + "511dbc64-0e7a-4ec5-b6f4-048d9ca404b0", + "8249d64b-1e3d-4b76-9a56-8221ffbf2980", + "bc7b753f-a609-4aed-9214-93a7e252e43a", + "d8e6033e-8293-4378-a584-db0e862f20a5", + "6fd6cfa3-214b-443f-b542-0afac0dbcdc3", + "f7b4c129-f283-43da-9d29-df2d74dda93e", + "d62f9036-79cb-437d-b6ab-91609373b359", + "32945c03-9d96-4d4a-9998-55242f3912d1", + "eeee7fc3-9d2f-4dde-b199-a52ad12397ec", + "350bb95f-3c69-444e-8e90-2d87e04e3874", + "9cdc56a3-6165-46fb-9ea0-84ad3c07401a", + "4f5648d3-fa16-49c9-a799-67327bf02722", + 
"2b3bf3f4-7ce7-409c-91aa-d515ad686345", + "e61abf07-209f-4773-9e6a-4d6b4b50f7a8", + "25d67cf7-c892-4c67-912d-9543cfd41a53", + "119667cb-0962-44c8-bb94-8b62a4322643", + "af145d1d-d69c-4efe-af11-3284a89fc95c", + "343c1c9d-732a-41c1-9149-7c61b55d248f", + "38dd5902-f22f-42f6-ba45-d6a9749a2585", + "6f4c66fe-6f3b-445e-ab94-395c885f70ea", + "87efe257-5d79-4450-b813-c490bc8ac7f8", + "5a92ec85-c72b-436c-b628-62482a939fb2", + "399ecff0-7e97-4a6c-aed0-1c83945da5a1", + "a13de0b1-8d6e-4b24-bd8f-8df7c4bcafaa", + "7c51cb2f-2502-4179-b75a-f8a3aa4aa190", + "9eba030a-e1de-494f-a26a-de0fa31dee49", + "0100a9d1-3572-450a-bb19-28e0d4d4eda7", + "a1775eaa-5f7d-4a2b-96c4-27340980ebba", + "458535f3-3de0-4258-8c4a-a7c2f8db461b", + "6d68bff0-259c-4967-beba-0ac9abe24fa6", + "c80d7e39-d11c-45d9-b22e-f84366d08400", + "2451d8c5-8b3e-4a3b-9a00-b6250fe3131f", + "73d3e2e9-41b5-415f-9496-f1ce019f399f", + "4cc0f1ea-eab2-4ce6-bc33-32a194683be6", + "44112006-4a1e-4069-b6af-2ebed469b017", + "0ce4d2f4-1f64-46b0-a900-11d5d10700ec", + "4be12643-0062-4ea7-8648-0fd53f849c5b", + "fabdadc4-3d0b-47bb-a5c4-d573901c4f2d", + "5a55c5a9-c391-4d40-ac49-ebb59813b784", + "49a5c527-ea8d-4b6e-af18-1c9340131fa8", + "ea605163-06d1-4e52-acd3-29e61ccbfad3", + "42627078-81c9-4a66-8062-25791001e29b", + "69544966-2381-4ac4-a17f-94ec582dce6b", + "2f028e2d-d586-474f-8f41-702bfdda5f9b", + "030c382f-c34b-49f1-b92c-9eb75d822fe3", + "50ba4a63-cfe6-4867-a3a4-1c6ac305a444", + "973fb61e-7dcf-4e55-9d4f-37e8ce502c5e", + "3c8d2241-4f70-46b1-a361-7150bb12948f", + "a1d3d5f8-cb78-4aae-9b42-4ad3fd9b94f3", + "a23c0630-ff1b-4eb5-a697-f8f806dc8cda", + "ac6b7a1e-ac7e-4690-b1c0-e01f7d56eede", + "f6d2d6ea-e926-4af0-8eed-0ea6df755bf9", + "a86315f1-5cf5-4c88-972d-057eaf76d6c1", + "b80525b1-f401-42b3-8234-f700879d77a0", + "47191c70-6528-4a93-8a43-b5e649562a84", + "dafa1c06-84f0-407f-9fe0-b61e7c5405c5", + "411c0ba8-d2e4-4ccb-a619-881c662305f0", + "c046240a-798b-4a45-9773-7b85fd5756ec", + "f05ee9de-b691-41ef-8a09-e5b65fdf8c99", + 
"88b84f37-03f5-45a2-ae88-00a9d2574b91", + "29ffc85e-2bb2-4dd1-87fc-6c62f19a2df4", + "94a4d426-0cf1-4f9f-9c26-04d2630caff0", + "43a76858-242f-423f-a7ab-0b61a6660f96", + "38438613-f0f9-4d8b-b48b-5ca7d7f6600d", + "327c36e9-0b29-4c10-ae27-e953dc172261", + "89b04933-6bf2-422e-bae4-06b74125199f", + "9f00e10c-c60b-40b8-a6eb-67b85e166601", + "fd0fc934-1cf3-4979-af50-e07b9e841910", + "83f45af3-5337-45a7-82f3-c8921c7553d9", + "e0f1082e-d7f6-414d-9350-e32c7bd92c66", + "eb4d5788-c0fd-41a1-8a9e-b57082827088", + "7e707fbf-b5ae-44a0-8736-91bcd18c590a", + "db972e61-f302-46a5-9da0-628b0c787ac0", + "6dbdb730-fcad-4d50-9d83-a1d4d59cb6e1", + "baad0831-f83e-4a8f-8fef-890a012d3c10", + "bee8acf5-17d0-4ab9-acb6-2db493230664", + "0c7dadbe-2c02-45e6-b511-2957cbd2dbf8", + "420822c6-9abb-4235-bfe2-632814d5cc5b", + "b0cac7c2-5895-4b4d-9c18-74502b3b8a26", + "a72f25c5-8f15-4ca2-be91-36ace352de2d", + "4e53f352-cca8-4b01-8f0c-23281aae06e1", + "28375192-3caa-44bb-9346-c4394a9cbd6f", + "b3b17dac-1fc0-457d-95a8-dcebf6a75933", + "9ba9f3d6-5a2f-43ce-9567-02537bb475d1", + "5b1a6f05-983f-4e14-9e39-918fd82a07f5", + "8abd8807-d0ec-4004-9f8f-1b8fb4351da3", + "696bf128-a80e-48ce-b0c6-4a0d8c2e37c0", + "342e1f92-4522-42b1-a8c5-2fe026ebc5f9", + "f0b363ff-677d-4f27-875e-6106e0d2dd97", + "ab341c15-c73e-4e87-ac25-853344def8be", + "212028d8-496e-40c0-8a20-438815df59f8", + "2ff2292a-d147-4a3d-b1d7-86304be8cfcb", + "1b294a5d-4ec2-4f0d-b0b4-a3df6661235b", + "2fd81979-2772-48c8-809b-3d357ff67973", + "39dca33f-d88a-4bf6-af3a-f4c64129f784", + "5667f58a-f183-4607-86ee-4c79bef6e7c2", + "a8ca574d-a8b2-467d-b3df-f205313674cc", + "d4f4dc97-a591-4626-aa3f-8b963d755f9d", + "8493663a-1719-44a7-8e72-2a2d0de1bd20", + "794da4f2-65b0-4afd-bff8-f6b534bc09a8", + "f8f46abf-803d-4129-9907-2ad4918f8bb6", + "09777f43-ec50-444a-8646-5dab37f7b6ec", + "3174c31e-be98-4d42-bdc7-a04c676134b8", + "48496406-b2c7-4b86-b2de-1e42519fe58c", + "dcc99708-4860-4a78-8998-acccb317ac1d", + "87221366-168a-4fec-8c29-a2fff5b6eeff", + 
"8ef7e0f8-7a81-4b68-885d-afbdae99534a", + "45961a8e-ab88-436e-b59d-b509a5e0e112", + "eb732931-fc83-42e4-b317-f7694b0923fe", + "bb57537d-880b-45da-a92c-115b00e00632", + "8e7222ef-0825-4bb0-a71e-300006c981d0", + "c8f1d11b-1935-4178-ac2d-7cf1be209dca", + "059f4509-a760-4423-9c2e-41622ace8872", + "7e08a67a-bec7-4904-a984-4fef5b89e298", + "50b9ecb8-48f5-41a1-87cd-f90ceb111562", + "8944e898-ee93-402a-bf8a-c6f37ae6eec4", + "cde6bc0f-c514-44bf-ad1e-3d71f8ea8949", + "26f82610-1309-402b-815a-1b6264a6ddcc", + "30a684fb-8ce3-411c-8bd8-d203af4659aa", + "c3224312-490a-4dd2-8bd5-368e87468260", + "f252cd5c-eff7-4d9c-bbf8-ad507da2319d", + "d9cef376-aa52-4254-81ba-859efb4d32a1", + "eae42730-a904-4c57-8fa1-18a9919f0a0b", + "db77660d-47eb-4640-a88d-d52cbb78de8b", + "d62797bc-106b-4315-82ca-0607c3548996", + "7cf112a8-44dd-4890-b2dc-1156ff55b69e", + "230162df-da88-4962-8850-04aa49ef71df", + "db1fb35f-dd94-407f-8b2e-fd8ae6e992bb", + "f77257b5-a103-45b5-8f77-1b3d234217df", + "efdbcfe9-6bd3-4792-ae53-722efc43f9f8", + "71fad573-f129-48b0-b55c-f0524bb2130c", + "877dea6b-0c0d-40c1-a85e-00fd643f15ad", + "fd4e856a-6ed3-4a01-8648-d3de2626f75a", + "aa25c6e5-d1e2-42fb-bfa2-2c648c481c57", + "fdc30503-5a40-4a41-8acc-f77ffce2d243", + "6bc53f48-433c-4c6a-8f6d-af45577364a6", + "ce1a4654-d263-477c-9ed0-db6e4ed5091f", + "4a18ea24-8c97-4b5f-80b2-d1740cc54b39", + "78ed1cea-ee14-47be-abcf-f94cabbfa5ef", + "96503b0e-bf2b-49fc-83ec-0bc0fc5badaa", + "23a0cb63-b4df-4b9a-942a-7ff8b28d3d8e", + "11e64424-1bd3-4f97-b223-c61fec6f48b9", + "bf3b68a1-2511-42a7-90c1-707a5d76d0cb", + "e6c3fa57-08fe-483f-abf3-ab446e43dbeb", + "6a3318b2-d978-4e22-8abe-3c01233e65f2", + "36d7a794-bd8d-4d5e-be0b-bb13f2808ab0", + "2f7e0d1e-f340-4eaf-948d-9fd49f701cf5", + "6e3466c0-596a-478e-a438-bfa27612af04", + "baf5e007-1560-45d0-ace5-bccfbb927051", + "5ff6552d-f877-491e-a5d5-005c2397d7e3", + "f1906d73-7268-4b4f-b760-559748e38f62", + "02738d39-d43d-4d65-86d1-a46cedd4bb14", + "41bca3e8-e29b-44ab-b52d-628aaded90e5", + 
"e8f8a697-489f-4487-90ab-244329c40d5c", + "4b863f81-361e-41be-aa73-b1ca66697706", + "22416bb0-becd-46c1-b574-dc19e881c009", + "908d6411-d3d2-48f3-ba1f-218f865ffed7", + "d2958710-e9d6-4276-afd8-93b873d9c63b", + "49ab0849-5b50-416e-8352-a8974bd672f2", + "27bbf770-d772-4265-8b47-1b2587d15ac8", + "be89b069-6340-425c-a93f-2fa61f8e6daf", + "277ee313-4fd5-4e12-ab08-c3e062a50478", + "4341375e-a0f6-4eb7-b27f-9fa3e928371c", + "3019e7f7-8e72-4eda-bdad-fe609c22060f", + "be652b45-5e41-4040-b2ae-879e7c7f3a5a", + "bae77d75-c075-4d20-ab6b-9ece99d6380a", + "156dcdf1-0827-489a-8406-264d0a9e7eca", + "3de23355-88ac-4659-9cc7-38fb3cbd8a61", + "1a4c0e85-82d7-4e20-b461-16539f7f56a8", + "e354961a-bb0e-40bc-9c99-1316146061ff", + "0e959176-7522-417a-92b3-00571c073b13", + "3c524d78-a9eb-452d-a6ae-c9ea433f6100", + "3e1e6e3b-ab1d-45ca-8d05-55fcf5c6dfe7", + "1ad652de-ed8f-42c8-8074-f1a3ceecf067", + "173ed6f2-959b-48ae-a0a5-ca356f5af3da", + "bf10756c-6aae-41ee-9879-afed40bf4f72", + "1b62322b-1969-4ce0-b3c6-d66ee41ceddc", + "ae2e3524-a76a-4752-ada6-af678dcfd22a", + "25b368a1-63fe-4c63-8266-b78bf544839e", + "b09569b7-b4a2-4ecd-b70b-28455eb3ca92", + "b49a40f5-c0f4-413d-a8d9-d17b7cb6e4fe", + "af497f5c-9009-447a-89a7-e21596e86531", + "76a6ab9a-9f23-482c-a3af-0b87cdfde63a", + "c72dee17-f3eb-48f4-86e2-04c412607edb", + "b92cf65f-377d-43c0-945d-992451add73a", + "0c8e5939-5227-42b1-babe-9798f2e5e779", + "9514eb80-da83-48cb-8f81-a67273328842", + "a98c59ee-f335-4c92-b7d1-bbe87df21cdc", + "82870c1e-ce7a-49cc-b6ad-33cc00caaf49", + "573c4afd-fe3a-4602-825f-50cb858b74bd", + "5d746705-d5bb-45de-97f9-b5a8c843d612", + "a3c10090-71b5-4190-b5f0-184df373826f", + "a9687bec-1237-491a-9915-8cce6be02ccb", + "245ad330-8423-4758-bec2-2e0bfef2e374", + "b1cb527a-b5a5-4d9f-af6a-4f9dafbd1f30", + "c5445010-0887-4914-9c16-cedd6fd8edcc", + "c8e7753e-292a-4640-b6a3-d87e0a41cf9c", + "a7e71ed5-1821-4436-b339-54d1d3560470", + "b1d10980-e3fc-449b-918e-a78a3f00c65b", + "ba47771b-1113-4eac-8f0e-1fd9724de876", + 
"61ab6ee2-960b-4cd7-a12b-737bfd1e7b09", + "2e483cb6-2dac-4156-817a-c65f009fa354", + "4d791fa4-c9f7-4016-a8ae-55527cb6e708", + "7b99872f-d218-48dc-94c3-79bc4fc6261e", + "bf5f4599-a174-4b31-8610-4b27ad96e2c1", + "e9ef961e-72d2-44c8-a836-b0c258798354", + "1f8d15f9-b5b1-4fe0-9789-9ee2b1839cc7", + "886e794b-ee2a-4b26-b61f-d43d33ea32b8", + "6e23dac7-bfb1-4c2a-9be7-9a116a4ab13f", + "4d146bb0-2e25-4283-a533-cb71e7116c8d", + "9945ac14-3e4e-4ab8-85e5-4ae251a52fef", + "96c1f3de-e0eb-4d59-93cf-7a8c3651bfd0", + "e3bb9b5d-4b09-4f4a-8167-3a01d61e9e7b", + "f58ff9a6-a155-4d89-ab66-e0dccda03aec", + "7eeda666-90d0-4803-8a9a-276445914395", + "03931c82-c3af-4601-ab0b-85af9cb36b33", + "d255031a-3210-42f5-80bf-5fbe2b2cb03e", + "25977a1b-0827-4f09-b419-9b651fad305a", + "36b57cb3-f3ac-41b1-b3bc-ee4c15e56d92", + "74d5b41b-905d-4fee-ba2f-170efdf927ae", + "87a325a0-e8e7-46e6-8be4-27c6b7287de5", + "72ecee72-18dd-4b4b-b057-9c5d2d63a8aa", + "2ae52669-0340-49ed-8fb1-654cd972f8a8", + "7959a000-4d47-4c0b-a61c-41410b928502", + "ea496c66-a518-4dbe-bb95-dd2efa8e3496", + "4d2f7ef9-5b5c-49b2-8d91-9d29f7835afe", + "c4ee8f47-bb3d-4f9a-ba69-e45c22859cf4", + "e4497ec7-aae4-41c0-98d1-ddf019b69d91", + "b19ea147-b6d4-4eba-a264-5e821bad5a3b", + "14eb772b-4d68-4a16-b860-f9a2cc8fb4de", + "8ec88eaa-b998-4434-b2ee-64447de1f86b", + "98140330-4d14-4c21-90b8-4558dfcd9608", + "7ab88398-4c5f-4702-b82d-f3cc865e1e67", + "25178990-37f6-47ba-a7a7-1cf36528be02", + "91b79b80-ddbb-46d0-b937-cf794f957acd", + "2f0cd5fd-2537-40f1-966b-1bb2fe4aefee", + "202915ee-382a-439d-b91e-fc92572b3596", + "0d2ab88d-e1fd-402d-a717-4be9bb96ef47", + "3ee75661-bfa1-41c3-8fb4-5ec1309624e0", + "ce562845-dfdd-4d5d-a082-b8a23c20215a", + "fb2b4c6e-7e3e-4b8b-bbc8-c9965c0c470c", + "3eb10ed5-4faf-430d-b403-013757ae6e28", + "451089df-5e81-4d66-8b07-c12de8f861c3", + "5b5c1c7d-bf31-4c66-9deb-25b610cd388f", + "32c44aef-69cb-4622-ae38-1340cca69164", + "2d88263a-d340-4ac1-b819-e63fc05d1735", + "62a41844-1947-41ba-87a5-89350e845a51", + 
"7e381646-46c1-47e0-8f89-b1210d5fa70f", + "7583d3e9-2a77-470a-9a38-2d7667677168", + "3a75cbf9-b264-4510-9f56-94a8e534a882", + "79b68646-1cd0-4654-b505-dba6d9489a2c", + "88868539-cca3-4e4f-b7cb-41d063d3a14a", + "2a8bf080-971d-45d9-a32b-adc89c812497", + "540b410b-7c0f-4417-b08e-a51dca00b1ee", + "0888487d-aa76-4ac7-971e-176f6295b7b5", + "edb1070f-35bd-4972-9324-85a69e37a139", + "69997281-3604-4ea9-b85d-dd1e0f606398", + "e2b4979f-4d56-40b3-81a9-377f99742775", + "46dee3b8-443e-40a8-9d31-1fa5907cead5", + "796ec030-b59c-402d-b914-6057c55c616e", + "0c0fe91a-7cbc-456c-bbe8-f59d67c5588c", + "18c98f2d-a42f-497b-ad3b-5f7a963b11c2", + "16f39c12-2e06-431e-9c07-70414822c31e", + "6a1c9e07-a03f-4b10-afce-b90c3d95bb9e", + "4bf9e759-f19a-496e-957e-290426fab158", + "9a7a081d-3861-497c-80d8-7531cd124794", + "105d2492-cca0-4522-8b5d-e4154142cac7", + "a59c003a-a77a-4edf-9d59-32051bec4355", + "6554432e-0d5a-4dbe-93da-1344653e7acf", + "04a25dc6-a199-4489-9bf8-1c0ffcbf89f5", + "433d8549-b17b-410c-a5e5-4f4544c561ef", + "5a0efbb5-1bc3-416a-905f-cc6f338d236c", + "0aa1b081-c5ab-48da-ae33-ede6835155f4", + "a537c9c7-ed8c-4d86-bd7f-f9d1d17fa0a2", + "3ebcc826-b590-49fd-955f-94ad9c221c0d", + "7f517982-eae4-419d-a577-6918c04f77fd", + "d3edef33-c611-42a7-984a-66ff69ac354a", + "5cbd8976-b5e2-4b3d-815b-122543e8931f", + "355c9ee0-2b03-4712-bace-e414100a8a36", + "5f806707-2a34-471f-82a6-6e347e9ca577", + "c22953ba-c385-4656-8cda-3b40918b30b2", + "77af5791-0d6a-46fd-9495-f0681d7a6c68", + "d0ca38ae-e4fd-4ca3-8292-4db7b9fd96e1", + "52b94fd2-c21d-4282-94fc-23632aa3d9f1", + "ef19daab-9490-4372-b817-87e906e811af", + "1ecef329-a32e-49db-8de0-33e89d0c8cf3", + "f2af2db9-5e56-4299-81fc-db4adcb12443", + "a5d863c3-f116-421a-ad26-9e40c5147545", + "0a13286f-eec7-44a4-940f-e37db237887e", + "9d5c4108-550e-45fc-b08a-57b3ca726d1b", + "29d25421-50ea-4ca6-9072-2f2cf17d7f85", + "285a9feb-5fb8-442d-89bc-18e3a1f7f14c", + "b6f7ce9e-38f3-49ee-b8e5-2835239fa2d4", + "cca315cf-52cc-4b09-9040-2dc038333f78", + 
"cd9f681e-7ffb-4452-9ad7-deb782926cc5", + "e557a091-c556-42fa-8ce1-94b7053c1c5f", + "eac9b0a8-9093-42c2-9851-b3d0689bdecf", + "028e3634-7986-4340-91b3-ec888bc6427b", + "b7eb94bc-027c-41ac-bc71-e0c058052e98", + "3338e35c-7448-4c2d-b2f4-8be5a03527bb", + "cbc06790-26bd-40b8-b7ce-94c6c3a57c11", + "5d0f8460-94b0-4f8b-b913-d2aa35922099", + "9910b7b0-1a28-4809-9aac-9ec8810032d7", + "0fd55c9b-973a-40b5-99e7-375a92b7768e", + "29a5387a-6426-48d0-a6a4-7475e0712298", + "3735b518-db7d-48b3-84db-84e2750d880b", + "57ba4cd4-da38-486c-99b6-9a064036a348", + "6f4d1234-38f3-454b-bc96-02d20e5e5783", + "d755b556-e2ec-4c03-a4d6-6baee9cc8e8f", + "04e5205b-168f-47a7-a9bc-000e10a8cc74", + "3bd04e1d-7431-4705-bc3c-4c97d53ef4cf", + "f9b1850a-352e-480c-9981-805d95402196", + "e7e527bf-dc61-4484-b002-d217539a3434", + "4b84007c-7c2b-4f3d-abe4-0dbb372a24be", + "7bd2b733-ec1a-4760-aace-181f50bea7ea", + "f3efaff9-a531-4281-ae01-0bc9596fced9", + "0fbad900-d393-4fbf-89b5-b3b6e1e0d429", + "5eaefb3c-049f-4cd5-b85f-ca87b6fa6545", + "6321c585-be1a-4259-8ebd-581fe3cab5ab", + "454cc792-eb88-4d1d-a712-cb7400b77307", + "6e804bc8-f63a-4892-bc5c-b13540e6a823", + "ce6abad1-5945-4b18-9f62-6ba858e119df", + "4cf6043a-1442-449c-89a6-503c0a3b67e2", + "65476c76-3f79-474c-a171-bb4ae91abef3", + "8c674739-0997-4bbb-b1b3-6931474c9127", + "3f882d31-5550-4c97-8240-2eb06f58475e", + "dc6594f1-6d32-40b1-8560-edf1d43cfba2", + "0d6fc21f-fb12-4023-894b-d7b0eaddec4b", + "a301b982-01a5-4eee-ba7c-099467588e78", + "4280ba59-6b79-4e59-b728-28fc6a351276", + "988cc988-4ede-4330-933b-ce4da005e42a", + "5f45d196-8a2d-47d1-8cd8-9c0f11ed987e", + "9f93a84b-3898-43a2-80c7-6bcecb2bed94", + "19858815-c823-48f4-b8f4-5d88b90b335c", + "dfb02893-87d5-46a6-9efb-00c9801ddf80", + "8f49c342-c93c-4585-bc55-17fcc86b05fe", + "742bf74c-9ede-46d3-8867-619145dedb16", + "dc84d729-084a-450e-95b0-de9c3cc43f93", + "7467c4f6-04a1-468a-8fb9-5aae394eadcc", + "cea050f2-d775-4342-b847-aaedbdf58d1c", + "4cd3256b-195d-44aa-8718-a6ce84161741", + 
"327641f3-2929-4dbb-889a-3265dc26f750", + "d2dc5f98-8a23-4ad5-afa9-10a3fade93a5", + "a08dd0bf-d7c1-49a5-8dd4-30f6033933a4", + "77a49018-0707-4e4e-9c69-071e1389712f", + "e759dc63-63d7-43e5-8811-6513d34f6eaa", + "f16fe973-7823-41bf-a45b-881cab8d3eea", + "bb48f7c2-3a7b-4481-9b19-5e09ec531d47", + "15c4e499-e4e8-4e0e-9b28-c633630e5200", + "eab6ae34-ed4a-4a2a-a541-aed2c8850659", + "d1da8e4f-0627-43ad-97ab-04fc30824e9f", + "8dc98a21-38d9-4939-b1a0-4e147c0dac1a", + "161c0dfe-d0d4-4119-a275-71adc06a5165", + "e133873f-296b-41c9-b540-669d68b6af78", + "7add4f24-692e-4c21-bb82-3da289d02b12", + "cedbdaf8-ccd9-417e-8c59-1b693cdf80d7", + "5a9c5c89-1ac2-4a29-8eea-05868bc5ad8c", + "73631dd6-400a-445c-8e27-402f56d1b84f", + "e1672dc8-51d9-4586-b700-3351e90d34eb", + "3e1b619e-789d-4255-9108-6728bd2559d2", + "07e3ab42-7d48-4c47-b11b-664365778142", + "f0d835e7-71d0-406f-a502-ec7e74219274", + "c518156a-2228-4ed5-b52f-9e19f989d62c", + "493058fb-4634-45e0-8ea2-81e8cedd5911", + "660afc7c-89c5-4183-8109-fcabdc288c7a", + "ea614467-14dc-400b-9b7d-27720f78a2d7", + "faf527e2-81ef-4df2-bb09-461f13fbcfbf", + "3b209788-5831-40c4-bd68-cbf14afce398", + "981761a9-6794-4808-b785-989cdb6b4f5d", + "2adf2eb1-1d01-4c31-8b9c-8e19376eb902", + "2ccf3807-1c64-42f8-884d-04b8a55edbd7", + "81891b72-4164-4709-a39f-619df0038f1c", + "01f9fab9-edbc-4cbd-96f7-1b7a2eae0b67", + "ab04e941-f4d5-4b91-995d-588a598e40d8", + "3b355e2e-9aa2-4c12-b1a0-548a0d68e9c7", + "577ea66b-86d7-4089-97bf-abe1b3b57e9b", + "76ecbbaa-ac3d-4b4e-93cc-fc4d7a1fc5d3", + "dacfa724-eece-493a-8a79-03cd939db824", + "1c25b448-f301-4e4e-b29c-e9a2bb44492d", + "9ad23292-3534-407f-8922-a79cf53e9756", + "67bf13e6-7d93-44d1-9248-c1804bc9e462", + "922b84f1-fc87-4034-82bf-cd7ddb99ff95", + "c1bc7c70-82f0-4002-af54-ff4e19a1c082", + "491e74c1-54ba-4cf1-be6d-71a68a48e23c", + "f47de6af-d29c-480c-9787-f83780865854", + "3e0da781-e2b4-430f-932d-57dbd0989a97", + "3a8ef119-fc3d-4e1d-8dd2-bf12b93a93f9", + "bde3242a-56e5-4548-8e85-3d87c22bed93", + 
"b2d1562b-bdc5-458e-8411-8a606e3ef748", + "365bb953-40fb-4a16-8c4e-3f20a08bd35b", + "f73ad6b6-4c7f-4dd0-bbe4-02bb8f659eed", + "e5657bee-6a5a-4c97-a8cb-89a91294a70b", + "28539940-78cf-4141-ba46-49967efa7026", + "de3db0be-2525-4afe-a731-bdba550dce9f", + "67f45ec3-8862-4424-ba44-6274b6a0f47b", + "de71583b-1133-4a61-a040-e63b4ff31f8d", + "56ddad42-8cd1-4afe-9f6b-04546b4dd93a", + "fe7d3603-99b4-4c11-ada8-e9bdf9e44f5c", + "e14730e9-6698-4947-9d85-72863923af65", + "fff59fe4-3c33-497a-9e86-f0784fd1b54c", + "e1edc6aa-5996-477a-a15e-85e510c2be5e", + "0280f251-7c78-4cdc-b8cb-c5fc894e1c78", + "9ddc353d-4962-480c-817d-bd564e0607df", + "251c5d9f-a0e7-4060-915a-f129c95865b5", + "1849c8ae-7129-4ee5-80a2-2ebcb0921ed9", + "a5b9e778-0e28-46bf-9c14-022bbcef39ae", + "afffdf9e-105b-4511-ab6d-a269f1adea21", + "8ccae8ce-d67b-4cea-bd26-7aa5918a2acb", + "fd9aec1c-aab2-4198-9b5e-801b1d4c3f95", + "a2881f5f-db7e-465c-838d-a48f1c8068e5", + "98b759fb-2c78-475c-9c7b-ce7aa7c530c5", + "1f8a6940-4a25-43e0-8e63-91413294a247", + "48bb8bf2-330c-45f9-b330-ca9b58e48178", + "f9754789-b59e-4b67-9267-6661320212f4", + "76c454f1-8020-4bb8-9910-51267d9b682e", + "db082cac-3b8c-4e12-8d30-5de2b4666283", + "69122294-72a6-4b27-9130-575b743f4506", + "3e03bc70-c56a-45bb-aec2-c26059fa3451", + "3cc2abaf-1d9d-4632-b1e9-a42e5e1c9d1d", + "0e950ded-a0d4-4e8d-92c8-d44bbb15e74d", + "ce3482e1-9358-435c-a3e1-e30cf63e058e", + "087d2389-ff1f-4f4b-8da2-520a6ff560cf", + "95768b2c-0e3a-4560-8a09-c7a0bd905e0f", + "cc9e1c5e-5aef-4851-90a4-7c3712fe36a1", + "98f0f13d-c9de-4af5-896f-ecc7858a393e", + "b3f8fab8-c81b-4cc7-8d1e-c75bfa463d79", + "d5276097-beac-471e-ac82-d670b3de4421", + "ea06e758-562b-4d3c-af8e-d061986859e9", + "13a108fc-e72a-4aed-a614-ec97a58a5a15", + "28bcdfdd-fe4d-4b3b-8756-c51678bbf531", + "04a0bcd1-ed86-4b14-8545-1a6433469981", + "fbf35fb3-182c-435e-a366-04ebb2d15a9d", + "d01ea94b-3f20-41b0-9788-f0ff63c9247f", + "503f8ec5-a0b1-4ddf-b781-86e9f9259f9e", + "49b88733-47ce-4946-861f-bfa73422e714", + 
"4390c5d5-30ec-4947-8bad-025c59875a84", + "fe0c7630-8721-49e3-9047-bda194eaa15c", + "102c537e-1914-4430-81be-57e7d26b475c", + "cbd2df71-ed88-43dc-8b87-91a99d211976", + "228dfee3-4381-4656-9b78-eb39a425e495", + "e2e2d44c-e857-4621-9482-6d64ad1d7dd8", + "4752a45f-8b9c-45f5-bdc0-0c1a3f03b238", + "1f196127-1926-42a0-8ff6-ca5c5f900fd7", + "07f28821-8e2c-4c5e-ad01-789b060c00c5", + "05a2e92f-c0c2-4655-ac2b-d695da108a9d", + "75777977-9440-4aba-ac9b-e453030aba82", + "e2d80855-13a4-4096-91bc-5e19b65e78a8", + "39789201-7dc6-48aa-9ef0-820f475b658a", + "0ed49ee6-425f-4a0a-94c9-632296d5be16", + "d38b4c35-f12a-4f39-a750-846416d4319b", + "ea1acb60-7d96-4da1-8486-739df7709544", + "c821e569-dc9f-4fed-ab31-e1a0a19b8f3b", + "0b1fc3b3-775f-4dfd-a161-2f9d652cec2d", + "fa92f034-c32c-4b41-aabe-eb9636765ef4", + "14ad22b2-5682-4e25-b88f-74cbc1a5dd85", + "a5b977fd-0f43-4e44-b74c-bef7804643dd", + "6d0eb96d-9527-4cfe-ae7d-27fd7c782f3b", + "526dd401-fb14-4f2c-92ea-64ab1684c6f3", + "d0af538e-8db2-4c3b-ba6a-f2c8807594f2", + "fbbf1395-3c2b-42f0-a136-c7606a4d70fb", + "a97e4f7a-9ad2-47a5-acba-61a7b4051ea5", + "15a03c63-1b50-4ccf-b5a5-c296be09d0d3", + "da033fe4-c3ff-4a0e-86cd-d9ce3144aab9", + "ab9c7847-9cd1-4ad3-8e2c-7bfc422fe9ea", + "a0903a5a-a7f5-460f-8a48-034d48c4ce17", + "f674c2f7-a1cf-4d91-95bf-a420b66f3bf8", + "15288c88-747e-494e-b023-fd28d9a84e3a", + "17eb6064-3871-4bb1-adda-40c9d8e23f24", + "c7f3743d-4fbf-4e67-977f-3b5bca5bc226", + "b19ef122-beb0-4058-989e-1661997ab43d", + "2c9e88f2-3a65-4009-97d0-bbc873d626de", + "8c222e7a-652b-4171-89f8-fde0eb0ae084", + "b0113a9e-73b1-4980-a4b4-ba389e32f06f", + "06ffec8d-9148-4341-8c8a-c9f16a87e9b6", + "6d00fd90-ba7d-40e8-ab4a-63677497c5c6", + "392f8b8d-7a97-4b51-b03b-e2572cd2369d", + "6df736b4-d628-4d78-85da-85409339a4a7", + "e23d86ea-92fa-4179-8ae0-e68703763bd1", + "e34745c9-41e9-410d-99a3-5ae59e99cfa4", + "9c91de59-2ade-4909-ad16-d0e94752f8f0", + "532a5450-d3f7-434d-a9c0-89294e5f49fe", + "f6b9634b-398a-48d4-a57d-7768eb1777ef", + 
"38d2d31b-269d-46f5-a8b5-6593dd8c2f6b", + "17c96b47-a86d-4755-ba0e-de068f246705", + "1f2f89b0-e471-4529-b56d-d8e4e82a5696", + "751e3a63-0dfc-4ee1-8564-10bea737098e", + "b599a33b-c356-4c30-ab55-f27dba10fc09", + "3dd31afa-747e-4690-982a-d1cfdf1b5821", + "d3bebc2a-c62a-47ce-965a-c13862995360", + "2803a8f3-6cad-4779-ba9c-b25307f8eed8", + "cfb9f48b-2f55-43f3-bb9d-fa87824d87be", + "01849294-26eb-413d-8a2d-584eddbcb0f9", + "4e599442-75e7-43ea-8c8b-cba1d5afa54c", + "e9ffc8f3-a54b-4c3c-9c55-f9441d350b86", + "e110f2e8-f5d7-4646-b736-8696fc03aaef", + "79497e29-e4c1-4ac4-b6fb-d22ac5fcd1d7", + "9640ac12-18e7-4682-b920-d3937c49e5e7", + "ee51ce9f-5e37-4ab4-b295-4c0615aa6c4f", + "231ef623-0abd-4118-a9b6-c80279572b73", + "2158cc99-8534-4040-992f-d96f916e2164", + "62b6e916-3e7e-48f5-8ac6-bcbe72969120", + "e20b7315-3123-4f47-a953-90fa798f2e91", + "45f1ce39-310b-4995-b5bf-2392a157efeb", + "de0a9451-3947-448e-ab28-7415eb40e553", + "181fd8b2-0161-4a52-86cc-be1e778c2064", + "7c08f489-e385-4fda-ba6a-b91dd66eacf8", + "bc048369-dc35-49a9-aba8-73d45c4182cf", + "cee099a3-69df-412d-af92-fcbc52874164", + "bfbedd07-5765-4c85-ad44-6ce16fe34dc1", + "bb23bca4-851f-4cca-b134-b2dcb387b492", + "3af53969-b81d-46c7-b9d4-56f4b16f16f3", + "00806a16-e687-469a-9a58-285bafcadf17", + "f1135938-242a-4d47-b570-4f101a070d11", + "705a05ab-c336-463a-97d0-ddc9f9bb51a0", + "93cab968-c832-4254-abeb-d472e76fd320", + "2a990883-396b-4608-a347-0caec9ba7f0c", + "ff980043-9118-463b-bd01-8442370a825a", + "c4623c95-0e7f-4c2b-bb97-e7063c758624", + "ebcb0caa-a5ca-400a-8d44-da16b922530b", + "964fc1cc-93c9-413c-879f-b36c143452db", + "da3d4b9d-2f82-4796-9f96-63ac6ba8c19f", + "b57ffeb3-6ace-425b-a09a-1506f53ccd50", + "c9a0d799-4a73-449d-a27d-c102578edf10", + "e0786a9a-56fc-4f40-b518-e2ed3299a186", + "97880e54-4178-4fd1-998d-d6c2fae7ddc3", + "36140548-2b0d-4a7f-8846-4e732907ee95", + "47d5ce05-e29a-4940-9aa1-02b002c330eb", + "981515c9-bc6a-4be5-a3d2-7c6474058d1e", + "e74de26f-faeb-455c-9549-aa10b17f4395", + 
"1ef4963b-9d61-4bc3-b55a-b5a0779ddaa9", + "12d671a7-5602-43a7-ba86-c497cf5207ed", + "15376ebb-e65d-4641-9620-53434eea245b", + "ba32f9bf-4c70-4884-bbfd-dbb4bbdfb72d", + "9d6e33ac-b08e-4463-a124-0024a73fd9c9", + "9d8e56a1-0b6a-483b-9108-ad6442c80456", + "249f21ec-b913-40f2-9259-e7aeb104be96", + "0876f809-fe9f-4086-8bed-c4e47ae67afd", + "6520e856-d1e6-4227-86af-8374dc92a549", + "94e85bf3-23d0-4291-ad5b-05863135b66c", + "649f7d91-8742-4903-99ca-0db5a319a129", + "1ffb0eba-cb6a-4d7b-bd0a-0ddc19441f54", + "83ca9d23-6c0d-49ac-acec-fa841945fdbe", + "2ceb5b97-aa14-45de-96e1-4fa93421be3d", + "e5b2fa40-794f-4895-8d4f-110d0388ef95", + "4fa57766-df07-4771-9bd3-7f79a3e07732", + "cdf1d56b-89a8-4d71-938c-22505d8d1fce", + "5eb06809-574c-4ddd-a29a-720a261078c1", + "26933a76-42bf-46ca-997b-2cf84bcaaa78", + "208c063a-b8e0-4673-88d3-82c3296dca06", + "17132b55-cb8a-492b-8730-6acdb51188d2", + "2ca8c60c-26be-4519-ba30-330f520cf907", + "675dfd1c-4d5c-4e00-8fde-e8d524b57f8a", + "0c8d80cf-8015-48c0-903a-3b78665e4361", + "a3bf3a43-3117-4f32-a675-e0b33204b1e6", + "18a7fcfb-db71-4022-b271-0ff7df2fbe50", + "1db61806-224f-4bd2-9652-5b04f7a43244", + "adaff3e7-b389-437a-b7e3-37a0c4c2f0a6", + "af12f39c-3efd-4a13-8e1d-3d49b0ff292b", + "5ec65289-b05e-4ead-8bc3-22f06cc0902b", + "bb6aaa3a-04f9-4130-b302-129368a02338", + "de3e73ae-80f7-45f9-b1a7-4769a34da32f", + "657b1038-5c3e-4b5b-9fac-b06eba5b5d92", + "b677b723-d528-4e87-b565-7888d4555fe2", + "1ba458cd-a574-4bc9-bf97-587cd9a736ac", + "bbb2f8f4-93e2-44db-948e-c652b663578a", + "bd17420f-5d18-4cc8-9816-60800287242a", + "a164456c-e47b-4005-a034-3060bb3b4fe9", + "e0833017-4e80-4d0d-b4ef-8de017abe644", + "28efb95b-d9d8-478f-9044-28b480dba258", + "ae9d7f59-5944-40e0-8934-353c062c71f8", + "cc66b001-b4da-4d1a-96d9-45438241ff75", + "abe1f169-8a97-4b1e-a9ba-dd8a2c78f311", + "491a30ed-4a65-4c25-b24c-136fa59acd38", + "526d43c5-1502-4b72-af46-5f1aeec5f69f", + "2a2634d9-0051-4735-a296-cbbf786d30c1", + "b56bc608-2509-4896-8d81-784a1775a2cb", + 
"50771872-68d3-4b49-a1e7-bc36f582be2e", + "77483c55-c2e8-47d7-b1f9-7a60777251cc", + "cfed9c05-6f39-4db7-bb39-bcb2e4e88ff7", + "e2323fd4-c484-49a3-ad85-2335d4464cb3", + "70aa270b-7550-426f-a7d4-76243488b665", + "0be82964-a2b2-4a21-8338-10aff134aaab", + "2d44bf7d-f3ac-40e8-81b3-ab5734b7b281", + "2bb1b051-dd8d-4fb4-82d5-cdeafe85cb15", + "75f9dff8-8dc9-4ea3-b27d-8315893b7894", + "46f8b46d-fe16-44f4-8d58-74751b4e3d5c", + "de83534e-bf7d-4bfd-bb7d-19b7f9a9b128", + "24023e03-a40e-4fe5-85e8-cfd8e2466373", + "4d80ac8a-e073-489b-a04f-3759aa9c8bb8", + "5503651f-5860-4249-9a06-374cb3bf25df", + "53e9e3c8-043e-4d1c-bad4-fd3e37e2f7f1", + "d8488677-bfe0-4883-92e5-cd74c4798e86", + "519f1c3b-1570-49c8-867e-a2bfc890cecc", + "0761cc9c-54a0-4029-97d5-b64dd8c32ee2", + "b278da57-8fc1-4682-a7c5-7d00a16043f7", + "3569068d-6380-45c1-8397-977b90f7d5bd", + "fce38872-6228-4098-9c8d-dc624aa94fdb", + "a4fd53c5-a6d5-4a67-a609-98804a2d2629", + "5eda49a5-aa2b-43d1-a324-61fac09fcb65", + "aa7981c4-a8a2-4c1b-a78b-449dc6e1a2a5", + "162ea22a-9386-4b3f-88f3-1826c0b46edc", + "1dd21059-6039-4946-934a-d8fb3185a23d", + "8f38d889-6b82-44a0-becc-d497f75b19d7", + "6c5ec12d-7bd7-4ff7-963e-1397b02dc373", + "ea573d57-88a2-4643-b32c-6a23c55c7349", + "4f0de3c1-e32b-4a85-bd1c-e0f50857596a", + "b8f00182-966f-4a7d-bfe6-c269fdba62a6", + "d46e2740-785a-43a5-a48c-e5c473bb4c65", + "426c74da-b6dd-4766-92dd-7fc9fb68d3d6", + "f4a4ea6a-2190-4484-9a13-d6fea3c94098", + "f9918e1f-6c8e-4418-abed-baa8313d453e", + "9e7d2ea6-0d38-41d4-a1ef-b09dc117ca8b", + "5860ebd7-524e-488c-9e96-d61ae3d47bc6", + "2d3c5877-d102-4eda-8f26-f0b8dbd5778a", + "927a0fce-2c55-4df9-9164-c31e638346e4", + "04d7ad84-04c8-4095-ae82-dc26fe57d54a", + "66c728bc-c93e-440e-98eb-e69ce14b8a0f", + "c24c384f-3849-477a-976e-8b417ea31dee", + "41a78988-a531-405a-9a1e-49a064031bb8", + "efb91897-52fb-4691-8fba-fe2f55546224", + "fb1d455e-52f8-49fe-be31-7c5a46795e53", + "1bb24fd7-a50d-48c7-b809-098fc426d6e7", + "5b3a977c-c669-45e3-a45a-67709a7e37c0", + 
"82b706f8-a814-43d8-a68c-e1e8145e6cf8", + "c9203467-b3b6-4c08-9f78-4ca93b2b6f8b", + "841f1444-f4f8-4869-a766-33e53f857f87", + "e5782d57-77c9-41ca-beff-e0833efa7cee", + "72f98bf7-3396-4ffc-8254-60228a6b3a37", + "7278d63f-3feb-444f-9935-8e7d747552b4", + "efbccfa6-e72d-46f2-954d-075a03cce145", + "7646159d-af39-4216-b41e-237d13912ced", + "26d50677-189b-45f0-b43e-e10faceddb29", + "611dd961-545c-40b2-9fd8-e27f5d640bcd", + "467bfd30-aa3d-4881-b70f-6f294ccba46d", + "1b8ff612-b31b-4264-bb62-0dd7bd91bd3f", + "3a3a4051-4bec-4d86-90d2-aee1d8d21ebe", + "cf206665-2cb1-4f97-a356-d32f6d329e24", + "9a22bfe5-91cf-4f28-920e-8395545c52f8", + "126e63d3-658d-42d2-9fb8-7d23e7a085ec", + "a6cbe61e-890d-4b6d-af9c-dfc3b20f70b6", + "14f9dc8b-797f-4b0f-a591-08826dec11c9", + "d5ca3b43-e821-4b8e-8029-a274490d2f70", + "7242009e-3cb8-4261-adcd-0544ffaed65c", + "e13ba9bc-bc8c-49db-9ee7-c5fba6d6bd93", + "f66a934d-54cb-4bd1-a064-6eb3591490cd", + "f64f4e0e-e3e8-4e4b-8cf3-2ebaa5ae6978", + "1949cad8-1788-4499-bce6-c2cbc845558b", + "876ab8b4-7b45-4fa7-a505-eeefedf14286", + "aa45fbda-7cc0-4d94-95fc-8384a48f220b", + "e2ca33a4-1626-42fc-b408-e61fd808d0b1", + "91a4b1d4-d72d-42aa-8481-347e8261470e", + "63846414-d743-423e-99ec-d11b97592f84", + "3a4136f0-5470-4bb3-890d-3cacb65f02a3", + "cd24eab6-4577-4d16-843a-255633d00460", + "61ed75f7-feb1-4215-a878-cc33689445e5", + "9f78423c-434a-429a-b5bd-30dd9c09f64f", + "0be3c1b4-a223-4d07-ad49-139840209184", + "aacf0652-de66-4b5c-aac1-6205c1c162a7", + "0638c100-eb5a-4b8f-85ad-85d73144738f", + "c509ef1a-d50f-468c-993b-49a9d0dde542", + "69913643-e1bf-47af-a19a-0bd6fa61b80b", + "fd224035-7c80-4f81-a10b-032eecbb81d6", + "f03743e8-9f1c-4bbe-a1f1-8012d519ce9c", + "c8431451-7965-43d1-9400-341e52393407", + "4eafd4fa-b8f1-4554-af68-9007c48797bf", + "d5466cd0-0569-458e-a141-6c67c67cfeb2", + "690bd301-abd5-4f28-98e0-72c2b14eae58", + "6c7ee7d3-ba3b-4811-a6f5-09d8a617aee9", + "b4f09d9e-2308-4c67-83e9-108465cb894b", + "9452f3ee-7425-45f5-80c9-c307466585ff", + 
"c15b4902-2f51-4c19-9d9c-9c8ac6b6eb52", + "bcc082ec-5791-40fe-8676-34ea1c08385c", + "7c6469c8-6f4d-4333-b43c-52c061a4b743", + "1b3418b0-b146-4eae-976c-aa6e8a3526f7", + "a6c3f26f-5e15-4551-beda-5a7229995212", + "5a41d9d8-4e00-4933-97ff-49e284aa6391", + "26293f5b-1112-41ea-b95d-1a8812da5a15", + "a875dcfc-ab09-4803-abea-a1d7332ba843", + "1a87d45e-b6f3-4cc5-9c77-e71e393e5b1f", + "98c5afbe-0b87-4b65-a962-45eb6829d07c", + "f9f3d4e5-8536-424a-a1ea-16773e97c87c", + "ad5fe948-a561-4cca-9007-b1071a2a10b8", + "28aaf19c-a5f7-4993-b531-684cd7fce87f", + "a29e94b4-6dd7-4598-a544-ab3a982241fb", + "f2774e5d-3832-442d-9c96-6c7d362ca0fb", + "5b9ad5c1-47ce-4ec7-a595-ae2da9d6ed08", + "d1bbe0ee-6d1d-420f-a591-6c085c3d2d45", + "2684b8f4-1c33-4664-b0d5-f9dac226c342", + "294c6548-ea2c-4367-aac7-18120b5cbfa2", + "96bcc396-79a6-45f9-8cb1-af6b0bfa9df1", + "1aff0751-b6c8-48d3-a49e-f79cfda50ded", + "7cbb30c3-cc71-4f21-b88f-7004be98dcc0", + "eb009fc3-57e6-4618-b84c-e581f8ec636c", + "b6c895aa-a164-4e7d-ac01-08a328ed9758", + "e4f7fd3c-61e2-43b0-b8f5-94dc96b3a700", + "9a2f11d9-3553-4c47-8f93-133acda6aca9", + "2d9bef19-42b9-44d8-a619-d4a02b8988dc", + "5dae7217-c431-4857-9504-17c13849a580", + "1d289f35-1796-4812-854d-67a8515b7105", + "e5bf4036-53ea-4c33-acdc-3ba18a07a173", + "851440c4-38c1-4d1c-86aa-4908ce58031a", + "5a6c838f-a74d-4da8-a072-b6a5d7e80324", + "6bbaf331-6bd7-49fe-aca4-01cce572a73b", + "c9a6d7a1-7d6a-4952-bd8f-fe246f77337a", + "e08acc00-f7b4-4595-8c9d-d15a01f53c51", + "9fd6aa7e-fc76-4804-aeba-6327a15e7df1", + "2bddbb77-29ab-488d-9c7a-83eb4e0c61cc", + "5c818c93-6237-4b49-a546-148c59c61066", + "d8c52679-d98b-4661-99e5-2ebd65d33f06", + "676dd713-8faf-42b5-9ff0-ffc949317b2f", + "5c005c76-0bcc-407c-8056-a906dd0413a8", + "1c6ed900-2b59-497e-93e5-a75b402e89ae", + "e0072b49-f715-440b-8e6c-3048b0e8abc7", + "fc9d45c8-a9fb-4cba-892e-db719bb1a19d", + "25d11e4c-496d-4ab9-bf92-cfde9520bac3", + "b3caf92a-8595-4832-9527-9391fc44970f", + "11093bd2-a961-4820-b49b-56caf5352a93", + 
"7eba8ad1-54d0-4755-a538-09ed62f1cbf2", + "c010b240-6c31-4ba5-a1fb-f72ac43df3b3", + "ddd2e731-8a2f-40a2-a38e-cae91e5f2ca7", + "87441204-3166-4f51-886a-4eeb7cc55665", + "5309fb42-fcf2-4c75-aa0b-3b8f47756d17", + "e8d6a67b-0f75-4f79-8d0a-ac657ffc302c", + "4db29436-b173-4ef5-bd46-e07d3857287f", + "985deeca-8493-4299-b5b9-f10c1f0b495b", + "7e328595-e41f-4a44-92e5-4d11c667d4ff", + "e3856838-3663-4308-bb26-919aef7d78e5", + "1f5c7333-310a-49e0-bfe6-375e7cec4a9a", + "c77c755b-eb71-4af9-b4a8-f3d302eb6c6f", + "0862723d-99d7-4b49-94df-5198a04670ce", + "321bf9f6-bd46-466a-82e5-2e79b31c05e6", + "5a94d101-4e2f-45cc-8f37-d2a2139f215d", + "3a87495a-4fa6-4a18-9567-8fac45eb2141", + "02a3f1fb-58c1-4bd5-9c07-df0450a6a892", + "b293f9ff-27d9-4e53-a247-af4ead9d42b9", + "231a6a88-0f45-4651-9737-375bb549a8cf", + "91567764-0b10-44e8-8ea0-48b6c84326ed", + "4808e5c5-c201-4b2a-914c-6e0e67d7736e", + "4936d8e3-f1c6-4d11-9231-9e53a412bada", + "2b3241e6-05c0-4c5a-a794-e2f194e273f9", + "61deee0b-e7c1-4a0f-8007-6efa04740fc9", + "87a34e87-5492-490a-ade2-71bf8d9106ea", + "05267a8a-46ab-40e9-8693-8db0a19e9891", + "79cea8fb-3db0-4453-9bdc-bcadfca12aeb", + "7e1499d0-2ef5-4beb-bd79-ff300b16a482", + "e7296901-bbec-42b3-8f61-05e6cfcec0c7", + "0d2af389-0713-49aa-877e-7578b2e05993", + "966ba526-3362-44a2-9dbe-bac1ff23bb90", + "7a12d0b1-cc82-4103-b0e1-54f6d4686b30", + "b70a72ff-2b32-43d9-a59b-25d59b0bee4f", + "38c768d8-2c7c-41a7-b83d-f7bb52486ab6", + "c687a0b0-9ca4-4a65-afe4-2932d4adff60", + "f1599a3b-08c0-47b8-a5f9-6d68938273fd", + "f55bb228-1c2a-4bbc-a986-7f171f8e4af2", + "8654bdec-ad04-447c-a10c-c3009e4bae83", + "308792b1-f437-4764-928f-cb5c43ed7524", + "cba4abc3-9e9f-48f6-beda-7fe3449a9935", + "65a998a7-8808-416f-b65e-580328083f95", + "3065b040-55f9-42bc-9a5a-5155fa4a8f71", + "51cc932f-028c-48da-a7fd-e4d8914aa92b", + "c6366011-1c35-4435-84ef-ae08af99012d", + "bb09727a-79ef-4c07-9c5d-e950f644e82c", + "886fc3d4-2d52-4c28-a58f-d04659c7a539", + "c5761af0-e00d-42a5-9165-43236cdf042f", + 
"178ad3c8-d820-4d04-8805-93aaf31f99c7", + "23aba975-6391-4027-a24c-0fc8e5af0e9b", + "e65679b6-a413-4bd7-a8ef-4eb51510122a", + "095cf4a8-7d4e-4b84-a16f-39e4e0d8a7f4", + "b6b7b399-6f7b-4688-a734-93f290ded7a0", + "6bb9216c-1cbf-4ea0-a800-efecbf5f409a", + "263bb203-a3d2-4bfd-9967-338f32af3795", + "c49915b3-9c52-4386-8501-0380c957f088", + "f3866820-e6bb-4dd6-8091-49d3df549c7c", + "982a6e6e-c90d-4013-b811-2b24442fc76c", + "e7610106-8465-4521-aa41-73b3a26ecb13", + "0866562d-88b8-41e3-8359-31fc5486f17c", + "ebd3fb18-454b-428d-902a-cd56d1a529c8", + "471058e4-3be9-473a-a482-38f7c04da61e", + "ffe3447f-6214-471a-95d4-90d1c63d6952", + "5cc9c24f-589a-440f-959e-82ac47497cea", + "054798c9-469b-4c84-9049-52ca35d44f31", + "73116295-4be1-4687-aad5-717b060b821e", + "8d64fb53-47cc-4de6-8e3f-96621a57a944", + "b11e71c5-7dcc-4339-a041-f23055d96197", + "7d1b9305-3b9b-4bd5-882e-a5cde86eb9b3", + "29e60fdb-f90b-4dd8-86b5-9f9f158584ea", + "54bdceb5-16f3-4242-afb1-926dab0dcb0c", + "9ee3f169-32d0-4e95-8b62-3e6bba338902", + "56c808fc-b5d8-46ca-9739-57e2b00e8087", + "d9095435-eaaf-42a9-9fcf-18682e416af5", + "7ddb264e-863b-4c36-9f1a-df52f4b0505c", + "802cbc3c-47ce-4942-93fb-7f38ca6674b1", + "31f70e78-3900-4b94-9ad9-6efcd2430c28", + "72c9e389-bc38-4162-976b-0967e5f3b80d", + "4a388c04-8a12-4bd2-b138-bd9bdc96569b", + "c7581c01-cbaa-4836-9558-5cce5ddc3553", + "63925594-174d-433e-b833-6c49483d498a", + "de8ac935-bde8-4e25-9f56-dd9dc1c45c46", + "f80cc1f2-5557-4afc-b1af-a7156e72d9fc", + "af87d786-f360-49fa-b0e1-baf32c85fc05", + "b3c4beea-5046-4cd5-81ed-f013243109f0", + "f5d68a12-c41b-4e14-9d19-0c414dbfd849", + "a2ed269f-dff9-4904-b988-45622e21d107", + "d70ed750-70db-43d7-a5df-6e88bf3c3043", + "ccd85c70-e8e8-4c44-9819-15928c47a016", + "c25e392c-69d2-4922-9918-a1db6d695f63", + "c52f75ad-be6a-441e-8611-1f8dd54bda27", + "60d44b74-7b8e-4f56-8264-4d3fb699c580", + "f46aa18a-b2b2-4995-a036-19c436015730", + "8800b35a-3677-4f73-9254-ce904918e816", + "733446ad-9359-4c1e-bd4a-000d0f761066", + 
"b07745d8-777e-48de-9088-4815aaf1fab3", + "59d8355d-ea62-4d42-97ff-b48a5b02347e", + "9aa14aa9-51ac-4f7b-a316-685b9cb5dca8", + "3d55d29e-db0b-4270-aa97-ee7129e0c0c0", + "90145f97-10cf-4952-ad50-780301e2cbb8", + "73de4708-361b-4422-8fc3-a4e27ae71e7d", + "612f98ec-6bd4-46bb-90ab-f05bf9f12d3f", + "a3845096-2bce-4ef8-a9fd-41b70298d542", + "f5d596d2-2946-4f4d-9e11-74f089fe09f5", + "00c5eb54-4624-4861-877b-3da4df8c2dda", + "f9dbbcbc-dfb3-4c0e-8286-5c07260b8a3b", + "de971fa3-2f25-448c-a482-f5d39de728d3", + "a74e2786-7b26-4d21-9c66-03a167fbc251", + "6ca9b177-af5b-4d7e-8025-d6a6d532d28c", + "eaad7cae-d26c-4781-b93c-00a5ecd1fe9b", + "108c9a51-ccb7-4cbf-9140-a50420f387d5", + "84d7ee61-dd17-45ae-a896-60ad5858eab5", + "ac7891e8-f237-40e3-8110-b178e4f2e785", + "bf2ddbd4-7e41-4360-9648-61c50e0554ae", + "b2e19044-6e4a-4639-9de2-97637c2ca914", + "e0732f90-34b0-47eb-8ad1-848df330ec12", + "a8dae909-4aaf-4872-b8d9-52d5a6d9f670", + "5e0ab741-0285-4b45-89a7-c6decf343ec2", + "f6759287-eac0-43aa-84f1-28049bf33722", + "e11f955b-1ad2-46e3-a497-567a7ea14282", + "e9690134-27e1-43c7-96c1-21dfca7647d0", + "9a1c98e4-53b1-486f-bd4e-8aff320ab712", + "a73a55e2-d216-494a-b722-7ff723fe89e8", + "1f4115af-c406-4ad7-b17c-50be7e3212fb", + "df0cae1e-b545-48e0-bb06-57a1afd5a568", + "375e3ac8-fb9b-4460-86f3-3bdf6960c905", + "b02fb273-5f9b-4ee4-a406-0d85cde753db", + "e99da27b-69bd-47b5-8eac-4679a56a3c70", + "4cf85387-6775-449f-a3f9-c6db477f5ad2", + "03cccc0f-05a3-4899-8d76-9e3641bde605", + "2923a97a-cfb3-419d-8081-1e7797454466", + "18f01b16-7c76-4ab3-87ed-4f99fa72e080", + "67214207-31fc-4b4f-99fb-6746dfee1f4e", + "d417a83d-21c7-44c9-8e01-bb138dae6459", + "552db2ec-d304-4bb8-b19b-66f77f138ed9", + "a458d0cd-e5ba-45aa-a783-a3d7441970fb", + "9e433cc9-49ed-4a39-a204-fe8a663307a6", + "d6e409d5-093f-4b6a-90b2-015410146ff1", + "078e4671-f71b-4add-af5c-eb5021ffa7e8", + "c419729b-79a4-446d-a0dd-6f4456214d42", + "d98e779e-dabc-40ff-ab1b-d63534a64eb4", + "8d76b4f6-cfb0-42ca-b833-a5ede476e6d3", + 
"d6b9251e-d52f-4249-b2d6-ae84024eaabb", + "77632ec4-c383-4e3a-8efc-7c7186076b6d", + "43f1af13-4a36-49de-9f21-f4a25aad52c1", + "ae7e4e38-1104-456a-bf9c-93402d770e5c", + "8a158392-ca99-4427-bdee-0d9b9df79f56", + "d13505e4-95b3-408d-a4b9-dfeacdd76de6", + "baa25a24-388c-4504-926b-ce7c32d59660", + "d1ba7f8f-67e6-45c6-8474-16eb67f66480", + "0da6cb83-0910-4c3c-9e1b-f44fe1d37c64", + "69dd0b3d-5d3d-4a0d-802c-3ed5f5b51d89", + "d8e35406-61ae-4c4c-9838-f5565eea1af5", + "dd0026b2-872d-4e86-b386-a63e3afa9aed", + "4d7f989b-0ac0-4779-bf4a-2d540f9a7b54", + "70346846-24fd-47f9-a034-4dc5250eae4a", + "1674a53e-056b-4866-b7f5-be3b8e35940b", + "5f878e05-7562-453a-b559-9f930c836f6e", + "31876db2-1151-46c7-84fa-98e0f15c3f0f", + "18d082c7-f8bd-41c8-abe2-ac68d4bc2412", + "a5345c98-0540-4bef-9e8b-a9d2fcf14b3c", + "186cac10-d1e7-4567-acc0-85176905edf3", + "aad9074d-ddbb-40bc-86c4-21038463cf15", + "95a7f490-301b-4376-89d9-35a0ecf0a9ed", + "10b62b11-0efe-431b-96c7-8870a2a85abc", + "f19df8bd-fe85-4d7d-8932-92ad8f5b0ea2", + "9eb00cf0-850d-49ea-b472-ad6bde9fa63d", + "cd4bd3e5-6dfa-4bc6-ba4a-4a59ec220ee8", + "576c0610-c1fc-4222-bdf1-27c1d85cbe00", + "7d21080e-f74d-4447-8dbb-0d2a0aa70d22", + "77838d36-eb36-486a-a4b3-36f016f6c541", + "5afd9011-c0b1-4ccb-9393-6b678d51ee7d", + "78a743b4-df6a-499c-9bf2-feb118103564", + "e035c2d2-9aa3-4e8e-b333-5cf66a64d536", + "a5c087eb-4c78-4dee-8b21-9a7c38d1b21e", + "ed329d33-8ac0-4c7e-92d1-121ac38faf01", + "b8920b4e-f19c-443d-b1b0-e86ec17cebf7", + "b30a467f-9302-4be6-80a6-d2495ca2c66f", + "111fb62d-f24d-451e-9823-597ac0505374", + "15393705-de30-466c-9cac-f368599198a7", + "55ab5db1-4fbe-4aae-ae4b-3833309bfffe", + "9be595bd-1742-48cf-bbdc-88e56de8528d", + "5ec41271-82ea-4d18-b83f-3cbb6cf23110", + "183baecb-722f-4a57-a3df-d0329b7d0b77", + "c07a22dc-989d-4429-8350-9c22752ffb99", + "0aab786b-8dbd-49fb-ba67-8ef1e870d894", + "febdc8e2-f2c5-47de-a125-2f1d8b253b15", + "aeb2ff0f-ff4c-4e93-b3bb-230b294173a4", + "84bc7a52-e840-4cfd-a248-88bb663aeaff", + 
"2a84009d-f37b-447f-8df0-b3dfd9e4a13b", + "9c4aef97-f83c-466c-a989-98ff7d167457", + "3fabfb04-47cf-467c-9b93-3ace313c837f", + "d5c7eb52-86ac-413d-9c95-de3fbb6f4ca3", + "9248dc11-d5b9-4ff9-81dc-7b3cdbf5ed86", + "c315fc22-3c94-48d3-b740-18ab919164df", + "65833b37-a921-4610-aa00-7a73460fb0b3", + "442c64e0-c604-4cd2-8220-d672cb9df86d", + "a15d720d-c748-4ea1-b20a-d7ce826083b6", + "8ef955f8-c67d-4958-9dee-d2ec5af47f90", + "42b35df4-a778-4013-8913-42e0dc43895f", + "ccc78ac0-13ee-4924-bcb9-51ca8963adeb", + "4694cf6a-1b2b-4f26-b136-26e70980cfb0", + "c7e2cdf1-e0df-4061-bf59-87bf77caf378", + "a213d8c2-579f-47e6-995d-16a0ad6b2f32", + "1868aff8-65bc-4bf6-a1c5-608a58428158", + "0e9139ca-1984-44a6-9344-caa7db7576e5", + "13a32c3e-b687-414b-b913-33df26053ea2", + "c33ab773-805d-4843-8751-b2fa45577951", + "567d7002-7a6c-434f-9418-85393893f6ac", + "c8634478-1a3a-43fa-8f1c-32b22fbfab0a", + "5fc03462-30fe-4330-8856-319494490a52", + "801fe375-1ad0-4bec-9274-4203c232907f", + "933c3415-9b9b-434c-93a1-dec3a38c5d13", + "2da59dd9-7b2d-4e13-baa4-73f536c6bd92", + "8840f08e-a2b9-4ccc-92fd-1230314e841e", + "a64746ae-1c95-4789-9685-1bcac00025fc", + "b253c5e9-5e6a-43f2-98e5-c301a81963b3", + "a921d864-491c-41d3-a714-54868009dd8e", + "a134beff-1b1a-4682-a59d-a84fea8d38fe", + "1147bfac-50ad-468c-a0e7-4d6fc4c57a2e", + "baaedadf-4b85-4033-b124-b958a8faf4bd", + "0cb99450-9fa5-4a3e-a267-74d1da2ca18b", + "cb58a62e-f61d-43ee-bdd6-427ebe20a11f", + "db114539-3697-4159-acb7-c69251921225", + "82c19a76-af27-49c2-b717-bdc08b6566f8", + "a5d3151f-a3cf-4e46-83df-20f29d5d86e3", + "34815bf9-f5c0-416e-8abe-0ee22121173c", + "a2b13a20-26d2-4905-b84b-e3e072a61014", + "70515512-483e-4ee0-a45e-f49c93ba5a14", + "efdf98ff-2973-4273-bc89-f718a2b1d1e2", + "4049aa01-5823-4800-b144-3abdf1e302ad", + "f7d67b66-a0a0-4ac4-8690-87b2ea3bd888", + "cc127353-b6b8-49be-9ed7-a4570746d973", + "f895509a-307f-4a1b-9235-95042c14084c", + "3f1a71a4-d59d-4c60-96c0-86c301cc185e", + "2ecc37e7-0fce-4eeb-b0ca-53b47a9fc32e", + 
"f349609b-4e1a-4c41-adbb-6aba98301f93", + "aa268739-9d81-4ae5-b965-7ac8a5b8245e", + "5a4d14c0-5aad-4c5d-a329-e0ac02c019e9", + "78836d42-7441-42f2-a7f2-ed30ffef7325", + "c4297d69-43cf-42d5-876a-0973d0a98412", + "b6e0f7ad-2420-4534-8822-0faf1036970a", + "b3566446-316b-4cd4-b21b-0c79e291aab9", + "5e138e3b-7a4f-4dce-b268-40c3eab9e18f", + "77723609-15a2-4877-b317-b0957b24973e", + "701666dd-1842-4318-8a31-6bf1a9407068", + "ec86136e-fc3a-457a-ac61-c79698b714cb", + "892ca615-89dd-46ed-b1e3-8be0eb8a138e", + "61f69ad3-6937-483b-89cf-56e6468269ee", + "d620e4be-48d6-42bb-8dc6-1c895a56b553", + "f6443996-64fc-4028-9a69-985235217520", + "e75ce7a3-2c44-43a9-bfc4-8d2ed9bdbebd", + "e1fcbec2-b2b3-4676-8e04-70482aa0c277", + "2ad40d0a-e32f-476b-bbe7-bc1ed748d448", + "328a2484-6e6a-498e-ad08-bd7873f9d4c1", + "299b3f3a-1d2f-4840-9e2b-56e2b7f23184", + "eede55d0-75be-4732-9452-0fd43f48bf16", + "73f39e36-0c56-45b3-ba09-d72633487d83", + "e592b5e4-2ced-41ee-9a4d-d5fa70ff95a6", + "28d5c7be-afae-47eb-822f-792630d2c79c", + "500ce99d-e348-488a-8635-2153afc7d899", + "5527b6b4-7eb8-49f9-ad35-cbe668765e43", + "db347c3d-9bba-439c-8ede-267bf4b687e8", + "035f611f-09ed-41fb-bfcb-ddaabca6eb89", + "3362d3da-c3f5-488e-b875-965d0c5ddc36", + "ae9e921f-8eab-42c3-b1b3-662bfcde4d13", + "35913c0c-9cac-42c1-be53-f90a2facdd8f", + "1dc805e0-c1de-4e23-ae0f-3ee3f5771b20", + "bc1d970d-5ef0-4323-b241-61fdd421c82f", + "d22aa879-fb96-437c-ba8a-3fee5295d6d1", + "eaa96d5a-2293-4856-989b-edf73dc73a15", + "0142614a-53a2-43df-b7c8-cc6e822460eb", + "1b1eacea-7f54-42ef-8bfc-5b8f7edca9f6", + "1173943e-195f-413b-a730-2bd0767d6905", + "49945571-beb7-4790-9771-c5fcfb9b1fe6", + "a7bc63e7-2e24-4876-8678-7ce15a8e2ec9", + "f9ac02c7-a80d-4c45-808b-30b6cd009a04", + "948717c4-e5d8-4cbf-a9c8-0f0f2be7a208", + "63d4d1f7-712e-4e94-80d0-d6c401802122", + "f1985195-a5ea-4bf9-af7d-74a877ed4e2e", + "9a6341fd-a89d-4c08-8788-64e9c344a9ff", + "1084effe-22b0-4e07-84ef-799b8c978011", + "4e2014aa-ffd3-4fe7-bf65-f8f0c08a7b6d", + 
"7adeeefa-feb9-43d1-8003-8bf9ff397a91", + "6d5ec03a-f2bb-4a7e-b6c6-f8c44bb5c6d1", + "4ed967ae-e1ee-42f6-b1c3-9d70d3d02b16", + "0626372f-fe5f-4618-a966-fd259fbf3832", + "0cb6de58-86ed-4561-a5a7-5980fe26031f", + "35b4a643-f612-4642-a0ca-9d652b117842", + "1025de36-e898-46a2-ba40-335ee3209f06", + "a5de49d3-dff2-4e81-8d83-45acc5e2d3f0", + "1566f135-5cd5-4aa8-b8e5-c116f664b249", + "580d9d0b-c2fb-4335-9116-9cb93dad9a07", + "2cb429a0-c77a-450d-8d3e-c8b06b44f68a", + "77880543-88e0-4c33-bcf8-d5261fef1c18", + "d5108bff-d8f8-41cd-a970-88cf92658a27", + "d410b141-00b0-4863-acee-590d25d6657a", + "f034d5af-e888-4fde-9497-bc3f98788ae6", + "1dbb4f2a-27ab-432d-87c2-a680bb056a94", + "b35f1dc2-57f2-4893-97ed-eda465212c93", + "d284fa86-501d-400a-bf67-5c37ca00b664", + "acd2beb9-cfab-42b0-92a0-5ecad817dce1", + "4b77231e-cf5c-4e69-ba99-a223275f5cc3", + "251f3add-5647-4227-ac8b-da89ccff4c4d", + "badf1ae4-b519-41cc-91a3-8b773f95b9e3", + "7a466eb4-be9b-43e0-9bb7-34ddef263eaa", + "25c7dcb4-f129-4655-aaea-53922316d7a3", + "84e78058-9922-4735-9d28-e0f6e6266a9b", + "4944827c-50bd-4726-823f-dd5fa4b67931", + "a3809cc1-8c38-4acf-b83c-9355df18600a", + "d3aa724f-6843-447f-82a0-b4e9df1a91e6", + "6b36742a-668f-4953-af3c-b2aae6b15266", + "32e93be1-0f9e-4e43-a7be-a78f96ff41dd", + "f73a9d24-5b47-4f1e-9ca0-37e4c36463d1", + "ab3e2b69-4853-4c53-8fd4-78b0353aff1b", + "683101c9-c669-4968-9565-33d6a095782d", + "fa0471b4-50d7-4d41-9eda-cbec46e85ab5", + "94ec18f7-fd63-4ee4-8ea2-9ae20c391085", + "6dd6490a-b140-46cc-a0e2-1a0b5a97f7a3", + "c7d0cdc8-bb6e-4d88-b4e0-27d129e4c0af", + "45c9afa4-55da-424c-9876-56fcba9e3e6b", + "dafb13f4-43fc-4311-a814-9872de54ace0", + "3b32b44a-1ffa-4625-b89a-a92b39b9cbf9", + "9450743a-4a98-40ea-b3bd-98aa8e2d76cf", + "2f37c7be-b276-4caa-8146-932202d9caf0", + "8f4d9cb9-a01c-4202-952e-cedbb80891ae", + "367e3a2b-c87b-42bc-9738-de6762457e68", + "69abfc3c-feb5-46dd-b915-ec1da521f19b", + "494a9fad-9355-4065-9319-134c66bebeb2", + "4479d91e-61e2-480a-8d2f-f1116728b959", + 
"cda6d449-451f-40db-9721-3ac652ba056b", + "0a69dec4-d9b3-4510-81ac-92ddf98124d9", + "c261bf5c-dff5-43fc-a8a2-ce717474dce9", + "dcfafa11-5f94-449e-9533-41f6efa60180", + "a069799a-86dc-4602-815b-c8598c3bbb73", + "be77a9f2-ea20-4e82-b67f-79e693c8e944", + "5efef663-ea2e-4247-a43e-fda7a0a17a21", + "f3cdd781-3257-4285-bd69-06771bc3d5ea", + "f33752ab-1aa7-4ca8-90cb-a20b2b627c39", + "2bc2fe14-202b-4483-ae9a-98edee2d2575", + "b2aab99b-d87c-41d8-b31b-1454184335a9", + "9c2f468b-838c-47a8-a7eb-43ae2e3f5ac9", + "22c0fa9c-c926-4e6a-ba6d-3dc5de6cafff", + "760ad11a-9cb2-4245-a858-a049fcc45abd", + "a4d74f0b-1122-400e-a100-036b3af8a3bb", + "dbd173fe-f0c9-4b2b-83a6-82ff3a494fc1", + "970253a6-4ee5-452c-9578-8828bcfebaa3", + "17b21a04-c81b-4500-b15e-e7f41ce10058", + "bf8820b0-0483-426a-a02b-74783b7456cd", + "00edc23e-d614-4a9d-80fc-368c8fb953c7", + "c931fc2a-c9d4-4409-a126-53e2933fef3f", + "6156f5db-2f7f-45cf-a1bd-c0978c75bc1c", + "72e80b2a-04f5-4e97-8388-a0527d11dade", + "9e7be720-29cb-4b6b-b78b-188861541336", + "a856828b-726d-47e9-8a71-4b8d51390fea", + "a3303c31-8cae-4dc1-8747-3e663a52b0da", + "48b3a7ce-c92b-423b-bfd6-584073cd4e38", + "fa7f56a8-60ee-479e-9ec2-433a8f7b755f", + "d43ab8b9-8128-41a6-b281-f28643513044", + "d8be7d2d-e5d7-49fa-97c8-beed71e65f8f", + "f6a0253a-b888-4458-858c-686c6a4f0c5a", + "3d1ab7f6-497c-4cc5-b3f6-9a72fd735185", + "f9519650-8f80-4aef-af96-77047a4942f5", + "c82a9630-7aad-4bc1-a202-cb5af844a704", + "865eff33-d88f-4005-bae3-4a84c32a2bea", + "facaed58-f8ef-4ad3-be1c-5a4eb46ff3d7", + "0e4d6402-3a18-4e0b-b610-7f85a24d037d", + "e1ce00a5-d13a-4cff-885d-66bab2498509", + "0dd57eed-0038-46a3-ad23-6268c5a00e54", + "43b94129-4ad4-49a3-99a9-f340dd2d6f21", + "e24186c7-42d6-4e59-9d4d-33635b8a656d", + "e00cbcee-7118-4a0f-84ca-5fe76d3109a4", + "ed0c71f9-956c-46ee-9888-8ce8a7fb4044", + "3ab29cee-2d25-4e29-ad38-d10a68a5343a", + "119c1bf8-84ac-4232-a4f7-7e0a92bdaa57", + "6b7d0c0a-4928-4089-95f1-00b41a183b1f", + "4612fe33-c83c-40a4-9017-6f7d35f75032", + 
"8fa223a0-f698-4fcd-a76c-a0f8ba3342bf", + "c805d5af-f37b-41da-b1a6-34048d8a02e2", + "f81fbc9e-10df-4910-ae5a-c893875dc33b", + "16b01693-13df-4517-9884-bc36cdf4f243", + "08247e29-ed79-46ff-ad7e-3400fee2952e", + "097d0b66-7bed-433c-a5c4-3070c812453c", + "04bb7729-08c9-4053-8dbd-1b7e06413676", + "88345310-2092-4663-9e3a-39f9f156787e", + "9d8dc207-0ef8-4231-abcb-96fc342ca911", + "86d40c93-797a-4022-968e-f3d45dfed1c9", + "54474890-4668-4a4f-b894-6adb0542fea6", + "9d486a2c-115c-48ce-b0f4-b81bc5d3c890", + "0dbae374-46b4-42c5-ad06-2c0fde0e3bf3", + "ac499625-bb0e-431d-9c24-41b1a5c2d475", + "b73f44e5-493c-4be1-a89f-6658dd32cc65", + "5a6f8de3-a39f-4230-a2c4-3c77a53c3af0", + "58529dc7-a201-48ae-9b80-9e502c22ab82", + "65b7c349-ee47-4224-8e68-909f2ca8efab", + "2934fda2-64ea-4296-b98a-e30e4781bd37", + "6d74431d-0754-4d37-ae6c-b8b029e5e412", + "224521b2-2066-4666-ab3d-fc37dbab65a8", + "a3d1a888-ddd9-4330-b0b4-8fa47e9090eb", + "a91786ac-42c6-4541-86e0-b3d70ed7d288", + "eb79ffca-7283-4127-b849-3accb203acd8", + "2a06124e-c3e9-42ad-aa58-0bff49fd2147", + "36a8fa2e-e767-43ba-a36c-b9e48de50cf7", + "4d21977f-545f-4ef9-8675-c159e523323d", + "debe6566-5298-4abc-90dd-43219aebb907", + "3a4405f4-535e-479c-b81e-868c7897d1f5", + "788a026b-3fa8-4605-99fd-722dd976c3bc", + "2c3557b9-eb41-4f52-9ace-baedfc5c788f", + "8628b9fb-1979-4803-bd52-dc4f4bca2347", + "b7046c2f-ace6-4f85-8c99-2beb367003cf", + "54f2a2e5-018c-4e62-a70d-993b220406cc", + "ba292131-f716-4ef7-8c67-c5cab9893de0", + "5db17f9c-6c38-40e9-a44c-66b12b214472", + "561f419a-666e-48c1-a546-59ab49ec7c46", + "1a2fe596-4753-4345-86e1-75679a9b9c74", + "2f2be132-d97e-4df2-b316-e3c21cd2d3fc", + "10d752f1-0315-40c6-9bd8-66c32ef140ac", + "96387cd1-1357-4c94-8815-ae07c898e3b5", + "deb0bb63-9f65-4fb5-9c4c-02c8e131eb30", + "a7795de1-8be2-4a09-8e6a-8ceee8d6682f", + "4c48a88c-ece7-4e20-ad74-ac661ce78a5d", + "729ac401-756d-4923-b12b-ddb0d0348478", + "920ff162-2b2b-4cac-8d0f-1645df1ea542", + "d404b685-24ef-47bf-bd63-2374227f40e5", + 
"f5d5fbbb-0168-4109-8e21-54b040218607", + "6ae66773-9d2c-46f8-a34e-78c42ab0915d", + "52edfd6c-1a26-4c16-a995-fafbcf19f5b2", + "27e13306-f11f-4e58-a173-f060af7c5c94", + "8900f21d-d157-4a8b-93a3-77d9ca90e873", + "6fa93d82-7df0-4c9b-97b9-045e484bde54", + "898e3cf5-8cee-470d-a9c8-f822f81571f4", + "7a46a4fa-81c0-4fd2-9ddc-55fe84d398d9", + "cc7c4e90-bcaa-41b7-971c-18aee255b142", + "a8bdcfb2-d7d4-4e5b-882f-1cd87063aa8c", + "e418a737-d800-4065-9122-3111694aad68", + "eaf1dc04-3b91-4025-abe3-b28996fc6800", + "ea623de1-343f-435d-bbb6-ac75e2f2ba80", + "77750854-ef9c-40a2-9e78-eedddf226928", + "8dae48fc-bf45-45c3-83db-14cb49de007d", + "0d8839fb-cf28-405c-aebf-e5f343f20fed", + "1ed0d6e7-0af4-4e08-be74-5a51d0d5b24e", + "7a508156-ea75-4c76-a5b4-3156d80b2639", + "c79b64a8-17cd-45cf-8570-c2a4718f00ce", + "5f3f41ef-551a-4983-b6bb-4dcd37aa730b", + "5085d48c-dc02-423b-bdb4-0e600e29f304", + "e189253e-52ba-4ba0-aee6-1dedb42ac710", + "535c0f6b-3d9a-41f8-982b-4cb9e3339f86", + "a703c7b0-4a57-49c7-a3a0-78d76b2c5c01", + "ef987827-3463-4b06-8ff1-bf3011c83409", + "72445322-1ca5-46c6-b0fa-d2cd75082484", + "7401453e-5c33-4934-827d-073e08b4ca28", + "79ae1579-8a4e-4cf8-abbc-f4ce40e90620", + "35bd19e0-5fdc-42fd-885c-6d24715a1b04", + "f12a31b9-6163-4829-888e-9a14952d7155", + "21b3eafe-f74d-4e80-a8e3-13f8d7080a43", + "2068a27f-9df3-494c-aef9-b9e52d23e111", + "482d2202-1d05-4ce3-ba35-a8f31f30f01b", + "6bfa3dde-842c-47d6-9c23-7dbf3b7eaf1f", + "025f1174-f434-4bd1-84dc-193fb53a0fe2", + "9079fd62-b694-412d-9f2e-5dc28ca91123", + "0b8dc9aa-9857-46d5-a7f9-59aab22e68a1", + "e69cca3e-d488-448a-aa38-8904ae1c44d9", + "43f40010-ec05-41fa-8dd8-4fb4176911ed", + "5404afe6-383f-4731-92ca-56a428944f6e", + "5051e81e-e289-44b9-b3a5-e0eeaeb22fd5", + "16c194a0-5c0f-4439-bbd1-1aa3088cb810", + "928e7a75-3c3c-46dd-bf44-a0349962869d", + "8ee335f2-bea1-44a2-852a-b52ecc425104", + "56f02965-0101-406c-b8b9-ca716606c41a", + "50111029-7cea-483d-bb04-56b44e4b4057", + "24d17984-c027-45dc-8d23-be3f159379fe", + 
"6a6fdcb0-dd03-48d2-aba9-c1887a8b7ab8", + "2a0a3c82-d51a-4390-8c11-09e0109f6652", + "140e8ff8-b3eb-4797-be66-5a53910ac767", + "a477e0ec-82ad-4c1e-b7a3-2a78b42de337", + "54200714-78b6-4ba8-b367-1af6624882d2", + "18cd7b99-06dc-476e-bc1c-a4e9b7ee6c5b", + "2a52dadc-99c4-44f9-91ab-b24b509f5b0d", + "13ef4af0-5308-48c8-8ab8-939fa705e2ff", + "5f89bf32-d6ee-42e4-9d2c-908d37c58376", + "1c7c48f5-e7d3-4b4c-bbe0-1b1125704467", + "0ae077f9-7810-42fc-a86d-ac12101de707", + "0463d802-5e43-45cb-8114-e5b9301ed7bd", + "2ec51bfd-5174-4307-a3ea-9a35e4f5eb14", + "6c8b05fa-2cc8-4f2f-af5c-6e4b47b23c4e", + "387b893f-b843-4dfd-90b5-03190448b1da", + "635fbc71-8961-409e-8bf3-00f773b9344d", + "277a2aa3-0b61-4015-bb0b-ddd540ea2029", + "57224e43-44a7-4e97-b877-43924275c166", + "7c70d2ff-657b-404d-9339-f28301aaf05a", + "ef733f45-2eb2-4873-8fc3-ae9c29c6f302", + "8e53d25f-1c5e-42e1-9077-3493970a66f0", + "7316ada8-029b-4d89-9f26-bc1a847ac6d5", + "ddd25906-b245-4eb7-a0b7-82daacfa6aa5", + "5d86b680-8c94-43b6-90ee-3b4e41d0cb53", + "e4a96b7f-712e-4b85-aaed-f8e31286df11", + "06761b0c-225a-4143-9c63-aefaa6085ff1", + "2c8e4f05-cbb0-4989-8002-7db1dca2a78d", + "3923bed5-8159-4e3b-a645-de1f5197dda3", + "6b879d85-217f-4c09-9cb4-2b40e885183e", + "fcc0bd73-e4fb-4690-9468-4a6fa751b0e4", + "1bc349ab-9561-417d-9c9d-7f885aea841d", + "d4689624-9e45-4ac6-9ae0-4c6311b84fd8", + "9a29f113-5061-4a1e-88db-3f012a57015e", + "02ea62b4-85bd-411c-b3a1-1d334ea746c0", + "a810e22c-6fd7-4993-8321-716dc4f261dc", + "fc1b53b4-3fa9-46ed-af26-e1954e290ceb", + "b5bac391-1ec2-4cd4-bfb9-a09302816474", + "5903ee74-a54f-4889-b6b1-01e1a410e548", + "feece9a4-538f-4fff-bf5d-72a005e9b21e", + "9ce237b9-c2f6-487b-9a48-015a0fc82ca7", + "d1b31166-df80-45fd-8154-7a01f718abd2", + "35390e45-8cda-49c8-94ad-848daed882c8", + "36778f7b-adb2-4a02-88e0-2b4a6aafb6ec", + "34502209-1459-40b5-973f-edb735f88457", + "13d2b183-1b28-4a2c-8a99-be8a6c796e9a", + "ceb885c8-73f7-463a-88b9-328822835a7c", + "edc26ec9-6ab1-4a32-9781-abb6ab0121b1", + 
"56da39b0-26b6-4c81-973e-43d041200abc", + "52f73e4b-d41b-49f6-b0cf-bb19d5a3be8c", + "3b93638b-4935-4063-b7d9-4230fc321698", + "a0eb0acf-b126-4fd4-a86f-f79782778c7b", + "ead222ab-d616-4a25-819d-4ae5e9bd9cb8", + "3be1f788-ce66-4fb8-bd2c-6ad5385d890c", + "333d6b85-05dc-48c1-be3f-320264c91b41", + "bcad1a91-22dd-4055-861f-31f40c89990d", + "e5186bdd-8ef0-43fe-a979-61f66afa023c", + "5b1d1904-90c3-4c46-8866-74378d637532", + "d4a1b520-aa2d-4c1d-b76e-721749e93785", + "1015e200-5f4d-457b-ac13-eefeee75c560", + "600c3142-d9a4-4061-ab2c-2e6ff84e5162", + "8400117b-83fb-4947-bcc0-96bdd1fa0e3a", + "316a8574-e623-40db-bd7f-e7b5de143240", + "542889fb-847f-4dd5-9cfe-806254665d2d", + "0da91985-2cbb-4b7b-98ac-d708fd7b7884", + "294f2418-68ba-4d8e-a6bd-9a86eed0ef04", + "527a03d8-d892-4256-a90c-3918e099be40", + "037dc83a-7bfb-4a42-9faf-15398cb3c653", + "0ef3ee9f-325d-4f90-9279-3a134d60a5a1", + "e00ca69b-40e6-4998-a5ae-f371b44acd75", + "834f7726-f467-4db9-b199-95be28844012", + "c83a0ebe-0a04-4edf-b444-421f66b94d2e", + "c9e438da-b83f-4de4-bc3f-dec22e9f2489", + "ff393111-6bab-4092-92ae-80da64983227", + "fdebcb63-da88-446d-aff7-e97babd84c9c", + "847fa634-d504-4b0b-89fe-b784b3f0aac9", + "12ea1928-4c65-4a9f-9b23-baf2e7554cd1", + "ccbeb950-f0c6-4bbf-a114-bc750b3fbf98", + "ee614826-ce47-4d54-a19c-39b5189242cb", + "f71b2b11-7dae-4215-af52-cdbe28d53c87", + "371cff3d-8ac1-4e70-8330-ece612b28aba", + "3db6b450-4b53-4105-854e-d366cf1ca144", + "c0434566-5358-441d-a936-fec12d671d9a", + "7062e8c2-022f-4408-b0cb-e5d980feb4fb", + "45542060-da01-40f9-9dab-e297c9876c40", + "ef0613e2-da9a-4f27-9ee7-1f6c0e6f971d", + "4ea14960-7740-4575-b1f3-00fd7156bfd0", + "7bee32ae-6672-42af-a1b7-5c61039f9ac9", + "c8642476-7703-4f38-b4b8-dc28a1892e97", + "6c3d3c88-381d-4d81-aee8-1d1c25a1c388", + "6ddbd8e7-0381-436a-bc93-b45bdf578f9c", + "7af550ab-9f6a-49a3-a0c5-1acf553301d9", + "981506f6-3d3a-4298-9995-affef33e48e9", + "f2da71bd-9f2e-4765-ada4-87146a75f038", + "85aa987a-37f4-4ab8-8986-2ba88c176b6d", + 
"259792b0-aae8-44c4-9730-3c4cc80736da", + "666738ba-debd-4c61-9e6c-71c0041c3714", + "cc056d2f-5d88-4745-973b-fa91ab7ce414", + "4162ac91-1fa1-4aa4-9f70-db46fb6b0cce", + "919ab0e5-0949-46d8-9bee-be4c6cdc8445", + "18221931-c055-428f-9b47-c2302073cd6a", + "d35f2bc2-7fe2-4392-8d8e-53f1e830b3c3", + "7bab2c98-3f54-490d-b7f6-182da96d8b0c", + "643c6a35-37fa-4067-9405-da553652bfac", + "ffad7215-3672-4a0e-b0d8-0cb650c660b3", + "6d7cc52c-c4ec-47d3-b729-15e0bba4a3ed", + "aa1eb4b9-c910-43b0-921b-4c55e1285631", + "aa64a7f6-49d9-4640-986f-92ce306cf909", + "ad4f84fe-5369-4a4d-a620-9ffb17afbb4c", + "d64ac2bf-130d-4205-a5a3-e3aeca54b884", + "d227ae62-a1fd-43dc-870a-13aaa6104565", + "7998456e-f94e-42ff-a6cd-cc9107a6116c", + "1db0af19-ad58-4133-8e66-cccbb6746028", + "5e85f68b-be28-457e-a2b6-ba83458205d8", + "688514df-9c32-4c33-827c-d91b67acbca5", + "eeb67705-4585-4ca2-b23a-05ce1270c26f", + "ce6f1cc2-4167-4a52-b5ab-147cede55fc6", + "572d06a4-12de-41d4-af32-9ca247261dea", + "3a6a0b16-e9be-4f89-aac5-85a96f92a697", + "fd18b6a9-32ea-46a4-afc7-996e1b339467", + "2a11c1f4-f4f0-4c9d-a70c-762bfde4274e", + "711bb25e-55d0-47c3-aa54-258fb082eecd", + "71592a0f-aafb-412a-b854-2b78f8fb0549", + "59f891e5-9068-43bc-8158-3c1af02aa4f4", + "8bbe9bd3-7c8c-4823-9f3a-0549cdb7e36b", + "2692c262-5078-42e2-9592-04c2c766f52b", + "f2fc9703-5f82-46f7-86f9-05c6a0ad625f", + "195b2a13-4b1a-4d6a-9396-8832dc1721f4", + "8e5569f0-1a13-4a02-b1c5-94d1bb89490f", + "33e84d31-c8b6-4a1d-a734-63f01cffc3bf", + "08de6498-9d6d-4744-99b7-be8291acce6f", + "a959d294-151d-4d20-86c7-11c86c7c6cce", + "4bf6f9b0-3291-4529-be41-664c725b0915", + "570012e2-5b86-4e8c-97f2-e24268008851", + "2adacbe2-f378-41be-b816-d75eec2eabae", + "f2d16fe0-0251-46b9-90ef-e3d35b2d3300", + "08a8ebcf-cd90-4e59-b795-4c85f0558608", + "54a35d01-a780-4bcc-94d1-9772ca4a9263", + "53893564-6be2-43b0-ae90-31383036bdcd", + "a8c226bc-bdf5-444c-8927-a1ba183058e2", + "77b832df-bd31-4e03-a63e-bd44c1e78967", + "ca5bfbff-d7c9-4220-adff-44ba2fe4fe9d", + 
"f5fd620f-4106-4ad0-8dc1-a5692e0537e0", + "5acd9fd2-f1e0-4921-8163-d9290fb26ac7", + "29c9efce-c952-48c1-83e4-0b79a41a1c12", + "ed078e66-da77-44d0-9d6c-5d7e559a522e", + "d636dd48-bb65-49d5-ad59-ae6727a8e922", + "0d7a738b-98f1-47ba-955e-9676cfd856e7", + "0b0cac22-5b35-4251-baba-01470ae92d38", + "1077d96c-ded7-45df-b890-71e72915c348", + "d9bc4aa5-d6d5-44f0-ba91-47318991c109", + "8e87c09f-2285-49e3-9369-a1ecda9b371e", + "bbb81c6e-76d6-4069-8d44-88aa57e8c879", + "7b524e69-fbb0-4269-a487-57ce97c808c3", + "3021aa1d-de1a-49e6-b852-5c4d8e0ff853", + "28aff1d8-aaa2-468d-8031-3eff85a78578", + "dab13ff7-dd4b-48db-8f8a-bae2600943ac", + "d71958ad-997d-4213-9a06-80aa8bd12765", + "6e3186d6-3835-45ad-a8b4-cbd536d90509", + "bb3eb885-ce41-4df4-809e-1bd9b1b5eb4d", + "2e6180d8-c39d-4a1a-b14e-07e22dec91a6", + "d5b7f043-9579-43f2-9c10-ea0af4866693", + "76fd4214-e888-4c97-bca7-38e20629db6c", + "c142ab52-41ec-431e-93d4-473caa1eed75", + "73cdaa90-e2f3-4bfd-b177-df28262b5add", + "bda9c904-b554-4b34-999a-b033abcda79b", + "47a499a0-11b6-4dd2-b388-789c92224a18", + "ea76af0f-7b10-4535-94d6-b57f2e8e6ebb", + "908f0863-a48c-49af-b58b-abf17a65a38f", + "e50321c4-2309-4adc-81cd-85b66457492c", + "bc6bee18-892b-47c6-94c3-8d751b776d10", + "9da6332f-a096-4246-a23c-1b219bbbafc7", + "57b51f3e-02ff-41c5-9c38-8ece35b5730a", + "7a1843d6-ee01-481f-acdd-e583b14a0eda", + "b6492586-2c07-4bd0-9c5c-975fbd0129df", + "b5e30e1a-57c2-4f00-bfa6-58af603f51dd", + "a7e4b8cf-a242-4e0d-ba28-72260578c437", + "e617b83c-769f-48b8-9ebf-464abf8b4201", + "bd3ee932-7358-4f92-84af-2892c09f68f0", + "166e96aa-6049-4b77-826c-e2fb97e9f39b", + "27b5dad8-fa92-47ea-8411-303d87498754", + "c082bb2b-b486-43da-b12e-e572a0621182", + "0930571e-ce4b-4711-a525-89f8ddc444fd", + "b033cd40-eea9-4c5b-a17b-d1fcae17f5c1", + "0d66e9a9-bf78-42c6-b34c-2a1459f82025", + "aa4cd115-9ed7-4b37-bfff-9be015afd68f", + "443d1d13-aec8-4df6-8654-58cc3d8def5f", + "4d99c520-30b1-4ab9-beeb-5a5939d19598", + "ddf87095-fada-4895-9f66-1b00af316273", + 
"186d2617-5702-48de-9c2a-81f27afa8ec1", + "d020a4cb-b21f-4cd3-882c-8dac49eb5d56", + "cd6db2a1-2fc2-473f-b30a-d04d03a3b166", + "0a3f7aae-a63d-4728-8595-b0851546bfbd", + "a6f7699a-e920-41df-b798-bb6c67a74300", + "d0fd379b-c61a-4dc5-80ff-63c386e9571d", + "45264746-fa35-459a-b4d2-6cb6c9bf85bf", + "80add2f1-fff1-4e06-a871-785d713aebf6", + "fcd7044a-8d2e-4df7-8a77-129b7890553d", + "a3fedd53-7c86-459c-8dd1-45b74f4ec7d3", + "5d5aae00-8edd-40d6-b141-960bcd4ef92d", + "c1035bc6-92e6-4687-a586-a46093b5915e", + "d42cf02e-7f66-4254-988f-2d35cf9006a1", + "7442a328-ad5d-4197-a669-5465ec7d4972", + "89fa2f7d-ddd1-4ee6-992a-3cfb588ad899", + "77272f25-411c-454c-bb6f-a79d52e727d9", + "9263c2cb-3fb9-4936-9ac0-d88858098f88", + "6001c08b-9987-4ff0-9f64-7b7b7dbb1cc6", + "1ceb18c8-6e2f-4a63-bc23-7a520c140df3", + "59a69d25-cdee-4390-b4f9-d5f13437894e", + "00c3fe3e-f9e8-4953-b9aa-9d1135f1f4b9", + "f6071eb2-b8ef-4f5c-be4a-b471aadc40c1", + "1600837c-3665-4474-9f7d-1467cce15645", + "14fd6447-9357-40b7-b882-2242e24c0f74", + "e1e9d8a1-bb9c-4050-9ceb-516386c1f3f6", + "dc4ac0e9-f798-4386-88dc-7646e2cd3b89", + "6d0d7a37-88dd-4f3c-b10e-3646101a7416", + "c1ba4198-d30f-4509-a9d4-d37373bf4186", + "29a7cab1-bd36-4c01-8de9-31c5b4991bab", + "5ee7dd6b-5d89-4661-80b1-c01352bb13ec", + "2ebcd848-d31d-4276-8c42-7066628e6d17", + "c7a0362a-840f-4ad1-8823-50c84da72401", + "c76efb9f-1155-46fe-9ebd-e4662309c9b1", + "cd30017f-0e19-4d42-a38d-7358042a4f0b", + "5d3c6ea8-d4a5-4031-9c87-a2a4ad33ddc7", + "8a60d5a1-983f-42ec-8192-c7c802c3013b", + "21d082c2-b0af-4d32-a377-80dbccf78500", + "075ef1ca-cbf6-4d78-9ee3-d974cce7781c", + "05e86f3c-ab9d-4726-9422-54f5806ea52d", + "1f566854-5816-4370-b9c9-2e50ab14379e", + "2a18c138-0ad7-4e37-8182-8cfb6ccc94c5", + "106aa3c9-0660-4b9a-8bd0-713f0f3a5745", + "73bd5559-f082-49b2-8a09-2a74404d6ca4", + "fb78afcd-a21f-4841-b0a1-838bd68ef6c9", + "1e9d1b09-da52-42a9-8d6d-e040234b2198", + "5fda9537-3d93-459d-9be0-63487faebefe", + "bd31a6d0-6146-4d09-81ce-a98f4957602c", + 
"99079364-ddbd-4c69-8ccd-945e052bedd4", + "21cca40e-b601-4cbb-8af2-9d75fbc53f5d", + "2cf768d9-9bf3-4660-98fa-5fc8f8683eef", + "d25be2ca-9f46-4fbf-93af-5f190848a5e8", + "0520fe87-dc0b-48a3-89b9-6f61d2925f2b", + "f6332e03-3d78-4e20-8423-c7583d9fc878", + "2f02abec-5584-4e48-85a8-1118ffd038c4", + "0ae29b6d-ee07-40f1-a570-319eae3a2276", + "7af2b757-21c9-47a6-b438-6734cce90164", + "f4e39c67-d90c-452a-8e26-6c65324d412b", + "7e89bf38-f633-4be4-a261-90b709ad6072", + "611cb062-0777-436f-8497-3f825186b9a7", + "163eada4-12ed-4805-b040-896239433808", + "257d2e97-6235-469e-945f-1f9d5f7cdc60", + "edd141cc-e2f7-497b-90d2-b520a464d031", + "3fd3c87a-d96a-493e-9d15-c12d9c1d75b2", + "c75abd31-d580-4b19-9761-8a647e2787a5", + "d446d9e0-21e6-435d-9906-d70da93c6b5e", + "d0f3cb43-7939-45bf-b8cc-0db3fdc45832", + "65d370f9-822b-4589-980d-0c777d54115f", + "f0a2e025-f49f-4019-865c-e5c4f74df0c2", + "59bf65ba-e856-47f9-b388-92169fb89dc1", + "ffb298bb-8e8d-445a-8101-9a6af6d01ef9", + "07439776-3f09-4939-a46c-0983a236c23e", + "a9a216bf-f5a4-4098-be5f-0021ed6b165b", + "1c48dbdc-5309-42dd-a8de-f22b39231d5e", + "44c27e72-124a-4b7a-99ba-d19f32d64bfe", + "b054c892-3dc4-484c-94ad-b5170545f268", + "7b7f9550-4bc4-45c3-a7b6-dcf799dcf2a6", + "5b65aa8f-fbeb-4a82-ba80-22d1172a7bc6", + "ce86d9d2-7e89-4a0c-9929-8ac6b42debe3", + "a1227851-a5d8-4fb3-8d85-aeba4c641d76", + "0956d286-ec3a-4531-853e-968d93836bb2", + "1ef53e86-9cf4-4e09-8c3e-26bfcebcb501", + "b8dfcef9-883f-45a5-987a-091a237c5267", + "87f51c93-8615-4241-bcf1-03aeef9163b8", + "cfb3507f-7439-470b-93d5-fe89ec7dcd92", + "b54951db-7356-49bb-81a1-4b462dc1799f", + "2260d8d3-c07e-4304-b064-bc10149f4d1c", + "b410dcd2-2608-42a5-b2e4-6ed9cc2aa5cb", + "fc318fd6-1a3a-494e-955a-fd8fb3ec5435", + "201aff83-155d-4d1b-94fb-c367834a7672", + "e5a0d3da-2d8a-4f67-bc75-eecd3080db1b", + "64d3768e-de91-4088-932f-673896741b0e", + "ed2b4980-e7a4-44da-9641-e4c443fc585e", + "2f5e80ac-2d36-4202-8ce8-f93f82fb7fac", + "38225065-89b3-4c8e-9ae3-ba08d82776d7", + 
"bb726175-bcbc-42ef-a901-49efdefb8a2e", + "8c1190e8-a940-4b6e-a743-30eb363d5511", + "b7de243f-29ab-4f8b-b103-b69f9030a781", + "940a738a-a6aa-4b71-8dfa-f95193e5ca98", + "db9fc63a-9888-47f1-b39d-b4718dd27634", + "ea86d54a-b9d9-4435-b90d-4d4bb79c3064", + "287e0cd0-b1f6-4ccc-a5da-c52fe691f69d", + "436c55a8-e9d6-4a3a-bebc-054a0265b92e", + "f19c70f8-dfeb-4035-ae35-24653fff12d3", + "41861fc9-4c65-45ec-aa55-9a6872b51a39", + "c0c4d4a6-fd98-49fc-b067-8bfe0bfd31f0", + "f964c3eb-2453-4c47-a63c-747f10484052", + "45d9e222-0c81-4277-baef-163e461fa8a8", + "d3961d64-15c5-49f3-a5f2-0254867cc1f4", + "3e6c051f-3059-4e6f-90b5-9d2f685be6c6", + "91dc01ec-735f-42ed-a754-7bc00c5e9a1a", + "0f006f49-5432-4921-af8a-a34a6fb60d6d", + "a47fbcbe-daa2-4659-9f16-7d407894fe8f", + "593ed27f-d86e-4828-a8be-1af34bf6793c", + "f62d8299-43de-4bf7-82db-5e0ea161cd30", + "ea3113e9-1a34-40f5-895a-573e2efb2a9f", + "39fd6979-0bf1-408e-a825-6d1630d2545d", + "0ecadc3c-4ca3-4614-9bde-cfc993d3c3c3", + "bd0f12f1-74ca-4faa-8d6d-f631b116dc37", + "44adb254-c2b3-4efe-95fa-6495b76d6f07", + "6cdcc36d-2001-4270-b3d0-a22388329661", + "83474f72-b630-42f1-b84e-fd28fd35eefe", + "da1d017c-9cd6-4e4d-8021-e19300383768", + "4769a5a6-12e8-459d-bb99-687c019a0d81", + "0b8d95ac-cd6d-4792-a1a0-d98633a9f1f1", + "c6a15276-df7b-43fa-9e2c-b53dfd80beb1", + "c7e43df1-014a-4a1d-bea3-dad69623716b", + "731a6820-b220-428e-9f5d-fb5eeeba32a7", + "c55c4a6f-30ef-4bd8-abb0-6d038b83503e", + "6e4ebcfc-ae88-442b-8eb7-2f59ffa56197", + "72e47185-ef51-4edd-bf3a-7f98675f69a5", + "f7290c24-3f15-4cdf-9655-ea6aa69077bc", + "fa41e0b4-6d51-47e7-9e9b-83c17dd638b9", + "d79e7451-aba3-445b-971a-6e3c13d5e0d5", + "d3f577f1-78cf-47e8-b57e-6967ed4d91c8", + "9f43f054-efbd-4bc5-ae13-04e5c62600b7", + "685b03b2-878d-4e39-b420-39b5b3e4a160", + "2e619daf-8795-4ee7-ad9d-0ed0f3a47c35", + "f1523362-3421-4f9b-b39c-05db95630be2", + "195b5853-3944-4089-8a8a-2c8f03944625", + "0537dc4c-9660-4f08-b024-3d3947df69ad", + "fd418d7d-416e-4f93-bc07-1ec270cf8fff", + 
"c6e76844-aad7-4236-b079-71c45c4deb29", + "87f9b9d3-0594-448e-a30f-4bedbdde50f8", + "195c072d-c77f-4ab6-aae6-0332f56e3350", + "1f1ca134-3f47-466b-8804-76b75d82328c", + "24a0e675-3c35-4015-a4b7-6cfc90117ab9", + "cd757c01-3b04-4e43-884c-e216a8d4f95a", + "dc1f54d5-0158-4345-b964-df13986532bb", + "5b6a7b0b-929d-4274-9b63-26cad3ad3e75", + "30f21faa-6591-4c13-a1d7-fd1332b08fc0", + "cb993eb0-37de-49e1-9e1b-c10391e7a1e5", + "f3c8059e-6e04-45d2-b8b6-920fea3c24cb", + "b4d0e09f-801b-4ce2-96e1-eeb08442aa6f", + "c844f5ac-40e3-4fbe-8df4-37427fde7311", + "275cbbdd-73c6-4e62-a0d0-277269a578ee", + "da141dfd-32bb-4339-9659-c15a7df985c6", + "d008edab-f6da-429f-b54e-8ad9bbd8400b", + "116f9eb5-053c-47a6-8475-f1a2a056cc6a", + "238f1632-18f1-4c1c-97f2-01b3aef9a843", + "07877e76-bd66-4559-bbb1-caa9d003572d", + "9f2029a9-d3e4-4c94-9eb3-507b9763a41d", + "b5d84ebe-cc31-4485-8e55-c3ce3104ac61", + "a91e544f-edfe-4105-bebc-2398fb9e81db", + "3ad0f04a-c091-49f2-82c2-59adff77b63c", + "9de0e097-aa08-45cd-89c4-67aaa9daf529", + "feef204e-c100-4c64-adea-d38a087268df", + "6bdab179-dda3-4911-a00e-0563247011f2", + "43f8dc59-af61-4580-ac5a-3254cd82b373", + "ea5cc42f-dd5a-4468-a8ab-2b9766b6128e", + "3d87209d-dcc8-4e55-8c3f-0abbe9a80453", + "b976dfd0-4f8d-4717-aba1-f7f19f843b54", + "1a90d235-8fb3-4bea-8506-980f3b0eefd5", + "b4f7d95a-7243-400c-b644-c5fda6b730f0", + "7aec79bb-967c-44c4-9469-f87290bf1300", + "82d54a60-1dc6-451f-81e5-3f9c44591e44", + "e66363c1-c5ba-4c14-b352-74da981e5a88", + "5a50886d-15c1-4186-9f5b-1018be9cabe0", + "dcc1e167-8cfe-412d-af3d-4a2dd64db408", + "0c027fcf-a4c3-4e19-91a1-dd47e7ee9928", + "88176c45-9de8-420c-a4b2-a78f0579e20d", + "7abc3774-8dc6-41b6-b38d-9935df1717bb", + "bb731c8c-d370-4122-ba41-79658201cf92", + "b6497351-9096-4f34-82c7-4ce6839a7d17", + "fb49852e-4b29-4c4b-8505-b35e107e96c4", + "c0a696d2-4c19-4edd-ac7f-ea28b179790a", + "8487e759-92c5-4c08-9c92-035c11fc0e35", + "a0a1b0e0-d18a-40cf-b984-1d7a7721b33e", + "45522661-d60a-4818-ac4e-e800b5d0b82c", + 
"2a75c21f-9696-4fe6-a3c4-7987166f0bcd", + "e1ef5510-b2d2-4494-9fe3-3e2722af47e6", + "024a4107-03f0-4002-b25a-4d2ffdc4ba82", + "53535703-2dba-4dfa-9747-fb94768ba567", + "ecb64f65-0462-4df8-9206-93818fa332c0", + "e94e496c-da82-4bc3-ab7d-62bea9265bce", + "58d39e36-7ccd-4de0-bcbe-25c341bee8e2", + "bed0e1b1-32a7-4874-b27e-6e1643017a38", + "deb586df-f25d-47e7-8884-cfeaac9f7835", + "6e91a779-6dc2-436a-8168-46bf48986acb", + "ed10d918-4f17-4286-acbf-1055acb2422a", + "91a085c5-7962-4713-9d33-cd500f512555", + "b2b5557d-f68f-48cf-a603-c2ddc5e26473", + "9c725196-f83d-49dc-9125-6eb95b865775", + "06cc10c9-2a58-44dd-98d7-c4d195e06818", + "a927a199-62ca-4523-a695-b1ae08d23452", + "1b7151e0-62f7-4b01-b5af-b2061cb8c023", + "819d9f70-08b2-4b54-830c-eb7675fa530b", + "b44b5660-14f4-4ce8-9f49-6004fc4044d8", + "c469067c-c739-40aa-881a-b09a325001da", + "358b6af9-2204-4b0f-bc26-50a8c88f2689", + "be06afd5-755f-4528-8ba0-e6b8de5d791a", + "566fb9c0-b744-4553-b12a-9381a4e309b1", + "91806878-de13-4aa2-9768-b76a22ba2098", + "eeacbdaf-7efc-4951-a86e-81242dfbb778", + "53308fd7-d1a3-48c4-8610-0030cf9a054f", + "9d76cf5e-55dc-43dc-86c3-d1a3c40cc36f", + "3b1b4894-0015-41da-b6d6-af0f70c3d8d2", + "64d21e4c-746d-4737-9a23-522c165286a1", + "9615b0e0-1ca3-488c-98e8-463118fd2ec5", + "5c65db36-057e-4c07-963f-bfc4a688ca53", + "5ec0b804-b6a0-48b1-9c58-928b4e03bacd", + "cd1c6279-7185-4172-a131-2003de8f299d", + "82810966-c260-49c2-8dea-4ced82e849cc", + "667379f3-27e1-4976-b9e9-942ed5e68b5b", + "206298cd-0c52-4518-8704-3009ce533839", + "90ecb328-4e47-473a-97c8-625b3be62c07", + "8f76c7a9-011d-4997-826a-6c3ecfd741aa", + "e841e686-b7df-4ddd-a71e-f677b7bb8860", + "0ea0d0b2-a2a8-4431-b43b-1b00b1cf7442", + "73a9ba87-24a4-4c08-9e84-9567649065d7", + "47a21d69-156f-4f0c-9015-329018811a82", + "aab87d12-982a-4d32-acd8-98e7fab9c1bb", + "de5cd000-cf4e-4f9a-85fb-2b3af927d2b4", + "9419697c-516c-46d5-8d85-293c35866e1b", + "2d266105-3439-41b3-863d-5f6dc7004e10", + "b17fcd3a-3845-4a63-8d65-9c5822401754", + 
"e4a88e20-32ab-44d3-a009-f1496206ecfa", + "0e6cc62e-a920-4772-8ece-7617e9331b2c", + "04354639-d44e-4b04-aa77-e833df17c081", + "a21fa92c-a2de-497d-a0b6-51480b26e7e0", + "5f1b1b07-b737-4d9a-8f74-f9f2be09bb3b", + "93a10235-50ea-4d15-888c-f876c2987592", + "d60446d0-fe60-4d4c-9070-755a64980799", + "70d58405-248b-4ada-afe2-730d9e818f1b", + "4cfd6b68-4c06-4be5-94ba-8242c6f4451e", + "79774624-cfd0-4407-9308-2e288b352fbb", + "f1d924d7-c0f5-441a-86b7-0949a863a221", + "797a587d-c5db-4074-b099-2ee36bd4499d", + "c06b414e-66aa-499d-b353-9b2ef79489b0", + "5c335744-7b2b-40db-b290-1347ff2e1084", + "53edd7c2-e847-4faa-a58d-97de200603a6", + "dad128ce-ac47-4ec4-80f4-23db53be3ac7", + "727ff6ca-d0cb-47d6-922e-65b3f9383857", + "11777c55-2cd2-4138-9127-1e451a9752c3", + "bb16809f-932d-469b-b3a0-eed0ca4f420f", + "04c1a977-c830-489d-81f5-ec9c1cea8d80", + "effc2780-5f9b-4a0c-8f8d-10cbd2351f86", + "62a0c516-adba-412f-987c-c18ee620cfbf", + "906480d1-ca7e-4b79-9cae-43865aecbbe3", + "fcb7b0a4-ae11-4f4f-b247-20a3a50f9d70", + "9b579d60-0b73-41fd-aa76-56ff4757d21b", + "d7b6d479-a7bf-4a28-87e2-0a8ea329863a", + "4211be86-8cfe-4861-a864-4f600ed5ab70", + "55d812a5-ebc5-40bb-ad60-6a2f737a8866", + "581b1c5e-a8cd-45ad-89f2-7b77eef1bb4e", + "80981d99-099b-44ef-b823-18ae9614495d", + "4081f725-cb43-4ef8-82b4-563772d06396", + "71fb535d-2334-4ad7-95a2-d3120ed2f3de", + "a602aad7-348e-4bcd-a981-f6cca2cf5618", + "86d24e3d-f110-4dce-bfd1-3798f6941ed7", + "3e3b1ba6-37bb-48fd-afe3-183fcd689687", + "59b6ac73-a1db-4525-8ec4-12cfa2150b4c", + "00988880-6a05-4288-ad5f-a837cb546383", + "b3a0edfd-b4b3-4266-a3bc-8246dcaff3eb", + "6932db64-816d-4acf-bbfd-408ba55b944e", + "f3b63c1e-f8f2-484b-bbab-ac13c9a747f3", + "0d55407a-3fae-48bd-b57b-f00ea68aab5e", + "acc5b913-3abb-4af2-bed3-98f02080629a", + "965b178a-03d8-4833-b9a2-2efc9ba97789", + "c8849011-dcc1-4522-8cc5-a40e5445cb2d", + "3d4de967-6273-4053-bffa-4e3c3a1423f4", + "4b34b7fa-0cda-434e-9a29-6cd954c32493", + "82c3066b-889f-41fb-823e-7ed749482c3d", + 
"a444a43b-8b38-438c-b677-b2b74dd326d6", + "e1b3441a-8869-479d-aeb4-d1d9382ae29b", + "1ef7210f-4e13-45a1-96d8-38979182c2c0", + "c8485d83-d7ba-4a4e-8026-4dfd14dd3f22", + "d2116caf-5db5-41f4-b5c5-d668ae628b49", + "e8e118d4-b657-4db0-9ef9-7ed622b15aa5", + "1bc93e6a-9b7e-4245-b702-85f38018b7b2", + "df02b115-e4cb-4039-9759-c7ba218f57cd", + "10d7a00b-2ea4-4c92-93f8-9465609d1b13", + "220adce2-f1bf-4510-bf6e-082f8abdccfd", + "462cea9b-d8e5-4e5b-bc34-8f05baf53942", + "54743811-0dde-4ac4-a9cf-afa76dc53f91", + "d713d36c-ce8a-4f42-a51f-41b69adb98f0", + "2a39cf04-cbf0-4bc9-afc8-d0a67ebbde2d", + "b2eb3e4c-7232-4fa0-b33d-3a90183a6494", + "95546a0b-2899-4b8e-b02c-27551d025a0e", + "298556b5-3204-47a4-9310-9cb051b06b93", + "9b16b778-54ca-4b8a-aebb-dd2f5951607f", + "85b83c20-2a73-4a1e-a1b8-2f140c86b170", + "c5adb920-408c-45a6-a853-d1e4decbb344", + "9abec293-bdb6-47ff-b78f-223a37a59c0c", + "048f3ea3-4a32-4cd7-9dda-1ba287fee388", + "cf019f7a-0bc6-4636-81fe-d7abd6257b6c", + "80181c02-e4a8-4ed6-a34f-70d7be5eb72a", + "48389171-7dab-4171-8589-b728c2a57dff", + "77b866c2-0dcc-460b-af6e-34b1de695ba8", + "f905c98d-8c6e-42b2-a9cd-47d5e96f0763", + "c5ced9a3-6650-41fc-a63e-6d7ac4e5a20f", + "6559ade9-0e46-4e25-8dfc-86f06a85cadc", + "d7026bce-2925-4f82-88fd-44ac7ecec953", + "fe0ed8c7-558b-43ae-a3d2-57d8ec9a7d0e", + "b871fe0e-9095-47b1-abd6-e7453df7e423", + "da1e39cc-f2b5-4147-9c17-8f2b4f2e6329", + "fc7385a1-4243-45a2-b6c2-9ec73f6aaf2a", + "373abe98-3ef5-4105-a4da-9067d4fe168f", + "25441069-2e87-4f93-9de0-022c664ec000", + "f412bfd6-f40a-48b4-b97e-457290ba9dfd", + "32ca2927-1953-41d9-85f1-857bfe169654", + "01feec77-baf4-4ea2-8ed4-ca8287d5fcf7", + "ce1a44bd-2125-4a2d-8edf-22ad1928071e", + "b8ce082e-e385-4198-923f-e7c020edecde", + "0c657425-3848-41e2-9bba-2a265fbc060e", + "12ee157e-d1a1-4395-aa37-7419ee31886e", + "3bf62d2b-aad0-437f-b724-172b2c03f1a8", + "5cbc2fbd-70f8-488d-8bdf-8bea63a703f8", + "ca6002b4-6925-4b22-8b06-a78934e6870b", + "6e184eb2-7e8e-4ee7-8c6e-b75eba9fa11e", + 
"a9451466-b17a-4069-b303-70ddefdc5679", + "b7d388aa-b88f-4e6a-90d5-e8438b9ff7b6", + "249ff2e5-44bb-47aa-a837-8b69a8c107b2", + "214d7a54-049c-4bb9-9432-8e63325c64eb", + "b4d8adfa-ee4c-4356-b67b-daa173ad353a", + "31fd2b8d-9c47-4e46-b5d1-87a749d4b948", + "216daf9e-98c4-46b1-833d-4ff2bf040c31", + "0c65ba93-de07-4563-bc89-616758aa0370", + "86aefeef-e27c-430b-8030-6bee5468dba0", + "896b015a-6fc6-4bd2-9a70-32eabcedf3b6", + "27565dba-a982-4f49-bb85-a95195cbea4f", + "03939242-d237-4f64-8b97-c82dffd2cdc1", + "c72cf5ea-dafd-48c0-8688-ce9f08ca75be", + "642c2064-fc48-4f75-8875-9525bc618814", + "c9833b20-bd11-49d5-b194-e65df0d411cf", + "8a5e5fda-b663-49a8-8cdc-9e87dd4391f0", + "c56027e7-1d16-4bbf-893d-558f75d767e6", + "06ba657d-c99a-4bba-8b74-882bed79d735", + "75433027-9355-4cb9-9dee-5938bffe95aa", + "48b1887a-cde9-455a-9665-9ddae4e6dc2b", + "e3bb1b26-70e5-4b69-871f-eb88d5ca4ba2", + "b7b9b723-4eda-4699-8a62-8a4b5f2cb8c3", + "d31ff0c9-db76-4985-82d5-f746884e0b85", + "b7846ab9-50cc-486f-8f6b-c5d92ad11f74", + "ff28e216-e7af-4c37-ac22-2bed797ac6b5", + "d584a32e-eba7-41ce-b9fa-36dea1e85877", + "42de1533-e729-4ebc-8766-de1b5a092a50", + "1b0608d9-b317-4ebd-a60c-77c8a5252a1c", + "dea84c32-3974-4855-a1df-72a7867d8087", + "d94f948e-c376-4d75-b1f1-b8ac88c983ba", + "828906b0-a9b0-44db-99b8-865572cd8efd", + "b7aa992d-7540-48c7-9436-20b0f4d3b2f7", + "6f76ea21-524d-4077-9b11-25f2a7a199ea", + "381a9088-7c23-4522-989c-0802bf8e7188", + "1ebaf8ef-fcbb-41aa-bfe3-24d19ef0611e", + "dab65420-4783-4d9d-a1c1-ad7baaf69b73", + "d9270df4-3088-4cf2-a252-16c3d23316b4", + "29d6accf-811b-44cd-94b6-69e94d0bc34f", + "a9fe7293-2bb7-4e4f-bda0-751941a4861c", + "56e71497-6a0e-4416-9ac2-75fce6db4b33", + "a99ca3c4-ce49-415f-ad9d-a3b912656db2", + "83d369ef-ad0a-40df-9e83-7c5d3f7ada70", + "7987e427-1c6f-423e-923d-be7bc6a0e0ed", + "a37e5844-fbca-4d9d-95d2-0645603c7c85", + "bf8f20ab-e679-46c9-96ff-e00b21a15898", + "4d1d5a93-d1c3-4550-afc2-2f8be3f3d425", + "3a854bf6-ba58-4e06-9457-1cc9f45648e2", + 
"7884b5ca-824f-4e64-aaf4-5dda0d4f2f30", + "5a2510cf-6992-43ff-b84d-8990e189e167", + "7ce4cfda-0bce-4f23-ab47-64b61eddd80a", + "9c676213-cc6d-43b5-9eac-6f96a025c26c", + "81ba7f25-5100-4947-9067-c4e1460c51ae", + "65e33299-915c-4275-913a-32c0ab13edf3", + "834ddfff-459a-4c27-8778-a1b0feb0663e", + "796921d6-e7d2-4a32-8fa4-31b2a083b11d", + "7ef093c6-aaf6-49ed-a6d0-daea5ec99d4f", + "5b3647c2-c98f-4004-bfa6-eb66cc9db737", + "65997697-b7c8-465a-8589-7302bf4ade22", + "7dda4a75-345a-4ff0-b29a-d5fd2bd57be8", + "01087e95-27f1-4546-8906-d0fe270885d2", + "62c8bf9b-d726-4fc8-806b-0cf6128d5013", + "cb076de7-a2b3-469a-a6a7-4b7fcca6ec83", + "1a4199ff-d794-4e27-9c8e-588b3829cd88", + "97fea6c1-d1f7-42ba-b802-5b24bdd8cded", + "7a51f84a-4499-4711-82df-101cd88c94a7", + "4990d796-e073-4b69-a03f-d021b07534f1", + "c9198f2f-3431-44cc-946b-33a7d269ea5f", + "9c0cde9b-2104-4569-af7c-3beee0ceaae4", + "0a86624c-6084-4854-8757-c52ed3e893db", + "1f33a4bb-226e-472b-80d5-17072ec5f199", + "4adc699b-c20a-40dd-8746-8eed531778a4", + "53ab54f7-091b-4d15-a81c-bd2e32f1b5a8", + "e39a8d52-453b-40fe-b5df-fe89c3e2995e", + "e2953dbb-28ae-4e33-9f3a-11eac25b7f04", + "86f6521b-459f-4758-8918-1fc319c8fc39", + "2495d9ce-4f26-4136-81fd-d7537018606c", + "40f6fa02-1884-45ae-a29e-1144342175a1", + "5a7873ca-1155-4653-ac80-86def6293889", + "482e18a0-7593-4a19-bf07-cdd02204cd2e", + "cea9fbd9-4c50-44df-86fc-41d5986d555b", + "83af7912-c3ec-4b07-a36a-4f2faccd3543", + "0258cdfb-33bc-4614-9dc0-3ce9781469aa", + "c5bfd828-48ab-4622-ba13-3e59f38e8eda", + "0212a7d9-56ae-4aad-8f51-f27cc612a017", + "b5074997-3f41-4de7-9663-19443f5b1a6f", + "d0f93743-818d-4bdc-9032-2c39abb015b0", + "b11deaa5-8a4e-48b8-8d74-c05e1f51d6ba", + "4d6ad1fd-f547-4d92-9c99-eaa1a366a27b", + "34658feb-983c-4d84-aade-1589ad24c9c9", + "0dd06f25-c34e-427a-a0ab-33ad92654625", + "08340a35-b97b-45f6-88b8-6edc2e6b930a", + "30a3aa1a-4612-4d11-a540-72b5ffa55712", + "73aa151e-f79d-495d-9221-f53433628135", + "38980b87-0484-4015-9e97-8043aa2f32d0", + 
"18cb5586-6b57-48c0-93bc-f260c4567dd9", + "173a40e2-9038-4c27-a350-d66dc5c512cf", + "9c5f7d8c-dd16-4cb6-ab96-8789aadbd80c", + "e2eaaac5-84b3-463a-8553-a6dd09392c2e", + "e1a558a3-64e2-4690-9f91-26078fc2695c", + "e2abb082-d1df-4a5b-add4-3646d181d1fc", + "8d5f606f-1dce-4737-9b8c-46708aed27f5", + "d9646d59-2650-49f7-bde9-a2cde0ae621e", + "dc1f6c36-ae6a-4fc2-a5fb-82c214516be6", + "dd7ac5de-b661-4256-af4d-264e27dbd002", + "70d2c1ab-e3ea-4880-bc84-bc23fb0bc7fa", + "892cb8d1-dbff-4a6c-b3d3-248f34089aad", + "4708aab0-d4c7-4876-8824-447ef234ffa4", + "45f49a71-802a-4502-94a6-b7ed12e57c99", + "23779242-97d0-40ae-9631-ca78e79266be", + "6d5c332a-6758-4c21-8576-2c552ebf88b4", + "524c6bbe-76dc-4728-be9e-5aaf32e36fb9", + "b243eb71-dceb-4af7-9c47-2267b731372e", + "46d8b6bc-af0f-41e9-86b9-e2bb87a9adcc", + "bd720042-dd03-469c-b91d-39d37d8ce966", + "90e32533-6698-451d-8bad-54186b5ca35b", + "77e41115-a487-4768-89fe-6ea6c8172845", + "42563ef6-0354-4a3f-aef6-28eb5877e0b4", + "c5d5735e-7557-4a86-9585-e1d743090e11", + "7b7b5ff7-acee-40b4-bb09-f37d26ba5d48", + "6677d89f-206f-406b-a22e-43f65525d6c8", + "71d99d1e-46c8-4eb8-8196-7761f4e90f9d", + "ef88ff39-3ecb-4764-a04c-bbe9cd0d8236", + "88f87fd2-46a2-422c-a774-8f150997dae2", + "0723f367-ad40-4d7f-8a4a-3eafbfcfb3b8", + "3c141a49-eb8f-4387-97a0-1200f08549f7", + "b8a8e67e-32f9-451b-87d7-747ec8eb78c9", + "4cb856e3-66c0-455e-8822-0c2b2fd0ab47", + "6362ab9e-d31e-4a9a-8ddb-f36d4705ff09", + "ad408dd1-76e2-451c-beee-5dc04f5e577b", + "437b9e6b-c50a-4f0d-aded-a5dd89508b80", + "f5af155c-73b1-4d1c-84dd-e4d102320081", + "72175639-2f34-49e0-9517-43f8f5315f46", + "5b443f5a-cc92-46e4-9ae6-331dcaeeb944", + "26f59325-b368-45a4-bb5d-f2aff58acddc", + "8ecccbd2-50f6-4cdb-8820-b13a2b362d99", + "6e913fa3-3981-408e-83c0-3aa2074043b4", + "414cdac7-b2ed-44f2-8876-69f9bb6d0587", + "fb12e9af-fbc6-45e7-9f2d-3d51e5878524", + "f8dca192-0de6-42f3-a7a4-e2105273ee01", + "4e57c198-75d5-4fc6-8771-443e806b446e", + "8b757c09-fa6e-4ba4-864e-5a2a9a126000", + 
"9cd97ed6-8af3-491c-8dfa-a44c1aa1b1d3", + "048adaf6-c62a-46d0-a9fa-e6a99aadccca", + "51d7ed3a-b168-46af-94f7-a459ebb15c66", + "b8cadf0b-917f-4c91-92fd-25648b6c136a", + "f388ca91-1894-4696-a603-f7800d9a7248", + "df437ddc-ca74-44f3-9cb5-a6411f4f1ac6", + "5ef7f928-0d69-4be8-945c-aba230a7c4ee", + "5022a280-6726-4a73-ba29-89ba1d891be5", + "0a9f8d4f-c5fc-446e-864d-2164e28baec8", + "d1b60821-ad52-41bc-95fe-71933f6c7090", + "980b6140-e338-4ca8-87f1-f2ff59d05170", + "70ddffea-40b0-41d7-a1b0-7412495ae118", + "916e5a47-ea0c-4048-84cc-0b63e3a8bfdf", + "80d5015b-0833-4015-a105-2cf90e6c3957", + "fbae9574-9396-4923-bce7-6e51fe7d5f44", + "38169a6d-e7c8-44fa-b1d0-63c429982956", + "1aa40968-7fc1-4460-b232-42124cc77f04", + "424e89c6-c7c3-4b49-aff1-958c1dd03ab9", + "68cd3c75-b798-460c-a829-6e482456f0e8", + "85f39cfe-0173-4645-8994-e1389c54c88b", + "9e3be60c-94ab-4442-a2bd-0e8d03bf57e5", + "f4560425-a748-4954-b6a1-310b4b0e2294", + "d50db7b7-8813-4709-86f5-779068c30f95", + "ab4faef8-7062-48dc-acac-2ca22ad61be8", + "39314da4-2bc9-4385-a9fc-ddba6ae6d778", + "9fd7862e-3b33-4a4a-9bb9-11691ed347e8", + "2d395115-ef57-425b-b163-0f4a4619c43c", + "576ac4ef-e5b3-4c79-894f-09b2bcb4ed77", + "ec8648a5-8c89-4982-bc5d-bcd6420ec735", + "0bbd0726-6025-43c1-b3b1-3441b339d5fc", + "927e0c30-e610-411c-9887-d17ae906ee07", + "1e4311cd-80b0-4263-b31f-0dde9cb614da", + "5f6d8da7-43c2-4038-b3c0-d167343cc127", + "4b8b4980-390e-437f-ab52-56f666240169", + "c9fa9505-e84a-4349-98e1-968d65095af8", + "bd0bb4c9-bc8c-4ed8-8f4c-bee991f11869", + "5bd8038c-e303-47f9-920d-9721d28be5b6", + "f32c186c-3d57-401f-8823-655e47f194a0", + "1f33769b-0390-4fc9-aa44-7e0bc054e6a1", + "11994f68-01b1-42a4-ad3a-b46c727451fa", + "f53388c9-2022-481a-b77b-a976a7bfcf9a", + "1a4736e0-ec3a-4a4e-908a-5bcd2cc02f14", + "8e05b5e9-2400-4b1b-828f-52378ef4f4ff", + "177f44b8-9403-4ca5-b7cf-dff3eaa26c67", + "2ca02dc8-3997-46e9-befa-03e5ebc11750", + "59e13b31-8717-4e1d-a074-67f0b54c3712", + "249eb872-8155-482c-9fa9-13dfebd1379d", + 
"9ce4375d-5891-4fb6-9e71-6aaf7f7dbb69", + "9e5a43b2-2769-48cb-a942-69c8fa347c22", + "143ab1f2-0bc3-404d-8742-94fce31bcf63", + "8277dcff-3b87-4eb2-b056-a6bc040d688a", + "c43ca443-63d7-4293-a217-429ab4ddfc7b", + "509332f0-f244-4b96-aabe-5124ebef5323", + "59128c79-e0c9-4071-8d0b-d6fe914ea666", + "17a4678d-8121-4896-89d5-3331655ca6b0", + "a7e78822-1707-4c0a-81c1-aa9b69a8fab0", + "6fa6d845-b1d2-405f-b0e6-0634abfd75ca", + "f118bf9d-7e52-4f38-ac71-66001f3df963", + "45d758f0-f0b9-4d7f-ac88-907eecac60fa", + "41e9553d-65c4-45b6-9572-138b06bdb48c", + "605d6fab-a592-4652-8ee1-d0b2da6cee3a", + "38b9f42d-7907-4073-bbdf-e308659389eb", + "d306a8c0-125f-4977-8660-602dd5d2575a", + "7e8a1e5f-ad74-435f-9c96-547e2fc91f72", + "2cceb47c-9444-4c0a-bc3c-91f776fa8a59", + "284cd66d-2e0f-45ec-bf97-43f8ad53343e", + "419bfe7f-36b4-4c9d-abac-8a1a6ef1f27b", + "ec9c73e6-188f-4abe-9e48-39b65cf652f4", + "da19a9ef-611b-43a3-bb06-1e4c343ee373", + "92a333c9-b50b-40e4-92cd-2f9cf73483cb", + "15edd554-2682-4b6d-b055-fd151d442545", + "539c25d3-2756-4601-bb39-d50f48c1d21c", + "6ec52361-5e9a-4183-ac7f-9e53197c11d4", + "85308af8-541c-4379-a9ca-7e2fbc591f76", + "42c00816-a3dc-4820-a362-beb288bf5e4f", + "c6d36d8e-9e5a-44a4-90fb-98621aa7911f", + "cfc02b60-b062-4acf-abc0-3a64f5353994", + "40482ef9-1c5c-4456-889d-ee8042d70553", + "1ad1d575-b011-460a-95f2-5231a95064be", + "e5462e8b-a7be-45a4-ac22-cb237e4e5dc6", + "c6f2d31a-3403-4fdb-b97d-9768449084e2", + "e2b9045b-d944-4d9f-8603-d286e9315af2", + "4c04e872-c7b2-46ce-b4e5-a5f93feda5b4", + "6b4c1b9f-ab2b-4962-abf7-cfa08e876909", + "2858988b-a701-4cf2-a8fe-58173d2c5075", + "a0ad0789-ed84-4926-b50f-a715e816bff7", + "b5efda41-3f93-4c13-a6a1-03bc769e8a4a", + "e9feb12c-83ea-4097-bab0-cdc75456d558", + "60e02746-b4c1-49a3-877f-7ae1752646fa", + "dc099916-89c0-46b3-87bb-e857335960ed", + "2f9efed6-c022-4367-9f1b-0b38258c20ad", + "da2bebed-23cb-4308-8786-b4a104e0ba14", + "2ca7aab1-e959-4985-885e-2627daf8b05b", + "9420bc4d-7bb9-409e-b89c-4237382e566c", + 
"c490a9c5-5e22-47ec-8b77-01121482561d", + "366db2f2-85e5-4168-bc23-06340fc2495d", + "2d2432d0-2b45-4af7-a796-13458e48f787", + "e4658a8b-42c2-4e37-aca3-49fa0ddbfa1a", + "f78a7679-bea9-40af-892d-53fb3976aaad", + "e7f5d0be-5dda-4b78-b55d-b65fd5c83559", + "abb7eeed-469a-414d-9c7a-e32cf2c60a0d", + "a95aaabf-ba16-4f03-bed7-fba3db34c8b4", + "89df1266-9479-4583-8c5a-e5f8f46f9f48", + "6ba05dd4-6095-4828-abab-6479becbce36", + "6787220a-3160-45f5-9da6-85e93b61d9f8", + "f32667cf-ff84-4778-9a63-e5eabd7300cb", + "33c07683-8c26-4b5a-9da0-26288690a6da", + "0856bce2-eb65-482e-bb49-bb98e0ec277e", + "c2de6794-4463-4827-aa3f-ec864d0b9762", + "36a41347-d301-41cc-b509-446232504ef8", + "eff88bb1-bde4-47f4-955a-19721b1e889f", + "4f81d1ec-48ca-4b23-8a41-93f7e749b4f3", + "27cb6e33-0871-4394-936d-5ad33c8a6fd3", + "cb56c1e0-be38-4fe3-b5ed-7acd35fbb503", + "c1c9f4c4-b484-42f7-8a7e-117e886b567c", + "dbbfc714-9c89-4daf-b607-59ba71b26226", + "f70a637c-25f9-46b3-8fbd-e2a417cdea4a", + "1de3ccac-22b7-4146-aa7a-ef72312ab01f", + "28ad050f-a9b9-45d9-9e0b-b8aa6229b08a", + "f3c9eeec-80b8-4858-9a70-0356ae42d058", + "3d34679b-d0fb-4bcb-b67d-e5f096222cb4", + "e785e6ce-0815-49e6-83a8-eac1b7508292", + "adb9957a-d505-4ad4-a254-48383a593b05", + "51da1453-611c-451d-8c51-541a064a3066", + "115ec64c-e6c5-400f-ad1b-917f61b3bc7b", + "f2609f3c-5a92-44d4-83a9-34b34392567f", + "37e8557e-f970-4c6d-9bc2-cdf1e9577c41", + "9a9fea83-d5b1-4939-8f92-77d323617754", + "6d69de87-1ee8-4617-b3fc-0e54844e6a95", + "e1afc477-073c-4e91-9a37-474814f8386e", + "1a9eaf9c-2787-43d2-b799-7caa2ba65f46", + "9a0a0ca3-eb44-43da-a488-2d071b866e82", + "b875da8c-1767-4b58-b543-6bbe11d65a2b", + "56a4c881-68e2-4876-aa78-13ccdaec9f68", + "ceeac5de-f068-4ca0-9f1f-d670fcd22d85", + "c9f9d029-5a38-4d9a-86d0-f604cafea4be", + "c527012a-14f4-4048-ae5d-f21e5ca9a8fc", + "4c74ed2c-c8c1-440b-b213-9a996d0500f3", + "b8ba6af8-27be-4ec8-ac3e-72311bee2ef4", + "6adc10f1-d384-46a9-b59f-c88cc89a2fcd", + "194adef6-a469-4f80-9def-f04e0d9ed55c", + 
"a6d38768-03dd-4d72-ae6a-7dbe230fd9c7", + "e501fee6-94d6-453a-a496-7dd7aaa13657", + "992e16fc-73ae-4581-86b3-3c535e1a17fa", + "9ca6b1c6-6342-4cc8-b99a-73678e4df351", + "c0fc16a8-ff8c-4970-a4b9-adb5697c5611", + "d192a834-7688-4293-a4aa-8857a7a30f03", + "9f92fb54-6802-4c73-a806-7818897a0d25", + "1c12a28b-a66e-4e69-9962-8460d340aeb9", + "7b8928df-690f-4d87-b0b3-4a7cc2197b0b", + "58450b0f-11fb-44d9-9d1a-7621d9c35594", + "c61613b8-6d9d-45ee-bf04-bd6f8ebf2a25", + "e158a2d7-e21f-432a-9c8c-27fa68c7e238", + "191a7c2d-22c1-4cf9-bb44-e1f6a0b80629", + "e40602b5-444b-4d10-a296-81d5d21e7daf", + "125a62ed-e809-4103-80ff-9b97ec44e36e", + "fb8e4c82-19ae-4c8e-bdf7-b1fd54600bf5", + "4634ea39-7d37-4f94-aa4b-15aac832fe26", + "3872fbd1-4a6f-442f-a848-df4aceeb2c02", + "4675ca8d-45fb-4b94-80bb-e7c4acaea845", + "45688b67-a91d-430b-94e7-2e26a6937e8f", + "729bada5-0086-424e-b441-938469e441a8", + "7304e9f9-a7d6-4439-ad94-85b49bd63eb3", + "2999caf1-8e29-4543-b629-88aa015662a7", + "bdc10a4e-83a6-4223-9eec-47e28487e618", + "b83a1cbb-9e32-448c-a19f-c9254c9a3927", + "5024730c-9185-41a6-affb-776a8be0d324", + "ccace47f-1809-45f2-8e0a-4f1aec6f11ad", + "1b2d9937-3d4a-435a-a944-3ceb680a3659", + "8b0bc62f-f863-4d46-b7d1-3376658837ad", + "8a17aa30-d7cf-4a1a-8609-c8837389fb71", + "bab67faa-0e5e-4d5f-84cc-5619aee39b70", + "54a5a0b7-ab9c-4429-858d-d30c9b9c8b9c", + "9937af62-f0b8-4c5b-9a0c-b3667260d366", + "cbf685ca-d53c-40de-ab8b-0c44f0a45a1a", + "c2083071-766c-496a-ac5e-cb5075476b9b", + "7c26419b-77bd-415b-b41f-1461c7ac8f45", + "4f226d1d-48a0-4c42-9c7b-78c3b87e72f8", + "bb190d17-d369-4f7b-a430-30a64f26d67e", + "6d2e2ba3-0520-46ea-aa7d-222bc41ad5dd", + "b7d2b80f-8605-40db-b5bb-d78706dd094a", + "9c20f667-a62d-4345-b84b-d493bcfcf988", + "59e68970-5653-440b-9c9a-7e5bb28863be", + "c5934d7e-50d3-4fa8-b63f-ce5991c79523", + "e1322669-eb01-438b-91ab-b9b03c270981", + "35cab36b-d55f-494c-81d3-87dafa97eebd", + "1d2fe7b7-37fa-43d7-8e70-9fec41a8245f", + "86c6c734-bc86-4190-a7b8-27368e75359e", + 
"a7c2806a-b966-4a56-bc53-e698e69024eb", + "1086065a-eef2-435d-b8ed-ca9130c9b24d", + "ae7bdd33-f724-4177-9bb3-f1fd56c07d22", + "825a6c86-3641-43bf-b7dd-170d8e9d7660", + "255a31b6-1667-4942-8fe2-68adacaa8f44", + "0bbe7d78-3d4a-4a75-af81-33e5530fdc36", + "5f8d02f8-036e-4d00-adc8-9c1e09856978", + "33c6d1fc-6ece-44c4-8522-d783f4b504a8", + "58bafe33-b56d-4997-806a-e771b9b25276", + "69518af9-0b50-4a0c-89f3-c790c8075a0e", + "230b148a-50a9-4ea7-9a87-1fa13305284c", + "d9ea5de3-b568-41c2-963b-5d1dbdeb8a14", + "a34dfdea-0bb9-44fd-80f5-2e4422ce2313", + "a13e4d4e-b348-4365-8e78-564ce0ae073f", + "e98ba588-e02a-49ed-b50a-40ffaa9f33ea", + "b6d3b30a-712e-400b-9cb9-9182acc54c65", + "67a69f93-4506-4f3c-976b-c619325c7402", + "26a9fd8d-311e-4657-a123-079a4affd55f", + "cf44d9dc-b451-42ef-8302-da7de07542f4", + "121f568d-0896-4ebd-82ca-e58fbf322751", + "ced793e4-1030-4df0-852c-affeb09c5134", + "a3493919-c84f-40ff-ad66-cb696480024c", + "0e121722-63f2-4af1-b6bc-43079e534f55", + "b6124931-8365-4f6f-846b-4d9bf74ff159", + "07e6ea83-205f-4fc5-94bc-1c6830904df8", + "95b48f5a-f5ba-4f39-b6b8-5bfa544deda1", + "61c379ee-feb4-4799-bce6-60529ec74b57", + "50f562ba-5add-467c-979b-4d4c43e12ab8", + "847ec95f-184e-4cf9-bec9-601441c0a9de", + "2922af45-3a3f-4279-af3e-21e311374432", + "3e1e751a-6e27-471e-a5c3-8b0c86bfbafb", + "34452e4e-7ea1-4402-b722-b99b323da721", + "ed36ece1-5c23-438f-aaf9-64f65ec14ae4", + "ae25fafc-78d8-4803-8195-65350d6b9af0", + "efb8c909-8dac-4f9f-a7b7-c1bbe4176cef", + "575549ae-2626-4926-a3e5-4f334268b45a", + "219e80ce-945f-442b-af11-c54fd5e1e76b", + "1112e886-c22c-400c-82b2-2638dfbd999e", + "de8a0686-cffe-46d8-9b3f-e137026bfc69", + "0a69994d-1a57-4171-bcf7-95bf3c0e6eb1", + "bfa8f095-9dab-461f-a40f-bd3f03fdd2e6", + "fa8cffe8-bdfc-4f2c-8cda-2bf233768310", + "9ed46099-eaed-44a6-80f6-94f0dedc1824", + "5729f9f2-f595-4258-80a9-a0b4461e7064", + "a26cac6a-57c4-49f9-b227-88d3e2e7fb24", + "496a72df-f7d2-43e6-bc27-658263e216ff", + "b7251c2b-cc43-4f89-adca-41e024e72bf7", + 
"a910e5a0-0a6e-4d09-a302-e2f9f7a41fe4", + "e4d5b17e-12e1-454e-9f85-1634f6a88065", + "38d3897d-1162-43e1-8d19-bca29b610b89", + "be966123-6325-42ec-bcc3-f0a9346a810b", + "6403bea3-f5b8-4667-ba6c-8fd253bc6c28", + "34d845ee-238c-4bab-b36b-59f3328170b3", + "2f10c7a4-3ac0-4bd5-88a6-9a3ed02a264e", + "17688676-f1ee-41a2-bffa-dd73396faf54", + "479fbe5d-d1f2-4de7-b23d-8cbc5a101ef0", + "2b5232ff-4b25-4a88-a001-2b2e421181c7", + "c0955b29-be7d-406d-8d6c-2e87b43c92de", + "4f852de2-0d0a-43c5-8e5e-d3c8349f523c", + "532b9236-ab68-447b-9e92-cadd92ce91a6", + "c848adfd-1090-4e9a-a44a-e431442458e8", + "8708969a-0351-49c5-86a4-c89e3c926a48", + "7d75de3c-cbf3-4d1d-b34a-d767b68e2173", + "a8df45b1-aec1-43ee-a5eb-aeec82661759", + "65885d58-b064-41e0-83e3-a7c84f52d897", + "e163002e-3273-4887-bfef-de2c5bf71423", + "c3908326-ed0f-44df-ae1f-2ed019b18a03", + "ccd980d9-bc24-4867-b55d-fb0275d14304", + "c4557fb6-077a-4bbd-9d1b-96e78b0c82c8", + "74ca8fca-2283-4a32-815c-00f9ef595c94", + "eab6ad8f-4786-453b-905d-83dd49afbe05", + "1c42cd50-4c06-4dc1-8448-a601c6d02c0d", + "5b9425e7-3c72-4aef-9c60-4c1e0f64420c", + "9fcb6195-79d2-41cf-9c48-802b7c2445be", + "742cdfb0-7542-49cf-8177-b11583a2102d", + "6ba45888-5b0e-477e-85c8-1c8a8950ea02", + "7d9eec32-2d58-4e94-937b-1eddd29510d0", + "61307804-223a-46c7-baa1-0ade379a054d", + "a688da52-5147-4029-bbd9-906e667ddb45", + "3436e6b0-714d-4130-a7fa-59f7458f70b4", + "90d919c3-f7f6-48b6-b01e-ef9e0f868479", + "e7f56588-a7f9-4333-966c-c1fc7b58e776", + "e5c5ec71-b1e3-4495-899b-4e32749eab44", + "f2b2e874-185d-429e-b30d-cbbfc161ec99", + "257b88cb-c109-4ce7-a695-169196c75fce", + "23977e30-1591-4102-874e-7755eb235ce7", + "a5e16d89-03e1-471e-a683-0e8cc2c3a8a4", + "482bb80f-74d8-4382-804f-46c054ac8482", + "84b9d333-f0d1-4c37-a64f-8a43d3cf6aa3", + "a82465a7-9794-4650-8dff-1b2df3a12285", + "89c16944-9197-42dc-8ceb-0da30ddea3dc", + "cf30b8c0-08cd-478e-beea-b682342c61ba", + "1ef62cb0-852b-4228-862f-3b3a5bcbe4f4", + "39132775-72bf-4082-88cb-e132daff3b59", + 
"42e64698-de63-4035-b68a-cb34ae3bffa8", + "de8ef2c1-e4de-4ea2-9823-6870bf5a169d", + "c6a16538-c44f-48c3-9494-74df3ea4e9d1", + "da1029dd-9f91-4b3b-8449-9d4fafab4269", + "79797a35-cfcf-4ba1-92f5-851f69c78f7e", + "c3352702-99a2-449a-aa79-3bfcc447569c", + "cf310d94-45ef-4e07-8527-4ae7c1acde23", + "10ec4f92-833c-4039-aeb6-4afa109fbce7", + "2d06faba-6288-4865-a000-e995d0ef7cf9", + "2cccf3d9-4035-45e5-9fe6-1d1a4f492590", + "c26507e6-79fc-49be-9328-7faedee69280", + "e2329ef1-5875-4749-9e1a-7f1b95547e8d", + "11df5315-c746-4c87-a3a2-492d5b7efb77", + "ad1bc448-f68d-42eb-9c65-c6868e810d35", + "8beb27c3-6ac6-4d72-b344-4771a87f330a", + "203d2772-38c6-4dcd-9d91-0d652fb30a4c", + "013b1117-20ae-4cbf-8526-1cf1dfbec111", + "17bab11a-1b70-4750-9df1-7b9775f6d5a9", + "d665e1e8-8176-4dd2-8230-c6577a80bd2f", + "f7705845-cc41-4f51-b098-739d1c1aa931", + "07063b38-c4cd-45ee-a108-7c7031fc7165", + "3c347cc6-847e-40bc-b377-876739319d10", + "7c940339-5799-49d5-bf5b-c14303165be5", + "caf5d1ff-48f1-4d74-ae5d-d4fc34338a86", + "bd8e9a95-f179-495a-bca3-94bd7692d85e", + "0ebb5665-cd9d-48bc-8b8d-4c68e1bdc29f", + "53f9218b-2c2e-4cca-aee6-48c80712e509", + "f40f53f4-47f1-46da-8251-154f0cdbc3a0", + "0a8c113e-e987-4ba0-be9c-03dadbf399b2", + "4fbe963f-c99c-495c-9173-de2dcc6690ca", + "ded88bb1-2af1-4f8f-9e25-6bbd4dcc0cc7", + "499109ca-1087-4342-810e-9851b7e84ffb", + "f75144ab-bbb5-481e-9f58-10451ec2feec", + "1552ceea-ec9a-4fe8-ace4-cc64337b6505", + "4e6cc08d-905e-463c-b37b-c8b30164f9d0", + "1cba0968-c6c5-4c38-aeb8-6edf2fd6aa7f", + "28e60757-5123-4284-89c7-8e1c94e959ff", + "b1cc2a6d-66a2-4a57-8000-b28955c22c07", + "13ef6e6f-82af-4c5d-8c71-59f8f1d29631", + "3e3bc8e9-935c-4de0-93e6-10c22b4320ff", + "37b41273-32f2-4635-b132-91e5e062850d", + "bf650257-987b-42cc-aaa4-e3674759fda1", + "e72fa888-6e34-4f6d-ae36-4f33e3545a54", + "ab872ed4-6872-4352-b392-dd9426e9e338", + "1b2e0df0-3733-48cc-aa2d-754f761a5f2f", + "f9565889-9cc5-4d19-9b8e-6009de362eed", + "8df053b1-7476-4d37-8c96-2a6daf3dc213", + 
"d97111eb-0f3d-4079-8877-1d6e61050666", + "4e6e94cc-9043-49ea-a964-1dd851d27f3b", + "6afccaaa-a266-4645-bc89-1f5f42f073aa", + "b2e830be-1da5-42f0-96d2-2cc0e60aacfd", + "eed8b7ed-cd60-4972-ba17-4fc31d81451b", + "c2b47708-8d64-450d-8cf9-3fcddb9a2d29", + "664c1bc0-08da-4b77-b30f-d67f4cd071ec", + "cae5c402-4d83-4f76-b30a-05a9e7b83931", + "f6dfdbd4-51c0-4521-b8d3-27527abb444a", + "726529b7-4967-4c76-90a1-a8bb4667fcf6", + "edcaa651-c843-4c8b-9d0c-0be6ffd77d2f", + "781c6990-5f69-41cf-b8e4-d75938eaa406", + "c77f297b-ff86-4e1a-a1ab-e8717fbf4a52", + "32a9045d-e6fd-4c37-9249-d67f33d6ae08", + "853c39fa-8ab2-4768-964d-8ab066cff531", + "b821eba4-39b8-4377-aee2-25866aeff067", + "a9883562-55ee-484e-81ba-0e8df99776e9", + "c49483d8-7fdb-4d31-bd30-425b969b8506", + "e9d0fd82-345a-408e-b9d7-92821f89549f", + "43430c60-2f63-48b0-a13a-908c1ef09aa1", + "f8d6e27e-6b56-46eb-b362-37273fb07a6e", + "79265afb-a9bf-49db-ab4c-daf0248963cd", + "4bdc08e3-143a-4c01-8d5b-e60473c32c37", + "9af9b9fa-2079-46f8-9a3c-a82dffb5e12a", + "44aa8ac2-9c6b-4964-b6eb-e6bdae1d7818", + "5f6d75a6-f96d-4f66-bce1-6c4a6b474da3", + "93be85d6-e226-42f7-84af-7de93b1d0ddc", + "81b3cc33-0d5f-464d-8f3f-4e64ba5f9b23", + "e0cef54a-1a31-4838-8f30-87821ffe5cdc", + "fa5ef70c-f0d7-4fca-ac55-545b695a7fc7", + "aae95e20-7399-466e-984d-2ce99bcfea7a", + "d20bc180-9b41-45e8-b057-9e6d307cb8df", + "6dcd44b6-2181-4db3-b3e0-5b7729f1976d", + "dac2188b-8e8c-4554-a97f-d6f3de3b0380", + "99180ae9-595c-45d9-a2b7-9221cc344c7a", + "d9d6c4c6-814d-4e36-b5f0-7a7705b3c057", + "3e6127db-0823-426b-b095-ce6b70a1a7b9", + "f4678732-fd32-4b43-b549-3e95267606d8", + "d927081b-c35a-4538-b6ee-3fb64194c35b", + "33b8a776-aa02-4305-a42b-06b67c77e545", + "0b3be7a1-90d5-46f3-9126-7cf244ff8edd", + "ff8baf49-3abf-4066-8815-60a94c7eff04", + "3a512988-607f-46a6-92fd-7db60304d565", + "108a762c-b66e-4ef9-952f-d97070a68e91", + "23f6df79-9e7e-4a91-b91b-fad00243160a", + "b6f8d32a-5888-4fe7-b95e-89b2bd0e889c", + "546b9a0c-8ba6-4e49-b2f4-6108433727ae", + 
"c7f76c6d-b859-49a6-9a0c-2c2aa4851a0d", + "15ad9752-4d2b-4160-a923-123c37e73972", + "6fd81412-8c63-4249-ae92-7f82c7221805", + "63176743-ab54-40bb-ae7c-872d14f57e98", + "f7eb164a-b8c7-404b-a182-07b6d57bd371", + "d6b59c7d-5c8a-4fa0-8ab5-333a5843eb39", + "2a7ce029-3ceb-4180-a204-5035faf78479", + "f02ca15b-f038-4456-832b-e8eedb0ae662", + "efca5f35-4276-4a40-b73f-5a760eadb1bb", + "5bbe6fe0-3aed-4500-a7d2-f5c0a21f6b2d", + "987afd75-dd42-4370-9a40-9da641ae1706", + "5600341e-9540-4889-8103-5aec9072588c", + "382ec068-8609-401c-b8fd-a3f529afa198", + "8fe922d3-8ee6-4700-85d0-ef40292bcac6", + "4a810cb3-c036-4f15-8fab-610aba729e90", + "1a5bbf98-8bd3-414a-b827-3346dd43d6b8", + "28b2dfea-f311-4cd2-b710-a8ed8e6a8206", + "56ffa06f-af93-41bc-8415-715aea4fbfee", + "81f7b6ca-731f-4b5b-a70a-d91805a328b1", + "584ce97a-91c0-47d1-8fc9-6b03d1889ff1", + "d58be63c-7b0a-4b9e-818d-e32d7cd9997b", + "4dad507d-53a2-4e79-8ac2-44d49929e29b", + "b3f4d895-9f47-4d5b-9303-32fb8feb72b4", + "aa620364-39a4-4448-8b7c-fc3a1ccb1079", + "7d01662a-3d94-4d27-90eb-f94bd27f4176", + "7178c1cc-c9c5-4187-ba22-ddbba9ed68eb", + "d52d8c9a-eb51-4a60-9281-d7bdb90b3aac", + "e4c42308-d215-4220-9643-538bff746ecb", + "e4f9b3cb-7b07-4f9c-bc86-5edd8a5ff188", + "2779a30e-24c2-41b6-8704-b7e9713164f8", + "ae324beb-611a-405e-b866-766d9912363f", + "5cc9bad1-63a8-43a9-846f-1295a177857b", + "96aeac18-7c18-464a-8c10-9c81c2b91ec4", + "081e4865-0794-493b-b027-803f281e245e", + "2369bbd9-2988-4fbb-815a-396376bb94ae", + "fb46fa3e-52be-44b4-afe8-5c4a45e134e1", + "24f8b442-5924-4fc6-9f3b-2df37f5795d6", + "349087f9-1ea7-40e7-b31a-3b5cdcd9ef56", + "96928432-e011-4f62-a798-b649464072b9", + "bd343af6-645c-49bb-85ee-6246f41c7afe", + "945b58a4-005b-414f-a680-bd30f100455f", + "4472181b-85f0-4339-8885-34873be50930", + "8c300437-2fe2-4180-a928-5e1bac3a7286", + "caff67dd-6797-4d46-9c3e-1ad4959de893", + "464d5854-5547-40cd-ba90-421ba6367709", + "158e1906-9d94-4460-87b1-ecc89487e2ef", + "434dab05-d0c7-4b08-b267-12dacc114050", + 
"4a0ee48a-77dd-46cc-9669-0785eeb5b8d6", + "79f2be15-53e5-4d08-b600-53a9a9df4bef", + "b4c09a34-b9bb-470e-a06e-5ae304834432", + "458602ce-e695-4f11-bd1b-0b0b72417285", + "baa9d260-5642-4133-85ec-d88cb6a9378f", + "7f20d757-a7bc-403c-a609-5804836d75c7", + "547297a4-c2af-4626-be26-40ffc00d8051", + "76b7350f-5597-450e-aca2-6caac2b46f1c", + "591953f9-9f03-49f6-925d-972374724117", + "95e4e420-3925-4b76-a594-0768ca8807ae", + "8787f5c0-ed30-464b-b310-cc84c317aa4c", + "fef8bf94-0498-4444-b538-f5755ae6b380", + "55c117bd-8b01-4de4-9dba-5083bebbd0e2", + "3e9db5c8-f516-4cae-83f1-1ad2c24fdb1f", + "49b368db-e48c-4d12-bf9d-e4c7ae1deeff", + "641522d4-4a5e-4b30-bc62-1db3770dd9ff", + "76893d26-2508-4270-9de8-2de53005b903", + "60667454-fb57-48c4-8487-e42abd24bfa1", + "30e2e6de-d0b0-4e5f-98d6-7ecb8d9e6f9d", + "cfa81c5d-7740-479e-bd30-3c78ce19c217", + "56984b7b-220f-458c-9b26-fa6ba708e2ca", + "208e1823-51ef-4603-8af3-231dc67db1cc", + "a34577ab-5b3f-47eb-b0aa-1d9a24fb6e13", + "6b1de31c-8e41-4334-9e6b-899853cef375", + "440a49ad-d424-4052-8e18-86bb8a37e0e4", + "2cd23389-1500-4216-b0fd-b3c2a3e1bcc1", + "26cf0bd0-2270-49e2-bc1e-ca0e865be06b", + "8566fca8-7528-4fb1-8af5-7537030d37fd", + "728ee4de-9022-40b3-85d0-825c3de5315e", + "4265ee22-a695-4907-988d-ba635101361d", + "d48e3a31-b396-411e-ac05-6a23318db627", + "a68b0163-dca9-4d60-a02f-88dc212b9360", + "4c93b767-65af-4e29-8d09-ea1f258acb42", + "2e322e58-24e9-4c8d-a57f-777a8c86231a", + "0c108795-843d-4fde-9a72-f83137354f2a", + "fec3516c-d62f-40fa-93fb-4a652aebbb99", + "b8109f7a-463c-4578-b26f-81b4acdd1cb6", + "4e7d357b-daca-4e8f-8b12-b9cd0f6cf508", + "abbb0205-d774-4ef5-ac45-0dbddf504c93", + "82b5c05d-b279-4caa-855f-0fcff40a1b2a", + "04dda28f-fd08-41ba-8ca4-527d12b5047e", + "9a60bccf-e2db-42b1-8756-07f8d16a40a0", + "53eb97cf-faf6-48a0-bd3a-3b793ea6b6d8", + "7d2b2efc-a94b-4090-b49c-98e5b8fac009", + "ed75ae00-76d2-41bd-8ba6-84e47a2e6188", + "a703150d-9fe7-4d40-b8bb-b2cf240628db", + "7b69d4eb-e45c-41ff-9698-8855df17950d", + 
"ad4d6082-9175-473a-bb2e-5b44416e3d15", + "e12d6552-a154-4ad2-a825-cab40e3cf0c4", + "a6c593e6-15e2-4c1d-b5ad-556a88bfbbcb", + "cdd3606a-5cc6-4cb7-be98-5af1c2e756f0", + "1d6d612b-e66b-4a85-bf46-3b09eeb4f76b", + "d2fcfe0f-7e38-466e-9bcc-80218fdf87f6", + "f24bdf62-bbe6-401f-aa2b-16e7de30d270", + "c4cf6d2a-d768-42a8-9d0f-c402a8917f87", + "8d9c4461-36c6-44d9-8444-cdfebed5fdab", + "7f9ea6ff-52e4-430b-adbc-19fb58f24eec", + "38a262ea-eb12-4922-8791-f7148dd4e778", + "c1971b6e-d4e3-4a2a-87e2-64af4cecd4a3", + "0c9c00a1-eed4-417b-8568-fdb1ee78112b", + "471466a6-08d5-48f1-96eb-2eefbcb64981", + "19fafea2-d424-4a02-b8c8-18360f10439e", + "e9e90833-0db1-41dd-b3b1-697aa98337db", + "728034d7-d871-40d7-a45a-f05050858bec", + "cf9ef2d0-d516-4087-a97b-96a13e5685e6", + "40490dae-f0d9-4a58-a0b8-0c7c010b6aa7", + "14e7cf92-45f2-4fc1-a92c-32f58ced3dd5", + "26131377-9765-4413-8c41-b5dfbe9a4219", + "41d0ba7c-9224-4650-a2c4-d4908cb86be2", + "abf54f61-d4b4-4a8d-8584-309dc6cc1a59", + "00eccc29-94b7-4c35-9936-166282c4095b", + "d3265c4b-f2a7-43ca-90b5-3d1b4d6857a9", + "a62d968e-247c-4115-8128-3d5dab521bee", + "5aee965f-b110-4e6f-b9f7-330892306c0f", + "0fbe3f12-1b10-448e-b3cc-1654d6d5b399", + "06092767-64e4-4bb2-8bd9-18a0c7fb8586", + "ce6f0c59-e453-4353-b2ed-5e5117e8e67a", + "b0abcdc0-8137-407c-b012-269cfe62a7dd", + "c24f38ec-3c44-4f55-af89-a12922b2bf70", + "ddae6d9d-c996-44e2-b365-36270a097abe", + "75392f7a-e9df-47db-b2b1-a006f4343d9a", + "cdeb4ca3-1684-4d95-9614-b3b3e4487b08", + "d02cdc6b-c71f-4e2e-8de9-9bc475bad01a", + "8d343d62-ffdd-49b2-8246-0f53c128b514", + "47555c64-f3b3-481c-ac72-1437409e1609", + "93270043-d703-4463-9edd-b111b9068ead", + "420ecc98-5331-4a73-ba26-003f2d1cd9cc", + "a36c7dc3-213a-46f4-81d3-cc89924742c1", + "15a0ebf1-5a03-4e43-aef9-7bfe57f19091", + "0fbb751e-ee5f-49c8-9682-c4c47d8d139b", + "a8d287fc-1636-4e69-8527-db4916d8ea15", + "bdc05f5f-e77f-469f-83e2-8218a45100e4", + "35331166-58d7-4f9e-b1fe-991b66b0b811", + "af1590cf-ecfd-49d7-a6b4-2ec60a64f4ee", + 
"eeded81f-0f27-4075-a219-a7f10ddc4cba", + "72e10e0d-ec7f-41af-86f3-9c3eb600e4e8", + "65878ffb-11c6-43c0-af90-e83f51c9597d", + "6de73f73-dfce-45a8-ad8d-9c7cc432cad8", + "96522ecc-0773-43c5-b45b-ce0ef0df8135", + "1538f965-bda3-4bf3-a2e5-1bcec6cfd601", + "16faef26-d95e-42db-92ea-fea1fc0a9760", + "064ddae2-a77f-4661-83e4-91f86cf828eb", + "f0670874-5d48-497b-a686-1a6fcca2d81a", + "b3773686-c05b-4000-8804-00e2e21666a6", + "03f19a1d-d7a6-43ec-a466-31a55fb86184", + "6802505c-62f2-4cd8-a93c-fa23fa6a6c98", + "47dc751f-0989-4a6f-837b-a6dd2d9e36e8", + "4e8e2563-4df3-4b6c-856b-47f678776842", + "a58f44ed-f18d-40f9-9b75-a07006095bb4", + "b5873241-d5da-4e74-910c-f363aa64f941", + "4dc052fa-7c3b-4b4c-a224-1d3af82f0445", + "1d0a4f8b-e977-4308-be12-a8e99970a6c6", + "61bb7df5-688b-4f39-afda-13243d1527b2", + "59df5fd4-f3a1-4404-b770-9423e74ef223", + "b73b983d-c651-450b-a4ff-f0ef53e07ef2", + "91d90af4-8252-4cb3-904b-02b232d9a6a9", + "03125206-3ffc-411b-b5b4-7ff861b642dd", + "f69f545a-88c3-4527-9732-d9b3589a20a4", + "21c787f8-16bf-441a-a6a2-5092764ac499", + "e2aa201a-f9c6-4b9b-a6a7-ad0edf5b591e", + "40c41f75-e657-433e-a4a2-af47912959ad", + "2fa80aa9-0e50-4ed4-9f00-730a94d58f40", + "afe65be5-81d7-46a7-a82d-78caf28b6c10", + "32b17ee0-95b2-403f-a861-201ec9615474", + "4defb045-3ce0-435d-8001-6bb6d45c92b6", + "4fc3a02e-6ece-43c7-a3ac-7eacec33d79b", + "17e89518-7b00-478a-91d3-d19fd8a4ba76", + "53369e32-1f28-4ebc-a5e7-5060a0deb0ec", + "e44f93f5-80f5-415e-b0bc-9c198d0bd546", + "33b9ffd2-2621-4b49-9e0e-9123cea6a8d9", + "df5a1de2-34af-4fe2-99d3-2b30bbc3764b", + "8d6919e0-c5c7-4188-87bd-85d8831e9e7f", + "d91dfe48-486b-4264-a804-71aae9d50e0c", + "c2ed285c-7417-4979-9132-dc83d69d81c8", + "88a4a5cb-cc3f-4d78-be1d-37004946e824", + "77e631c3-ef22-4b4f-9ff4-f208acaa6f95", + "7f1508b2-0d3b-425b-a8b3-b02209edf1d7", + "65c32c5a-1146-4192-82ae-37027a5d9e53", + "41715137-74df-4d12-9410-173e81e24158", + "00f65d7e-9b93-4b1c-a0f3-5c082cafe5de", + "4dc4a0e8-73e8-4399-8a77-1a51e76f1006", + 
"bfcfda59-6aae-476f-b294-bba4cb4138d1", + "7201485e-46e3-4181-b237-4ed54186aedb", + "8cdf77fd-99f2-46f8-b2bc-cce67f8f54a3", + "cf2b5bcd-5533-4c4d-8393-49a10a15f815", + "7f1ef67a-5e81-4c63-a330-75abb198e485", + "07f8bf69-0a15-4d23-9970-65882a152035", + "4c00125b-df00-4bdb-9ce8-4dedeb1702e5", + "6c148cf4-cd26-4285-8aa0-2aaabdec0474", + "c66ce040-3fe2-4fa3-a5a9-d1e1484485e9", + "47fc521b-d05c-4c15-b588-397ef1cc8b01", + "9de3475e-414a-4e29-9800-37885d604ddd", + "0ffa757e-9495-4a92-b175-f4500801ab90", + "5c0a4ac4-5b77-400d-82dc-98d2a716222e", + "d05bfa68-8147-4ffd-a730-72074a0c47ce", + "6ff5555a-59f5-4a80-b36e-0880ef7a2720", + "d257aaf0-6874-4099-9c1a-5fc7b4a3af29", + "3f67aa32-0230-4b31-8779-9e7d94f7db65", + "102f63fd-a411-441c-940e-d4b093e1d598", + "d4fcf013-462f-47ed-9ce1-1e2e868be1d6", + "deb9f52a-7be6-4735-8680-466f639722b7", + "9fbb4e88-d147-43b8-873a-3fccabb68d0d", + "a7fbb9d9-c1f2-49aa-8282-dde2f2fc137e", + "b4101ae9-b455-40b1-b076-d3946dcba596", + "30832d57-4139-4fa0-bacd-edf14bf5f419", + "aa607db5-6bad-4c77-96b4-d283f5dcc4dd", + "539c5b28-02b4-42a8-8595-2c690cecbcf4", + "77e48f12-6330-4300-aa98-825e216f0c68", + "dd7fde51-df70-42d1-bd3a-9820a9d01612", + "3c901d43-6b72-4926-a71d-dbf054d795f8", + "64a42f53-865f-4fb5-b1b3-bebe53b77e05", + "d8f4ba40-ae9b-491a-a754-34a23f26b034", + "a1b86a9b-bfdd-4fde-8985-2f5a3178ec05", + "11ffaafc-d5e1-4afb-8a27-56229cf01b28", + "b868857f-983c-434e-9ecc-7fe639352af6", + "719b2f51-c2cd-43f6-9680-f72c2cc92cb1", + "328146b7-6aac-40ac-8cc0-97816992b98c", + "e29491fd-bbfa-491d-a5d3-064890a13625", + "aa74ed65-102a-4756-b622-d9463bab597b", + "36d6c181-bfd6-44f1-8a18-80eb332addaa", + "e9fd8868-f2a0-4cb2-ad76-f2fe1ad31626", + "b09aae09-d9c5-4c21-9d57-21c5af5a05ac", + "72dd1503-7757-4a67-8c5b-88ead73b31da", + "b45bf0e4-4044-4129-a996-de14ada925d0", + "f646cdd8-f911-473f-86db-a7653383235f", + "8a205365-da21-47b1-854a-72362eaf696d", + "b16d9d94-2650-4453-8fa0-9cbe66dde9bd", + "af528d19-d967-46df-89bc-fba8790562ea", + 
"c9114a1f-1d23-424f-88e0-63298a2df354", + "37330956-fc56-4521-a00d-db1c5b476b72", + "86d40f44-2ac4-4a5c-afd3-20da55903854", + "f5b96baa-8a32-471c-a50c-f33fa8768711", + "192e7231-5495-451d-81d3-0677bd4819f9", + "c69c0f5f-cc42-45bb-a839-df657568d41f", + "d5041e3b-ddf5-4684-9ac6-c7608c65c28d", + "53e3e052-503f-4b75-a766-921d6dba6df1", + "215ea64c-95bd-434b-bb18-12c72f8a29a6", + "af97f00e-55be-4593-8917-a6c428a344d0", + "fb807d10-6fcd-4a4c-b17e-57c4a2b28800", + "73a6f2e6-5029-4d3d-bc1c-7391d003f234", + "afbb44a9-1582-4527-affd-596ab3a172eb", + "516cc1ca-014c-40e7-87eb-1f8e3517545b", + "41554fc5-8ead-44cf-b092-f1a1f507c6d1", + "890689e3-daed-4081-b587-6299e76740be", + "3c2f8d95-7224-45ea-aa01-9bfb5f4d5fd2", + "6d4afc67-1d7a-4177-a4b4-dbdf6e8b33c6", + "313ac559-5841-497b-8b56-e039440793ab", + "44ea1980-4cb4-4445-aaea-8b301fdfb66e", + "c9545a92-92ce-4219-958f-9274fd28bd69", + "7cb94f13-4de8-4b51-8251-bc9382a060ca", + "f35cb6e1-401d-4883-be90-39942a56293e", + "40f92db4-a0b7-40f4-bf31-009c0a867944", + "1128e57f-f9ae-42cb-a541-b3193455fae0", + "b7212a5d-9bd0-41af-a3e8-e3d3b7f37f4e", + "a121a534-d1d5-49d2-b76c-0e4091582333", + "bc09b844-88e5-4655-ba47-91b047cf89b5", + "253da5fe-570c-4186-a79a-27eb3107996c", + "2b809e16-53fc-4219-a9fc-1aa54c8678c9", + "ee83b7ea-40f3-4760-9fd5-7f6fafb6684a", + "9ec42e80-e1de-4f72-8b53-6096b1c543e8", + "d4722b4f-d93e-47e0-ba1c-b28812cfffa7", + "f4377faa-3f9d-42fa-a011-6e20c076a33b", + "32033f89-c4ba-45b0-850f-f92c66aea8a3", + "f9ef443b-a64c-4346-a340-91442ba4c439", + "3f8a311d-6fc5-4ea4-811d-3fad07f11825", + "9fe89a81-2137-4f28-aa28-f134c5c89eba", + "6de4eeaf-a21e-4833-8971-66a0b6d6f09e", + "66a3f0f4-faf7-48a1-ae80-d7f87c9840bc", + "c0166e09-ff90-47fc-b937-bd65a0fc9d1f", + "7359769c-ef34-4979-9f75-c7a21d13fe00", + "4fe63f7c-0d6f-41b2-849e-075052db48f1", + "2b8dc475-2816-4642-85d3-4a308df6c0a3", + "3959f88f-1aac-4430-95b9-59412f94dfc6", + "875f90d3-29de-4527-ab54-13d6ea67eeb7", + "1ef4f649-496b-4c4e-ab5b-3169eecf7ab8", + 
"654f78cb-bbe8-4ec3-8056-3e7ed6fa8ce0", + "c4afa97a-fdd4-4236-9171-b3bdf8e46812", + "70340c3e-769f-4e8f-9efe-67a637f4176e", + "b2d49a88-2646-4f60-a4fb-9e7136a4a87f", + "a9b0171a-6292-4aae-9a00-01cc782cdbe7", + "e54c47fb-8e77-45dc-8dc9-cb718b2bdb60", + "eb9a6ccb-cfd1-4f55-8234-36051eebe7b6", + "3ad2e83c-0af7-4d84-80db-f9ce5910be85", + "e949c6e9-1590-42c9-acbe-98d24fda602c", + "89470c26-6e4d-4b84-b59c-22cb2580ceb3", + "e2c9d73d-7de5-455d-b91f-76a3b705ac7a", + "62b1a49c-ee1c-4a43-b9b4-ee26dbc2f476", + "e85351d8-c25f-4e5a-8d79-b487dbf73a25", + "aa957ab4-33bf-421b-a989-0a20e964dc9b", + "65d42d84-1d06-4670-a8f8-244945ee403c", + "dc4d3fd8-2458-4478-8d6e-16605e37259a", + "1d0f72b3-cdf9-4045-966d-c1a83fd6433b", + "a3d5d4f3-bedf-4b48-8768-b6ec61623f22", + "5d2fa64b-b28e-4626-9c2c-9f19e6aa1072", + "afd23225-5d29-486d-a295-ffe94bb87fd3", + "97c07a09-93c3-43a8-b356-0d6e91bdbe74", + "487319cb-0dad-4f51-a5b7-9dfa8908a502", + "0c17c156-d772-47f8-92e3-97999954b897", + "ec0028b7-3c40-4f23-874c-62bd51f332dc", + "9b765e29-82b5-4764-9a82-ea95d05dca4d", + "7ed5c86e-3472-4b80-9f0b-1436af1a5428", + "b815d8a7-33e7-4763-ad26-825d8195f801", + "c10b5be2-3dba-4ec8-a767-98342898824a", + "32bb6111-5233-4185-9b75-6793c0bfbe06", + "b9c13223-27c0-4f5a-aeed-c936506e2ae5", + "5f72c952-0466-433b-9f07-d51b61142d83", + "f5af179d-153f-40db-92a2-5a5a36873af2", + "348db8c4-9c74-4562-b9eb-5041ff5848ae", + "41e72230-ad82-4eac-9d27-6b8220b05610", + "a136b70f-0df1-48a9-90ba-056fccd821bd", + "5e85c73b-1182-450c-a565-699bf84dae3d", + "7d32b7b7-a329-41e2-889d-ba0d86dbf252", + "e539f6c7-65c8-4803-a595-f1db800fbcfa", + "340a205d-7b20-4f5c-a6c7-ed0de43bba55", + "dc2ced85-c937-4aaa-a0db-b306f5f35af7", + "88f178c3-450b-4459-940f-dd96ffd8b8e0", + "581d46da-ad43-4aff-86d5-08373025be2a", + "1cf33714-2b97-4fd9-b631-bb9a0f7959b0", + "c6709690-86a7-4a9f-b698-c31349cc4b31", + "ce3fda7c-b6c1-473c-848f-99d3656648be", + "a5aad87f-3ce6-45f8-bc7a-d65539880e27", + "157726d4-f68e-43da-b78b-13d8d5a33527", + 
"9756ecf3-32a7-44a2-bb25-13d7853649e6", + "c6fe9f4c-f751-4c37-b9de-260e7cf4cddd", + "641e38a6-614e-4dbb-9345-58d82235b115", + "415809ec-dabe-4490-baab-3881850b3432", + "dc0b1f9c-f4d7-427e-9307-a72c74383639", + "6dda160b-101d-4a5b-be37-2a682719e75b", + "6658da10-2a3b-4826-b4fb-ece8c318ccad", + "13eec308-b417-43ef-92c8-7db021a8dba7", + "24fad45e-e2eb-4898-b67b-a15a8cb77100", + "1e75438b-524f-4e1b-bc96-90da7dcb340a", + "bdf8bb91-b165-45ba-9a37-f71f957f7d1b", + "b0c605b7-b523-4938-9d5c-0a82d41d3c6d", + "18db8069-d7b3-47c3-93e7-8d0d5c8f0ae6", + "16a2cb88-1ddd-46a8-912b-300d690f7ba9", + "462ca91a-7066-44f6-aae4-ee04be544815", + "32032bc5-24f1-47ba-90c9-3df884649ef6", + "da15a1d5-3db6-47b0-bf45-1125843d8f05", + "00d77eb6-7414-4768-ac88-3363ae2b1b79", + "24b9bc1e-d826-4774-8bd0-bd76b01ae10a", + "faf54689-8e0a-4219-ac78-4d0b963e4223", + "817acc66-8775-4157-b623-189ff87cdb03", + "0e83b2a4-fabb-4aa0-be20-76e088472ac6", + "8cc001f8-9b66-4480-9afa-bb6726f080ee", + "8532b4ba-4e5d-4677-87d8-6dbe291f7f99", + "710f559f-5707-455a-83f1-37157220cfa7", + "e93d5927-231c-4bb0-a0fb-6124957369df", + "92053b33-c10f-4c26-b3ae-82c88c58d748", + "e8ced61c-fe47-4f3d-ad99-c5bb9a2cda58", + "363135cc-fad9-44ef-9667-00ed4e3b236f", + "9f85c06f-6216-4ef5-832a-da7865cca354", + "a1838aa7-42fb-4e5d-af6c-4e5485a9171c", + "6e5c80ec-4ddc-4bbd-858a-b01d9538dd39", + "82d63393-e73d-40d8-b9ca-eaeb062a263e", + "7ec857eb-0f05-4866-b039-22e8fb5357a7", + "f6b70422-04c4-44bf-8bf4-9fcb9d55a98f", + "8c856b90-4159-4693-881c-a6600d7c24e4", + "870353af-36e2-449f-9599-20ede508a9b4", + "a31f3f83-ef04-458a-93fd-8774d2724ea3", + "3f14469d-aa2b-4b40-9a41-f12edb2236c2", + "f7a6a45d-d0f9-46e4-af66-26d2c5967650", + "6598b6e9-ddda-4f8a-8c14-88f06fb8fca6", + "9d31faaf-1061-4c4e-ac71-a15c2d1894e0", + "607840dd-5b83-4968-b88a-289965952230", + "ff1eef24-7fb2-49e8-ab0f-5a6ebb1985fa", + "294d5cdb-9cf3-4ab8-9096-b224aec9c2ca", + "71f533a5-0501-4d29-a595-80e08cecd549", + "0cfd90ca-d772-4a27-addb-15b97f91f81b", + 
"835f59a7-9247-4a95-ac15-be3381f1dc02", + "44b7c3ae-76d1-432f-a9a3-05cd7eef1cb0", + "ab4c7496-8e16-4aa5-a6ad-3dae9f402a38", + "dd74f2f6-45b8-462c-8608-143e956974d3", + "c9bb875f-0fbe-479b-b924-23e2e33a7c54", + "248a60f7-057e-420a-9078-9a8432a0addb", + "0ef44f70-17f1-4d23-a8af-35e9c834100a", + "54148581-8e68-4a9b-8a42-fd5de94f792e", + "3d62f0d4-0f11-4c54-9ef5-4f62b23905b0", + "1770171e-2178-4516-b0ad-69f55f4df1ee", + "081888e1-4281-4c3d-ab17-e85ee75f9b00", + "4311dcad-00d5-469c-af0e-443ba6ecd97b", + "c159e2eb-2dc2-4521-85d6-61f0cdf540a0", + "31a592ba-abc8-495d-893d-f1ddc11eb83c", + "9e2c42f1-eb1a-4e43-9060-eebf0e27e184", + "0049043f-780a-4e5a-bce5-1a43657f6197", + "d6dfd5ee-ecc8-4948-a68a-dcaf2bb19dd6", + "9b8dfa81-5eb8-4568-ae85-462dffb900ce", + "069f8809-f4c2-4baa-845a-8769e31db600", + "a069493d-2068-4797-b310-6bf05c60be62", + "2be28c54-34eb-4852-bf28-a9cd8a8ed278", + "e8c70b5d-10b1-4aac-a1b7-a7e146155d89", + "745366c9-54a2-4c8c-9e72-c86437edadbd", + "2e27fdce-fac6-4271-8d2e-114f50d1d354", + "d1def1da-52e2-4828-8b69-74c1b28adb16", + "62501e9c-969b-487f-b04c-4b9437bd964e", + "d0daf969-e506-4c95-816d-6cce9057c172", + "3f11a48f-a42b-4942-9cf0-036288c7ccc3", + "2533e09a-fbb5-4281-affa-be2068111cd2", + "547c0f6f-7349-453a-a85c-361def1a80c4", + "8345db4f-8324-456e-8483-2b054c0ee839", + "741840ea-a1a9-4e0d-b260-b59ed366a7ce", + "d7149253-be8c-4e25-91c9-3cb81de71523", + "3ad35ee6-8194-427b-b685-9553e64212c8", + "8a38b82c-3ccb-423c-82bd-4ff5112bde5a", + "8a8463d3-6b54-4207-a02a-64f1e37ec145", + "fbd8a430-e160-4fb9-bbf5-b4fabf0806a3", + "bd96c584-0038-4c40-b558-4d4eeb0adf3f", + "11e71423-73ef-46ba-a90f-5f36d7afee22", + "0cfcc723-01f3-40c3-a2ce-d134c0cc4d08", + "faa8e232-954c-4aab-ac22-55faf11f30b8", + "edb62263-31ae-47a4-9893-493666956a83", + "104d1252-e27f-4a15-9316-3486d1d0abfe", + "11702ac6-052f-4976-b271-32ea8940055a", + "8039ccf5-9b6f-4b76-bd69-d5ec37f81b07", + "b4d239a9-e714-4b0b-9836-a28d3379a17b", + "d602834e-a582-413e-8f79-afe7d22a7225", + 
"cac6cbe8-e6aa-4617-9b01-da2b90e1ec82", + "7a101870-861b-4cf0-8c8a-aafee5668eb6", + "e83b51fc-0c30-496f-9d75-2f72c3f8e444", + "20e561c4-e273-44c8-83f3-75bda4e48615", + "f05ab6c1-7b8e-4c7c-ae79-f4f7f00532ad", + "b3723d62-e769-4530-932b-3b280019ba63", + "a6adfbc1-0ee4-4cc8-b56d-4d8c345a09d5", + "3b885c1c-8ed1-4b5f-8067-372101a522c2", + "37911cbf-10eb-41d6-958f-9037d11f2a08", + "801ae037-27c0-43d8-bbc9-1f747817c13e", + "092d4232-f3bf-479e-8d99-1c8525f99d84", + "90d74fa5-2242-4623-98af-57744898a94e", + "33044603-f4bf-419e-887d-ff7897e0addb", + "0f872749-9e9b-47ec-8cc2-a5cf31d95a6e", + "b98a200e-8eab-4db3-be70-339aee406047", + "e0ecba62-99d6-478c-87aa-f7762856fcf1", + "a94f0bbd-5aad-4ccf-85c3-2f9780764390", + "4de3ce57-da1f-4069-9cf5-21e9c4dd1ae4", + "2be94894-3e16-4dbc-aeb3-9b9d3d47cb25", + "c9a5a8b9-e7ff-4d2a-998e-cfb8bd98845f" +] \ No newline at end of file diff --git a/source_code/sam3/scripts/extract_roboflow_vl100_results.py b/source_code/sam3/scripts/extract_roboflow_vl100_results.py new file mode 100644 index 0000000000000000000000000000000000000000..d1f1520d66860c83e9486918fb1935154e853d84 --- /dev/null +++ b/source_code/sam3/scripts/extract_roboflow_vl100_results.py @@ -0,0 +1,380 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved + +""" +Script to extract and analyze training results from Roboflow VL100 experiments. + +This script processes training logs and configuration files to extract model performance +metrics and training parameters for analysis and comparison. 
+""" + +import argparse +import json +import os +from typing import Any, Dict, List, Optional + +import pandas as pd +import yaml + + +# Constants +CONFIG_FILENAME = "config_resolved.yaml" +RESULTS_FILENAME = "val_stats.json" +BBOX_AP_METRIC = "Meters_train/val_roboflow100/detection/coco_eval_bbox_AP" + +# Roboflow dataset categories organized by domain +ROBOFLOW_CATEGORIES = { + "sports": [ + "actions", + "aerial-pool", + "ball", + "bibdetection", + "football-player-detection", + "lacrosse-object-detection", + ], + "other": [ + "buoy-onboarding", + "car-logo-detection", + "clashroyalechardetector", + "cod-mw-warzone", + "countingpills", + "everdaynew", + "flir-camera-objects", + "halo-infinite-angel-videogame", + "mahjong", + "new-defects-in-wood", + "orionproducts", + "pill", + "soda-bottles", + "taco-trash-annotations-in-context", + "the-dreidel-project", + ], + "aerial": [ + "aerial-airport", + "aerial-cows", + "aerial-sheep", + "apoce-aerial-photographs-for-object-detection-of-construction-equipment", + "electric-pylon-detection-in-rsi", + "floating-waste", + "human-detection-in-floods", + "sssod", + "uavdet-small", + "wildfire-smoke", + "zebrasatasturias", + ], + "medical": [ + "canalstenosis", + "crystal-clean-brain-tumors-mri-dataset", + "dentalai", + "inbreast", + "liver-disease", + "nih-xray", + "spinefrxnormalvindr", + "stomata-cells", + "train", + "ufba-425", + "urine-analysis1", + "x-ray-id", + "xray", + ], + "document": [ + "activity-diagrams", + "all-elements", + "circuit-voltages", + "invoice-processing", + "label-printing-defect-version-2", + "macro-segmentation", + "paper-parts", + "signatures", + "speech-bubbles-detection", + "wine-labels", + ], + "industrial": [ + "-grccs", + "13-lkc01", + "2024-frc", + "aircraft-turnaround-dataset", + "asphaltdistressdetection", + "cable-damage", + "conveyor-t-shirts", + "dataconvert", + "deeppcb", + "defect-detection", + "fruitjes", + "infraredimageofpowerequipment", + "ism-band-packet-detection", + 
"l10ul502", + "needle-base-tip-min-max", + "recode-waste", + "screwdetectclassification", + "smd-components", + "truck-movement", + "tube", + "water-meter", + "wheel-defect-detection", + ], + "flora_fauna": [ + "aquarium-combined", + "bees", + "deepfruits", + "exploratorium-daphnia", + "grapes-5", + "grass-weeds", + "gwhd2021", + "into-the-vale", + "jellyfish", + "marine-sharks", + "orgharvest", + "peixos-fish", + "penguin-finder-seg", + "pig-detection", + "roboflow-trained-dataset", + "sea-cucumbers-new-tiles", + "thermal-cheetah", + "tomatoes-2", + "trail-camera", + "underwater-objects", + "varroa-mites-detection--test-set", + "wb-prova", + "weeds4", + ], +} + + +def load_jsonl_last_row(file_path: str, keys: List[str]) -> Optional[Dict[str, Any]]: + """ + Load the last row from a JSONL file and extract specific keys. + + Args: + file_path: Path to the JSONL file + keys: List of keys to extract from the last row + + Returns: + Dictionary with extracted key-value pairs, or None if file not found/empty + """ + if not os.path.exists(file_path): + print(f"Warning: File not found: {file_path}") + return None + + last_row = None + try: + with open(file_path, "r") as file: + for line in file: + last_row = json.loads(line.strip()) + + if last_row is None: + print(f"Warning: Empty JSONL file: {file_path}") + return None + + return {key: last_row.get(key) for key in keys} + + except json.JSONDecodeError as e: + print(f"Error: Failed to parse JSON in {file_path}: {e}") + return None + except Exception as e: + print(f"Error: Failed to read {file_path}: {e}") + return None + + +def find_config_files(directory: str, filename: str = CONFIG_FILENAME) -> List[str]: + """ + Recursively find configuration files with a specific filename. 
+ + Args: + directory: Root directory to search + filename: Target filename to search for + + Returns: + List of full paths to matching files + """ + matching_files = [] + for root, _, files in os.walk(directory): + # Skip code directories + if "/code/" in root: + continue + if filename in files: + matching_files.append(os.path.join(root, filename)) + return matching_files + + +def extract_config_parameters(config_path: str, keys: List[str]) -> Dict[str, Any]: + """ + Extract specific parameters from a YAML configuration file. + + Args: + config_path: Path to the YAML configuration file + keys: List of keys to extract from the 'scratch' section + + Returns: + Dictionary containing extracted parameters + """ + try: + with open(config_path, "r") as file: + data = yaml.safe_load(file) + + # Extract parameters from scratch section + scratch_params = {key: data["scratch"].get(key) for key in keys} + + # Add computed parameters + launcher = data.get("launcher", {}) + scratch_params["batch_size"] = int(launcher.get("gpus_per_node", 1)) * int( + launcher.get("num_nodes", 1) + ) + scratch_params["lr_scale"] = data["scratch"].get("lr_scale") + + roboflow_train = data.get("roboflow_train", {}) + scratch_params["roboflow_num_images"] = roboflow_train.get("num_images") + + return scratch_params + + except Exception as e: + print(f"Error: Failed to parse config file {config_path}: {e}") + return {} + + +def calculate_average(values_dict: Dict[str, float]) -> float: + """ + Calculate the average of values in a dictionary. + + Args: + values_dict: Dictionary with numeric values + + Returns: + Average of all values, or 0 if empty + """ + if not values_dict: + return 0.0 + return sum(values_dict.values()) / len(values_dict) + + +def extract_category_results(log_dir: str, categories: List[str]) -> Dict[str, float]: + """ + Extract bbox AP results for specific categories from log files. 
+ + Args: + log_dir: Directory containing category log subdirectories + categories: List of category names to extract results for + + Returns: + Dictionary mapping category names to bbox AP scores + """ + results = {} + metric_keys = [BBOX_AP_METRIC] + + for category in categories: + result_file = os.path.join(log_dir, f"logs/{category}/{RESULTS_FILENAME}") + category_result = load_jsonl_last_row(result_file, metric_keys) + + if category_result is not None and category_result[BBOX_AP_METRIC] is not None: + results[category] = category_result[BBOX_AP_METRIC] + + return results + + +def analyze_experiment_results(config_path: str) -> None: + """ + Analyze results from a single experiment configuration. + + Args: + config_path: Path to the experiment configuration file + """ + print("=" * 80) + print(f"Analyzing experiment: {config_path}") + print("=" * 80) + + # Extract configuration parameters + config_keys = [ + "lr_transformer", + "lr_vision_backbone", + "lr_language_backbone", + "max_data_epochs", + ] + + config_params = extract_config_parameters(config_path, config_keys) + print("Configuration Parameters:") + for key, value in config_params.items(): + print(f" {key}: {value}") + print() + + # Extract results for each category + experiment_dir = os.path.dirname(config_path) + category_results = {} + category_averages = {} + all_scores = [] + + for super_category, categories in ROBOFLOW_CATEGORIES.items(): + category_results[super_category] = extract_category_results( + experiment_dir, categories + ) + + if category_results[super_category]: + category_averages[super_category] = calculate_average( + category_results[super_category] + ) + all_scores.extend(category_results[super_category].values()) + + # Print results summary + print("Results by Category:") + for super_category, avg_score in category_averages.items(): + num_categories = len(category_results[super_category]) + print(f" {super_category}: {avg_score:.4f} (n={num_categories})") + + print(f"\nOverall 
Results:") + print(f" Weighted average: {calculate_average(category_averages):.4f}") + print(f" Total categories: {len(all_scores)}") + print(f" True average: {sum(all_scores) / len(all_scores):.4f}") + print() + + +def print_results_table(results_data: List[Dict[str, Any]]) -> None: + """ + Print results in a formatted table. + + Args: + results_data: List of dictionaries containing results data + """ + if not results_data: + print("No results data to display.") + return + + df = pd.DataFrame(results_data) + print("\nResults Summary Table:") + print("=" * 60) + print(df.to_string(index=False)) + + +def main() -> None: + """Main function to orchestrate the results extraction and analysis.""" + parser = argparse.ArgumentParser( + description="Extract and analyze Roboflow VL100 training results" + ) + parser.add_argument( + "-p", + "--path", + type=str, + required=True, + help="Root directory path containing experiment results", + ) + + args = parser.parse_args() + + # Find all configuration files + config_files = find_config_files(args.path, CONFIG_FILENAME) + + if not config_files: + print(f"No configuration files found in {args.path}") + return + + print(f"Found {len(config_files)} experiment configurations") + print() + + # Analyze each experiment + for config_file in config_files: + try: + analyze_experiment_results(config_file) + except Exception as e: + print(f"Error analyzing {config_file}: {e}") + continue + + +if __name__ == "__main__": + main()