code
stringlengths
17
6.64M
def post_assets(assets, release_id):
    """Zip each asset and upload it to a GitHub release.

    Args:
        assets: iterable of file paths (relative to cwd) to upload.
        release_id: numeric id of the target GitHub release.
    """
    token = os.environ.get('GITHUB_TOKEN')
    headers = {
        'Accept': 'application/vnd.github.v3+json',
        'Authorization': f'token {token}',
        'Content-Type': 'application/zip',
    }
    for asset in assets:
        asset_path = os.path.join(os.getcwd(), asset)
        with ZipFile(f'{asset_path}.zip', 'w') as zip_file:
            zip_file.write(asset)
        asset_path = f'{asset_path}.zip'
        filename = os.path.basename(asset_path)
        # BUG FIX: the asset name was not interpolated into the upload URL,
        # so every asset was posted with a literal placeholder name.
        url = (
            'https://uploads.github.com/repos/ruhyadi/yolo3d-lightning/releases/'
            + str(release_id)
            + f'/assets?name={filename}'
        )
        print('[INFO] Uploading {}'.format(filename))
        # Close the uploaded file after the request instead of leaking the handle.
        with open(asset_path, 'rb') as asset_file:
            response = requests.post(url, files={'name': asset_file}, headers=headers)
        pprint(response.json())
def video_to_frame(video_path: str, output_path: str, fps: int = 5):
    """Extract frames from a video and save them as JPEG images.

    Args:
        video_path: path to the input video file.
        output_path: directory where frames are written (created if missing).
        fps: keep one frame out of every `fps` frames read.
            NOTE(review): despite the name, this is a frame stride, not a
            frames-per-second rate — confirm the intended semantics with callers.
    """
    if (not os.path.exists(output_path)):
        os.makedirs(output_path)
    cap = cv2.VideoCapture(video_path)
    frame_count = 0
    while cap.isOpened():
        (ret, frame) = cap.read()
        if (not ret):
            # End of stream (or read failure): stop extracting.
            break
        if ((frame_count % fps) == 0):
            # Zero-padded name keeps output files lexicographically ordered.
            cv2.imwrite(os.path.join(output_path, f'{frame_count:06d}.jpg'), frame)
        frame_count += 1
    cap.release()
def generate(video_path, gif_path, fps):
    """Generate an animated GIF from the given video at the requested frame rate."""
    video_clip = mpy.VideoFileClip(video_path)
    video_clip.write_gif(gif_path, fps=fps)
    video_clip.close()
class KITTIDataModule(LightningDataModule):
    """Lightning datamodule providing train/val dataloaders over KITTI."""

    def __init__(
        self,
        dataset_path: str = './data/KITTI',
        train_sets: str = './data/KITTI/train.txt',
        val_sets: str = './data/KITTI/val.txt',
        test_sets: str = './data/KITTI/test.txt',
        batch_size: int = 32,
        num_worker: int = 4,
    ):
        super().__init__()
        # Store constructor arguments on self.hparams (not forwarded to loggers).
        self.save_hyperparameters(logger=False)
        self.dataset_transforms = transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
        )

    def setup(self, stage=None):
        """Split dataset to training and validation."""
        self.KITTI_train = KITTIDataset(self.hparams.dataset_path, self.hparams.train_sets)
        self.KITTI_val = KITTIDataset(self.hparams.dataset_path, self.hparams.val_sets)

    def train_dataloader(self):
        """Shuffled dataloader over the training split."""
        return DataLoader(
            dataset=self.KITTI_train,
            batch_size=self.hparams.batch_size,
            num_workers=self.hparams.num_worker,
            shuffle=True,
        )

    def val_dataloader(self):
        """Deterministic dataloader over the validation split."""
        return DataLoader(
            dataset=self.KITTI_val,
            batch_size=self.hparams.batch_size,
            num_workers=self.hparams.num_worker,
            shuffle=False,
        )
class KITTIDataModule2(LightningDataModule):
    """Lightning datamodule for the KITTIDataset2 variant (no transforms)."""

    def __init__(
        self,
        dataset_path: str = './data/KITTI',
        train_sets: str = './data/KITTI/train.txt',
        val_sets: str = './data/KITTI/val.txt',
        test_sets: str = './data/KITTI/test.txt',
        batch_size: int = 32,
        num_worker: int = 4,
    ):
        super().__init__()
        # Store constructor arguments on self.hparams (not forwarded to loggers).
        self.save_hyperparameters(logger=False)

    def setup(self, stage=None):
        """Split dataset to training and validation."""
        self.KITTI_train = KITTIDataset2(self.hparams.dataset_path, self.hparams.train_sets)
        self.KITTI_val = KITTIDataset2(self.hparams.dataset_path, self.hparams.val_sets)

    def train_dataloader(self):
        """Shuffled dataloader over the training split."""
        return DataLoader(
            dataset=self.KITTI_train,
            batch_size=self.hparams.batch_size,
            num_workers=self.hparams.num_worker,
            shuffle=True,
        )

    def val_dataloader(self):
        """Deterministic dataloader over the validation split."""
        return DataLoader(
            dataset=self.KITTI_val,
            batch_size=self.hparams.batch_size,
            num_workers=self.hparams.num_worker,
            shuffle=False,
        )
class KITTIDataModule3(LightningDataModule):
    """Lightning datamodule for the KITTIDataset3 variant."""

    def __init__(
        self,
        dataset_path: str = './data/KITTI',
        train_sets: str = './data/KITTI/train.txt',
        val_sets: str = './data/KITTI/val.txt',
        test_sets: str = './data/KITTI/test.txt',
        batch_size: int = 32,
        num_worker: int = 4,
    ):
        super().__init__()
        # Store constructor arguments on self.hparams (not forwarded to loggers).
        self.save_hyperparameters(logger=False)
        self.dataset_transforms = transforms.Compose(
            [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
        )

    def setup(self, stage=None):
        """Split dataset to training and validation."""
        self.KITTI_train = KITTIDataset3(self.hparams.dataset_path, self.hparams.train_sets)
        self.KITTI_val = KITTIDataset3(self.hparams.dataset_path, self.hparams.val_sets)

    def train_dataloader(self):
        """Shuffled dataloader over the training split."""
        return DataLoader(
            dataset=self.KITTI_train,
            batch_size=self.hparams.batch_size,
            num_workers=self.hparams.num_worker,
            shuffle=True,
        )

    def val_dataloader(self):
        """Deterministic dataloader over the validation split."""
        return DataLoader(
            dataset=self.KITTI_val,
            batch_size=self.hparams.batch_size,
            num_workers=self.hparams.num_worker,
            shuffle=False,
        )
@utils.task_wrapper
def evaluate(cfg: DictConfig) -> Tuple[dict, dict]:
    """Evaluates given checkpoint on a datamodule testset.

    This method is wrapped in optional @task_wrapper decorator which applies extra utilities
    before and after the call.

    Args:
        cfg (DictConfig): Configuration composed by Hydra.

    Returns:
        Tuple[dict, dict]: Dict with metrics and dict with all instantiated objects.
    """
    # A checkpoint to evaluate is mandatory for this task.
    assert cfg.ckpt_path
    log.info(f'Instantiating datamodule <{cfg.datamodule._target_}>')
    datamodule: LightningDataModule = hydra.utils.instantiate(cfg.datamodule)
    log.info(f'Instantiating model <{cfg.model._target_}>')
    model: LightningModule = hydra.utils.instantiate(cfg.model)
    log.info('Instantiating loggers...')
    logger: List[LightningLoggerBase] = utils.instantiate_loggers(cfg.get('logger'))
    log.info(f'Instantiating trainer <{cfg.trainer._target_}>')
    trainer: Trainer = hydra.utils.instantiate(cfg.trainer, logger=logger)
    # Bundle everything for hyperparameter logging and for the caller.
    object_dict = {'cfg': cfg, 'datamodule': datamodule, 'model': model, 'logger': logger, 'trainer': trainer}
    if logger:
        log.info('Logging hyperparameters!')
        utils.log_hyperparameters(object_dict)
    log.info('Starting testing!')
    trainer.test(model=model, datamodule=datamodule, ckpt_path=cfg.ckpt_path)
    metric_dict = trainer.callback_metrics
    return (metric_dict, object_dict)
@hydra.main(version_base='1.2', config_path=(root / 'configs'), config_name='eval.yaml')
def main(cfg: DictConfig) -> None:
    # Hydra entry point: delegate to the task-wrapped evaluation routine.
    evaluate(cfg)
class RegressorModel(LightningModule):
    """Lightning module regressing 3D-box orientation, confidence and dimensions.

    Args:
        net: backbone producing an (orientation, confidence, dimension) triple.
        optimizer: optimizer name, 'adam' or 'sgd' (case-insensitive).
        lr: learning rate.
        momentum: momentum for SGD (ignored by Adam).
        w: weight of the orientation loss inside the theta loss.
        alpha: weight of the dimension loss in the total loss.
    """

    def __init__(self, net: nn.Module, optimizer: str = 'adam', lr: float = 0.0001,
                 momentum: float = 0.9, w: float = 0.4, alpha: float = 0.6):
        super().__init__()
        self.save_hyperparameters(logger=False)
        self.net = net
        self.conf_loss_func = nn.CrossEntropyLoss()
        self.dim_loss_func = nn.MSELoss()
        self.orient_loss_func = OrientationLoss

    def forward(self, x):
        return self.net(x)

    def on_train_start(self):
        pass

    def step(self, batch):
        """Run one forward pass and compute all loss terms for a batch.

        Returns:
            ([loss, theta_loss, orient_loss, dim_loss, conf_loss], preds, targets)
        """
        (x, y) = batch
        x = x.float()
        truth_orient = y['Orientation'].float()
        truth_conf = y['Confidence'].float()
        truth_dim = y['Dimensions'].float()
        preds = self(x)
        [orient, conf, dim] = preds
        orient_loss = self.orient_loss_func(orient, truth_orient, truth_conf)
        dim_loss = self.dim_loss_func(dim, truth_dim)
        # CrossEntropyLoss expects class indices, not one-hot targets.
        truth_conf = torch.max(truth_conf, dim=1)[1]
        conf_loss = self.conf_loss_func(conf, truth_conf)
        loss_theta = conf_loss + (self.hparams.w * orient_loss)
        loss = (self.hparams.alpha * dim_loss) + loss_theta
        return ([loss, loss_theta, orient_loss, dim_loss, conf_loss], preds, y)

    def training_step(self, batch, batch_idx):
        (loss, preds, targets) = self.step(batch)
        self.log_dict(
            {'train/loss': loss[0], 'train/theta_loss': loss[1], 'train/orient_loss': loss[2],
             'train/dim_loss': loss[3], 'train/conf_loss': loss[4]},
            on_step=False, on_epoch=True, prog_bar=False)
        return {'loss': loss[0], 'preds': preds, 'targets': targets}

    def training_epoch_end(self, outputs):
        pass

    def validation_step(self, batch, batch_idx):
        (loss, preds, targets) = self.step(batch)
        self.log_dict(
            {'val/loss': loss[0], 'val/theta_loss': loss[1], 'val/orient_loss': loss[2],
             'val/dim_loss': loss[3], 'val/conf_loss': loss[4]},
            on_step=False, on_epoch=True, prog_bar=False)
        return {'loss': loss[0], 'preds': preds, 'targets': targets}

    def validation_epoch_end(self, outputs):
        # Aggregate the per-batch losses into a single epoch-level metric.
        avg_val_loss = torch.tensor([x['loss'] for x in outputs]).mean()
        self.log('val/avg_loss', avg_val_loss)
        return {'loss': avg_val_loss}

    def on_epoch_end(self):
        pass

    def configure_optimizers(self):
        """Build the optimizer selected by the `optimizer` hyperparameter.

        Raises:
            ValueError: if `optimizer` is neither 'adam' nor 'sgd'.
                (Previously an unknown name fell through both branches and
                crashed with UnboundLocalError.)
        """
        name = self.hparams.optimizer.lower()
        if name == 'adam':
            return torch.optim.Adam(params=self.parameters(), lr=self.hparams.lr)
        if name == 'sgd':
            return torch.optim.SGD(self.parameters(), lr=self.hparams.lr,
                                   momentum=self.hparams.momentum)
        raise ValueError(f"Unknown optimizer '{self.hparams.optimizer}'; expected 'adam' or 'sgd'.")
class RegressorModel2(LightningModule):
    """Lightning regressor variant using orientation_loss2 and a fixed SGD optimizer."""

    def __init__(self, net: nn.Module, lr: float = 0.0001, momentum: float = 0.9,
                 w: float = 0.4, alpha: float = 0.6):
        super().__init__()
        self.save_hyperparameters(logger=False)
        self.net = net
        self.conf_loss_func = nn.CrossEntropyLoss()
        self.dim_loss_func = nn.MSELoss()
        self.orient_loss_func = orientation_loss2

    def forward(self, x):
        return self.net(x)

    def on_train_start(self):
        pass

    def step(self, batch):
        """Forward one batch and return ([loss terms], predictions, targets)."""
        (inputs, targets) = batch
        inputs = inputs.float()
        gt_orient = targets['orientation'].float()
        gt_conf = targets['confidence'].float()
        gt_dims = targets['dimensions'].float()
        predictions = self(inputs)
        [pred_orient, pred_conf, pred_dims] = predictions
        loss_orient = self.orient_loss_func(pred_orient, gt_orient)
        loss_dims = self.dim_loss_func(pred_dims, gt_dims)
        # Convert one-hot confidence targets to class indices for CE loss.
        gt_conf = torch.max(gt_conf, dim=1)[1]
        loss_conf = self.conf_loss_func(pred_conf, gt_conf)
        loss_theta = loss_conf + (self.hparams.w * loss_orient)
        total_loss = (self.hparams.alpha * loss_dims) + loss_theta
        return ([total_loss, loss_theta, loss_orient, loss_conf, loss_dims], predictions, targets)

    def training_step(self, batch, batch_idx):
        (loss, preds, targets) = self.step(batch)
        self.log_dict(
            {'train/loss': loss[0], 'train/theta_loss': loss[1], 'train/orient_loss': loss[2],
             'train/conf_loss': loss[3], 'train/dim_loss': loss[4]},
            on_step=False, on_epoch=True, prog_bar=False)
        return {'loss': loss[0], 'preds': preds, 'targets': targets}

    def training_epoch_end(self, outputs):
        pass

    def validation_step(self, batch, batch_idx):
        (loss, preds, targets) = self.step(batch)
        self.log_dict(
            {'val/loss': loss[0], 'val/theta_loss': loss[1], 'val/orient_loss': loss[2],
             'val/conf_loss': loss[3], 'val/dim_loss': loss[4]},
            on_step=False, on_epoch=True, prog_bar=False)
        return {'loss': loss[0], 'preds': preds, 'targets': targets}

    def validation_epoch_end(self, outputs):
        # Epoch-level average of validation losses.
        avg_val_loss = torch.tensor([x['loss'] for x in outputs]).mean()
        self.log('val/avg_loss', avg_val_loss)
        return {'loss': avg_val_loss}

    def on_epoch_end(self):
        pass

    def configure_optimizers(self):
        """This model always trains with SGD."""
        return torch.optim.SGD(self.parameters(), lr=self.hparams.lr,
                               momentum=self.hparams.momentum)
class RegressorModel3(LightningModule):
    """Lightning regressor variant using OrientationLoss with lowercase target keys.

    Args:
        net: backbone producing an (orientation, confidence, dimension) triple.
        optimizer: optimizer name, 'adam' or 'sgd' (case-insensitive).
        lr: learning rate.
        momentum: momentum for SGD (ignored by Adam).
        w: weight of the orientation loss inside the theta loss.
        alpha: weight of the dimension loss in the total loss.
    """

    def __init__(self, net: nn.Module, optimizer: str = 'adam', lr: float = 0.0001,
                 momentum: float = 0.9, w: float = 0.4, alpha: float = 0.6):
        super().__init__()
        self.save_hyperparameters(logger=False)
        self.net = net
        self.conf_loss_func = nn.CrossEntropyLoss()
        self.dim_loss_func = nn.MSELoss()
        self.orient_loss_func = OrientationLoss

    def forward(self, x):
        return self.net(x)

    def on_train_start(self):
        pass

    def step(self, batch):
        """Forward one batch and return ([loss terms], predictions, targets)."""
        (x, y) = batch
        x = x.float()
        gt_orient = y['orientation'].float()
        gt_conf = y['confidence'].float()
        gt_dims = y['dimensions'].float()
        predictions = self(x)
        [pred_orient, pred_conf, pred_dims] = predictions
        loss_orient = self.orient_loss_func(pred_orient, gt_orient, gt_conf)
        loss_dims = self.dim_loss_func(pred_dims, gt_dims)
        # CrossEntropyLoss expects class indices, not one-hot targets.
        gt_conf = torch.max(gt_conf, dim=1)[1]
        loss_conf = self.conf_loss_func(pred_conf, gt_conf)
        loss_theta = loss_conf + (self.hparams.w * loss_orient)
        loss = (self.hparams.alpha * loss_dims) + loss_theta
        return ([loss, loss_theta, loss_orient, loss_conf, loss_dims], predictions, y)

    def training_step(self, batch, batch_idx):
        (loss, preds, targets) = self.step(batch)
        self.log_dict(
            {'train/loss': loss[0], 'train/theta_loss': loss[1], 'train/orient_loss': loss[2],
             'train/conf_loss': loss[3], 'train/dim_loss': loss[4]},
            on_step=False, on_epoch=True, prog_bar=False)
        return {'loss': loss[0], 'preds': preds, 'targets': targets}

    def training_epoch_end(self, outputs):
        pass

    def validation_step(self, batch, batch_idx):
        (loss, preds, targets) = self.step(batch)
        self.log_dict(
            {'val/loss': loss[0], 'val/theta_loss': loss[1], 'val/orient_loss': loss[2],
             'val/conf_loss': loss[3], 'val/dim_loss': loss[4]},
            on_step=False, on_epoch=True, prog_bar=False)
        return {'loss': loss[0], 'preds': preds, 'targets': targets}

    def validation_epoch_end(self, outputs):
        # Aggregate the per-batch losses into a single epoch-level metric.
        avg_val_loss = torch.tensor([x['loss'] for x in outputs]).mean()
        self.log('val/avg_loss', avg_val_loss)
        return {'loss': avg_val_loss}

    def on_epoch_end(self):
        pass

    def configure_optimizers(self):
        """Build the optimizer selected by the `optimizer` hyperparameter.

        Raises:
            ValueError: if `optimizer` is neither 'adam' nor 'sgd'.
                (Previously an unknown name fell through both branches and
                crashed with UnboundLocalError.)
        """
        name = self.hparams.optimizer.lower()
        if name == 'adam':
            return torch.optim.Adam(params=self.parameters(), lr=self.hparams.lr)
        if name == 'sgd':
            return torch.optim.SGD(self.parameters(), lr=self.hparams.lr,
                                   momentum=self.hparams.momentum)
        raise ValueError(f"Unknown optimizer '{self.hparams.optimizer}'; expected 'adam' or 'sgd'.")
@utils.task_wrapper
def train(cfg: DictConfig) -> Tuple[dict, dict]:
    """Trains the model. Can additionally evaluate on a testset, using best weights obtained during
    training.

    This method is wrapped in optional @task_wrapper decorator which applies extra utilities
    before and after the call.

    Args:
        cfg (DictConfig): Configuration composed by Hydra.

    Returns:
        Tuple[dict, dict]: Dict with metrics and dict with all instantiated objects.
    """
    # Seed RNGs (including dataloader workers) for reproducibility, if configured.
    if cfg.get('seed'):
        pl.seed_everything(cfg.seed, workers=True)
    log.info(f'Instantiating datamodule <{cfg.datamodule._target_}>')
    datamodule: LightningDataModule = hydra.utils.instantiate(cfg.datamodule)
    log.info(f'Instantiating model <{cfg.model._target_}>')
    model: LightningModule = hydra.utils.instantiate(cfg.model)
    log.info('Instantiating callbacks...')
    callbacks: List[Callback] = utils.instantiate_callbacks(cfg.get('callbacks'))
    log.info('Instantiating loggers...')
    logger: List[LightningLoggerBase] = utils.instantiate_loggers(cfg.get('logger'))
    log.info(f'Instantiating trainer <{cfg.trainer._target_}>')
    trainer: Trainer = hydra.utils.instantiate(cfg.trainer, callbacks=callbacks, logger=logger)
    # Bundle everything for hyperparameter logging and for the caller.
    object_dict = {'cfg': cfg, 'datamodule': datamodule, 'model': model, 'callbacks': callbacks, 'logger': logger, 'trainer': trainer}
    if logger:
        log.info('Logging hyperparameters!')
        utils.log_hyperparameters(object_dict)
    if cfg.get('train'):
        log.info('Starting training!')
        trainer.fit(model=model, datamodule=datamodule, ckpt_path=cfg.get('ckpt_path'))
    train_metrics = trainer.callback_metrics
    if cfg.get('test'):
        log.info('Starting testing!')
        # Prefer the best checkpoint saved during training; fall back to
        # the in-memory weights when none was written.
        ckpt_path = trainer.checkpoint_callback.best_model_path
        if (ckpt_path == ''):
            log.warning('Best ckpt not found! \nUsing current weights for testing...')
            ckpt_path = None
        trainer.test(model=model, datamodule=datamodule, ckpt_path=ckpt_path)
        log.info(f'Best ckpt path: {ckpt_path}')
    test_metrics = trainer.callback_metrics
    # Merge train and test metrics into a single dict for the caller.
    metric_dict = {**train_metrics, **test_metrics}
    return (metric_dict, object_dict)
@hydra.main(version_base='1.2', config_path=(root / 'configs'), config_name='train.yaml')
def main(cfg: DictConfig) -> Optional[float]:
    # Train, then surface the metric that hyperparameter sweepers optimize.
    (metric_dict, _) = train(cfg)
    metric_value = utils.get_metric_value(metric_dict=metric_dict, metric_name=cfg.get('optimized_metric'))
    return metric_value
class DimensionAverages():
    """
    Class to calculate the average dimensions of the objects in the dataset.
    """

    def __init__(self, categories: List[str] = None, save_file: str = 'dimension_averages.txt'):
        """
        Args:
            categories: object categories to track (defaults to car/pedestrian/cyclist).
                Passing an empty list loads previously saved averages from `save_file`.
            save_file: file name (next to this module) used for persistence.
        """
        # BUG FIX: avoid a mutable default argument; None selects the default set.
        if categories is None:
            categories = ['car', 'pedestrian', 'cyclist']
        self.dimension_map = {}
        self.filename = ((os.path.abspath(os.path.dirname(__file__)) + '/') + save_file)
        self.categories = categories
        if (len(self.categories) == 0):
            self.load_items_from_file()
        for det in self.categories:
            cat_ = det.lower()
            if (cat_ in self.dimension_map.keys()):
                continue
            self.dimension_map[cat_] = {}
            self.dimension_map[cat_]['count'] = 0
            self.dimension_map[cat_]['total'] = np.zeros(3, dtype=np.float32)

    def add_items(self, items_path):
        """Accumulate dimensions from KITTI-style label files (h/w/l in columns 8-10)."""
        for path in items_path:
            with open(path, 'r') as f:
                for line in f:
                    line = line.split(' ')
                    if (line[0].lower() in self.categories):
                        self.add_item(line[0], np.array([float(line[8]), float(line[9]), float(line[10])]))

    def add_item(self, cat, dim):
        """Accumulate one dimension sample for category `cat`."""
        cat = cat.lower()
        self.dimension_map[cat]['count'] += 1
        self.dimension_map[cat]['total'] += dim

    def get_item(self, cat):
        """Return the running mean dimensions for category `cat`."""
        cat = cat.lower()
        return (self.dimension_map[cat]['total'] / self.dimension_map[cat]['count'])

    def load_items_from_file(self):
        """Load persisted averages, restoring totals as numpy arrays."""
        # BUG FIX: close the file handle via a context manager instead of leaking it.
        with open(self.filename, 'r') as f:
            dimension_map = json.load(f)
        for cat in dimension_map:
            dimension_map[cat]['total'] = np.asarray(dimension_map[cat]['total'])
        self.dimension_map = dimension_map

    def dump_to_file(self):
        """Persist the accumulated averages as JSON."""
        with open(self.filename, 'w') as f:
            f.write(json.dumps(self.dimension_map, cls=NumpyEncoder))

    def recognized_class(self, cat):
        """True when `cat` (case-insensitive) is being tracked."""
        return (cat.lower() in self.dimension_map)
class ClassAverages():
    """Running per-class average of 3D object dimensions, persisted to class_averages.txt."""

    def __init__(self, classes=None):
        """
        Args:
            classes: class names to track. None or an empty list loads previously
                saved averages from file (preserves the old `classes=[]` behavior).
        """
        # BUG FIX: avoid a mutable default argument; [] keeps its original
        # meaning of "load averages from file".
        if classes is None:
            classes = []
        self.dimension_map = {}
        self.filename = (os.path.abspath(os.path.dirname(__file__)) + '/class_averages.txt')
        if (len(classes) == 0):
            self.load_items_from_file()
        for detection_class in classes:
            class_ = detection_class.lower()
            if (class_ in self.dimension_map.keys()):
                continue
            self.dimension_map[class_] = {}
            self.dimension_map[class_]['count'] = 0
            self.dimension_map[class_]['total'] = np.zeros(3, dtype=np.double)

    def add_item(self, class_, dimension):
        """Accumulate one dimension sample for `class_`."""
        class_ = class_.lower()
        self.dimension_map[class_]['count'] += 1
        self.dimension_map[class_]['total'] += dimension

    def get_item(self, class_):
        """Return the running mean dimensions for `class_`."""
        class_ = class_.lower()
        return (self.dimension_map[class_]['total'] / self.dimension_map[class_]['count'])

    def dump_to_file(self):
        """Persist the accumulated averages as JSON."""
        # BUG FIX: use a context manager so the handle is closed even on error.
        with open(self.filename, 'w') as f:
            f.write(json.dumps(self.dimension_map, cls=NumpyEncoder))

    def load_items_from_file(self):
        """Load persisted averages, restoring totals as numpy arrays."""
        with open(self.filename, 'r') as f:
            dimension_map = json.load(f)
        for class_ in dimension_map:
            dimension_map[class_]['total'] = np.asarray(dimension_map[class_]['total'])
        self.dimension_map = dimension_map

    def recognized_class(self, class_):
        """True when `class_` (case-insensitive) is being tracked."""
        return (class_.lower() in self.dimension_map)
class NumpyEncoder(json.JSONEncoder):
    """JSON encoder that serializes numpy arrays as plain nested lists."""

    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else is handled (or rejected) by the base encoder.
        return super().default(obj)
def get_pylogger(name=__name__) -> logging.Logger:
    """Initializes multi-GPU-friendly python command line logger."""
    logger = logging.getLogger(name)
    # Wrap each log method with rank_zero_only so only rank 0 emits records.
    for level in ('debug', 'info', 'warning', 'error', 'exception', 'fatal', 'critical'):
        setattr(logger, level, rank_zero_only(getattr(logger, level)))
    return logger
@rank_zero_only
def print_config_tree(cfg: DictConfig, print_order: Sequence[str] = ('datamodule', 'model', 'callbacks', 'logger', 'trainer', 'paths', 'extras'), resolve: bool = False, save_to_file: bool = False) -> None:
    """Prints content of DictConfig using Rich library and its tree structure.

    Args:
        cfg (DictConfig): Configuration composed by Hydra.
        print_order (Sequence[str], optional): Determines in what order config components are printed.
        resolve (bool, optional): Whether to resolve reference fields of DictConfig.
        save_to_file (bool, optional): Whether to export config to the hydra output folder.
    """
    style = 'dim'
    tree = rich.tree.Tree('CONFIG', style=style, guide_style=style)
    queue = []
    # Queue the requested fields first, in the requested order.
    # FIX: use a plain if/else instead of abusing a conditional expression
    # as a statement for its side effects.
    for field in print_order:
        if field in cfg:
            queue.append(field)
        else:
            log.warning(f"Field '{field}' not found in config. Skipping '{field}' config printing...")
    # Then append any remaining config fields not covered by print_order.
    for field in cfg:
        if field not in queue:
            queue.append(field)
    # Render each field as a YAML branch of the tree.
    for field in queue:
        branch = tree.add(field, style=style, guide_style=style)
        config_group = cfg[field]
        if isinstance(config_group, DictConfig):
            branch_content = OmegaConf.to_yaml(config_group, resolve=resolve)
        else:
            branch_content = str(config_group)
        branch.add(rich.syntax.Syntax(branch_content, 'yaml'))
    rich.print(tree)
    if save_to_file:
        with open(Path(cfg.paths.output_dir, 'config_tree.log'), 'w') as file:
            rich.print(tree, file=file)
@rank_zero_only
def enforce_tags(cfg: DictConfig, save_to_file: bool = False) -> None:
    """Prompts user to input tags from command line if no tags are provided in config."""
    if not cfg.get('tags'):
        # Multiruns must be tagged up front; there is no interactive prompt there.
        if 'id' in HydraConfig().cfg.hydra.job:
            raise ValueError('Specify tags before launching a multirun!')
        log.warning('No tags provided in config. Prompting user to input tags...')
        raw = Prompt.ask('Enter a list of comma separated tags', default='dev')
        cleaned = [tag.strip() for tag in raw.split(',') if tag != '']
        with open_dict(cfg):
            cfg.tags = cleaned
        log.info(f'Tags: {cfg.tags}')
    if save_to_file:
        with open(Path(cfg.paths.output_dir, 'tags.log'), 'w') as file:
            rich.print(cfg.tags, file=file)
@numba.jit(nopython=True)
def div_up(m, n):
    """Integer ceiling division of m by n."""
    quotient = m // n
    if (m % n) > 0:
        quotient += 1
    return quotient
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def trangle_area(a, b, c):
    # Signed area of triangle (a, b, c) via the 2D cross product of (a-c)
    # and (b-c); positive when the vertices are counter-clockwise.
    return ((((a[0] - c[0]) * (b[1] - c[1])) - ((a[1] - c[1]) * (b[0] - c[0]))) / 2.0)
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def area(int_pts, num_of_inter):
    # Area of a convex polygon given as flattened (x, y) pairs in int_pts:
    # fan-triangulate from the first vertex and sum absolute triangle areas.
    area_val = 0.0
    for i in range((num_of_inter - 2)):
        area_val += abs(trangle_area(int_pts[:2], int_pts[((2 * i) + 2):((2 * i) + 4)], int_pts[((2 * i) + 4):((2 * i) + 6)]))
    return area_val
@cuda.jit('(float32[:], int32)', device=True, inline=True)
def sort_vertex_in_convex_polygon(int_pts, num_of_inter):
    # Sort the polygon vertices (flattened x, y pairs in int_pts, in place)
    # by angle around their centroid so the polygon can be fan-triangulated.
    if (num_of_inter > 0):
        # Centroid of all vertices.
        center = cuda.local.array((2,), dtype=numba.float32)
        center[:] = 0.0
        for i in range(num_of_inter):
            center[0] += int_pts[(2 * i)]
            center[1] += int_pts[((2 * i) + 1)]
        center[0] /= num_of_inter
        center[1] /= num_of_inter
        v = cuda.local.array((2,), dtype=numba.float32)
        vs = cuda.local.array((16,), dtype=numba.float32)
        for i in range(num_of_inter):
            v[0] = (int_pts[(2 * i)] - center[0])
            v[1] = (int_pts[((2 * i) + 1)] - center[1])
            d = math.sqrt(((v[0] * v[0]) + (v[1] * v[1])))
            v[0] = (v[0] / d)
            v[1] = (v[1] / d)
            # Fold the unit direction into a scalar pseudo-angle that is
            # monotonic around the circle, avoiding a call to atan2.
            if (v[1] < 0):
                v[0] = ((- 2) - v[0])
            vs[i] = v[0]
        # Insertion sort by pseudo-angle, moving each coordinate pair
        # together with its sort key.
        j = 0
        temp = 0
        for i in range(1, num_of_inter):
            if (vs[(i - 1)] > vs[i]):
                temp = vs[i]
                tx = int_pts[(2 * i)]
                ty = int_pts[((2 * i) + 1)]
                j = i
                while ((j > 0) and (vs[(j - 1)] > temp)):
                    vs[j] = vs[(j - 1)]
                    int_pts[(j * 2)] = int_pts[((j * 2) - 2)]
                    int_pts[((j * 2) + 1)] = int_pts[((j * 2) - 1)]
                    j -= 1
                vs[j] = temp
                int_pts[(j * 2)] = tx
                int_pts[((j * 2) + 1)] = ty
@cuda.jit('(float32[:], float32[:], int32, int32, float32[:])', device=True, inline=True)
def line_segment_intersection(pts1, pts2, i, j, temp_pts):
    # Intersect edge i of quadrilateral pts1 with edge j of quadrilateral
    # pts2 (both stored as 4 flattened x, y corners). On a proper crossing,
    # writes the intersection point into temp_pts and returns True.
    A = cuda.local.array((2,), dtype=numba.float32)
    B = cuda.local.array((2,), dtype=numba.float32)
    C = cuda.local.array((2,), dtype=numba.float32)
    D = cuda.local.array((2,), dtype=numba.float32)
    # Edge endpoints; the % 4 wraps the last corner back to the first.
    A[0] = pts1[(2 * i)]
    A[1] = pts1[((2 * i) + 1)]
    B[0] = pts1[(2 * ((i + 1) % 4))]
    B[1] = pts1[((2 * ((i + 1) % 4)) + 1)]
    C[0] = pts2[(2 * j)]
    C[1] = pts2[((2 * j) + 1)]
    D[0] = pts2[(2 * ((j + 1) % 4))]
    D[1] = pts2[((2 * ((j + 1) % 4)) + 1)]
    BA0 = (B[0] - A[0])
    BA1 = (B[1] - A[1])
    DA0 = (D[0] - A[0])
    CA0 = (C[0] - A[0])
    DA1 = (D[1] - A[1])
    CA1 = (C[1] - A[1])
    # Orientation tests: the segments cross iff C and D lie on opposite
    # sides of AB, and A and B lie on opposite sides of CD.
    acd = ((DA1 * CA0) > (CA1 * DA0))
    bcd = (((D[1] - B[1]) * (C[0] - B[0])) > ((C[1] - B[1]) * (D[0] - B[0])))
    if (acd != bcd):
        abc = ((CA1 * BA0) > (BA1 * CA0))
        abd = ((DA1 * BA0) > (BA1 * DA0))
        if (abc != abd):
            # Solve the two line equations for the crossing point
            # (Cramer's rule on the 2x2 system).
            DC0 = (D[0] - C[0])
            DC1 = (D[1] - C[1])
            ABBA = ((A[0] * B[1]) - (B[0] * A[1]))
            CDDC = ((C[0] * D[1]) - (D[0] * C[1]))
            DH = ((BA1 * DC0) - (BA0 * DC1))
            Dx = ((ABBA * DC0) - (BA0 * CDDC))
            Dy = ((ABBA * DC1) - (BA1 * CDDC))
            temp_pts[0] = (Dx / DH)
            temp_pts[1] = (Dy / DH)
            return True
    return False
@cuda.jit('(float32[:], float32[:], int32, int32, float32[:])', device=True, inline=True)
def line_segment_intersection_v1(pts1, pts2, i, j, temp_pts):
    # Alternative segment-intersection test based on signed triangle areas.
    # Same contract as line_segment_intersection: writes the crossing point
    # into temp_pts and returns True when edges i (of pts1) and j (of pts2)
    # properly intersect.
    a = cuda.local.array((2,), dtype=numba.float32)
    b = cuda.local.array((2,), dtype=numba.float32)
    c = cuda.local.array((2,), dtype=numba.float32)
    d = cuda.local.array((2,), dtype=numba.float32)
    a[0] = pts1[(2 * i)]
    a[1] = pts1[((2 * i) + 1)]
    b[0] = pts1[(2 * ((i + 1) % 4))]
    b[1] = pts1[((2 * ((i + 1) % 4)) + 1)]
    c[0] = pts2[(2 * j)]
    c[1] = pts2[((2 * j) + 1)]
    d[0] = pts2[(2 * ((j + 1) % 4))]
    d[1] = pts2[((2 * ((j + 1) % 4)) + 1)]
    # c and d must lie on opposite sides of segment ab.
    area_abc = trangle_area(a, b, c)
    area_abd = trangle_area(a, b, d)
    if ((area_abc * area_abd) >= 0):
        return False
    # a and b must lie on opposite sides of segment cd.
    area_cda = trangle_area(c, d, a)
    area_cdb = ((area_cda + area_abc) - area_abd)
    if ((area_cda * area_cdb) >= 0):
        return False
    # Interpolate along ab by the area ratio to get the crossing point.
    t = (area_cda / (area_abd - area_abc))
    dx = (t * (b[0] - a[0]))
    dy = (t * (b[1] - a[1]))
    temp_pts[0] = (a[0] + dx)
    temp_pts[1] = (a[1] + dy)
    return True
@cuda.jit('(float32, float32, float32[:])', device=True, inline=True)
def point_in_quadrilateral(pt_x, pt_y, corners):
    # Test whether (pt_x, pt_y) lies inside the quadrilateral whose first
    # three corners A, B, D span edges AB and AD: project AP onto both edge
    # vectors and require each projection to fall within the edge's extent.
    # NOTE(review): this is exact for rectangles/parallelograms (the shapes
    # produced by rbbox_to_corners) — confirm if used with general quads.
    ab0 = (corners[2] - corners[0])
    ab1 = (corners[3] - corners[1])
    ad0 = (corners[6] - corners[0])
    ad1 = (corners[7] - corners[1])
    ap0 = (pt_x - corners[0])
    ap1 = (pt_y - corners[1])
    abab = ((ab0 * ab0) + (ab1 * ab1))
    abap = ((ab0 * ap0) + (ab1 * ap1))
    adad = ((ad0 * ad0) + (ad1 * ad1))
    adap = ((ad0 * ap0) + (ad1 * ap1))
    return ((abab >= abap) and (abap >= 0) and (adad >= adap) and (adap >= 0))
@cuda.jit('(float32[:], float32[:], float32[:])', device=True, inline=True)
def quadrilateral_intersection(pts1, pts2, int_pts):
    # Collect the vertices of the intersection polygon of two quadrilaterals
    # into int_pts (flattened x, y pairs); returns how many were found.
    num_of_inter = 0
    # Corners of either quad that lie inside the other are intersection vertices.
    for i in range(4):
        if point_in_quadrilateral(pts1[(2 * i)], pts1[((2 * i) + 1)], pts2):
            int_pts[(num_of_inter * 2)] = pts1[(2 * i)]
            int_pts[((num_of_inter * 2) + 1)] = pts1[((2 * i) + 1)]
            num_of_inter += 1
        if point_in_quadrilateral(pts2[(2 * i)], pts2[((2 * i) + 1)], pts1):
            int_pts[(num_of_inter * 2)] = pts2[(2 * i)]
            int_pts[((num_of_inter * 2) + 1)] = pts2[((2 * i) + 1)]
            num_of_inter += 1
    # Every pairwise edge crossing is also an intersection vertex.
    temp_pts = cuda.local.array((2,), dtype=numba.float32)
    for i in range(4):
        for j in range(4):
            has_pts = line_segment_intersection(pts1, pts2, i, j, temp_pts)
            if has_pts:
                int_pts[(num_of_inter * 2)] = temp_pts[0]
                int_pts[((num_of_inter * 2) + 1)] = temp_pts[1]
                num_of_inter += 1
    return num_of_inter
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def rbbox_to_corners(corners, rbbox):
    # Expand a rotated box [center_x, center_y, x_d, y_d, angle] into its
    # four corners, written into `corners` as flattened (x, y) pairs.
    angle = rbbox[4]
    a_cos = math.cos(angle)
    a_sin = math.sin(angle)
    center_x = rbbox[0]
    center_y = rbbox[1]
    x_d = rbbox[2]
    y_d = rbbox[3]
    # Axis-aligned corner offsets around the origin, before rotation.
    corners_x = cuda.local.array((4,), dtype=numba.float32)
    corners_y = cuda.local.array((4,), dtype=numba.float32)
    corners_x[0] = ((- x_d) / 2)
    corners_x[1] = ((- x_d) / 2)
    corners_x[2] = (x_d / 2)
    corners_x[3] = (x_d / 2)
    corners_y[0] = ((- y_d) / 2)
    corners_y[1] = (y_d / 2)
    corners_y[2] = (y_d / 2)
    corners_y[3] = ((- y_d) / 2)
    # Rotate each offset and translate to the box center.
    for i in range(4):
        corners[(2 * i)] = (((a_cos * corners_x[i]) + (a_sin * corners_y[i])) + center_x)
        corners[((2 * i) + 1)] = ((((- a_sin) * corners_x[i]) + (a_cos * corners_y[i])) + center_y)
@cuda.jit('(float32[:], float32[:])', device=True, inline=True)
def inter(rbbox1, rbbox2):
    # Intersection area of two rotated boxes: expand to corners, collect the
    # intersection polygon, sort its vertices, then compute the polygon area.
    corners1 = cuda.local.array((8,), dtype=numba.float32)
    corners2 = cuda.local.array((8,), dtype=numba.float32)
    intersection_corners = cuda.local.array((16,), dtype=numba.float32)
    rbbox_to_corners(corners1, rbbox1)
    rbbox_to_corners(corners2, rbbox2)
    num_intersection = quadrilateral_intersection(corners1, corners2, intersection_corners)
    sort_vertex_in_convex_polygon(intersection_corners, num_intersection)
    return area(intersection_corners, num_intersection)
@cuda.jit('(float32[:], float32[:], int32)', device=True, inline=True)
def devRotateIoUEval(rbox1, rbox2, criterion=(- 1)):
    # Overlap metric between two rotated boxes. `criterion` selects the
    # denominator: -1 -> IoU (union), 0 -> area of rbox1, 1 -> area of
    # rbox2, anything else -> raw intersection area.
    area1 = (rbox1[2] * rbox1[3])
    area2 = (rbox2[2] * rbox2[3])
    area_inter = inter(rbox1, rbox2)
    if (criterion == (- 1)):
        return (area_inter / ((area1 + area2) - area_inter))
    elif (criterion == 0):
        return (area_inter / area1)
    elif (criterion == 1):
        return (area_inter / area2)
    else:
        return area_inter
@cuda.jit('(int64, int64, float32[:], float32[:], float32[:], int32)', fastmath=False)
def rotate_iou_kernel_eval(N, K, dev_boxes, dev_query_boxes, dev_iou, criterion=(- 1)):
    # Tiled N x K rotated-IoU kernel: each block handles a 64 x 64 tile of
    # the (boxes, query_boxes) pair matrix; each thread loads one box of
    # each kind into shared memory, then computes one row of the tile.
    threadsPerBlock = (8 * 8)
    row_start = cuda.blockIdx.x
    col_start = cuda.blockIdx.y
    tx = cuda.threadIdx.x
    # Clamp the tile size at the ragged edges of the matrix.
    row_size = min((N - (row_start * threadsPerBlock)), threadsPerBlock)
    col_size = min((K - (col_start * threadsPerBlock)), threadsPerBlock)
    # Shared staging buffers: 64 boxes x 5 floats each.
    block_boxes = cuda.shared.array(shape=((64 * 5),), dtype=numba.float32)
    block_qboxes = cuda.shared.array(shape=((64 * 5),), dtype=numba.float32)
    dev_query_box_idx = ((threadsPerBlock * col_start) + tx)
    dev_box_idx = ((threadsPerBlock * row_start) + tx)
    if (tx < col_size):
        block_qboxes[((tx * 5) + 0)] = dev_query_boxes[((dev_query_box_idx * 5) + 0)]
        block_qboxes[((tx * 5) + 1)] = dev_query_boxes[((dev_query_box_idx * 5) + 1)]
        block_qboxes[((tx * 5) + 2)] = dev_query_boxes[((dev_query_box_idx * 5) + 2)]
        block_qboxes[((tx * 5) + 3)] = dev_query_boxes[((dev_query_box_idx * 5) + 3)]
        block_qboxes[((tx * 5) + 4)] = dev_query_boxes[((dev_query_box_idx * 5) + 4)]
    if (tx < row_size):
        block_boxes[((tx * 5) + 0)] = dev_boxes[((dev_box_idx * 5) + 0)]
        block_boxes[((tx * 5) + 1)] = dev_boxes[((dev_box_idx * 5) + 1)]
        block_boxes[((tx * 5) + 2)] = dev_boxes[((dev_box_idx * 5) + 2)]
        block_boxes[((tx * 5) + 3)] = dev_boxes[((dev_box_idx * 5) + 3)]
        block_boxes[((tx * 5) + 4)] = dev_boxes[((dev_box_idx * 5) + 4)]
    # All shared-memory loads must complete before any thread reads them.
    cuda.syncthreads()
    if (tx < row_size):
        for i in range(col_size):
            # Flattened (row, col) offset of this pair in the N x K output.
            offset = (((((row_start * threadsPerBlock) * K) + (col_start * threadsPerBlock)) + (tx * K)) + i)
            dev_iou[offset] = devRotateIoUEval(block_qboxes[(i * 5):((i * 5) + 5)], block_boxes[(tx * 5):((tx * 5) + 5)], criterion)
def rotate_iou_gpu_eval(boxes, query_boxes, criterion=(- 1), device_id=0):
    """rotated box iou running in gpu. 500x faster than cpu version
    (take 5ms in one example with numba.cuda code).
    convert from [this project](
    https://github.com/hongzhenwang/RRPN-revise/tree/master/lib/rotation).

    Args:
        boxes (float tensor: [N, 5]): rbboxes. format: centers, dims,
            angles(clockwise when positive)
        query_boxes (float tensor: [K, 5]): boxes to compare against.
        criterion (int, optional): -1 -> IoU, 0/1 -> overlap over the
            first/second box's area, else raw intersection area.
        device_id (int, optional): CUDA device to run on. Defaults to 0.

    Returns:
        [N, K] array of overlap values, in the dtype of the input `boxes`.
    """
    # Remember the caller's dtype so the result can be cast back to it.
    box_dtype = boxes.dtype
    boxes = boxes.astype(np.float32)
    query_boxes = query_boxes.astype(np.float32)
    N = boxes.shape[0]
    K = query_boxes.shape[0]
    iou = np.zeros((N, K), dtype=np.float32)
    if ((N == 0) or (K == 0)):
        return iou
    threadsPerBlock = (8 * 8)
    cuda.select_device(device_id)
    blockspergrid = (div_up(N, threadsPerBlock), div_up(K, threadsPerBlock))
    stream = cuda.stream()
    with stream.auto_synchronize():
        boxes_dev = cuda.to_device(boxes.reshape([(- 1)]), stream)
        query_boxes_dev = cuda.to_device(query_boxes.reshape([(- 1)]), stream)
        iou_dev = cuda.to_device(iou.reshape([(- 1)]), stream)
        rotate_iou_kernel_eval[(blockspergrid, threadsPerBlock, stream)](N, K, boxes_dev, query_boxes_dev, iou_dev, criterion)
        iou_dev.copy_to_host(iou.reshape([(- 1)]), stream=stream)
    # BUG FIX: cast back to the ORIGINAL dtype. `boxes` was already
    # reassigned to float32 above, so `boxes.dtype` silently dropped the
    # caller's dtype and left `box_dtype` unused.
    return iou.astype(box_dtype)
@pytest.fixture(scope='package')
def cfg_train_global() -> DictConfig:
    """Compose the train config once per test package, trimmed for fast CPU runs."""
    with initialize(version_base='1.2', config_path='../configs'):
        cfg = compose(config_name='train.yaml', return_hydra_config=True, overrides=[])
        # Shrink the run (one epoch, tiny batch fractions, CPU only) so
        # end-to-end tests finish quickly and deterministically.
        with open_dict(cfg):
            cfg.paths.root_dir = str(pyrootutils.find_root())
            cfg.trainer.max_epochs = 1
            cfg.trainer.limit_train_batches = 0.01
            cfg.trainer.limit_val_batches = 0.1
            cfg.trainer.limit_test_batches = 0.1
            cfg.trainer.accelerator = 'cpu'
            cfg.trainer.devices = 1
            cfg.datamodule.num_workers = 0
            cfg.datamodule.pin_memory = False
            cfg.extras.print_config = False
            cfg.extras.enforce_tags = False
            cfg.logger = None
    return cfg
@pytest.fixture(scope='package')
def cfg_eval_global() -> DictConfig:
    """Compose the eval config once per test package, trimmed for fast CPU runs."""
    with initialize(version_base='1.2', config_path='../configs'):
        # ckpt_path is mandatory for eval; tests override it per-case.
        cfg = compose(config_name='eval.yaml', return_hydra_config=True, overrides=['ckpt_path=.'])
        with open_dict(cfg):
            cfg.paths.root_dir = str(pyrootutils.find_root())
            cfg.trainer.max_epochs = 1
            cfg.trainer.limit_test_batches = 0.1
            cfg.trainer.accelerator = 'cpu'
            cfg.trainer.devices = 1
            cfg.datamodule.num_workers = 0
            cfg.datamodule.pin_memory = False
            cfg.extras.print_config = False
            cfg.extras.enforce_tags = False
            cfg.logger = None
    return cfg
@pytest.fixture(scope='function')
def cfg_train(cfg_train_global, tmp_path) -> DictConfig:
    """Per-test copy of the global train config with outputs redirected to tmp_path."""
    cfg = cfg_train_global.copy()
    with open_dict(cfg):
        cfg.paths.output_dir = str(tmp_path)
        cfg.paths.log_dir = str(tmp_path)
    (yield cfg)
    # Teardown: reset Hydra's global state so tests stay isolated.
    GlobalHydra.instance().clear()
@pytest.fixture(scope='function')
def cfg_eval(cfg_eval_global, tmp_path) -> DictConfig:
    """Per-test copy of the global eval config with outputs redirected to tmp_path."""
    cfg = cfg_eval_global.copy()
    with open_dict(cfg):
        cfg.paths.output_dir = str(tmp_path)
        cfg.paths.log_dir = str(tmp_path)
    (yield cfg)
    # Teardown: reset Hydra's global state so tests stay isolated.
    GlobalHydra.instance().clear()
def _package_available(package_name: str) -> bool:
    """Check if a package is available in your environment."""
    try:
        dists = pkg_resources.require(package_name)
    except pkg_resources.DistributionNotFound:
        return False
    return dists is not None
class RunIf():
    """RunIf wrapper for conditional skipping of tests.

    Fully compatible with `@pytest.mark`.

    Example:

        @RunIf(min_torch="1.8")
        @pytest.mark.parametrize("arg1", [1.0, 2.0])
        def test_wrapper(arg1):
            assert arg1 > 0
    """

    # NOTE: __new__ returns a pytest mark, not a RunIf instance, so the class
    # behaves as a decorator factory.
    def __new__(self, min_gpus: int=0, min_torch: Optional[str]=None, max_torch: Optional[str]=None, min_python: Optional[str]=None, skip_windows: bool=False, sh: bool=False, tpu: bool=False, fairscale: bool=False, deepspeed: bool=False, wandb: bool=False, neptune: bool=False, comet: bool=False, mlflow: bool=False, **kwargs):
        """
        Args:
            min_gpus: min number of GPUs required to run test
            min_torch: minimum pytorch version to run test
            max_torch: maximum pytorch version to run test
            min_python: minimum python version required to run test
            skip_windows: skip test for Windows platform
            tpu: if TPU is available
            sh: if `sh` module is required to run the test
            fairscale: if `fairscale` module is required to run the test
            deepspeed: if `deepspeed` module is required to run the test
            wandb: if `wandb` module is required to run the test
            neptune: if `neptune` module is required to run the test
            comet: if `comet` module is required to run the test
            mlflow: if `mlflow` module is required to run the test
            kwargs: native pytest.mark.skipif keyword arguments
        """
        # Each requested requirement appends a (skip-condition, reason) pair;
        # the test is skipped when ANY condition is True.
        conditions = []
        reasons = []
        if min_gpus:
            conditions.append((torch.cuda.device_count() < min_gpus))
            reasons.append(f'GPUs>={min_gpus}')
        if min_torch:
            torch_version = get_distribution('torch').version
            conditions.append((Version(torch_version) < Version(min_torch)))
            reasons.append(f'torch>={min_torch}')
        if max_torch:
            torch_version = get_distribution('torch').version
            # Note the >=: max_torch is an exclusive upper bound.
            conditions.append((Version(torch_version) >= Version(max_torch)))
            reasons.append(f'torch<{max_torch}')
        if min_python:
            py_version = f'{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}'
            conditions.append((Version(py_version) < Version(min_python)))
            reasons.append(f'python>={min_python}')
        if skip_windows:
            conditions.append(_IS_WINDOWS)
            reasons.append('does not run on Windows')
        if tpu:
            conditions.append((not _TPU_AVAILABLE))
            reasons.append('TPU')
        if sh:
            conditions.append((not _SH_AVAILABLE))
            reasons.append('sh')
        if fairscale:
            conditions.append((not _FAIRSCALE_AVAILABLE))
            reasons.append('fairscale')
        if deepspeed:
            conditions.append((not _DEEPSPEED_AVAILABLE))
            reasons.append('deepspeed')
        if wandb:
            conditions.append((not _WANDB_AVAILABLE))
            reasons.append('wandb')
        if neptune:
            conditions.append((not _NEPTUNE_AVAILABLE))
            reasons.append('neptune')
        if comet:
            conditions.append((not _COMET_AVAILABLE))
            reasons.append('comet')
        if mlflow:
            conditions.append((not _MLFLOW_AVAILABLE))
            reasons.append('mlflow')
        # Keep only the reasons whose condition actually triggered, so the
        # skip message lists what is really missing.
        reasons = [rs for (cond, rs) in zip(conditions, reasons) if cond]
        return pytest.mark.skipif(condition=any(conditions), reason=f"Requires: [{' + '.join(reasons)}]", **kwargs)
def run_sh_command(command: List[str]):
    """Execute `python <command...>` via the `sh` package and fail the current
    pytest test with the captured stderr if the command exits non-zero."""
    try:
        sh.python(command)
    except sh.ErrorReturnCode as err:
        error_text = err.stderr.decode()
        if error_text:
            pytest.fail(msg=error_text)
def test_train_config(cfg_train: DictConfig):
    """Smoke-test that the composed train config instantiates cleanly."""
    assert cfg_train
    assert cfg_train.datamodule
    assert cfg_train.model
    assert cfg_train.trainer
    # Register the config with Hydra's singleton so instantiation works.
    HydraConfig().set_config(cfg_train)
    hydra.utils.instantiate(cfg_train.datamodule)
    hydra.utils.instantiate(cfg_train.model)
    hydra.utils.instantiate(cfg_train.trainer)
def test_eval_config(cfg_eval: DictConfig):
    """Smoke-test that the composed eval config instantiates cleanly."""
    assert cfg_eval
    assert cfg_eval.datamodule
    assert cfg_eval.model
    assert cfg_eval.trainer
    # Register the config with Hydra's singleton so instantiation works.
    HydraConfig().set_config(cfg_eval)
    hydra.utils.instantiate(cfg_eval.datamodule)
    hydra.utils.instantiate(cfg_eval.model)
    hydra.utils.instantiate(cfg_eval.trainer)
@pytest.mark.slow
def test_train_eval(tmp_path, cfg_train, cfg_eval):
    """Train for 1 epoch with `train.py` and evaluate with `eval.py`"""
    # Both configs must write into the same tmp dir so eval finds the ckpt.
    assert (str(tmp_path) == cfg_train.paths.output_dir == cfg_eval.paths.output_dir)
    with open_dict(cfg_train):
        cfg_train.trainer.max_epochs = 1
        cfg_train.test = True
    HydraConfig().set_config(cfg_train)
    (train_metric_dict, _) = train(cfg_train)
    assert ('last.ckpt' in os.listdir((tmp_path / 'checkpoints')))
    with open_dict(cfg_eval):
        cfg_eval.ckpt_path = str(((tmp_path / 'checkpoints') / 'last.ckpt'))
    HydraConfig().set_config(cfg_eval)
    (test_metric_dict, _) = evaluate(cfg_eval)
    assert (test_metric_dict['test/acc'] > 0.0)
    # Evaluating the saved checkpoint must reproduce the in-training test
    # accuracy (within float tolerance).
    assert (abs((train_metric_dict['test/acc'].item() - test_metric_dict['test/acc'].item())) < 0.001)
@pytest.mark.parametrize('batch_size', [32, 128])
def test_mnist_datamodule(batch_size):
    """End-to-end check of MNISTDataModule: download, split, and batch shapes."""
    data_dir = 'data/'
    dm = MNISTDataModule(data_dir=data_dir, batch_size=batch_size)
    dm.prepare_data()
    # prepare_data only downloads; splits are created in setup().
    assert ((not dm.data_train) and (not dm.data_val) and (not dm.data_test))
    assert Path(data_dir, 'MNIST').exists()
    assert Path(data_dir, 'MNIST', 'raw').exists()
    dm.setup()
    assert (dm.data_train and dm.data_val and dm.data_test)
    assert (dm.train_dataloader() and dm.val_dataloader() and dm.test_dataloader())
    # MNIST has 60k train + 10k test samples in total.
    num_datapoints = ((len(dm.data_train) + len(dm.data_val)) + len(dm.data_test))
    assert (num_datapoints == 70000)
    batch = next(iter(dm.train_dataloader()))
    (x, y) = batch
    assert (len(x) == batch_size)
    assert (len(y) == batch_size)
    assert (x.dtype == torch.float32)
    assert (y.dtype == torch.int64)
@RunIf(sh=True)
@pytest.mark.slow
def test_experiments(tmp_path):
    """Test running all available experiment configs with fast_dev_run=True."""
    # `startfile` and `overrides` are module-level globals defined elsewhere in
    # this file -- presumably the train script path and shared CLI overrides.
    command = ([startfile, '-m', 'experiment=glob(*)', ('hydra.sweep.dir=' + str(tmp_path)), '++trainer.fast_dev_run=true'] + overrides)
    run_sh_command(command)
@RunIf(sh=True)
@pytest.mark.slow
def test_hydra_sweep(tmp_path):
    """Test default hydra sweep."""
    # Sweeps two learning rates with fast_dev_run to keep runtime small.
    command = ([startfile, '-m', ('hydra.sweep.dir=' + str(tmp_path)), 'model.optimizer.lr=0.005,0.01', '++trainer.fast_dev_run=true'] + overrides)
    run_sh_command(command)
@RunIf(sh=True)
@pytest.mark.slow
def test_hydra_sweep_ddp_sim(tmp_path):
    """Test default hydra sweep with ddp sim."""
    command = ([startfile, '-m', ('hydra.sweep.dir=' + str(tmp_path)), 'trainer=ddp_sim', 'trainer.max_epochs=3', '+trainer.limit_train_batches=0.01', '+trainer.limit_val_batches=0.1', '+trainer.limit_test_batches=0.1', 'model.optimizer.lr=0.005,0.01,0.02'] + overrides)
    run_sh_command(command)
@RunIf(sh=True)
@pytest.mark.slow
def test_optuna_sweep(tmp_path):
    """Test optuna sweep."""
    command = ([startfile, '-m', 'hparams_search=mnist_optuna', ('hydra.sweep.dir=' + str(tmp_path)), 'hydra.sweeper.n_trials=10', 'hydra.sweeper.sampler.n_startup_trials=5', '++trainer.fast_dev_run=true'] + overrides)
    run_sh_command(command)
@RunIf(wandb=True, sh=True)
@pytest.mark.slow
def test_optuna_sweep_ddp_sim_wandb(tmp_path):
    """Test optuna sweep with wandb and ddp sim."""
    # NOTE(review): unlike the sibling sweep tests, this command does not
    # append the module-level `overrides` list -- confirm that is intentional.
    command = [startfile, '-m', 'hparams_search=mnist_optuna', ('hydra.sweep.dir=' + str(tmp_path)), 'hydra.sweeper.n_trials=5', 'trainer=ddp_sim', 'trainer.max_epochs=3', '+trainer.limit_train_batches=0.01', '+trainer.limit_val_batches=0.1', '+trainer.limit_test_batches=0.1', 'logger=wandb']
    run_sh_command(command)
def test_train_fast_dev_run(cfg_train):
    """Run for 1 train, val and test step."""
    HydraConfig().set_config(cfg_train)
    with open_dict(cfg_train):
        cfg_train.trainer.fast_dev_run = True
        cfg_train.trainer.accelerator = 'cpu'
    train(cfg_train)
@RunIf(min_gpus=1)
def test_train_fast_dev_run_gpu(cfg_train):
    """Run for 1 train, val and test step on GPU."""
    HydraConfig().set_config(cfg_train)
    with open_dict(cfg_train):
        cfg_train.trainer.fast_dev_run = True
        cfg_train.trainer.accelerator = 'gpu'
    train(cfg_train)
@RunIf(min_gpus=1)
@pytest.mark.slow
def test_train_epoch_gpu_amp(cfg_train):
    """Train 1 epoch on GPU with mixed-precision."""
    HydraConfig().set_config(cfg_train)
    with open_dict(cfg_train):
        cfg_train.trainer.max_epochs = 1
        # BUG FIX: this GPU/AMP test previously set accelerator='cpu', so it
        # never exercised mixed-precision on the GPU despite the
        # @RunIf(min_gpus=1) guard and the docstring. Use 'gpu', matching
        # test_train_fast_dev_run_gpu.
        cfg_train.trainer.accelerator = 'gpu'
        cfg_train.trainer.precision = 16
    train(cfg_train)
@pytest.mark.slow
def test_train_epoch_double_val_loop(cfg_train):
    """Train 1 epoch with validation loop twice per epoch."""
    HydraConfig().set_config(cfg_train)
    with open_dict(cfg_train):
        cfg_train.trainer.max_epochs = 1
        # val_check_interval=0.5 triggers validation every half epoch.
        cfg_train.trainer.val_check_interval = 0.5
    train(cfg_train)
@pytest.mark.slow
def test_train_ddp_sim(cfg_train):
    """Simulate DDP (Distributed Data Parallel) on 2 CPU processes."""
    HydraConfig().set_config(cfg_train)
    with open_dict(cfg_train):
        cfg_train.trainer.max_epochs = 2
        cfg_train.trainer.accelerator = 'cpu'
        cfg_train.trainer.devices = 2
        cfg_train.trainer.strategy = 'ddp_spawn'
    train(cfg_train)
@pytest.mark.slow
def test_train_resume(tmp_path, cfg_train):
    """Run 1 epoch, finish, and resume for another epoch."""
    with open_dict(cfg_train):
        cfg_train.trainer.max_epochs = 1
    HydraConfig().set_config(cfg_train)
    (metric_dict_1, _) = train(cfg_train)
    files = os.listdir((tmp_path / 'checkpoints'))
    assert ('last.ckpt' in files)
    assert ('epoch_000.ckpt' in files)
    # Resume from the saved checkpoint and train one more epoch.
    with open_dict(cfg_train):
        cfg_train.ckpt_path = str(((tmp_path / 'checkpoints') / 'last.ckpt'))
        cfg_train.trainer.max_epochs = 2
    (metric_dict_2, _) = train(cfg_train)
    files = os.listdir((tmp_path / 'checkpoints'))
    assert ('epoch_001.ckpt' in files)
    # Exactly one extra epoch should have run.
    assert ('epoch_002.ckpt' not in files)
    # Accuracy should improve after the additional epoch.
    assert (metric_dict_1['train/acc'] < metric_dict_2['train/acc'])
    assert (metric_dict_1['val/acc'] < metric_dict_2['val/acc'])
class Dataset(torch.utils.data.Dataset):
    """Video dataset pairing frames with randomly generated moving masks.

    Each item is `sample_length` frames sampled from a single video (resized
    to (w, h)) plus matching synthetic masks. Frames are scaled to [-1, 1],
    masks stay in [0, 1].
    """

    def __init__(self, args: dict, split='train'):
        # assumes args carries 'w', 'h', 'sample_length', 'name' and
        # 'data_root' keys -- TODO confirm against the config files.
        self.args = args
        self.split = split
        self.sample_length = args['sample_length']
        self.size = (self.w, self.h) = (args['w'], args['h'])
        if (args['name'] == 'YouTubeVOS'):
            vid_lst_prefix = os.path.join(args['data_root'], args['name'], (split + '_all_frames/JPEGImages'))
            vid_lst = os.listdir(vid_lst_prefix)
            self.video_names = [os.path.join(vid_lst_prefix, name) for name in vid_lst]
        self._to_tensors = transforms.Compose([Stack(), ToTorchFormatTensor()])

    def __len__(self):
        return len(self.video_names)

    def __getitem__(self, index):
        # Best-effort loading: one corrupt sample falls back to item 0 rather
        # than killing a long training run.
        try:
            item = self.load_item(index)
        except Exception as err:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and hid the underlying error.
            print('Loading error in video {}'.format(self.video_names[index]))
            print(err)
            item = self.load_item(0)
        return item

    def load_item(self, index):
        """Load `sample_length` frames + masks for the video at `index`."""
        video_name = self.video_names[index]
        all_frames = [os.path.join(video_name, name) for name in sorted(os.listdir(video_name))]
        # Synthetic free-form masks that move across the clip.
        all_masks = create_random_shape_with_random_motion(len(all_frames), imageHeight=self.h, imageWidth=self.w)
        ref_index = get_ref_index(len(all_frames), self.sample_length)
        frames = []
        masks = []
        for idx in ref_index:
            img = Image.open(all_frames[idx]).convert('RGB')
            img = img.resize(self.size)
            frames.append(img)
            masks.append(all_masks[idx])
        if (self.split == 'train'):
            # Augmentation: flip the whole clip together, not per-frame.
            frames = GroupRandomHorizontalFlip()(frames)
        # Map frame tensors from [0, 1] to [-1, 1]; masks stay untouched.
        frame_tensors = ((self._to_tensors(frames) * 2.0) - 1.0)
        mask_tensors = self._to_tensors(masks)
        return (frame_tensors, mask_tensors)
def get_ref_index(length, sample_length):
    """Pick `sample_length` sorted frame indices out of `length` frames.

    With probability 0.5 the indices are drawn uniformly without replacement;
    otherwise they form a contiguous window starting at a random offset.
    """
    if random.uniform(0, 1) > 0.5:
        return sorted(random.sample(range(length), sample_length))
    start = random.randint(0, (length - sample_length))
    return list(range(start, start + sample_length))
def get_world_size():
    """Find OMPI world size without calling mpi functions.

    Checks Intel MPI (PMI_SIZE) first, then Open MPI, and finally falls back
    to the local GPU count.
    :rtype: int
    """
    for var in ('PMI_SIZE', 'OMPI_COMM_WORLD_SIZE'):
        value = os.environ.get(var)
        if value is not None:
            # An empty string counts as "unset value" -> default 1.
            return int(value or 1)
    return torch.cuda.device_count()
def get_global_rank():
    """Find OMPI world rank without calling mpi functions.

    Checks Intel MPI (PMI_RANK) first, then Open MPI; defaults to rank 0.
    :rtype: int
    """
    for var in ('PMI_RANK', 'OMPI_COMM_WORLD_RANK'):
        value = os.environ.get(var)
        if value is not None:
            # An empty string counts as "unset value" -> default 0.
            return int(value or 0)
    return 0
def get_local_rank():
    """Find OMPI local rank without calling mpi functions.

    Checks Intel MPI (MPI_LOCALRANKID) first, then Open MPI; defaults to 0.
    :rtype: int
    """
    for var in ('MPI_LOCALRANKID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
        value = os.environ.get(var)
        if value is not None:
            # An empty string counts as "unset value" -> default 0.
            return int(value or 0)
    return 0
def get_master_ip():
    """Resolve the master node address for distributed init.

    Prefers Azure Batch's `host:port` variable (returning just the host),
    then Batch AI's plain-host variable, then localhost.
    """
    az_master = os.environ.get('AZ_BATCH_MASTER_NODE')
    if az_master is not None:
        # Value is formatted as "host:port"; keep only the host part.
        return az_master.split(':')[0]
    batchai_master = os.environ.get('AZ_BATCHAI_MPI_MASTER_NODE')
    if batchai_master is not None:
        return batchai_master
    return '127.0.0.1'
class AdversarialLoss(nn.Module):
    """
    Adversarial loss
    https://arxiv.org/abs/1711.10337

    Supports the non-saturating GAN, least-squares GAN, and hinge objectives.
    """

    def __init__(self, type='nsgan', target_real_label=1.0, target_fake_label=0.0):
        """
        type = nsgan | lsgan | hinge
        """
        super(AdversarialLoss, self).__init__()
        self.type = type
        # Buffers so the labels follow the module across .to(device) calls.
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        criteria = {'nsgan': nn.BCELoss, 'lsgan': nn.MSELoss, 'hinge': nn.ReLU}
        if type in criteria:
            self.criterion = criteria[type]()

    def __call__(self, outputs, is_real, is_disc=None):
        # nsgan/lsgan: regress outputs toward the real/fake label tensor.
        if self.type != 'hinge':
            target = (self.real_label if is_real else self.fake_label).expand_as(outputs)
            return self.criterion(outputs, target)
        # hinge, generator side: maximize critic score.
        if not is_disc:
            return (-outputs).mean()
        # hinge, discriminator side: ReLU(1 - D(real)) / ReLU(1 + D(fake)).
        signed = -outputs if is_real else outputs
        return self.criterion(1 + signed).mean()
class SpectralNorm(object):
    """Forward-pre-hook implementing spectral normalization of a weight.

    Keeps the raw weight as `<name>_orig` plus power-iteration vectors
    `<name>_u` / `<name>_v`, and recomputes `<name> = weight / sigma` before
    every forward pass.
    """

    # Serialization format version recorded by SpectralNormStateDictHook.
    _version = 1

    def __init__(self, name='weight', n_power_iterations=1, dim=0, eps=1e-12):
        self.name = name
        # dim: the output dimension of the weight (rows of the 2D view).
        self.dim = dim
        if (n_power_iterations <= 0):
            raise ValueError('Expected n_power_iterations to be positive, but got n_power_iterations={}'.format(n_power_iterations))
        self.n_power_iterations = n_power_iterations
        # eps: numerical floor used when normalizing u and v.
        self.eps = eps

    def reshape_weight_to_matrix(self, weight):
        """Flatten `weight` into a 2D matrix with `self.dim` as the rows."""
        weight_mat = weight
        if (self.dim != 0):
            # Move self.dim to the front before flattening.
            weight_mat = weight_mat.permute(self.dim, *[d for d in range(weight_mat.dim()) if (d != self.dim)])
        height = weight_mat.size(0)
        return weight_mat.reshape(height, (- 1))

    def compute_weight(self, module, do_power_iteration):
        """Return weight / sigma, optionally advancing the power iteration.

        Power iteration updates u and v in place (training only); sigma is the
        estimated largest singular value of the 2D weight view.
        """
        weight = getattr(module, (self.name + '_orig'))
        u = getattr(module, (self.name + '_u'))
        v = getattr(module, (self.name + '_v'))
        weight_mat = self.reshape_weight_to_matrix(weight)
        if do_power_iteration:
            with torch.no_grad():
                for _ in range(self.n_power_iterations):
                    v = normalize(torch.mv(weight_mat.t(), u), dim=0, eps=self.eps, out=v)
                    u = normalize(torch.mv(weight_mat, v), dim=0, eps=self.eps, out=u)
                if (self.n_power_iterations > 0):
                    # Clone so autograd does not see the in-place updates above.
                    u = u.clone()
                    v = v.clone()
        sigma = torch.dot(u, torch.mv(weight_mat, v))
        weight = (weight / sigma)
        return weight

    def remove(self, module):
        """Undo the reparameterization, leaving a plain `<name>` parameter."""
        with torch.no_grad():
            weight = self.compute_weight(module, do_power_iteration=False)
        delattr(module, self.name)
        delattr(module, (self.name + '_u'))
        delattr(module, (self.name + '_v'))
        delattr(module, (self.name + '_orig'))
        module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))

    def __call__(self, module, inputs):
        # Forward pre-hook: refresh the normalized weight. Power iteration is
        # only advanced in training mode.
        setattr(module, self.name, self.compute_weight(module, do_power_iteration=module.training))

    def _solve_v_and_rescale(self, weight_mat, u, target_sigma):
        # Solve for v given u via the pseudo-inverse, then rescale so that
        # u^T W v equals target_sigma (used when loading old checkpoints).
        v = torch.chain_matmul(weight_mat.t().mm(weight_mat).pinverse(), weight_mat.t(), u.unsqueeze(1)).squeeze(1)
        return v.mul_((target_sigma / torch.dot(u, torch.mv(weight_mat, v))))

    @staticmethod
    def apply(module, name, n_power_iterations, dim, eps):
        """Attach spectral norm to `module.<name>` and register all hooks."""
        for (k, hook) in module._forward_pre_hooks.items():
            if (isinstance(hook, SpectralNorm) and (hook.name == name)):
                raise RuntimeError('Cannot register two spectral_norm hooks on the same parameter {}'.format(name))
        fn = SpectralNorm(name, n_power_iterations, dim, eps)
        weight = module._parameters[name]
        with torch.no_grad():
            weight_mat = fn.reshape_weight_to_matrix(weight)
            (h, w) = weight_mat.size()
            # Random unit vectors seed the power iteration.
            u = normalize(weight.new_empty(h).normal_(0, 1), dim=0, eps=fn.eps)
            v = normalize(weight.new_empty(w).normal_(0, 1), dim=0, eps=fn.eps)
        delattr(module, fn.name)
        module.register_parameter((fn.name + '_orig'), weight)
        # Plain-attribute copy so `module.<name>` exists before the first
        # forward call recomputes it.
        setattr(module, fn.name, weight.data)
        module.register_buffer((fn.name + '_u'), u)
        module.register_buffer((fn.name + '_v'), v)
        module.register_forward_pre_hook(fn)
        module._register_state_dict_hook(SpectralNormStateDictHook(fn))
        module._register_load_state_dict_pre_hook(SpectralNormLoadStateDictPreHook(fn))
        return fn
class SpectralNormLoadStateDictPreHook(object):
    """load_state_dict pre-hook that migrates pre-version-1 checkpoints."""

    def __init__(self, fn):
        # fn: the SpectralNorm instance attached to the module.
        self.fn = fn

    def __call__(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        fn = self.fn
        # Version recorded at save time; None/0 means an old-format checkpoint.
        version = local_metadata.get('spectral_norm', {}).get((fn.name + '.version'), None)
        if ((version is None) or (version < 1)):
            with torch.no_grad():
                weight_orig = state_dict[((prefix + fn.name) + '_orig')]
                weight_mat = fn.reshape_weight_to_matrix(weight_orig)
                u = state_dict[((prefix + fn.name) + '_u')]
                # NOTE(review): upstream PyTorch's version of this hook goes on
                # to recompute `v` (via fn._solve_v_and_rescale) and store it
                # in state_dict; this body appears truncated -- confirm against
                # torch.nn.utils.spectral_norm.
class SpectralNormStateDictHook(object):
    """state_dict hook recording the spectral-norm serialization version in
    the module's local metadata, so loading can detect old checkpoints."""

    def __init__(self, fn):
        # fn: the SpectralNorm instance attached to the module.
        self.fn = fn

    def __call__(self, module, state_dict, prefix, local_metadata):
        local_metadata.setdefault('spectral_norm', {})
        version_key = self.fn.name + '.version'
        if version_key in local_metadata['spectral_norm']:
            raise RuntimeError("Unexpected key in metadata['spectral_norm']: {}".format(version_key))
        local_metadata['spectral_norm'][version_key] = self.fn._version
def spectral_norm(module, name='weight', n_power_iterations=1, eps=1e-12, dim=None):
    r"""Applies spectral normalization to a parameter in the given module.

    Rescales the weight tensor by its spectral norm :math:`\sigma` (largest
    singular value), estimated by power iteration on a 2D view of the weight.
    This stabilizes discriminator training in GANs and is implemented as a
    forward pre-hook that recomputes the weight before every forward call.

    See `Spectral Normalization for Generative Adversarial Networks`
    (https://arxiv.org/abs/1802.05957).

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter
        n_power_iterations (int, optional): number of power iterations used to
            estimate the spectral norm
        eps (float, optional): epsilon for numerical stability when
            normalizing the power-iteration vectors
        dim (int, optional): dimension corresponding to the number of outputs;
            defaults to ``0``, except for ``ConvTranspose{1,2,3}d`` modules,
            where it is ``1``

    Returns:
        The original module with the spectral norm hook attached.

    Example::

        >>> m = spectral_norm(nn.Linear(20, 40))
        >>> m.weight_u.size()
        torch.Size([40])
    """
    if dim is None:
        transposed_convs = (torch.nn.ConvTranspose1d, torch.nn.ConvTranspose2d, torch.nn.ConvTranspose3d)
        # Transposed convolutions store out-channels on dim 1, not dim 0.
        dim = 1 if isinstance(module, transposed_convs) else 0
    SpectralNorm.apply(module, name, n_power_iterations, dim, eps)
    return module
def remove_spectral_norm(module, name='weight'):
    """Removes the spectral normalization reparameterization from a module.

    Args:
        module (Module): containing module
        name (str, optional): name of weight parameter

    Raises:
        ValueError: if no spectral-norm hook named `name` is registered.
    """
    for hook_id, hook in module._forward_pre_hooks.items():
        if isinstance(hook, SpectralNorm) and hook.name == name:
            hook.remove(module)
            del module._forward_pre_hooks[hook_id]
            return module
    raise ValueError("spectral_norm of '{}' not found in {}".format(name, module))
def use_spectral_norm(module, use_sn=False):
    """Optionally wrap `module` with spectral normalization; identity when
    `use_sn` is False."""
    return spectral_norm(module) if use_sn else module
class Trainer():
    """Training driver for the video-inpainting GAN.

    Builds the dataset/loader, generator (and optional discriminator),
    optimizers and losses from `config`, optionally wraps the networks in
    DistributedDataParallel, and runs the iteration-based training loop with
    periodic checkpointing and TensorBoard logging.
    """

    def __init__(self, config):
        # config: nested dict with 'data_loader', 'trainer', 'losses', 'model',
        # plus runtime keys ('device', 'distributed', 'world_size',
        # 'global_rank', 'local_rank', 'save_dir').
        self.config = config
        self.epoch = 0
        self.iteration = 0
        self.train_dataset = Dataset(config['data_loader'], split='train')
        self.train_sampler = None
        self.train_args = config['trainer']
        if config['distributed']:
            self.train_sampler = DistributedSampler(self.train_dataset, num_replicas=config['world_size'], rank=config['global_rank'])
        # Per-process batch size: global batch divided across workers. Shuffle
        # only when no sampler is used (the sampler shuffles itself).
        self.train_loader = DataLoader(self.train_dataset, batch_size=(self.train_args['batch_size'] // config['world_size']), shuffle=(self.train_sampler is None), num_workers=self.train_args['num_workers'], sampler=self.train_sampler)
        self.adversarial_loss = AdversarialLoss(type=self.config['losses']['GAN_LOSS'])
        self.adversarial_loss = self.adversarial_loss.to(self.config['device'])
        self.l1_loss = nn.L1Loss()
        # The generator/discriminator classes live in model/<net>.py.
        net = importlib.import_module(('model.' + config['model']['net']))
        self.netG = net.InpaintGenerator()
        self.netG = self.netG.to(self.config['device'])
        if (not self.config['model']['no_dis']):
            # Hinge loss works on raw scores, so sigmoid only for other losses.
            self.netD = net.Discriminator(in_channels=3, use_sigmoid=(config['losses']['GAN_LOSS'] != 'hinge'))
            self.netD = self.netD.to(self.config['device'])
        self.optimG = torch.optim.Adam(self.netG.parameters(), lr=config['trainer']['lr'], betas=(self.config['trainer']['beta1'], self.config['trainer']['beta2']))
        if (not self.config['model']['no_dis']):
            self.optimD = torch.optim.Adam(self.netD.parameters(), lr=config['trainer']['lr'], betas=(self.config['trainer']['beta1'], self.config['trainer']['beta2']))
        # Resume from the latest checkpoint (if any) BEFORE the DDP wrap.
        self.load()
        if config['distributed']:
            self.netG = DDP(self.netG, device_ids=[self.config['local_rank']], output_device=self.config['local_rank'], broadcast_buffers=True, find_unused_parameters=True)
            if (not self.config['model']['no_dis']):
                self.netD = DDP(self.netD, device_ids=[self.config['local_rank']], output_device=self.config['local_rank'], broadcast_buffers=True, find_unused_parameters=False)
        # TensorBoard writers only on rank 0 (or in non-distributed runs).
        self.dis_writer = None
        self.gen_writer = None
        self.summary = {}
        if ((self.config['global_rank'] == 0) or (not config['distributed'])):
            self.dis_writer = SummaryWriter(os.path.join(config['save_dir'], 'dis'))
            self.gen_writer = SummaryWriter(os.path.join(config['save_dir'], 'gen'))

    def get_lr(self):
        """Current generator learning rate."""
        return self.optimG.param_groups[0]['lr']

    def adjust_learning_rate(self):
        """Single-step LR decay: multiply by 0.1 once iteration reaches niter."""
        # Integer division yields 0 before `niter` and 1 from `niter` onward.
        decay = (0.1 ** (min(self.iteration, self.config['trainer']['niter']) // self.config['trainer']['niter']))
        new_lr = (self.config['trainer']['lr'] * decay)
        if (new_lr != self.get_lr()):
            for param_group in self.optimG.param_groups:
                param_group['lr'] = new_lr
            if (not self.config['model']['no_dis']):
                for param_group in self.optimD.param_groups:
                    param_group['lr'] = new_lr

    def add_summary(self, writer, name, val):
        """Accumulate `val` under `name`; flush the 100-iteration mean to
        TensorBoard every 100 iterations."""
        if (name not in self.summary):
            self.summary[name] = 0
        self.summary[name] += val
        if ((writer is not None) and ((self.iteration % 100) == 0)):
            writer.add_scalar(name, (self.summary[name] / 100), self.iteration)
            self.summary[name] = 0

    def load(self):
        """Resume networks/optimizers from the newest checkpoint in save_dir.

        The latest epoch id comes from `latest.ckpt` when present, otherwise
        from the lexicographically last `*.pth` file.
        """
        model_path = self.config['save_dir']
        if os.path.isfile(os.path.join(model_path, 'latest.ckpt')):
            # NOTE(review): file handle from open() is never closed here.
            latest_epoch = open(os.path.join(model_path, 'latest.ckpt'), 'r').read().splitlines()[(- 1)]
        else:
            ckpts = [os.path.basename(i).split('.pth')[0] for i in glob.glob(os.path.join(model_path, '*.pth'))]
            ckpts.sort()
            latest_epoch = (ckpts[(- 1)] if (len(ckpts) > 0) else None)
        if (latest_epoch is not None):
            gen_path = os.path.join(model_path, 'gen_{}.pth'.format(str(latest_epoch).zfill(5)))
            dis_path = os.path.join(model_path, 'dis_{}.pth'.format(str(latest_epoch).zfill(5)))
            opt_path = os.path.join(model_path, 'opt_{}.pth'.format(str(latest_epoch).zfill(5)))
            if (self.config['global_rank'] == 0):
                print('Loading model from {}...'.format(gen_path))
            data = torch.load(gen_path, map_location=self.config['device'])
            self.netG.load_state_dict(data['netG'])
            if (not self.config['model']['no_dis']):
                data = torch.load(dis_path, map_location=self.config['device'])
                self.netD.load_state_dict(data['netD'])
            data = torch.load(opt_path, map_location=self.config['device'])
            self.optimG.load_state_dict(data['optimG'])
            if (not self.config['model']['no_dis']):
                self.optimD.load_state_dict(data['optimD'])
            self.epoch = data['epoch']
            self.iteration = data['iteration']
        elif (self.config['global_rank'] == 0):
            print('Warnning: There is no trained model found. An initialized model will be used.')

    def save(self, it):
        """Checkpoint networks and optimizer state (rank 0 only)."""
        if (self.config['global_rank'] == 0):
            gen_path = os.path.join(self.config['save_dir'], 'gen_{}.pth'.format(str(it).zfill(5)))
            dis_path = os.path.join(self.config['save_dir'], 'dis_{}.pth'.format(str(it).zfill(5)))
            opt_path = os.path.join(self.config['save_dir'], 'opt_{}.pth'.format(str(it).zfill(5)))
            print('\nsaving model to {} ...'.format(gen_path))
            # Unwrap DataParallel/DDP so the checkpoint holds the bare module.
            if (isinstance(self.netG, torch.nn.DataParallel) or isinstance(self.netG, DDP)):
                netG = self.netG.module
                if (not self.config['model']['no_dis']):
                    netD = self.netD.module
            else:
                netG = self.netG
                if (not self.config['model']['no_dis']):
                    netD = self.netD
            torch.save({'netG': netG.state_dict()}, gen_path)
            if (not self.config['model']['no_dis']):
                torch.save({'netD': netD.state_dict()}, dis_path)
                torch.save({'epoch': self.epoch, 'iteration': self.iteration, 'optimG': self.optimG.state_dict(), 'optimD': self.optimD.state_dict()}, opt_path)
            else:
                torch.save({'epoch': self.epoch, 'iteration': self.iteration, 'optimG': self.optimG.state_dict()}, opt_path)
            # Record the newest checkpoint id for load().
            # NOTE(review): shelling out via os.system for a one-line write --
            # a plain Python file write would avoid the shell dependency.
            os.system('echo {} > {}'.format(str(it).zfill(5), os.path.join(self.config['save_dir'], 'latest.ckpt')))

    def train(self):
        """Run the training loop until `iterations` steps have completed."""
        pbar = range(int(self.train_args['iterations']))
        if (self.config['global_rank'] == 0):
            # Progress bar only on rank 0; other ranks iterate silently.
            pbar = tqdm(pbar, initial=self.iteration, dynamic_ncols=True, smoothing=0.01)
        os.makedirs('logs', exist_ok=True)
        logging.basicConfig(level=logging.INFO, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename='logs/{}.log'.format(self.config['save_dir'].split('/')[(- 1)]), filemode='w')
        while True:
            self.epoch += 1
            if self.config['distributed']:
                # Reshuffle the distributed sampler each epoch.
                self.train_sampler.set_epoch(self.epoch)
            self._train_epoch(pbar)
            if (self.iteration > self.train_args['iterations']):
                break
        print('\nEnd training....')

    def _train_epoch(self, pbar):
        """One pass over the loader: D step (optional) then G step per batch."""
        device = self.config['device']
        for (frames, masks) in self.train_loader:
            self.adjust_learning_rate()
            self.iteration += 1
            (frames, masks) = (frames.to(device), masks.to(device))
            (b, t, c, h, w) = frames.size()
            # Zero out the masked regions before feeding the generator.
            masked_frame = (frames * (1 - masks).float())
            pred_img = self.netG(masked_frame)
            # Flatten the time dimension into the batch for the 2D losses/D.
            frames = frames.view((b * t), c, h, w)
            masks = masks.view((b * t), 1, h, w)
            # Composite: ground truth outside the mask, prediction inside.
            comp_img = ((frames * (1.0 - masks)) + (masks * pred_img))
            gen_loss = 0
            dis_loss = 0
            if (not self.config['model']['no_dis']):
                # --- discriminator update (generator output detached) ---
                real_vid_feat = self.netD(frames)
                fake_vid_feat = self.netD(comp_img.detach())
                dis_real_loss = self.adversarial_loss(real_vid_feat, True, True)
                dis_fake_loss = self.adversarial_loss(fake_vid_feat, False, True)
                dis_loss += ((dis_real_loss + dis_fake_loss) / 2)
                self.add_summary(self.dis_writer, 'loss/dis_vid_fake', dis_fake_loss.item())
                self.add_summary(self.dis_writer, 'loss/dis_vid_real', dis_real_loss.item())
                self.optimD.zero_grad()
                dis_loss.backward()
                self.optimD.step()
                # --- adversarial term for the generator ---
                gen_vid_feat = self.netD(comp_img)
                gan_loss = self.adversarial_loss(gen_vid_feat, True, False)
                gan_loss = (gan_loss * self.config['losses']['adversarial_weight'])
                gen_loss += gan_loss
                self.add_summary(self.gen_writer, 'loss/gan_loss', gan_loss.item())
            # L1 inside the hole, normalized by mask area.
            hole_loss = self.l1_loss((pred_img * masks), (frames * masks))
            hole_loss = ((hole_loss / torch.mean(masks)) * self.config['losses']['hole_weight'])
            gen_loss += hole_loss
            self.add_summary(self.gen_writer, 'loss/hole_loss', hole_loss.item())
            # L1 outside the hole, normalized by the valid area.
            valid_loss = self.l1_loss((pred_img * (1 - masks)), (frames * (1 - masks)))
            valid_loss = ((valid_loss / torch.mean((1 - masks))) * self.config['losses']['valid_weight'])
            gen_loss += valid_loss
            self.add_summary(self.gen_writer, 'loss/valid_loss', valid_loss.item())
            self.optimG.zero_grad()
            gen_loss.backward()
            self.optimG.step()
            if (self.config['global_rank'] == 0):
                pbar.update(1)
                if (not self.config['model']['no_dis']):
                    pbar.set_description(f'd: {dis_loss.item():.3f}; g: {gan_loss.item():.3f};hole: {hole_loss.item():.3f}; valid: {valid_loss.item():.3f}')
                else:
                    pbar.set_description(f'hole: {hole_loss.item():.3f}; valid: {valid_loss.item():.3f}')
            if ((self.iteration % self.train_args['log_freq']) == 0):
                if (not self.config['model']['no_dis']):
                    logging.info('[Iter {}] d: {:.4f}; g: {:.4f}; hole: {:.4f}; valid: {:.4f}'.format(self.iteration, dis_loss.item(), gan_loss.item(), hole_loss.item(), valid_loss.item()))
                else:
                    logging.info('[Iter {}] hole: {:.4f}; valid: {:.4f}'.format(self.iteration, hole_loss.item(), valid_loss.item()))
            if ((self.iteration % self.train_args['save_freq']) == 0):
                self.save(int((self.iteration // self.train_args['save_freq'])))
            if (self.iteration > self.train_args['iterations']):
                break
class BaseNetwork(nn.Module):
    """nn.Module base class adding parameter counting and weight init helpers."""

    def __init__(self):
        super(BaseNetwork, self).__init__()

    def print_network(self):
        """Print the network class name and its total parameter count."""
        # Supports being called on a list of networks by using the first one.
        if isinstance(self, list):
            self = self[0]
        num_params = 0
        for param in self.parameters():
            num_params += param.numel()
        print(('Network [%s] was created. Total number of parameters: %.1f million. To see the architecture, do print(network).' % (type(self).__name__, (num_params / 1000000))))

    def init_weights(self, init_type='normal', gain=0.02):
        """
        initialize network's weights
        init_type: normal | xavier | kaiming | orthogonal
        https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/9451e70673400885567d08a9e97ade2524c700d0/models/networks.py#L39
        """
        def init_func(m):
            classname = m.__class__.__name__
            # InstanceNorm: constant init regardless of the chosen scheme.
            if (classname.find('InstanceNorm2d') != (- 1)):
                if (hasattr(m, 'weight') and (m.weight is not None)):
                    nn.init.constant_(m.weight.data, 1.0)
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.constant_(m.bias.data, 0.0)
            elif (hasattr(m, 'weight') and ((classname.find('Conv') != (- 1)) or (classname.find('Linear') != (- 1)))):
                if (init_type == 'normal'):
                    nn.init.normal_(m.weight.data, 0.0, gain)
                elif (init_type == 'xavier'):
                    nn.init.xavier_normal_(m.weight.data, gain=gain)
                elif (init_type == 'xavier_uniform'):
                    nn.init.xavier_uniform_(m.weight.data, gain=1.0)
                elif (init_type == 'kaiming'):
                    nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif (init_type == 'orthogonal'):
                    nn.init.orthogonal_(m.weight.data, gain=gain)
                elif (init_type == 'none'):
                    # Fall back to the layer's own default initialization.
                    m.reset_parameters()
                else:
                    raise NotImplementedError(('initialization method [%s] is not implemented' % init_type))
                if (hasattr(m, 'bias') and (m.bias is not None)):
                    nn.init.constant_(m.bias.data, 0.0)
        self.apply(init_func)
        # Propagate to children that define their own init_weights.
        for m in self.children():
            if hasattr(m, 'init_weights'):
                m.init_weights(init_type, gain)
class HierarchyEncoder(nn.Module):
    """Stack of grouped convolutions that repeatedly fuses the running output
    with the original input feature map, using progressively more groups."""

    def __init__(self, channel):
        super(HierarchyEncoder, self).__init__()
        assert (channel == 256)
        # Group count used at each conv stage (indexed by i // 2 in forward).
        self.group = [1, 2, 4, 8, 1]
        self.layers = nn.ModuleList([nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, groups=1), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, groups=2), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, groups=4), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, groups=8), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(512, 128, kernel_size=3, stride=1, padding=1, groups=1), nn.LeakyReLU(0.2, inplace=True)])

    def forward(self, x):
        (bt, c, h, w) = x.size()
        out = x
        for (i, layer) in enumerate(self.layers):
            # Before every conv after the first (even indices are convs, odd
            # are activations), interleave the input and the running output
            # group-wise so each group's conv sees both halves -> 512 channels.
            if (((i % 2) == 0) and (i != 0)):
                g = self.group[(i // 2)]
                x0 = x.view(bt, g, (- 1), h, w)
                out0 = out.view(bt, g, (- 1), h, w)
                out = torch.cat([x0, out0], 2).view(bt, (- 1), h, w)
            out = layer(out)
        return out
class InpaintGenerator(BaseNetwork):
    # Video-inpainting generator: CNN encoder -> hierarchical encoder ->
    # patch tokenization -> alternating temporal/spatial transformer blocks ->
    # token-to-patch folding -> CNN decoder, with a residual skip around the
    # transformer stack.
    def __init__(self, init_weights=True):
        super(InpaintGenerator, self).__init__()
        channel = 256
        hidden = 512
        stack_num = 8  # total transformer blocks (half 't', half 's')
        num_head = 4
        kernel_size = (7, 7)
        padding = (3, 3)
        stride = (3, 3)
        # Spatial size of the feature map fed to patch2vec.
        # NOTE(review): 60x108 implies 240x432 input frames (two stride-2
        # convs in the encoder) -- confirm against the dataloader.
        output_size = (60, 108)
        # Token grid = feature map size divided by the patch stride.
        token_size = tuple(map((lambda x, y: (x // y)), output_size, stride))
        blocks = []
        dropout = 0.0
        for _ in range((stack_num // 2)):
            # Alternate temporal ('t') and spatial ('s') attention blocks.
            blocks.append(TransformerBlock(token_size, hidden=hidden, num_head=num_head, mode='t', dropout=dropout))
            blocks.append(TransformerBlock(token_size, hidden=hidden, num_head=num_head, mode='s', dropout=dropout))
        self.transformer = nn.Sequential(*blocks)
        # 7x7 stride-3 conv turns the (channel//2)-dim feature map into tokens.
        self.patch2vec = nn.Conv2d((channel // 2), hidden, kernel_size=kernel_size, stride=stride, padding=padding)
        self.vec2patch = Vec2Patch((channel // 2), hidden, output_size, kernel_size, stride, padding)
        # Frame encoder: downsamples 4x spatially, 3 -> channel channels.
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(128, channel, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True))
        # HierarchyEncoder reduces channel -> channel//2 (its last conv is 128-out).
        self.hier_enc = HierarchyEncoder(channel)
        # Decoder: two deconv (upsample+conv) stages back to 3-channel frames.
        self.decoder = nn.Sequential(
            deconv((channel // 2), 128, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            deconv(64, 64, kernel_size=3, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, 3, kernel_size=3, stride=1, padding=1))
        if init_weights:
            self.init_weights()

    def forward(self, masked_frames):
        # masked_frames: (B, T, 3, H, W); frames are processed as B*T images.
        (b, t, c, h, w) = masked_frames.size()
        enc_feat = self.encoder(masked_frames.view((b * t), c, h, w))
        enc_feat = self.hier_enc(enc_feat)
        trans_feat = self.patch2vec(enc_feat)
        (_, c, h, w) = trans_feat.size()
        # Flatten spatial positions into a token sequence: (B*T, N, hidden).
        trans_feat = trans_feat.view((b * t), c, (- 1)).permute(0, 2, 1)
        trans_feat = self.transformer({'x': trans_feat, 't': t})['x']
        trans_feat = self.vec2patch(trans_feat)
        # Residual connection around the transformer stack.
        enc_feat = (enc_feat + trans_feat)
        output = self.decoder(enc_feat)
        output = torch.tanh(output)  # outputs in [-1, 1]
        return output
class deconv(nn.Module):
    """Upsample-then-convolve block: bilinear interpolation by ``scale_factor``
    followed by a stride-1 Conv2d."""

    def __init__(self, input_channel, output_channel, kernel_size=3, padding=0, scale_factor=2):
        super().__init__()
        # Attribute names kept so checkpoint state_dict keys stay stable.
        self.conv = nn.Conv2d(input_channel, output_channel,
                              kernel_size=kernel_size, stride=1, padding=padding)
        self.s = scale_factor

    def forward(self, x):
        upsampled = F.interpolate(x, scale_factor=self.s, mode='bilinear',
                                  align_corners=True)
        return self.conv(upsampled)
class Attention(nn.Module):
    """Scaled dot-product attention with dropout applied to the attention map."""

    def __init__(self, p=0.1):
        super(Attention, self).__init__()
        self.dropout = nn.Dropout(p=p)

    def forward(self, query, key, value):
        # scores: (..., n_q, n_k), scaled by sqrt(d_k) as in Vaswani et al.
        d_k = query.size(-1)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
        attn_weights = self.dropout(F.softmax(scores, dim=-1))
        context = torch.matmul(attn_weights, value)
        return context, attn_weights
class Vec2Patch(nn.Module):
    """Project token vectors back to flattened patches and fold them into a
    (B, channel, H, W) feature map via torch.nn.Fold."""

    def __init__(self, channel, hidden, output_size, kernel_size, stride, padding):
        super(Vec2Patch, self).__init__()
        self.relu = nn.LeakyReLU(0.2, inplace=True)
        # Each patch flattens to channel * prod(kernel_size) values.
        c_out = channel
        for k in kernel_size:
            c_out *= k
        self.embedding = nn.Linear(hidden, c_out)
        self.to_patch = torch.nn.Fold(output_size=output_size,
                                      kernel_size=kernel_size,
                                      stride=stride, padding=padding)

    def forward(self, x):
        # (B, N, hidden) -> (B, N, c_out) -> (B, c_out, N) -> folded image.
        patches = self.embedding(x).permute(0, 2, 1)
        return self.to_patch(patches)
class MultiHeadedAttention(nn.Module):
    """
    Take in model size and number of heads.

    mode 's': spatial attention over the tokens of each frame independently.
    mode 't': temporal attention -- the h x w token grid is split into 2x2
    quadrants and attention runs across all frames within each quadrant.
    """

    def __init__(self, tokensize, d_model, head, mode, p=0.1):
        super().__init__()
        self.mode = mode
        self.query_embedding = nn.Linear(d_model, d_model)
        self.value_embedding = nn.Linear(d_model, d_model)
        self.key_embedding = nn.Linear(d_model, d_model)
        self.output_linear = nn.Linear(d_model, d_model)
        self.attention = Attention(p=p)
        self.head = head
        # Token-grid height/width; the mode 't' reshapes require both even.
        (self.h, self.w) = tokensize

    def forward(self, x, t):
        # x: (B*T, N, C) token sequence; t: frames per clip.
        (bt, n, c) = x.size()
        b = (bt // t)
        c_h = (c // self.head)  # per-head channel size
        key = self.key_embedding(x)
        query = self.query_embedding(x)
        value = self.value_embedding(x)
        if (self.mode == 's'):
            # Spatial: heads attend over the N tokens of each frame.
            key = key.view(b, t, n, self.head, c_h).permute(0, 1, 3, 2, 4)
            query = query.view(b, t, n, self.head, c_h).permute(0, 1, 3, 2, 4)
            value = value.view(b, t, n, self.head, c_h).permute(0, 1, 3, 2, 4)
            (att, _) = self.attention(query, key, value)
            att = att.permute(0, 1, 3, 2, 4).contiguous().view(bt, n, c)
        elif (self.mode == 't'):
            # Temporal: factor the grid into 2x2 quadrants (4 groups per
            # sample) and attend over t * (h/2) * (w/2) tokens per group.
            key = key.view(b, t, 2, (self.h // 2), 2, (self.w // 2), self.head, c_h)
            key = key.permute(0, 2, 4, 6, 1, 3, 5, 7).contiguous().view(b, 4, self.head, (- 1), c_h)
            query = query.view(b, t, 2, (self.h // 2), 2, (self.w // 2), self.head, c_h)
            query = query.permute(0, 2, 4, 6, 1, 3, 5, 7).contiguous().view(b, 4, self.head, (- 1), c_h)
            value = value.view(b, t, 2, (self.h // 2), 2, (self.w // 2), self.head, c_h)
            value = value.permute(0, 2, 4, 6, 1, 3, 5, 7).contiguous().view(b, 4, self.head, (- 1), c_h)
            (att, _) = self.attention(query, key, value)
            # Undo the quadrant grouping back to (B*T, N, C).
            att = att.view(b, 2, 2, self.head, t, (self.h // 2), (self.w // 2), c_h)
            att = att.permute(0, 4, 1, 5, 2, 6, 3, 7).contiguous().view(bt, n, c)
        output = self.output_linear(att)
        return output
class FeedForward(nn.Module):
    """Position-wise feed-forward net: Linear -> ReLU -> Dropout -> Linear ->
    Dropout, with a 4x hidden expansion."""

    def __init__(self, d_model, p=0.1):
        super(FeedForward, self).__init__()
        # Attribute is named `conv` to keep checkpoint keys stable.
        self.conv = nn.Sequential(
            nn.Linear(d_model, d_model * 4),
            nn.ReLU(inplace=True),
            nn.Dropout(p=p),
            nn.Linear(d_model * 4, d_model),
            nn.Dropout(p=p))

    def forward(self, x):
        return self.conv(x)
class TransformerBlock(nn.Module):
    """Pre-norm transformer block: LayerNorm -> attention (+ residual), then
    LayerNorm -> feed-forward (+ residual).

    Input/output is a dict {'x': tokens, 't': temporal length} so blocks can
    be chained with nn.Sequential. Note the first residual adds onto the
    *normalized* tokens, matching the original implementation.
    """

    def __init__(self, tokensize, hidden=128, num_head=4, mode='s', dropout=0.1):
        super().__init__()
        self.attention = MultiHeadedAttention(tokensize, d_model=hidden,
                                              head=num_head, mode=mode, p=dropout)
        self.ffn = FeedForward(hidden, p=dropout)
        self.norm1 = nn.LayerNorm(hidden)
        self.norm2 = nn.LayerNorm(hidden)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, input):
        tokens, t = input['x'], input['t']
        tokens = self.norm1(tokens)
        tokens = tokens + self.dropout(self.attention(tokens, t))
        normed = self.norm2(tokens)
        tokens = tokens + self.ffn(normed)
        return {'x': tokens, 't': t}
class Discriminator(BaseNetwork):
    # Spatio-temporal 3D-conv discriminator for video: five (optionally
    # spectrally-normalized) Conv3d stages that downsample space (stride 2)
    # while keeping temporal stride 1.
    def __init__(self, in_channels=3, use_sigmoid=False, use_spectral_norm=True, init_weights=True):
        super(Discriminator, self).__init__()
        self.use_sigmoid = use_sigmoid
        nf = 32  # base feature width
        # bias is disabled when spectral norm is on (norm makes it redundant).
        self.conv = nn.Sequential(
            spectral_norm(nn.Conv3d(in_channels=in_channels, out_channels=(nf * 1), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=1, bias=(not use_spectral_norm)), use_spectral_norm),
            nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv3d((nf * 1), (nf * 2), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), bias=(not use_spectral_norm)), use_spectral_norm),
            nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv3d((nf * 2), (nf * 4), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), bias=(not use_spectral_norm)), use_spectral_norm),
            nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv3d((nf * 4), (nf * 4), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), bias=(not use_spectral_norm)), use_spectral_norm),
            nn.LeakyReLU(0.2, inplace=True),
            spectral_norm(nn.Conv3d((nf * 4), (nf * 4), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2), bias=(not use_spectral_norm)), use_spectral_norm),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv3d((nf * 4), (nf * 4), kernel_size=(3, 5, 5), stride=(1, 2, 2), padding=(1, 2, 2)))
        if init_weights:
            self.init_weights()

    def forward(self, xs):
        # xs: (T, C, H, W) frames -- transposed to (C, T, H, W) and given a
        # batch dim so Conv3d sees (1, C, T, H, W).
        xs_t = torch.transpose(xs, 0, 1)
        xs_t = xs_t.unsqueeze(0)
        feat = self.conv(xs_t)
        if self.use_sigmoid:
            feat = torch.sigmoid(feat)
        # Swap channel and temporal dims back before returning.
        out = torch.transpose(feat, 1, 2)
        return out
def spectral_norm(module, mode=True):
    """Optionally wrap `module` with spectral normalization.

    When `mode` is falsy the module is returned untouched, which lets call
    sites toggle normalization with a single flag.
    """
    return _spectral_norm(module) if mode else module
def main_worker(rank, config):
    # Per-process training entry point (spawned once per GPU when distributed).
    # `rank` is this process's index; `config` (dict) is mutated in place.
    # NOTE(review): relies on a module-level `args` (argparse result) for the
    # config file path, and on `Trainer`/`copyfile` from module scope --
    # confirm those are defined in the launching script.
    if ('local_rank' not in config):
        # Single-node spawn: local and global rank coincide.
        config['local_rank'] = config['global_rank'] = rank
    if config['distributed']:
        torch.cuda.set_device(int(config['local_rank']))
        torch.distributed.init_process_group(backend='nccl', init_method=config['init_method'], world_size=config['world_size'], rank=config['global_rank'], group_name='mtorch')
        print('using GPU {}-{} for training'.format(int(config['global_rank']), int(config['local_rank'])))
    # Checkpoints go under <save_dir>/<net-name>_<config-basename>.
    config['save_dir'] = os.path.join(config['save_dir'], '{}_{}'.format(config['model']['net'], os.path.basename(args.config).split('.')[0]))
    if torch.cuda.is_available():
        config['device'] = torch.device('cuda:{}'.format(config['local_rank']))
    else:
        config['device'] = 'cpu'
    # Only rank 0 (or the sole process) creates the save dir and copies the
    # config file alongside the checkpoints for reproducibility.
    if ((not config['distributed']) or (config['global_rank'] == 0)):
        os.makedirs(config['save_dir'], exist_ok=True)
        config_path = os.path.join(config['save_dir'], args.config.split('/')[(- 1)])
        if (not os.path.isfile(config_path)):
            copyfile(args.config, config_path)
        print('[**] create folder {}'.format(config['save_dir']))
    trainer = Trainer(config)
    trainer.train()
def find_dataset_using_name(dataset_name):
    """Import the module "data/[dataset_name]_dataset.py".

    In the file, the class called DatasetNameDataset() will
    be instantiated. It has to be a subclass of BaseDataset,
    and it is case-insensitive.
    """
    dataset_filename = 'data.' + dataset_name + '_dataset'
    datasetlib = importlib.import_module(dataset_filename)
    target_dataset_name = dataset_name.replace('_', '') + 'dataset'
    dataset = None
    # Scan the module namespace for a case-insensitive name match that is
    # also a BaseDataset subclass; the last match wins.
    for name, cls in datasetlib.__dict__.items():
        name_matches = name.lower() == target_dataset_name.lower()
        if name_matches and issubclass(cls, BaseDataset):
            dataset = cls
    if dataset is None:
        raise NotImplementedError(('In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase.' % (dataset_filename, target_dataset_name)))
    return dataset
def get_option_setter(dataset_name):
    """Return the static method <modify_commandline_options> of the dataset class."""
    return find_dataset_using_name(dataset_name).modify_commandline_options
def create_dataset(opt):
    """Create a dataset given the option.

    This function wraps the class CustomDatasetDataLoader.
    This is the main interface between this package and 'train.py'/'test.py'

    Example:
        >>> from data import create_dataset
        >>> dataset = create_dataset(opt)
    """
    loader = CustomDatasetDataLoader(opt)
    return loader.load_data()
class CustomDatasetDataLoader():
    """Wrapper class of Dataset class that performs multi-threaded data loading."""

    def __init__(self, opt):
        """Create the dataset named by opt.dataset_mode and wrap it in a
        multi-worker torch DataLoader.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        self.opt = opt
        dataset_class = find_dataset_using_name(opt.dataset_mode)
        self.dataset = dataset_class(opt)
        print(('dataset [%s] was created' % type(self.dataset).__name__))
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=opt.batch_size,
            shuffle=(not opt.serial_batches),
            num_workers=int(opt.num_threads))

    def load_data(self):
        return self

    def __len__(self):
        """Return the number of data in the dataset, capped at opt.max_dataset_size."""
        return min(len(self.dataset), self.opt.max_dataset_size)

    def __iter__(self):
        """Yield batches, stopping once max_dataset_size samples have been served."""
        for batch_index, batch in enumerate(self.dataloader):
            if batch_index * self.opt.batch_size >= self.opt.max_dataset_size:
                break
            yield batch
class AlignedDataset(BaseDataset):
    """A dataset class for paired image dataset.

    It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
    During test time, you need to prepare a directory '/path/to/data/test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)
        self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size))
        assert (self.opt.load_size >= self.opt.crop_size)
        # Swap input/output channel counts when translating B -> A.
        btoa = self.opt.direction == 'BtoA'
        self.input_nc = self.opt.output_nc if btoa else self.opt.input_nc
        self.output_nc = self.opt.input_nc if btoa else self.opt.output_nc

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) - - an image in the input domain
            B (tensor) - - its corresponding image in the target domain
            A_paths (str) - - image paths
            B_paths (str) - - image paths (same as A_paths)
        """
        AB_path = self.AB_paths[index]
        combined = Image.open(AB_path).convert('RGB')
        # Each stored image holds A and B side by side; split at the midpoint.
        full_w, full_h = combined.size
        half_w = int(full_w / 2)
        img_A = combined.crop((0, 0, half_w, full_h))
        img_B = combined.crop((half_w, 0, full_w, full_h))
        # One shared set of random params so A and B get identical crops/flips.
        transform_params = get_params(self.opt, img_A.size)
        A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
        B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
        return {'A': A_transform(img_A), 'B': B_transform(img_B),
                'A_paths': AB_path, 'B_paths': AB_path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.AB_paths)
class BaseDataset(data.Dataset, ABC):
    """This class is an abstract base class (ABC) for datasets.

    To create a subclass, you need to implement the following four functions:
    -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
    -- <__len__>: return the size of dataset.
    -- <__getitem__>: get a data point.
    -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
    """

    def __init__(self, opt):
        """Initialize the class; save the options in the class

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        self.opt = opt
        self.root = opt.dataroot

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add new dataset-specific options, and rewrite default values for existing options.

        Parameters:
            parser -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.

        Returns:
            the modified parser.
        """
        # Default: no dataset-specific options; subclasses may override.
        return parser

    @abstractmethod
    def __len__(self):
        """Return the total number of images in the dataset."""
        return 0

    @abstractmethod
    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns:
            a dictionary of data with their names. It usually contains the data itself and its metadata information.
        """
        pass
def get_params(opt, size):
    """Pick a random crop position and horizontal-flip flag for an image.

    Parameters:
        opt  -- options object with preprocess, load_size and crop_size
        size -- (width, height) of the source image

    Returns a dict with 'crop_pos' (x, y) and 'flip' (bool).
    """
    w, h = size
    if opt.preprocess == 'resize_and_crop':
        new_w = new_h = opt.load_size
    elif opt.preprocess == 'scale_width_and_crop':
        # Width is forced to load_size; height scales to keep aspect ratio.
        new_w = opt.load_size
        new_h = opt.load_size * h // w
    else:
        new_w, new_h = w, h
    x = random.randint(0, np.maximum(0, new_w - opt.crop_size))
    y = random.randint(0, np.maximum(0, new_h - opt.crop_size))
    flip = random.random() > 0.5
    return {'crop_pos': (x, y), 'flip': flip}
def get_transform(opt, params=None, grayscale=False, method=Image.BICUBIC, convert=True):
    """Assemble the preprocessing pipeline described by opt.preprocess.

    With params=None the random ops (crop/flip) draw their own randomness;
    with params from get_params they are deterministic, so paired images can
    share identical transforms.
    """
    tf_list = []
    if grayscale:
        tf_list.append(transforms.Grayscale(1))
    if 'resize' in opt.preprocess:
        tf_list.append(transforms.Resize([opt.load_size, opt.load_size], method))
    elif 'scale_width' in opt.preprocess:
        tf_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.load_size, method)))
    if 'crop' in opt.preprocess:
        if params is None:
            tf_list.append(transforms.RandomCrop(opt.crop_size))
        else:
            tf_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))
    if opt.preprocess == 'none':
        # No resizing requested: just pad/shrink sides to multiples of 4.
        tf_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))
    if not opt.no_flip:
        if params is None:
            tf_list.append(transforms.RandomHorizontalFlip())
        elif params['flip']:
            tf_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
    if convert:
        tf_list.append(transforms.ToTensor())
        # Normalize to [-1, 1] per channel.
        if grayscale:
            tf_list.append(transforms.Normalize((0.5,), (0.5,)))
        else:
            tf_list.append(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)))
    return transforms.Compose(tf_list)
def __make_power_2(img, base, method=Image.BICUBIC):
    """Resize img so both sides are multiples of `base` (rounded), warning once."""
    ow, oh = img.size
    w = int(round(ow / base) * base)
    h = int(round(oh / base) * base)
    if (w, h) == (ow, oh):
        return img
    __print_size_warning(ow, oh, w, h)
    return img.resize((w, h), method)
def __scale_width(img, target_width, method=Image.BICUBIC):
    """Scale img to `target_width`, preserving aspect ratio (no-op if already there)."""
    ow, oh = img.size
    if ow == target_width:
        return img
    new_h = int(target_width * oh / ow)
    return img.resize((target_width, new_h), method)
def __crop(img, pos, size):
    """Crop a size x size patch at `pos`; returns img unchanged when it is
    already no larger than the crop window."""
    ow, oh = img.size
    left, top = pos
    if ow <= size and oh <= size:
        return img
    return img.crop((left, top, left + size, top + size))
def __flip(img, flip):
    """Horizontally mirror img when `flip` is truthy."""
    return img.transpose(Image.FLIP_LEFT_RIGHT) if flip else img
def __print_size_warning(ow, oh, w, h):
    """Print warning information about image size(only print once)"""
    # A function attribute acts as the "already printed" latch.
    if hasattr(__print_size_warning, 'has_printed'):
        return
    print(('The image size needs to be a multiple of 4. The loaded image size was (%d, %d), so it was adjusted to (%d, %d). This adjustment will be done to all images whose sizes are not multiples of 4' % (ow, oh, w, h)))
    __print_size_warning.has_printed = True
def is_image_file(filename):
    """Return True if `filename` ends with one of the extensions in IMG_EXTENSIONS."""
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(dir, max_dataset_size=float('inf')):
    """Recursively collect image paths under `dir`, capped at max_dataset_size."""
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    images = []
    # Sorting os.walk keeps directory traversal order deterministic.
    for root, _, fnames in sorted(os.walk(dir)):
        images.extend(os.path.join(root, fname)
                      for fname in fnames if is_image_file(fname))
    return images[:min(max_dataset_size, len(images))]
def default_loader(path):
    """Load the image at `path` and force it to RGB."""
    image = Image.open(path)
    return image.convert('RGB')
class ImageFolder(data.Dataset):
    """Dataset over every image found (recursively) under a root directory."""

    def __init__(self, root, transform=None, return_paths=False, loader=default_loader):
        imgs = make_dataset(root)
        if not imgs:
            raise RuntimeError(((('Found 0 images in: ' + root) + '\nSupported image extensions are: ') + ','.join(IMG_EXTENSIONS)))
        self.root = root
        self.imgs = imgs
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        path = self.imgs[index]
        img = self.loader(path)
        if self.transform is not None:
            img = self.transform(img)
        return (img, path) if self.return_paths else img

    def __len__(self):
        return len(self.imgs)
class SingleDataset(BaseDataset):
    """This dataset class can load a set of images specified by the path --dataroot /path/to/data.

    It can be used for generating CycleGAN results only for one side with the model option '-model test'.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.A_paths = sorted(make_dataset(opt.dataroot, opt.max_dataset_size))
        # Channel count swaps when the translation direction is B -> A.
        btoa = self.opt.direction == 'BtoA'
        input_nc = self.opt.output_nc if btoa else self.opt.input_nc
        self.transform = get_transform(opt, grayscale=(input_nc == 1))

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index - - a random integer for data indexing

        Returns a dictionary that contains A and A_paths
            A(tensor) - - an image in one domain
            A_paths(str) - - the path of the image
        """
        path = self.A_paths[index]
        image = Image.open(path).convert('RGB')
        return {'A': self.transform(image), 'A_paths': path}

    def __len__(self):
        """Return the total number of images in the dataset."""
        return len(self.A_paths)
def find_model_using_name(model_name):
    """Import the module "models/[model_name]_model.py".

    In the file, the class called DatasetNameModel() will
    be instantiated. It has to be a subclass of BaseModel,
    and it is case-insensitive.

    Raises:
        NotImplementedError: if the module defines no matching BaseModel subclass.
    """
    model_filename = ('models.' + model_name) + '_model'
    modellib = importlib.import_module(model_filename)
    model = None
    target_model_name = model_name.replace('_', '') + 'model'
    # Case-insensitive scan of the module namespace; the last match wins.
    for (name, cls) in modellib.__dict__.items():
        if (name.lower() == target_model_name.lower()) and issubclass(cls, BaseModel):
            model = cls
    if model is None:
        # Bug fix: the original printed the message and called exit(0), which
        # terminated the whole process with a SUCCESS status on an error path.
        # Raise instead, matching the sibling find_dataset_using_name.
        raise NotImplementedError(('In %s.py, there should be a subclass of BaseModel with class name that matches %s in lowercase.' % (model_filename, target_model_name)))
    return model
def get_option_setter(model_name):
    """Return the static method <modify_commandline_options> of the model class."""
    return find_model_using_name(model_name).modify_commandline_options