def show_running(func):
    @wraps(func)
    def g(*args, **kargs):
        x = WaitPrint(2, '{}({})... '.format(
            func.__name__,
            ', '.join([repr(x) for x in args] +
                      ['{}={}'.format(key, repr(value)) for (key, value) in kargs.items()])))
        x.start()
        t = time.perf_counter()
        r = func(*args, **kargs)
        if x.is_alive():
            x.stop()
        else:
            print('done in {:.0f} seconds'.format(time.perf_counter() - t))
        return r
    return g
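# Hedged usage sketch (not part of the original source): how the show_running
# decorator above might be applied. 'expensive_op' and its arguments are made up
# for illustration; WaitPrint is assumed to be the repo's own progress-printing helper.
@show_running
def expensive_op(n, scale=1.0):
    return sum(i * scale for i in range(n))

# While the call runs, a "expensive_op(1000000, scale=2.0)... " message is shown,
# followed by timing information once the wrapped function returns.
result = expensive_op(1000000, scale=2.0)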
def cached_dirpklgz(dirname):
    '''
    Cache a function with a directory
    '''
    def decorator(func):
        '''
        The actual decorator
        '''
        @lru_cache(maxsize=None)
        @wraps(func)
        def wrapper(*args):
            '''
            The wrapper of the function
            '''
            try:
                os.makedirs(dirname)
            except FileExistsError:
                pass
            indexfile = os.path.join(dirname, 'index.pkl')
            try:
                with open(indexfile, 'rb') as file:
                    index = pickle.load(file)
            except FileNotFoundError:
                index = {}
            try:
                filename = index[args]
            except KeyError:
                index[args] = filename = '{}.pkl.gz'.format(len(index))
                with open(indexfile, 'wb') as file:
                    pickle.dump(index, file)
            filepath = os.path.join(dirname, filename)
            try:
                with gzip.open(filepath, 'rb') as file:
                    print('load {}... '.format(filename), end='')
                    result = pickle.load(file)
            except FileNotFoundError:
                print('compute {}... '.format(filename), end='')
                sys.stdout.flush()
                result = func(*args)
                print('save {}... '.format(filename), end='')
                with gzip.open(filepath, 'wb') as file:
                    pickle.dump(result, file)
            print('done')
            return result
        return wrapper
    return decorator
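# Hedged usage sketch (not part of the original source): applying the
# cached_dirpklgz decorator above. The cache directory and the decorated
# function are made up for illustration; each distinct argument tuple is
# pickled to a gzipped file in that directory and reused on later calls.
@cached_dirpklgz('cache/squares')
def square(n):
    return n * n

square(12)  # first call: "compute 0.pkl.gz... save 0.pkl.gz... done"
square(12)  # repeated call: answered from the lru_cache / on-disk cache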
def test_so3_rfft(b_in, b_out, device):
    x = torch.randn(2 * b_in, 2 * b_in, 2 * b_in, dtype=torch.float, device=device)

    from s2cnn.soft.so3_fft import so3_rfft
    y1 = so3_rfft(x, b_out=b_out)

    from s2cnn import so3_rft, so3_soft_grid
    import lie_learn.spaces.S3 as S3
    weights = torch.tensor(S3.quadrature_weights(b_in), dtype=torch.float, device=device)
    x2 = torch.einsum('bac,b->bac', (x, weights))
    y2 = so3_rft(x2.view(-1), b_out, so3_soft_grid(b_in))

    assert (y1 - y2).abs().max().item() < 0.0001 * y1.abs().mean().item()

def test_inverse(f, g, b_in, b_out, device, complex):
    if complex:
        x = torch.randn(2 * b_in, 2 * b_in, 2 * b_in, 2, dtype=torch.float, device=device)
    else:
        x = torch.randn(2 * b_in, 2 * b_in, 2 * b_in, dtype=torch.float, device=device)

    x = g(f(x, b_out=b_out), b_out=b_in)
    y = g(f(x, b_out=b_out), b_out=b_in)

    assert (x - y).abs().max().item() < 0.0001 * y.abs().mean().item()

def test_inverse2(f, g, b_in, b_out, device):
    x = torch.randn(b_in * (4 * b_in ** 2 - 1) // 3, 2, dtype=torch.float, device=device)

    x = g(f(x, b_out=b_out), b_out=b_in)
    y = g(f(x, b_out=b_out), b_out=b_in)

    assert (x - y).abs().max().item() < 0.0001 * y.abs().mean().item()

def compare_cpu_gpu(f, x):
    z1 = f(x.cpu())
    z2 = f(x.cuda()).cpu()
    q = (z1 - z2).abs().max().item() / z1.std().item()
    assert q < 0.0001

def get_test_results(logfile):
    perf = {}
    with open(logfile, 'r') as f:
        prevline = ''
        for line in f.readlines():
            if 'DATALOADER:0 TEST RESULTS' in prevline:
                perf = eval(line)
            prevline = line
    return perf
class CLI(LightningCLI):
    def __init__(self, model_class, run=True, **kwargs):
        trainer_defaults = {'default_config_files': [os.path.join('perceiver', 'trainer.yaml')]}
        super().__init__(model_class,
                         run=run,
                         save_config_overwrite=True,
                         parser_kwargs={'fit': trainer_defaults,
                                        'test': trainer_defaults,
                                        'validate': trainer_defaults},
                         **kwargs)

    def instantiate_trainer(self, **kwargs: Any) -> Trainer:
        if self.subcommand:
            cfg = self.config_init[self.subcommand]
        else:
            cfg = self.config_init
        if cfg['trainer']['strategy'] == 'ddp_static_graph':
            cfg['trainer']['strategy'] = DDPStaticGraphPlugin(find_unused_parameters=False)
        return super().instantiate_trainer(logger=cfg['logger'], **kwargs)

    def add_default_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        super().add_default_arguments_to_parser(parser)
        parser.add_argument('--experiment', default='default')

    def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None:
        parser.add_class_arguments(TensorBoardLogger, 'logger')
        parser.link_arguments('trainer.default_root_dir', 'logger.save_dir', apply_on='parse')
        parser.link_arguments('experiment', 'logger.name', apply_on='parse')
        parser.add_optimizer_args(torch.optim.AdamW, link_to='model.optimizer_init')
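# Hedged usage sketch (not part of the original source): a typical entry point
# built on the CLI wrapper above. LitImageClassifier is one of the LightningModules
# appearing elsewhere in this repo; the datamodule and remaining arguments would
# normally come from the command line or the YAML configs under 'perceiver/'.
if __name__ == '__main__':
    CLI(LitImageClassifier)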
class DDPStaticGraphPlugin(DDPPlugin):
    def _setup_model(self, model):
        wrapped = super()._setup_model(model)
        wrapped._set_static_graph()
        return wrapped

def load_split(root, split):
    if split not in ['train', 'test']:
        raise ValueError(f'invalid split: {split}')

    raw_x = []
    raw_y = []
    for i, label in enumerate(['neg', 'pos']):
        path_pattern = os.path.join(root, f'IMDB/aclImdb/{split}/{label}', '*.txt')
        for name in glob.glob(path_pattern):
            with open(name, encoding='utf-8') as f:
                raw_x.append(f.read())
                raw_y.append(i)
    return raw_x, raw_y

class IMDBDataset(Dataset):
    def __init__(self, root, split):
        self.raw_x, self.raw_y = load_split(root, split)

    def __len__(self):
        return len(self.raw_x)

    def __getitem__(self, index):
        return self.raw_y[index], self.raw_x[index]
@DATAMODULE_REGISTRY class IMDBDataModule(pl.LightningDataModule): def __init__(self, data_dir: str='.cache', vocab_size: int=10003, max_seq_len: int=512, batch_size: int=64, num_workers: int=3, pin_memory: bool=False): super().__init__() self.save_hyperparameters() self.tokenizer_path = os.path.join(data_dir, f'imdb-tokenizer-{vocab_size}.json') self.tokenizer = None self.collator = None self.ds_train = None self.ds_valid = None self.vocab_size = vocab_size self.max_seq_len = max_seq_len def prepare_data(self, *args, **kwargs): if (not os.path.exists(os.path.join(self.hparams.data_dir, 'IMDB'))): IMDB(root=self.hparams.data_dir) if (not os.path.exists(self.tokenizer_path)): (raw_x, _) = load_split(root=self.hparams.data_dir, split='train') tokenizer = create_tokenizer(Replace('<br />', ' ')) train_tokenizer(tokenizer, data=raw_x, vocab_size=self.hparams.vocab_size) save_tokenizer(tokenizer, self.tokenizer_path) def setup(self, stage=None): self.tokenizer = load_tokenizer(self.tokenizer_path) self.collator = TextCollator(self.tokenizer, self.hparams.max_seq_len) self.ds_train = IMDBDataset(root=self.hparams.data_dir, split='train') self.ds_valid = IMDBDataset(root=self.hparams.data_dir, split='test') def train_dataloader(self): return DataLoader(self.ds_train, shuffle=True, collate_fn=self.collator.collate, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, pin_memory=self.hparams.pin_memory) def val_dataloader(self): return DataLoader(self.ds_valid, shuffle=False, collate_fn=self.collator.collate, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, pin_memory=self.hparams.pin_memory)
@DATAMODULE_REGISTRY class MNISTDataModule(mnist_datamodule.MNISTDataModule): def __init__(self, channels_last: bool=True, random_crop: Optional[int]=None, data_dir: Optional[str]='.cache', val_split: Union[(int, float)]=10000, num_workers: int=3, normalize: bool=True, pin_memory: bool=False, *args, **kwargs): super().__init__(*args, data_dir=data_dir, val_split=val_split, num_workers=num_workers, normalize=normalize, pin_memory=pin_memory, **kwargs) self.save_hyperparameters() self._image_shape = super().dims if channels_last: self._image_shape = (self._image_shape[1], self._image_shape[2], self._image_shape[0]) @property def image_shape(self): return self._image_shape def default_transforms(self) -> Callable: return mnist_transform(normalize=self.hparams.normalize, channels_last=self.hparams.channels_last, random_crop=self.hparams.random_crop)
class MNISTPreprocessor:
    def __init__(self, transform=None):
        if transform is None:
            self.transform = mnist_transform()
        else:
            self.transform = transform

    def preprocess(self, img):
        return self.transform(img)

    def preprocess_batch(self, img_batch):
        return torch.stack([self.preprocess(img) for img in img_batch])

def mnist_transform(normalize: bool = True, channels_last: bool = True, random_crop: Optional[int] = None):
    transform_list = []
    if random_crop:
        transform_list.append(transforms.RandomCrop(random_crop))
    transform_list.append(transforms.ToTensor())
    if normalize:
        transform_list.append(transforms.Normalize(mean=(0.5,), std=(0.5,)))
    if channels_last:
        transform_list.append(channels_to_last)
    return transforms.Compose(transform_list)
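# Hedged sketch (illustrative only, not from the original source): applying the
# MNIST transform above to a single grayscale PIL image produces a (28, 28, 1)
# channels-last tensor scaled to [-1, 1] by the Normalize(mean=0.5, std=0.5) step.
from PIL import Image
import numpy as np

transform = mnist_transform(normalize=True, channels_last=True)
img = Image.fromarray(np.zeros((28, 28), dtype=np.uint8))  # dummy image
x = transform(img)
assert x.shape == (28, 28, 1)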
def channels_to_last(img: torch.Tensor):
    return img.permute(1, 2, 0).contiguous()
@DATAMODULE_REGISTRY class CIFAR100DataModule(pl.LightningDataModule): def __init__(self, channels_last: bool=True, random_crop: Optional[int]=None, data_dir: Optional[str]='.cache', val_split: Union[(int, float)]=10000, num_workers: int=3, batch_size: int=64, normalize: bool=True, pin_memory: bool=False, *args, **kwargs): super().__init__() self.save_hyperparameters() self._image_shape = [3, 32, 32] self.num_classes = 100 self.val_split = val_split self.batch_size = batch_size if channels_last: self._image_shape = (self._image_shape[1], self._image_shape[2], self._image_shape[0]) def prepare_data(self): datasets.CIFAR100(root='../datasets/cifar-100', train=True, download=True, transform=self.default_transforms()) datasets.CIFAR100(root='../datasets/cifar-100', train=False, download=True, transform=self.default_transforms()) def setup(self, stage): self.cifar_train = Subset(datasets.CIFAR100(root='../datasets/cifar-100', train=True, download=True, transform=self.default_transforms()), np.arange(50000)[:(- self.val_split)]) self.cifar_val = Subset(datasets.CIFAR100(root='../datasets/cifar-100', train=True, download=True, transform=self.default_transforms()), np.arange(50000)[(- self.val_split):]) self.cifar_test = datasets.CIFAR100(root='../datasets/cifar-100', train=False, download=True, transform=self.default_transforms()) def train_dataloader(self): cifar_train = DataLoader(self.cifar_train, batch_size=self.batch_size, shuffle=True, num_workers=8) return cifar_train def val_dataloader(self): cifar_val = DataLoader(self.cifar_val, batch_size=self.batch_size, shuffle=False, num_workers=8) return cifar_val def test_dataloader(self): return DataLoader(self.cifar_test, batch_size=self.batch_size, shuffle=False, num_workers=8) @property def image_shape(self): return self._image_shape def default_transforms(self) -> Callable: return cifar100_transform(normalize=self.hparams.normalize, channels_last=self.hparams.channels_last, random_crop=self.hparams.random_crop)
def cifar100_transform(normalize: bool = True, channels_last: bool = True, random_crop: Optional[int] = None):
    transform_list = []
    if random_crop:
        transform_list.append(transforms.RandomCrop(random_crop))
    transform_list.append(transforms.ToTensor())
    if normalize:
        mean = [0.4914, 0.4822, 0.4465]
        std = [0.2023, 0.1994, 0.201]
        transform_list.append(transforms.Normalize(mean=mean, std=std))
    if channels_last:
        transform_list.append(channels_to_last)
    return transforms.Compose(transform_list)

def channels_to_last(img: torch.Tensor):
    return img.permute(1, 2, 0).contiguous()
@DATAMODULE_REGISTRY class CosmicDataModule(pl.LightningDataModule): def __init__(self, channels_last: bool=True, data_dir: Optional[str]='.cache', num_workers: int=3, batch_size: int=4, pin_memory: bool=False, root='../datasets', *args, **kwargs): super().__init__() self.save_hyperparameters() self._image_shape = [1, 128, 128] self.dense_pred_shape = (1, 128, 128) self.batch_size = batch_size self.root = root if channels_last: self._image_shape = (self._image_shape[1], self._image_shape[2], self._image_shape[0]) def prepare_data(self): load_cosmic_data(f'{self.root}/cosmic') def setup(self, stage): (self.cosmic_train, self.cosmic_val, self.cosmic_test) = load_cosmic_data(f'{self.root}/cosmic') def train_dataloader(self): dl = DataLoader(self.cosmic_train, batch_size=self.batch_size, shuffle=True, num_workers=0) print(len(dl)) return dl def val_dataloader(self): dl = DataLoader(self.cosmic_val, batch_size=self.batch_size, shuffle=False, num_workers=0) return dl def test_dataloader(self): dl = DataLoader(self.cosmic_test, batch_size=self.batch_size, shuffle=False, num_workers=0) return dl @property def image_shape(self): return self._image_shape def default_transforms(self) -> Callable: return cosmic_transform()
class ToChannelsLast:
    def __call__(self, x):
        if x.ndim == 3:
            x = x.unsqueeze(0)
        elif x.ndim != 4:
            raise RuntimeError
        return x.to(memory_format=torch.channels_last)

    def __repr__(self):
        return self.__class__.__name__ + '()'

def cosmic_transform():
    transform_list = []
    transform_list.append(transforms.ToTensor())
    transform_list.append(ToChannelsLast())
    return transforms.Compose(transform_list)
def load_cosmic_data(path):
    print(path)
    train_dirs = np.load(os.path.join(path, 'train_dirs.npy'), allow_pickle=True)
    test_dirs = np.load(os.path.join(path, 'test_dirs.npy'), allow_pickle=True)
    if path == 'datasets/cosmic':
        # drop the leading '../' prefix when data sits relative to the repo root
        train_dirs = [td[3:] for td in train_dirs]
        test_dirs = [td[3:] for td in test_dirs]
    aug_sky = (-0.9, 3)
    print(train_dirs[0])
    trainvalset_full = PairedDatasetImagePath(train_dirs[:], aug_sky[0], aug_sky[1], part='train')
    # held-out test split built from the test directories
    testset = PairedDatasetImagePath(test_dirs[:], aug_sky[0], aug_sky[1], part='None')
    train_val_size = len(trainvalset_full)
    val_size = train_val_size // 10
    trainset = Subset(trainvalset_full, np.arange(train_val_size)[:-val_size])
    valset = Subset(trainvalset_full, np.arange(train_val_size)[-val_size:])
    return trainset, valset, testset
@DATAMODULE_REGISTRY class FSD50KDataModule(pl.LightningDataModule): def __init__(self, channels_last: bool=True, random_crop: Optional[int]=None, data_dir: Optional[str]='.cache', num_workers: int=3, batch_size: int=64, normalize: bool=True, pin_memory: bool=False, root='../datasets', *args, **kwargs): super().__init__() self.save_hyperparameters() self._image_shape = [1, 96, 101] self.num_classes = 200 self.batch_size = batch_size self.root = root if channels_last: self._image_shape = (self._image_shape[1], self._image_shape[2], self._image_shape[0]) def prepare_data(self): pass def setup(self, stage): (self.audio_train, self.audio_val, self.audio_test) = load_audio(f'{self.root}/audio', feature='mel', train=True, root=self.root) def train_dataloader(self): return DataLoader(self.audio_train, collate_fn=_collate_fn, batch_size=self.batch_size, shuffle=True, num_workers=8) def val_dataloader(self): return DataLoader(self.audio_val, collate_fn=_collate_fn_eval, batch_size=self.batch_size, shuffle=False, num_workers=8) def test_dataloader(self): return DataLoader(self.audio_test, collate_fn=_collate_fn_eval, batch_size=self.batch_size, shuffle=False, num_workers=8) @property def image_shape(self): return self._image_shape def default_transforms(self) -> Callable: return audio_transform(channels_last=self.hparams.channels_last)
def audio_transform(channels_last: bool = True):
    transform_list = []

    def channels_to_last(img: torch.Tensor):
        return img.permute(1, 2, 0).contiguous()

    transform_list.append(transforms.ToTensor())
    if channels_last:
        transform_list.append(channels_to_last)
    return transforms.Compose(transform_list)
@DATAMODULE_REGISTRY class PSICOVDataModule(pl.LightningDataModule): def __init__(self, channels_last: bool=True, data_dir: Optional[str]='.cache', num_workers: int=3, batch_size: int=4, pin_memory: bool=False, root='../datasets', *args, **kwargs): super().__init__() self.save_hyperparameters() self._image_shape = [57, 128, 128] self.dense_pred_shape = (1, 128, 128) self.batch_size = batch_size self.root = root if channels_last: self._image_shape = (self._image_shape[1], self._image_shape[2], self._image_shape[0]) def prepare_data(self): load_psicov_data(f'{self.root}/psicov', self.batch_size) def setup(self, stage): (self.psicov_train, self.psicov_val, self.psicov_test, _, _) = load_psicov_data(f'{self.root}/psicov', self.batch_size) def train_dataloader(self): dl = DataLoader(self.psicov_train, batch_size=self.batch_size, shuffle=True, num_workers=0) print(len(dl)) return dl def val_dataloader(self): dl = DataLoader(self.psicov_val, batch_size=self.batch_size, shuffle=False, num_workers=0) return dl def test_dataloader(self): dl = DataLoader(self.psicov_test, batch_size=1, shuffle=False, num_workers=0) return dl @property def image_shape(self): return self._image_shape def default_transforms(self) -> Callable: return psicov_transform()
def psicov_transform():
    transform_list = []
    transform_list.append(transforms.ToTensor())
    return transforms.Compose(transform_list)
def load_psicov_data(path, batch_size): all_feat_paths = [f'{path}/deepcov/features/', f'{path}/psicov/features/', f'{path}/cameo/features/'] all_dist_paths = [f'{path}/deepcov/distance/', f'{path}/psicov/distance/', f'{path}/cameo/distance/'] deepcov_list = load_list(f'{path}/deepcov.lst', (- 1)) length_dict = {} for pdb in deepcov_list: (ly, seqy, cb_map) = np.load(((f'{path}/deepcov/distance/' + pdb) + '-cb.npy'), allow_pickle=True) length_dict[pdb] = ly print(len(deepcov_list)) train_pdbs = deepcov_list[100:] trainset = PDNetDataset(train_pdbs, all_feat_paths, all_dist_paths, 128, 10, batch_size, 57, label_engineering='16.0') valid_pdbs = deepcov_list[:100] validset = PDNetDataset(valid_pdbs, all_feat_paths, all_dist_paths, 128, 10, batch_size, 57, label_engineering='16.0') psicov_list = load_list(f'{path}/psicov.lst') psicov_length_dict = {} for pdb in psicov_list: (ly, seqy, cb_map) = np.load(((f'{path}/psicov/distance/' + pdb) + '-cb.npy'), allow_pickle=True) psicov_length_dict[pdb] = ly my_list = psicov_list length_dict = psicov_length_dict testset = PDNetDataset(my_list, all_feat_paths, all_dist_paths, 512, 10, 1, 57, label_engineering=None) return (trainset, validset, testset, my_list, length_dict)
class TextPreprocessor:
    def __init__(self, tokenizer_path: str, max_seq_len: int = 512):
        self.tokenizer = load_tokenizer(tokenizer_path)
        self.collator = TextCollator(self.tokenizer, max_seq_len)

    def preprocess(self, text):
        return self.preprocess_batch([text])[0][0]

    def preprocess_batch(self, text_batch):
        return self.collator.encode(text_batch)

class TextCollator:
    def __init__(self, tokenizer: Tokenizer, max_seq_len: int):
        self.pad_id = tokenizer.token_to_id(PAD_TOKEN)
        self.tokenizer = tokenizer
        self.tokenizer.enable_padding(pad_id=self.pad_id, pad_token=PAD_TOKEN)
        self.tokenizer.enable_truncation(max_length=max_seq_len)

    def collate(self, batch):
        ys, xs = zip(*batch)
        xs_ids = [x.ids for x in self.tokenizer.encode_batch(xs)]
        xs_ids = torch.tensor(xs_ids)
        pad_mask = xs_ids == self.pad_id
        return torch.tensor(ys), xs_ids, pad_mask

    def encode(self, text_batch):
        batch = [(0, text) for text in text_batch]
        return self.collate(batch)[1:]

def load_tokenizer(path):
    return Tokenizer.from_file(path)

def save_tokenizer(tokenizer: Tokenizer, path):
    tokenizer.save(path)

def train_tokenizer(tokenizer: Tokenizer, data: Iterable[str], vocab_size):
    trainer = WordPieceTrainer(vocab_size=vocab_size, special_tokens=[PAD_TOKEN, UNK_TOKEN, MASK_TOKEN])
    tokenizer.train_from_iterator(data, trainer)

def create_tokenizer(*normalizer: Normalizer):
    tokenizer = Tokenizer(WordPiece(unk_token=UNK_TOKEN))
    tokenizer.normalizer = Sequence(list(normalizer) + [NFD(), Lowercase(), StripAccents()])
    tokenizer.pre_tokenizer = Whitespace()
    tokenizer.decoder = decoders.WordPiece()
    return tokenizer
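# Hedged sketch (mirrors what IMDBDataModule.prepare_data does above; the data
# directory and vocabulary size are the defaults used there): building, training,
# persisting, and reloading a WordPiece tokenizer with the helpers defined here.
raw_x, _ = load_split(root='.cache', split='train')
tokenizer = create_tokenizer(Replace('<br />', ' '))
train_tokenizer(tokenizer, data=raw_x, vocab_size=10003)
save_tokenizer(tokenizer, '.cache/imdb-tokenizer-10003.json')
tokenizer = load_tokenizer('.cache/imdb-tokenizer-10003.json')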
class ImageClassifierCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'img_clf', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1})
class MaskedLanguageModelCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.add_lr_scheduler_args(torch.optim.lr_scheduler.OneCycleLR, link_to='model.scheduler_init') parser.link_arguments('trainer.max_steps', 'lr_scheduler.total_steps', apply_on='parse') parser.link_arguments('optimizer.lr', 'lr_scheduler.max_lr', apply_on='parse') parser.link_arguments('data.vocab_size', 'model.vocab_size', apply_on='instantiate') parser.link_arguments('data.max_seq_len', 'model.max_seq_len', apply_on='instantiate') parser.set_defaults({'experiment': 'mlm', 'lr_scheduler.pct_start': 0.1, 'lr_scheduler.cycle_momentum': False, 'model.num_latents': 64, 'model.num_latent_channels': 64, 'model.encoder.num_layers': 3, 'model.num_predictions': 5, 'model.masked_samples': ['I have watched this <MASK> and it was awesome', 'I have <MASK> this movie and <MASK> was really terrible']})
class ImageClassifierCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'cifar100', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1})
class DensePredictorCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.dense_pred_shape', 'model.dense_pred_shape', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'cosmic', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1, 'model.scorer': 'auroc', 'model.loss_fn': 'BCEWithLogitsLoss', 'model.cosmic': True})
class DensePredictorCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.dense_pred_shape', 'model.dense_pred_shape', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'darcyflow', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1, 'model.scorer': 'LpLoss', 'model.loss_fn': 'LpLoss', 'model.darcy': True})
class ImageClassifierCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'deepsea', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1, 'model.scorer': 'auroc', 'model.loss_fn': 'BCEWithLogitsLoss'})
class ImageClassifierCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'ecg', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1, 'model.scorer': 'f1_macro'})
class ImageClassifierCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'fsd50k', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1, 'model.scorer': 'map', 'model.loss_fn': 'BCEWithLogitsLoss'})
class ImageClassifierCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'ninapro', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1})
class DensePredictorCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.dense_pred_shape', 'model.dense_pred_shape', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'psicov', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1, 'model.scorer': 'MAE', 'model.loss_fn': 'MSELoss', 'model.psicov': True})
class ImageClassifierCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'satellite', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1})
class ImageClassifierCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.num_classes', 'model.num_classes', apply_on='instantiate') parser.link_arguments('data.image_shape', 'model.image_shape', apply_on='instantiate') parser.set_defaults({'experiment': 'spherical', 'model.num_frequency_bands': 32, 'model.num_latents': 32, 'model.num_latent_channels': 128, 'model.encoder.num_layers': 3, 'model.encoder.num_self_attention_layers_per_block': 3, 'model.decoder.num_cross_attention_heads': 1})
class TextClassifierCLI(CLI): def add_arguments_to_parser(self, parser: LightningArgumentParser) -> None: super().add_arguments_to_parser(parser) parser.link_arguments('data.vocab_size', 'model.vocab_size', apply_on='instantiate') parser.link_arguments('data.max_seq_len', 'model.max_seq_len', apply_on='instantiate') parser.set_defaults({'experiment': 'seq_clf', 'model.num_classes': 2, 'model.num_latents': 64, 'model.num_latent_channels': 64, 'model.encoder.num_layers': 3, 'model.decoder.num_cross_attention_heads': 1})
@task
def install(c):
    c.run('conda env update --prune -f environment.yml', pty=_use_pty())

@task
def precommit_install(c):
    c.run('pre-commit install', pty=_use_pty())

@task
def clean_cache(c):
    c.run("find . -name '*.pyc' -exec rm -f {} +")
    c.run("find . -name '*.pyo' -exec rm -f {} +")
    c.run("find . -name '*~' -exec rm -f {} +")
    c.run("find . -name '__pycache__' -exec rm -fr {} +")
    c.run('rm -fr .mypy_cache')

@task
def clean_test(c):
    c.run('rm -fr .tox/')
    c.run('rm -f .coverage')
    c.run('rm -fr htmlcov/')
    c.run('rm -fr .pytest_cache')

@task
def clean_build(c):
    c.run('rm -fr dist')

@task
def clean(c):
    clean_cache(c)
    clean_test(c)
    clean_build(c)

@task
def build(c):
    clean(c)
    c.run('poetry build', pty=_use_pty())

@task(aliases=['cc'])
def code_check(c):
    c.run('pre-commit run --all-files', pty=_use_pty())

@task
def test(c, cov=False, cov_report=None):
    _run_pytest(c, 'tests --durations=25 --color=yes', cov, cov_report, _use_pty())

def _use_pty():
    return platform != 'win32'

def _run_pytest(c, test_dir, cov=False, cov_report=None, pty=True):
    c.run(f'pytest {test_dir} {_pytest_cov_options(cov, cov_report)}', pty=pty)

def _pytest_cov_options(use_cov: bool, cov_reports: Optional[str]):
    if not use_cov:
        return ''
    cov_report_types = cov_reports.split(',') if cov_reports else []
    cov_report_types = ['term'] + cov_report_types
    cov_report_params = [f'--cov-report {r}' for r in cov_report_types]
    return f"--cov {' '.join(cov_report_params)}"
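# Worked examples for _pytest_cov_options above (argument values are illustrative):
#   _pytest_cov_options(False, None)      -> ''
#   _pytest_cov_options(True, None)       -> '--cov --cov-report term'
#   _pytest_cov_options(True, 'html,xml') -> '--cov --cov-report term --cov-report html --cov-report xml'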
def test_lit_image_classifier():
    LitImageClassifier((64, 64, 3), 2, 16, 16, EncoderConfig(), DecoderConfig(), optimizer_init={})
def evaluate_all_datasets(arch: Text, datasets: List[Text], xpaths: List[Text], splits: List[Text], config_path: Text, seed: int, raw_arch_config, workers, logger): (machine_info, raw_arch_config) = (get_machine_info(), deepcopy(raw_arch_config)) all_infos = {'info': machine_info} all_dataset_keys = [] for (dataset, xpath, split) in zip(datasets, xpaths, splits): (train_data, valid_data, _, xshape, class_num, normalizer) = get_datasets(dataset, xpath, (- 1)) if ((dataset == 'cifar10') or (dataset == 'cifar100')): split_info = load_config('configs/nas-benchmark/cifar-split.txt', None, None) elif dataset.startswith('ImageNet16'): split_info = load_config('configs/nas-benchmark/{:}-split.txt'.format(dataset), None, None) elif (dataset == 'ninapro'): split_info = None split = False elif (dataset == 'darcyflow'): split_info = None split = False else: raise ValueError('invalid dataset : {:}'.format(dataset)) config = load_config(config_path, dict(class_num=class_num, xshape=xshape), logger) train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, shuffle=True, num_workers=workers, pin_memory=True) valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=config.batch_size, shuffle=False, num_workers=workers, pin_memory=True) if (dataset == 'ninapro'): ValLoaders = {'ori-test': valid_loader} elif (dataset == 'darcyflow'): ValLoaders = {'ori-test': valid_loader} else: raise ValueError('invalid dataset : {:}'.format(dataset)) dataset_key = '{:}'.format(dataset) logger.log('Evaluate ||||||| {:10s} ||||||| Train-Num={:}, Valid-Num={:}, Train-Loader-Num={:}, Valid-Loader-Num={:}, batch size={:}'.format(dataset_key, len(train_data), len(valid_data), len(train_loader), len(valid_loader), config.batch_size)) logger.log('Evaluate ||||||| {:10s} ||||||| Config={:}'.format(dataset_key, config)) for (key, value) in ValLoaders.items(): logger.log('Evaluate ---->>>> {:10s} with {:} batchs'.format(key, len(value))) arch_config = dict2config(dict(name='infer.tiny', C=raw_arch_config['channel'], N=raw_arch_config['num_cells'], genotype=arch, num_classes=config.class_num), None) results = bench_evaluate_for_seed(arch_config, config, train_loader, ValLoaders, seed, logger, normalizer=normalizer) all_infos[dataset_key] = results all_dataset_keys.append(dataset_key) all_infos['all_dataset_keys'] = all_dataset_keys return all_infos
def main(save_dir: Path, workers: int, datasets: List[Text], xpaths: List[Text], splits: List[int], seeds: List[int], nets: List[str], opt_config: Dict[(Text, Any)], to_evaluate_indexes: tuple, cover_mode: bool, arch_config: Dict[(Text, Any)]): log_dir = (save_dir / 'logs') log_dir.mkdir(parents=True, exist_ok=True) logger = Logger(str(log_dir), os.getpid(), False) logger.log('xargs : seeds = {:}'.format(seeds)) logger.log('xargs : cover_mode = {:}'.format(cover_mode)) logger.log(('-' * 100)) logger.log(('Start evaluating range =: {:06d} - {:06d}'.format(min(to_evaluate_indexes), max(to_evaluate_indexes)) + '({:} in total) / {:06d} with cover-mode={:}'.format(len(to_evaluate_indexes), len(nets), cover_mode))) for (i, (dataset, xpath, split)) in enumerate(zip(datasets, xpaths, splits)): logger.log('--->>> Evaluate {:}/{:} : dataset={:9s}, path={:}, split={:}'.format(i, len(datasets), dataset, xpath, split)) logger.log('--->>> optimization config : {:}'.format(opt_config)) (start_time, epoch_time) = (time.time(), AverageMeter()) for (i, index) in enumerate(to_evaluate_indexes): arch = nets[index] logger.log('\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th arch [seeds={:}] {:}'.format(time_string(), i, len(to_evaluate_indexes), index, len(nets), seeds, ('-' * 15))) logger.log('{:} {:} {:}'.format(('-' * 15), arch, ('-' * 15))) has_continue = False for seed in seeds: to_save_name = (save_dir / 'arch-{:06d}-seed-{:04d}.pth'.format(index, seed)) if to_save_name.exists(): if cover_mode: logger.log('Find existing file : {:}, remove it before evaluation'.format(to_save_name)) os.remove(str(to_save_name)) else: logger.log('Find existing file : {:}, skip this evaluation'.format(to_save_name)) has_continue = True continue results = evaluate_all_datasets(CellStructure.str2structure(arch), datasets, xpaths, splits, opt_config, seed, arch_config, workers, logger) torch.save(results, to_save_name) logger.log('\n{:} evaluate {:06d}/{:06d} ({:06d}/{:06d})-th arch [seeds={:}] ===>>> {:}'.format(time_string(), i, len(to_evaluate_indexes), index, len(nets), seeds, to_save_name)) if (not has_continue): epoch_time.update((time.time() - start_time)) start_time = time.time() need_time = 'Time Left: {:}'.format(convert_secs2time((epoch_time.avg * ((len(to_evaluate_indexes) - i) - 1)), True)) logger.log('This arch costs : {:}'.format(convert_secs2time(epoch_time.val, True))) logger.log('{:}'.format(('*' * 100))) logger.log('{:} {:74s} {:}'.format(('*' * 10), '{:06d}/{:06d} ({:06d}/{:06d})-th done, left {:}'.format(i, len(to_evaluate_indexes), index, len(nets), need_time), ('*' * 10))) logger.log('{:}'.format(('*' * 100))) logger.close()
def train_single_model(save_dir, workers, datasets, xpaths, splits, use_less, seeds, model_str, arch_config): assert torch.cuda.is_available(), 'CUDA is not available.' torch.backends.cudnn.enabled = True torch.backends.cudnn.deterministic = True save_dir = ((Path(save_dir) / 'specifics') / '{:}-{:}-{:}-{:}'.format(('LESS' if use_less else 'FULL'), model_str, arch_config['channel'], arch_config['num_cells'])) logger = Logger(str(save_dir), 0, False) if (model_str in CellArchitectures): arch = CellArchitectures[model_str] logger.log('The model string is found in pre-defined architecture dict : {:}'.format(model_str)) else: try: arch = CellStructure.str2structure(model_str) except: raise ValueError('Invalid model string : {:}. It can not be found or parsed.'.format(model_str)) assert arch.check_valid_op(get_search_spaces('cell', 'full')), '{:} has the invalid op.'.format(arch) logger.log('Start train-evaluate {:}'.format(arch.tostr())) logger.log('arch_config : {:}'.format(arch_config)) (start_time, seed_time) = (time.time(), AverageMeter()) for (_is, seed) in enumerate(seeds): logger.log('\nThe {:02d}/{:02d}-th seed is {:} ----------------------<.>----------------------'.format(_is, len(seeds), seed)) to_save_name = (save_dir / 'seed-{:04d}.pth'.format(seed)) if to_save_name.exists(): logger.log('Find the existing file {:}, directly load!'.format(to_save_name)) checkpoint = torch.load(to_save_name) else: logger.log('Does not find the existing file {:}, train and evaluate!'.format(to_save_name)) checkpoint = evaluate_all_datasets(arch, datasets, xpaths, splits, use_less, seed, arch_config, workers, logger) torch.save(checkpoint, to_save_name) logger.log('{:}'.format(checkpoint['info'])) all_dataset_keys = checkpoint['all_dataset_keys'] for dataset_key in all_dataset_keys: logger.log('\n{:} dataset : {:} {:}'.format(('-' * 15), dataset_key, ('-' * 15))) dataset_info = checkpoint[dataset_key] logger.log('Flops = {:} MB, Params = {:} MB'.format(dataset_info['flop'], dataset_info['param'])) logger.log('config : {:}'.format(dataset_info['config'])) logger.log('Training State (finish) = {:}'.format(dataset_info['finish-train'])) last_epoch = (dataset_info['total_epoch'] - 1) (train_acc1es, train_acc5es) = (dataset_info['train_acc1es'], dataset_info['train_acc5es']) (valid_acc1es, valid_acc5es) = (dataset_info['valid_acc1es'], dataset_info['valid_acc5es']) logger.log('Last Info : Train = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%, Test = Acc@1 {:.2f}% Acc@5 {:.2f}% Error@1 {:.2f}%'.format(train_acc1es[last_epoch], train_acc5es[last_epoch], (100 - train_acc1es[last_epoch]), valid_acc1es[last_epoch], valid_acc5es[last_epoch], (100 - valid_acc1es[last_epoch]))) seed_time.update((time.time() - start_time)) start_time = time.time() need_time = 'Time Left: {:}'.format(convert_secs2time((seed_time.avg * ((len(seeds) - _is) - 1)), True)) logger.log('\n<<<***>>> The {:02d}/{:02d}-th seed is {:} <finish> other procedures need {:}'.format(_is, len(seeds), seed, need_time)) logger.close()
def generate_meta_info(save_dir, max_node, divide=40): aa_nas_bench_ss = get_search_spaces('cell', 'nas-bench-201') archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False) print('There are {:} archs vs {:}.'.format(len(archs), (len(aa_nas_bench_ss) ** (((max_node - 1) * max_node) / 2)))) random.seed(88) random.shuffle(archs) assert (archs[0].tostr() == '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|'), 'please check the 0-th architecture : {:}'.format(archs[0]) assert (archs[9].tostr() == '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|'), 'please check the 9-th architecture : {:}'.format(archs[9]) assert (archs[123].tostr() == '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|'), 'please check the 123-th architecture : {:}'.format(archs[123]) total_arch = len(archs) num = 50000 indexes_5W = list(range(num)) random.seed(1021) random.shuffle(indexes_5W) train_split = sorted(list(set(indexes_5W[:(num // 2)]))) valid_split = sorted(list(set(indexes_5W[(num // 2):]))) assert ((len(train_split) + len(valid_split)) == num) assert ((train_split[0] == 0) and (train_split[10] == 26) and (train_split[111] == 203) and (valid_split[0] == 1) and (valid_split[10] == 18) and (valid_split[111] == 242)), '{:} {:} {:} - {:} {:} {:}'.format(train_split[0], train_split[10], train_split[111], valid_split[0], valid_split[10], valid_split[111]) splits = {num: {'train': train_split, 'valid': valid_split}} info = {'archs': [x.tostr() for x in archs], 'total': total_arch, 'max_node': max_node, 'splits': splits} save_dir = Path(save_dir) save_dir.mkdir(parents=True, exist_ok=True) save_name = (save_dir / 'meta-node-{:}.pth'.format(max_node)) assert (not save_name.exists()), '{:} already exist'.format(save_name) torch.save(info, save_name) print('save the meta file into {:}'.format(save_name))
def traverse_net(max_node): aa_nas_bench_ss = get_search_spaces('cell', 'nats-bench') archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False) print('There are {:} archs vs {:}.'.format(len(archs), (len(aa_nas_bench_ss) ** (((max_node - 1) * max_node) / 2)))) random.seed(88) random.shuffle(archs) assert (archs[0].tostr() == '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|'), 'please check the 0-th architecture : {:}'.format(archs[0]) assert (archs[9].tostr() == '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|'), 'please check the 9-th architecture : {:}'.format(archs[9]) assert (archs[123].tostr() == '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|'), 'please check the 123-th architecture : {:}'.format(archs[123]) return [x.tostr() for x in archs]
def filter_indexes(xlist, mode, save_dir, seeds):
    all_indexes = []
    for index in xlist:
        if mode == 'cover':
            all_indexes.append(index)
        else:
            for seed in seeds:
                temp_path = save_dir / 'arch-{:06d}-seed-{:04d}.pth'.format(index, seed)
                if not temp_path.exists():
                    all_indexes.append(index)
                    break
    print('{:} [FILTER-INDEXES] : there are {:}/{:} architectures in total'.format(time_string(), len(all_indexes), len(xlist)))
    return all_indexes
def create_result_count(used_seed: int, dataset: Text, arch_config: Dict[(Text, Any)], results: Dict[(Text, Any)], dataloader_dict: Dict[(Text, Any)]) -> ResultsCount: xresult = ResultsCount(dataset, results['net_state_dict'], results['train_acc1es'], results['train_losses'], results['param'], results['flop'], arch_config, used_seed, results['total_epoch'], None) net_config = dict2config({'name': 'infer.tiny', 'C': arch_config['channel'], 'N': arch_config['num_cells'], 'genotype': CellStructure.str2structure(arch_config['arch_str']), 'num_classes': arch_config['class_num']}, None) if ('train_times' in results): xresult.update_train_info(results['train_acc1es'], results['train_acc5es'], results['train_losses'], results['train_times']) xresult.update_eval(results['valid_acc1es'], results['valid_losses'], results['valid_times']) else: network = get_cell_based_tiny_net(net_config) network.load_state_dict(xresult.get_net_param()) if (dataset == 'cifar10-valid'): xresult.update_OLD_eval('x-valid', results['valid_acc1es'], results['valid_losses']) (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}@{:}'.format('cifar10', 'test')], network.cuda()) xresult.update_OLD_eval('ori-test', {(results['total_epoch'] - 1): top1}, {(results['total_epoch'] - 1): loss}) xresult.update_latency(latencies) elif (dataset == 'cifar10'): xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses']) (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda()) xresult.update_latency(latencies) elif ((dataset == 'cifar100') or (dataset == 'ImageNet16-120')): xresult.update_OLD_eval('ori-test', results['valid_acc1es'], results['valid_losses']) (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'valid')], network.cuda()) xresult.update_OLD_eval('x-valid', {(results['total_epoch'] - 1): top1}, {(results['total_epoch'] - 1): loss}) (loss, top1, top5, latencies) = pure_evaluate(dataloader_dict['{:}@{:}'.format(dataset, 'test')], network.cuda()) xresult.update_OLD_eval('x-test', {(results['total_epoch'] - 1): top1}, {(results['total_epoch'] - 1): loss}) xresult.update_latency(latencies) else: raise ValueError('invalid dataset name : {:}'.format(dataset)) return xresult
def account_one_arch(arch_index, arch_str, checkpoints, datasets, dataloader_dict): information = ArchResults(arch_index, arch_str) for checkpoint_path in checkpoints: checkpoint = torch.load(checkpoint_path, map_location='cpu') used_seed = checkpoint_path.name.split('-')[(- 1)].split('.')[0] ok_dataset = 0 dataset = 'darcyflow' if (dataset not in checkpoint): print('Can not find {:} in arch-{:} from {:}'.format(dataset, arch_index, checkpoint_path)) continue else: ok_dataset += 1 results = checkpoint[dataset] assert results['finish-train'], 'This {:} arch seed={:} does not finish train on {:} ::: {:}'.format(arch_index, used_seed, dataset, checkpoint_path) arch_config = {'channel': results['arch_config']['C'], 'num_cells': results['arch_config']['N'], 'arch_str': arch_str, 'class_num': results['arch_config']['num_classes']} xresult = create_result_count(used_seed, dataset, arch_config, results, dataloader_dict) information.update(dataset, int(used_seed), xresult) if (ok_dataset == 0): raise ValueError('{:} does not find any data'.format(checkpoint_path)) return information
def correct_time_related_info(arch_index: int, arch_infos: Dict[(Text, ArchResults)]): '\n cifar010_latency = (\n api.get_latency(arch_index, "cifar10-valid", hp="200")\n + api.get_latency(arch_index, "cifar10", hp="200")\n ) / 2\n cifar100_latency = api.get_latency(arch_index, "cifar100", hp="200")\n image_latency = api.get_latency(arch_index, "ImageNet16-120", hp="200")\n for hp, arch_info in arch_infos.items():\n arch_info.reset_latency("cifar10-valid", 777, cifar010_latency)\n arch_info.reset_latency("cifar10", 777, cifar010_latency)\n arch_info.reset_latency("cifar100", 777, cifar100_latency)\n arch_info.reset_latency("ImageNet16-120", 777, image_latency)\n ' train_per_epoch_time = list(arch_infos['12'].query('darcyflow', 777).train_times.values()) train_per_epoch_time = (sum(train_per_epoch_time) / len(train_per_epoch_time)) (eval_ori_test_time, eval_x_valid_time) = ([], []) for (key, value) in arch_infos['12'].query('darcyflow', 777).eval_times.items(): if key.startswith('ori-test@'): eval_ori_test_time.append(value) elif key.startswith('x-valid@'): eval_x_valid_time.append(value) else: raise ValueError('-- {:} --'.format(key)) (eval_ori_test_time, eval_x_valid_time) = (float(np.mean(eval_ori_test_time)), float(np.mean(eval_x_valid_time))) nums = {'cifar10-valid-train': 25000, 'darcyflow-train': 1000, 'darcyflow-test': 100} eval_per_sample = ((eval_ori_test_time + eval_x_valid_time) / nums['darcyflow-test']) for (hp, arch_info) in arch_infos.items(): arch_info.reset_pseudo_train_times('darcyflow', None, ((train_per_epoch_time / nums['cifar10-valid-train']) * nums['darcyflow-train'])) arch_info.reset_pseudo_eval_times('darcyflow', None, 'ori-test', (eval_per_sample * nums['darcyflow-test'])) return arch_infos
def simplify(save_dir, save_name, nets, total, sup_config): dataloader_dict = {} (hps, seeds) = (['12'], set()) for hp in hps: sub_save_dir = (save_dir / 'raw-data-{:}'.format(hp)) ckps = sorted(list(sub_save_dir.glob('arch-*-seed-*.pth'))) seed2names = defaultdict(list) for ckp in ckps: parts = re.split('-|\\.', ckp.name) seed2names[parts[3]].append(ckp.name) print('DIR : {:}'.format(sub_save_dir)) nums = [] for (seed, xlist) in seed2names.items(): seeds.add(seed) nums.append(len(xlist)) print(' [seed={:}] there are {:} checkpoints.'.format(seed, len(xlist))) assert (len(nets) == total == max(nums)), 'there are some missed files : {:} vs {:}'.format(max(nums), total) print('{:} start simplify the checkpoint.'.format(time_string())) datasets = ('ninapro', 'darcyflow') full_save_dir = (save_dir / (save_name + '-FULL')) simple_save_dir = (save_dir / (save_name + '-SIMPLIFY')) full_save_dir.mkdir(parents=True, exist_ok=True) simple_save_dir.mkdir(parents=True, exist_ok=True) (arch2infos, evaluated_indexes) = (dict(), set()) (end_time, arch_time) = (time.time(), AverageMeter()) temp_final_infos = {'meta_archs': nets, 'total_archs': total, 'arch2infos': None, 'evaluated_indexes': set()} pickle_save(temp_final_infos, str((full_save_dir / 'meta.pickle'))) pickle_save(temp_final_infos, str((simple_save_dir / 'meta.pickle'))) for index in tqdm(range(total)): arch_str = nets[index] hp2info = OrderedDict() full_save_path = (full_save_dir / '{:06d}.pickle'.format(index)) simple_save_path = (simple_save_dir / '{:06d}.pickle'.format(index)) for hp in hps: sub_save_dir = (save_dir / 'raw-data-{:}'.format(hp)) ckps = [(sub_save_dir / 'arch-{:06d}-seed-{:}.pth'.format(index, seed)) for seed in seeds] ckps = [x for x in ckps if x.exists()] if (len(ckps) == 0): raise ValueError('Invalid data : index={:}, hp={:}'.format(index, hp)) arch_info = account_one_arch(index, arch_str, ckps, datasets, dataloader_dict) hp2info[hp] = arch_info hp2info = correct_time_related_info(index, hp2info) evaluated_indexes.add(index) to_save_data = OrderedDict({'12': hp2info['12'].state_dict()}) pickle_save(to_save_data, str(full_save_path)) for hp in hps: hp2info[hp].clear_params() to_save_data = OrderedDict({'12': hp2info['12'].state_dict()}) pickle_save(to_save_data, str(simple_save_path)) arch2infos[index] = to_save_data arch_time.update((time.time() - end_time)) end_time = time.time() need_time = '{:}'.format(convert_secs2time((arch_time.avg * ((total - index) - 1)), True)) print('{:} {:} done.'.format(time_string(), save_name)) final_infos = {'meta_archs': nets, 'total_archs': total, 'arch2infos': arch2infos, 'evaluated_indexes': evaluated_indexes} save_file_name = (save_dir / '{:}.pickle'.format(save_name)) pickle_save(final_infos, str(save_file_name)) hd5sum = get_md5_file((str(save_file_name) + '.pbz2')) hd5_file_name = (save_dir / '{:}-{:}.pickle.pbz2'.format(NATS_TSS_BASE_NAME, hd5sum)) shutil.move((str(save_file_name) + '.pbz2'), hd5_file_name) print('Save {:} / {:} architecture results into {:} -> {:}.'.format(len(evaluated_indexes), total, save_file_name, hd5_file_name)) hd5_full_save_dir = (save_dir / '{:}-{:}-full'.format(NATS_TSS_BASE_NAME, hd5sum)) hd5_simple_save_dir = (save_dir / '{:}-{:}-simple'.format(NATS_TSS_BASE_NAME, hd5sum)) shutil.move(full_save_dir, hd5_full_save_dir) shutil.move(simple_save_dir, hd5_simple_save_dir)
def traverse_net(max_node): aa_nas_bench_ss = get_search_spaces('cell', 'nats-bench') archs = CellStructure.gen_all(aa_nas_bench_ss, max_node, False) print('There are {:} archs vs {:}.'.format(len(archs), (len(aa_nas_bench_ss) ** (((max_node - 1) * max_node) / 2)))) random.seed(88) random.shuffle(archs) assert (archs[0].tostr() == '|avg_pool_3x3~0|+|nor_conv_1x1~0|skip_connect~1|+|nor_conv_1x1~0|skip_connect~1|skip_connect~2|'), 'please check the 0-th architecture : {:}'.format(archs[0]) assert (archs[9].tostr() == '|avg_pool_3x3~0|+|none~0|none~1|+|skip_connect~0|none~1|nor_conv_3x3~2|'), 'please check the 9-th architecture : {:}'.format(archs[9]) assert (archs[123].tostr() == '|avg_pool_3x3~0|+|avg_pool_3x3~0|nor_conv_1x1~1|+|none~0|avg_pool_3x3~1|nor_conv_3x3~2|'), 'please check the 123-th architecture : {:}'.format(archs[123]) return [x.tostr() for x in archs]
def get_topology_config_space(search_space, max_nodes=4):
    cs = ConfigSpace.ConfigurationSpace()
    for i in range(1, max_nodes):
        for j in range(i):
            node_str = '{:}<-{:}'.format(i, j)
            cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter(node_str, search_space))
    return cs

def get_size_config_space(search_space):
    cs = ConfigSpace.ConfigurationSpace()
    for ilayer in range(search_space['numbers']):
        node_str = 'layer-{:}'.format(ilayer)
        cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter(node_str, search_space['candidates']))
    return cs

def config2topology_func(max_nodes=4):
    def config2structure(config):
        genotypes = []
        for i in range(1, max_nodes):
            xlist = []
            for j in range(i):
                node_str = '{:}<-{:}'.format(i, j)
                op_name = config[node_str]
                xlist.append((op_name, j))
            genotypes.append(tuple(xlist))
        return CellStructure(genotypes)
    return config2structure

def config2size_func(search_space):
    def config2structure(config):
        channels = []
        for ilayer in range(search_space['numbers']):
            node_str = 'layer-{:}'.format(ilayer)
            channels.append(str(config[node_str]))
        return ':'.join(channels)
    return config2structure
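# Hedged example (the search-space values are made up): the converter returned by
# config2size_func above maps a sampled layer-wise configuration to the 'C0:C1:C2'
# size string consumed by the size search space.
size_search_space = {'numbers': 3, 'candidates': [8, 16, 32]}  # illustrative values
to_arch = config2size_func(size_search_space)
assert to_arch({'layer-0': 8, 'layer-1': 16, 'layer-2': 32}) == '8:16:32'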
class MyWorker(Worker):
    def __init__(self, *args, convert_func=None, dataset=None, api=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.convert_func = convert_func
        self._dataset = dataset
        self._api = api
        self.total_times = []
        self.trajectory = []

    def compute(self, config, budget, **kwargs):
        arch = self.convert_func(config)
        accuracy, latency, time_cost, total_time = self._api.simulate_train_eval(arch, self._dataset, iepoch=int(budget) - 1, hp='12')
        self.trajectory.append((accuracy, arch))
        self.total_times.append(total_time)
        return {'loss': 100 - accuracy, 'info': self._api.query_index_by_arch(arch)}
def main(xargs, api, api_full): torch.set_num_threads(4) prepare_seed(xargs.rand_seed) logger = prepare_logger(args) logger.log('{:} use api : {:}'.format(time_string(), api)) api.reset_time() search_space = get_search_spaces(xargs.search_space, 'nats-bench') if (xargs.search_space == 'tss'): cs = get_topology_config_space(search_space) config2structure = config2topology_func() else: cs = get_size_config_space(search_space) config2structure = config2size_func(search_space) hb_run_id = '0' NS = hpns.NameServer(run_id=hb_run_id, host='localhost', port=0) (ns_host, ns_port) = NS.start() num_workers = 1 workers = [] for i in range(num_workers): w = MyWorker(nameserver=ns_host, nameserver_port=ns_port, convert_func=config2structure, dataset=xargs.dataset, api=api, run_id=hb_run_id, id=i) w.run(background=True) workers.append(w) start_time = time.time() bohb = BOHB(configspace=cs, run_id=hb_run_id, eta=3, min_budget=1, max_budget=12, nameserver=ns_host, nameserver_port=ns_port, num_samples=xargs.num_samples, random_fraction=xargs.random_fraction, bandwidth_factor=xargs.bandwidth_factor, ping_interval=10, min_bandwidth=xargs.min_bandwidth) results = bohb.run(xargs.n_iters, min_n_workers=num_workers) bohb.shutdown(shutdown_workers=True) NS.shutdown() current_best_index = [] for idx in range(len(workers[0].trajectory)): trajectory = workers[0].trajectory[:(idx + 1)] arch = max(trajectory, key=(lambda x: x[0]))[1] current_best_index.append(api.query_index_by_arch(arch)) best_arch = max(workers[0].trajectory, key=(lambda x: x[0]))[1] logger.log('Best found configuration: {:} within {:.3f} s'.format(best_arch, workers[0].total_times[(- 1)])) info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90')) info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), 'darcyflow', iepoch=None, hp='200') loss = info_num['valtest-loss'] logger.log('{:}'.format(info)) logger.log(('-' * 100)) logger.close() return (logger.log_dir, current_best_index, workers[0].total_times, loss)
def get_topology_config_space(search_space, max_nodes=4): cs = ConfigSpace.ConfigurationSpace() for i in range(1, max_nodes): for j in range(i): node_str = '{:}<-{:}'.format(i, j) cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter(node_str, search_space)) return cs
def get_size_config_space(search_space): cs = ConfigSpace.ConfigurationSpace() for ilayer in range(search_space['numbers']): node_str = 'layer-{:}'.format(ilayer) cs.add_hyperparameter(ConfigSpace.CategoricalHyperparameter(node_str, search_space['candidates'])) return cs
def config2topology_func(max_nodes=4): def config2structure(config): genotypes = [] for i in range(1, max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) op_name = config[node_str] xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) return config2structure
def config2size_func(search_space): def config2structure(config): channels = [] for ilayer in range(search_space['numbers']): node_str = 'layer-{:}'.format(ilayer) channels.append(str(config[node_str])) return ':'.join(channels) return config2structure
class MyWorker(Worker): def __init__(self, *args, convert_func=None, dataset=None, api=None, **kwargs): super().__init__(*args, **kwargs) self.convert_func = convert_func self._dataset = dataset self._api = api self.total_times = [] self.trajectory = [] def compute(self, config, budget, **kwargs): arch = self.convert_func(config) (accuracy, latency, time_cost, total_time) = self._api.simulate_train_eval(arch, self._dataset, iepoch=(int(budget) - 1), hp='12') self.trajectory.append((accuracy, arch)) self.total_times.append(total_time) return {'loss': (100 - accuracy), 'info': self._api.query_index_by_arch(arch)}
def main(xargs, api, api_full): torch.set_num_threads(4) prepare_seed(xargs.rand_seed) logger = prepare_logger(args) logger.log('{:} use api : {:}'.format(time_string(), api)) api.reset_time() search_space = get_search_spaces(xargs.search_space, 'nats-bench') if (xargs.search_space == 'tss'): cs = get_topology_config_space(search_space) config2structure = config2topology_func() else: cs = get_size_config_space(search_space) config2structure = config2size_func(search_space) hb_run_id = '0' NS = hpns.NameServer(run_id=hb_run_id, host='localhost', port=0) (ns_host, ns_port) = NS.start() num_workers = 1 workers = [] for i in range(num_workers): w = MyWorker(nameserver=ns_host, nameserver_port=ns_port, convert_func=config2structure, dataset=xargs.dataset, api=api, run_id=hb_run_id, id=i) w.run(background=True) workers.append(w) start_time = time.time() hyper = HyperBand(configspace=cs, run_id=hb_run_id, eta=3, min_budget=1, max_budget=12, nameserver=ns_host, nameserver_port=ns_port, ping_interval=50) results = hyper.run(xargs.n_iters, min_n_workers=num_workers) hyper.shutdown(shutdown_workers=True) NS.shutdown() current_best_index = [] for idx in range(len(workers[0].trajectory)): trajectory = workers[0].trajectory[:(idx + 1)] arch = max(trajectory, key=(lambda x: x[0]))[1] current_best_index.append(api.query_index_by_arch(arch)) best_arch = max(workers[0].trajectory, key=(lambda x: x[0]))[1] logger.log('Best found configuration: {:} within {:.3f} s'.format(best_arch, workers[0].total_times[(- 1)])) info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90')) info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), 'darcyflow', iepoch=None, hp='200') loss = info_num['valtest-loss'] logger.log('{:}'.format(info)) logger.log(('-' * 100)) logger.close() return (logger.log_dir, current_best_index, workers[0].total_times, loss)
def random_topology_func(op_names, max_nodes=4): def random_architecture(): genotypes = [] for i in range(1, max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) op_name = random.choice(op_names) xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) return random_architecture
def random_size_func(info): def random_architecture(): channels = [] for i in range(info['numbers']): channels.append(str(random.choice(info['candidates']))) return ':'.join(channels) return random_architecture
def main(xargs, api, api_full): torch.set_num_threads(4) prepare_seed(xargs.rand_seed) logger = prepare_logger(args) logger.log('{:} use api : {:}'.format(time_string(), api)) api.reset_time() search_space = get_search_spaces(xargs.search_space, 'nats-bench') if (xargs.search_space == 'tss'): random_arch = random_topology_func(search_space) else: random_arch = random_size_func(search_space) (best_arch, best_acc, total_time_cost, history) = (None, (- 1), [], []) current_best_index = [] while ((len(total_time_cost) == 0) or (total_time_cost[(- 1)] < xargs.time_budget)): arch = random_arch() (accuracy, _, _, total_cost) = api.simulate_train_eval(arch, xargs.dataset, iepoch=11, hp='12') total_time_cost.append(total_cost) history.append(arch) if ((best_arch is None) or (best_acc < accuracy)): (best_acc, best_arch) = (accuracy, arch) logger.log('[{:03d}] : {:} : accuracy = {:.2f}%'.format(len(history), arch, accuracy)) current_best_index.append(api.query_index_by_arch(best_arch)) logger.log('{:} best arch is {:}, accuracy = {:.2f}%, visit {:} archs with {:.1f} s.'.format(time_string(), best_arch, best_acc, len(history), total_time_cost[(- 1)])) info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90')) info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), 'darcyflow', iepoch=None, hp='200') loss = info_num['valtest-loss'] logger.log('{:}'.format(info)) logger.log(('-' * 100)) logger.close() return (logger.log_dir, current_best_index, total_time_cost, loss)
class Model(object): def __init__(self): self.arch = None self.accuracy = None def __str__(self): 'Returns a readable version of this architecture.' return '{:}'.format(self.arch)
def random_topology_func(op_names, max_nodes=4): def random_architecture(): genotypes = [] for i in range(1, max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) op_name = random.choice(op_names) xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) return random_architecture
def random_size_func(info): def random_architecture(): channels = [] for i in range(info['numbers']): channels.append(str(random.choice(info['candidates']))) return ':'.join(channels) return random_architecture
def mutate_topology_func(op_names): 'Computes the architecture for a child of the given parent architecture.\n The parent architecture is cloned, and the child is produced by randomly switching one operation to another.\n ' def mutate_topology_func(parent_arch): child_arch = deepcopy(parent_arch) node_id = random.randint(0, (len(child_arch.nodes) - 1)) node_info = list(child_arch.nodes[node_id]) snode_id = random.randint(0, (len(node_info) - 1)) xop = random.choice(op_names) while (xop == node_info[snode_id][0]): xop = random.choice(op_names) node_info[snode_id] = (xop, node_info[snode_id][1]) child_arch.nodes[node_id] = tuple(node_info) return child_arch return mutate_topology_func
def mutate_size_func(info): 'Computes the architecture for a child of the given parent architecture.\n The parent architecture is cloned, and the child is produced by randomly re-sampling the channel size of one layer from the candidates.\n ' def mutate_size_func(parent_arch): child_arch = deepcopy(parent_arch) child_arch = child_arch.split(':') index = random.randint(0, (len(child_arch) - 1)) child_arch[index] = str(random.choice(info['candidates'])) return ':'.join(child_arch) return mutate_size_func
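# Illustrative call of the size mutator above (not part of the original scripts); the
# candidate list is an assumption matching the size search space used elsewhere here.
size_mutate = mutate_size_func({'candidates': [8, 16, 24, 32, 40, 48, 56, 64]})
child = size_mutate('8:16:32:48:64')  # one layer width re-sampled, e.g. '8:16:24:48:64'
# Note: unlike the topology mutator, the re-sampled width may coincide with the old one.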
def regularized_evolution(cycles, population_size, sample_size, time_budget, random_arch, mutate_arch, api, use_proxy, dataset): 'Algorithm for regularized evolution (i.e. aging evolution).\n\n Follows "Algorithm 1" in Real et al. "Regularized Evolution for Image\n Classifier Architecture Search".\n\n Args:\n cycles: the number of cycles the algorithm should run for.\n population_size: the number of individuals to keep in the population.\n sample_size: the number of individuals that should participate in each tournament.\n time_budget: the upper bound of searching cost\n\n Returns:\n history: a list of `Model` instances, representing all the models computed\n during the evolution experiment.\n ' population = collections.deque() api.reset_time() (history, total_time_cost) = ([], []) current_best_index = [] while (len(population) < population_size): model = Model() model.arch = random_arch() (model.accuracy, _, _, total_cost) = api.simulate_train_eval(model.arch, dataset, iepoch=11, hp='12') population.append(model) history.append((model.accuracy, model.arch)) total_time_cost.append(total_cost) current_best_index.append(api.query_index_by_arch(max(history, key=(lambda x: x[0]))[1])) while (total_time_cost[(- 1)] < time_budget): (start_time, sample) = (time.time(), []) while (len(sample) < sample_size): candidate = random.choice(list(population)) sample.append(candidate) parent = max(sample, key=(lambda i: i.accuracy)) child = Model() child.arch = mutate_arch(parent.arch) (child.accuracy, _, time_cost, total_cost) = api.simulate_train_eval(child.arch, dataset, hp='12') population.append(child) history.append((child.accuracy, child.arch)) current_best_index.append(api.query_index_by_arch(max(history, key=(lambda x: x[0]))[1])) total_time_cost.append(total_cost) population.popleft() return (history, current_best_index, total_time_cost)
def main(xargs, api, api_full): torch.set_num_threads(4) prepare_seed(xargs.rand_seed) logger = prepare_logger(args) search_space = get_search_spaces(xargs.search_space, 'nats-bench') if (xargs.search_space == 'tss'): random_arch = random_topology_func(search_space) mutate_arch = mutate_topology_func(search_space) else: random_arch = random_size_func(search_space) mutate_arch = mutate_size_func(search_space) x_start_time = time.time() logger.log('{:} use api : {:}'.format(time_string(), api)) logger.log((('-' * 30) + ' start searching with the time budget of {:} s'.format(xargs.time_budget))) (history, current_best_index, total_times) = regularized_evolution(xargs.ea_cycles, xargs.ea_population, xargs.ea_sample_size, xargs.time_budget, random_arch, mutate_arch, api, (xargs.use_proxy > 0), xargs.dataset) logger.log('{:} regularized_evolution finish with history of {:} arch with {:.1f} s (real-cost={:.2f} s).'.format(time_string(), len(history), total_times[(- 1)], (time.time() - x_start_time))) best_arch = max(history, key=(lambda x: x[0]))[1] logger.log('{:} best arch is {:}'.format(time_string(), best_arch)) info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90')) info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), 'darcyflow', iepoch=None, hp='200') loss = info_num['valtest-loss'] logger.log('{:}'.format(info)) logger.log(('-' * 100)) logger.close() return (logger.log_dir, current_best_index, total_times, loss)
class PolicyTopology(nn.Module): def __init__(self, search_space, max_nodes=4): super(PolicyTopology, self).__init__() self.max_nodes = max_nodes self.search_space = deepcopy(search_space) self.edge2index = {} for i in range(1, max_nodes): for j in range(i): node_str = '{:}<-{:}'.format(i, j) self.edge2index[node_str] = len(self.edge2index) self.arch_parameters = nn.Parameter((0.001 * torch.randn(len(self.edge2index), len(search_space)))) def generate_arch(self, actions): genotypes = [] for i in range(1, self.max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) op_name = self.search_space[actions[self.edge2index[node_str]]] xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) def genotype(self): genotypes = [] for i in range(1, self.max_nodes): xlist = [] for j in range(i): node_str = '{:}<-{:}'.format(i, j) with torch.no_grad(): weights = self.arch_parameters[self.edge2index[node_str]] op_name = self.search_space[weights.argmax().item()] xlist.append((op_name, j)) genotypes.append(tuple(xlist)) return CellStructure(genotypes) def forward(self): alphas = nn.functional.softmax(self.arch_parameters, dim=(- 1)) return alphas
class PolicySize(nn.Module): def __init__(self, search_space): super(PolicySize, self).__init__() self.candidates = search_space['candidates'] self.numbers = search_space['numbers'] self.arch_parameters = nn.Parameter((0.001 * torch.randn(self.numbers, len(self.candidates)))) def generate_arch(self, actions): channels = [str(self.candidates[i]) for i in actions] return ':'.join(channels) def genotype(self): channels = [] for i in range(self.numbers): index = self.arch_parameters[i].argmax().item() channels.append(str(self.candidates[index])) return ':'.join(channels) def forward(self): alphas = nn.functional.softmax(self.arch_parameters, dim=(- 1)) return alphas
class ExponentialMovingAverage(object): 'Class that maintains an exponential moving average.' def __init__(self, momentum): self._numerator = 0 self._denominator = 0 self._momentum = momentum def update(self, value): self._numerator = ((self._momentum * self._numerator) + ((1 - self._momentum) * value)) self._denominator = ((self._momentum * self._denominator) + (1 - self._momentum)) def value(self): 'Return the current value of the moving average' return (self._numerator / self._denominator)
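# Small worked check (illustrative, not part of the original scripts): the running
# denominator makes the moving average bias-corrected, so the first update returns
# the observation itself regardless of momentum, and later values favor recent rewards.
ema = ExponentialMovingAverage(momentum=0.9)
ema.update(10.0)
assert abs(ema.value() - 10.0) < 1e-9   # (0.1 * 10) / 0.1 == 10
ema.update(20.0)
print(ema.value())                      # ~15.26 = (0.9*1.0 + 0.1*20) / (0.9*0.1 + 0.1)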
def select_action(policy): probs = policy() m = Categorical(probs) action = m.sample() return (m.log_prob(action), action.cpu().tolist())
def main(xargs, api, api_full): prepare_seed(xargs.rand_seed) logger = prepare_logger(args) search_space = get_search_spaces(xargs.search_space, 'nats-bench') if (xargs.search_space == 'tss'): policy = PolicyTopology(search_space) else: policy = PolicySize(search_space) optimizer = torch.optim.Adam(policy.parameters(), lr=xargs.learning_rate) eps = np.finfo(np.float32).eps.item() baseline = ExponentialMovingAverage(xargs.EMA_momentum) logger.log('policy : {:}'.format(policy)) logger.log('optimizer : {:}'.format(optimizer)) logger.log('eps : {:}'.format(eps)) logger.log('{:} use api : {:}'.format(time_string(), api)) api.reset_time() x_start_time = time.time() logger.log('Will start searching with time budget of {:} s.'.format(xargs.time_budget)) (total_steps, total_costs, trace) = (0, [], []) current_best_index = [] while ((len(total_costs) == 0) or (total_costs[(- 1)] < xargs.time_budget)): start_time = time.time() (log_prob, action) = select_action(policy) arch = policy.generate_arch(action) (reward, _, _, current_total_cost) = api.simulate_train_eval(arch, xargs.dataset, iepoch=11, hp='12') trace.append((reward, arch)) total_costs.append(current_total_cost) baseline.update(reward) policy_loss = ((- log_prob) * (reward - baseline.value())).sum() optimizer.zero_grad() policy_loss.backward() optimizer.step() total_steps += 1 logger.log('step [{:3d}] : average-reward={:.3f} : policy_loss={:.4f} : {:}'.format(total_steps, baseline.value(), policy_loss.item(), policy.genotype())) current_best_index.append(api.query_index_by_arch(max(trace, key=(lambda x: x[0]))[1])) best_arch = max(trace, key=(lambda x: x[0]))[1] logger.log('REINFORCE finish with {:} steps and {:.1f} s (real cost={:.3f}).'.format(total_steps, total_costs[(- 1)], (time.time() - x_start_time))) info = api_full.query_info_str_by_arch(best_arch, ('200' if (xargs.search_space == 'tss') else '90')) info_num = api_full.get_more_info(api.query_index_by_arch(best_arch), 'darcyflow', iepoch=None, hp='200') loss = info_num['valtest-loss'] logger.log('{:}'.format(info)) logger.log(('-' * 100)) logger.close() return (logger.log_dir, current_best_index, total_costs, loss)
class LpLoss(object): def __init__(self, d=2, p=2, size_average=True, reduction=True): super(LpLoss, self).__init__() assert ((d > 0) and (p > 0)) self.d = d self.p = p self.reduction = reduction self.size_average = size_average def abs(self, x, y): num_examples = x.size()[0] h = (1.0 / (x.size()[1] - 1.0)) all_norms = ((h ** (self.d / self.p)) * torch.norm((x.view(num_examples, (- 1)) - y.view(num_examples, (- 1))), self.p, 1)) if self.reduction: if self.size_average: return torch.mean(all_norms) else: return torch.sum(all_norms) return all_norms def rel(self, x, y): num_examples = x.size()[0] diff_norms = torch.norm((x.reshape(num_examples, (- 1)) - y.reshape(num_examples, (- 1))), self.p, 1) y_norms = torch.norm(y.reshape(num_examples, (- 1)), self.p, 1) if self.reduction: if self.size_average: return torch.mean((diff_norms / y_norms)) else: return torch.sum((diff_norms / y_norms)) return (diff_norms / y_norms) def __call__(self, x, y): return self.rel(x, y)
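# Hedged sanity check (illustrative, not part of the original scripts): LpLoss.__call__
# returns the batch-averaged relative Lp error ||x - y||_p / ||y||_p via rel().
import torch

loss_fn = LpLoss(d=2, p=2)
target = torch.ones(4, 10)         # 4 examples, 10 values each
pred = 1.1 * torch.ones(4, 10)     # every predicted value is 10% too large
print(loss_fn(pred, target))       # tensor(0.1000), i.e. a 10% relative L2 error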
def _concat(xs): return torch.cat([x.view((- 1)) for x in xs])
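# Minimal hedged example (not part of the original scripts): _concat flattens a list of
# tensors (e.g. all parameters or gradients of a model) into one 1-D vector, the form
# needed for vector arithmetic over whole parameter sets.
import torch
import torch.nn as nn

net = nn.Linear(3, 2)
theta = _concat(net.parameters())
print(theta.shape)                 # torch.Size([8]): 3*2 weights + 2 biases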