def checkpoint_name(checkpoint_dir, epoch='latest'):
    return os.path.join(checkpoint_dir, 'ckpt-{}.dat'.format(epoch))

class FileStorageObserverWithExUuid(FileStorageObserver):
    ''' Wraps the FileStorageObserver so that we can pass in the Id.
        This allows us to save experiments into subdirectories with
        meaningful names. The standard FileStorageObserver just increments
        a counter.
    '''
    UNUSED_VALUE = -1

    def started_event(self, ex_info, command, host_info, start_time, config, meta_info, _id):
        _id = config['uuid'] + '_metadata'
        super().started_event(ex_info, command, host_info, start_time, config, meta_info, _id=_id)

    def queued_event(self, ex_info, command, host_info, queue_time, config, meta_info, _id):
        assert 'uuid' in config, "The config must contain a key 'uuid'"
        _id = config['uuid'] + '_metadata'
        super().queued_event(ex_info, command, host_info, queue_time, config, meta_info, _id=_id)

class VideoLogger(object):
    ''' Logs a video to a file, frame-by-frame.

        All frames must be the same height.

        Example:
        >>> logger = VideoLogger("output.mp4")
        >>> for i in range(n_frames):
        ...     logger.log(color_transitions_(i, n_frames, width, height))
        >>> del logger  # or just let the logger go out of scope
    '''
    def __init__(self, save_path, fps=30):
        fps = str(fps)
        self.writer = skvideo.io.FFmpegWriter(save_path,
                                              inputdict={'-r': fps},
                                              outputdict={'-vcodec': 'libx264', '-r': fps})
        self.f_open = False

    def log(self, frame):
        ''' Adds a frame to the file.
            Parameters:
                frame: An HxWxC numpy array (uint8). All frames must be the same height.
        '''
        self.writer.writeFrame(frame)

    def close(self):
        try:
            self.writer.close()
        except AttributeError:
            pass

    def __del__(self):
        self.close()

def color_transitions_(i, k, width, height):
    ''' Returns an HxWx3 uint8 frame: a 2D gradient that inverts as i goes
        from 0 to k, with a third channel that brightens linearly with i. '''
    x = np.linspace(0, 1.0, width)
    y = np.linspace(0, 1.0, height)
    bg = np.array(np.meshgrid(x, y))
    bg = (1.0 - i / k) * bg + (i / k) * (1 - bg)
    r = np.ones_like(bg[0][np.newaxis, ...]) * i / k
    return np.uint8(np.rollaxis(np.concatenate([bg, r], axis=0), 0, 3) * 255)

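# A minimal usage sketch for the two helpers above (hypothetical output path;
# assumes scikit-video and FFmpeg are available): render a short synthetic clip
# of the shifting color gradient.
def _demo_video_logger(save_path='demo.mp4', n_frames=30, width=64, height=64):
    vlog = VideoLogger(save_path, fps=10)
    for i in range(n_frames):
        vlog.log(color_transitions_(i, n_frames, width, height))  # HxWx3 uint8 frame
    vlog.close()
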
class SensorPack(dict):
    ''' Fun fact: you can slice using np.s_, e.g.
        sensors.at(np.s_[:2])
    '''
    def at(self, val):
        return SensorPack({k: v[val] for k, v in self.items()})

    def apply(self, lambda_fn):
        return SensorPack({k: lambda_fn(k, v) for k, v in self.items()})

    def size(self, idx, key=None):
        assert idx == 0, 'can only get batch size for SensorPack'
        if key is None:
            key = list(self.keys())[0]
        return self[key].size(idx)

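# A minimal sketch of SensorPack in action (torch tensors assumed, since
# .size(idx) follows the torch.Tensor API): slice every sensor at once and
# query the shared batch dimension.
#
#   sensors = SensorPack({'rgb':   torch.zeros(8, 3, 64, 64),
#                         'depth': torch.zeros(8, 1, 64, 64)})
#   first_two = sensors.at(np.s_[:2])              # every value sliced to batch size 2
#   halved    = sensors.apply(lambda k, v: v / 2)  # transform per (key, value)
#   assert sensors.size(0) == 8                    # batch size, from an arbitrary key
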
def replay_logs(existing_log_paths, mlog):
    existing_results_path = combined_paths(existing_log_paths, 'result_log.pkl')
    save_training_logs(existing_results_path, mlog)

def move_metadata_file(old_log_dir, new_log_dir, uuid):
    fp_metadata_old = get_subdir(old_log_dir, 'metadata')
    fp_metadata_old = [fp for fp in fp_metadata_old if uuid in fp]
    if len(fp_metadata_old) == 0:
        logger.info(f'No metadata for new experiment found at {old_log_dir} for {uuid}')
    else:
        fp_metadata_new = new_log_dir
        logger.info(f'Moving logs from {fp_metadata_old[0]} to {fp_metadata_new}')
        shutil.move(fp_metadata_old[0], fp_metadata_new)  # move the matching path, not the list

def get_parent_dirname(path):
    return os.path.basename(os.path.dirname(path))

def get_subdir(training_directory, subdir_name):
    ''' Look through all files/directories in training_directory and
        return all whose basename contains subdir_name.
        If there are no matches, return None.
        If there is exactly one, return it.
        If there are several, return a list of them.

        e.g. training_directory: '/path/to/exp'
             subdir_name: 'checkpoints' (directory)
             subdir_name: 'rewards' (files)
    '''
    training_directory = training_directory.strip()
    subdirectories = os.listdir(training_directory)
    special_subdirs = []
    for subdir in subdirectories:
        if subdir_name in subdir:
            special_subdirs.append(os.path.join(training_directory, subdir))
    if len(special_subdirs) == 0:
        return None
    elif len(special_subdirs) == 1:
        return special_subdirs[0]
    return special_subdirs

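# Hedged demo of get_subdir's three return shapes on a throwaway directory tree.
def _demo_get_subdir():
    import os, tempfile
    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, 'checkpoints'))
    assert get_subdir(root, 'eval') is None                         # no match -> None
    assert get_subdir(root, 'checkpoints').endswith('checkpoints')  # one match -> str
    os.makedirs(os.path.join(root, 'checkpoints_old'))
    assert isinstance(get_subdir(root, 'checkpoints'), list)        # several -> list
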
def read_pkl(pkl_name):
    with open(pkl_name, 'rb') as f:
        data = pickle.load(f)
    return data

def unused_dir_name(output_dir):
    ''' Returns a unique (not yet taken) output directory name with a structure
        similar to the existing one. Specifically:
          if output_dir is not taken, return it unchanged
          if output_dir is taken and ends in a number, increment that number
          otherwise, append 1 to the name
        e.g. if output_dir = '/eval/':
          if nothing exists:                return '/eval/'
          if '/eval/' exists:               return '/eval1/'
          if '/eval/' and '/eval1/' exist:  return '/eval2/'
    '''
    existing_output_paths = []
    if os.path.exists(output_dir):
        if os.path.basename(output_dir) == '':
            output_dir = os.path.dirname(output_dir)
        dirname = os.path.dirname(output_dir)
        base_name_prefix = re.sub('\\d+$', '', os.path.basename(output_dir))
        existing_output_paths = get_subdir(dirname, base_name_prefix)
        assert existing_output_paths is not None, f'Bug, cannot find output_dir {output_dir}'
        if not isinstance(existing_output_paths, list):
            existing_output_paths = [existing_output_paths]
        numbers = [get_number(os.path.basename(path)[-5:]) for path in existing_output_paths]
        eval_num = max(max(numbers), 0) + 1
        output_dir = os.path.join(dirname, f'{base_name_prefix}{eval_num}', '')
        print('New output dir', output_dir)
    return (output_dir, existing_output_paths)

def combined_paths(paths, name):
    ''' Runs get_subdir on every path in paths, then flattens:
        finds all files/directories in all paths whose basename includes name
        and returns them in a one-dimensional list.
    '''
    ret_paths = []
    for exp_path in paths:
        evals = get_subdir(exp_path, name)
        if evals is None:
            continue
        if isinstance(evals, list):
            ret_paths.extend(evals)
        else:
            ret_paths.append(evals)
    return ret_paths

def read_logs(pkl_name):
    return read_pkl(pkl_name)['results'][0]

def save_training_logs(results_paths, mlog):
    ''' results_paths is a list of experiments' result pkl file paths,
        e.g. results_paths = ['exp1/results_log.pkl', 'exp2/results_log.pkl']
    '''
    step_num_set = set()
    for results_path in results_paths:
        print(f'logging {results_path}')
        try:
            results = read_logs(results_path)
        except Exception as e:
            print(f'Could not read {results_path}. It could be empty.', e)
            continue
        for result in results:
            i = result['step_num']
            if i in step_num_set:
                continue
            step_num_set.add(i)
            del result['step_num']
            for k, v in result.items():
                log(mlog, k, v, phase='train')
            reset_log(mlog, None, i, phase='train')

def save_testing_logs(eval_paths, mlog):
    ''' eval_paths is a list of eval run paths,
        e.g. eval_paths = ['exp1/eval', 'exp1/eval1', 'exp2/eval']
    '''
    data_all_epochs = []
    seen_epochs = set()
    for eval_path in eval_paths:
        subdirectories = os.listdir(eval_path)
        for subdir in subdirectories:
            if 'rewards' in subdir:
                print(f'logging {eval_path}/{subdir}')
                epoch_num = get_number(subdir)
                if epoch_num in seen_epochs:
                    continue
                seen_epochs.add(epoch_num)
                rewards_pkl = os.path.join(eval_path, subdir)
                try:
                    rewards_lst = read_logs(rewards_pkl)
                    rewards = [r['reward'] for r in rewards_lst]
                    lengths = [r['length'] for r in rewards_lst]
                except Exception as e:
                    print(f'Could not read {rewards_pkl}', e)
                    continue
                data_all_epochs.append((epoch_num, (rewards, lengths)))
    data_all_epochs = sorted(data_all_epochs, key=lambda x: x[0])
    for epoch_num, (reward, length) in data_all_epochs:
        reward = np.array(reward)
        avg_reward = np.mean(reward)
        length = np.array(length)
        avg_length = np.mean(length)
        print(f'logging epoch {epoch_num} with r={avg_reward} of length {avg_length}')
        log(mlog, 'rewards_all_epochs', avg_reward, phase='val')
        log(mlog, 'rewards_histogram', reward, phase='val')
        log(mlog, 'lengths_all_epochs', avg_length, phase='val')
        log(mlog, 'lengths_histogram', length, phase='val')
        reset_log(mlog, None, epoch_num, phase='val')

def save_train_testing(exp_paths, mlog):
    train_result_paths = combined_paths(exp_paths, 'result_log.pkl')
    save_training_logs(train_result_paths, mlog)
    eval_paths = combined_paths(exp_paths, 'eval')
    save_testing_logs(eval_paths, mlog)

class EpisodeTracker(object):
    ''' Provides a method for tracking important metrics with a simultaneous
        batch of episodes.
    '''
    def __init__(self, n_to_track):
        self.episodes = [[] for _ in range(n_to_track)]

    def append(self, obs, actions):
        for i, (o, a) in enumerate(zip(obs['global_pos'], actions)):
            self.episodes[i].append((np.array(o), a))

    def clear_episode(self, k):
        # Keep only the most recent step: it seeds the next episode.
        last_obs = self.episodes[k][-1]
        self.episodes[k] = [last_obs]

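# A minimal usage sketch (hypothetical shapes): track two parallel episodes,
# appending each step's (global_pos, action) pair, then reset episode 0 so that
# its most recent observation seeds the next episode.
def _demo_episode_tracker():
    tracker = EpisodeTracker(n_to_track=2)
    obs = {'global_pos': np.array([[0.0, 0.0], [1.0, 1.0]])}
    tracker.append(obs, actions=[0, 3])
    tracker.append(obs, actions=[1, 2])
    tracker.clear_episode(0)
    assert len(tracker.episodes[0]) == 1 and len(tracker.episodes[1]) == 2
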
def softmax_cross_entropy(inputs, target, weight=None, cache={}, size_average=None,
                          ignore_index=-100, reduce=None, reduction='mean'):
    # The extra kwargs mirror F.cross_entropy's signature for drop-in use, but are unused here.
    cache['predictions'] = inputs
    cache['labels'] = target
    if len(target.shape) == 2:
        target = torch.argmax(target, dim=1)
    loss = F.cross_entropy(inputs, target, weight)
    return {'total': loss, 'xentropy': loss}

def heteroscedastic_normal(mean_and_scales, target, weight=None, cache={}, eps=0.01):
    ''' Gaussian negative log-likelihood (up to constants) with a predicted
        per-element scale; eps keeps the division and log stable. '''
    mu, scales = mean_and_scales
    loss = ((mu - target) ** 2) / (scales ** 2 + eps) + torch.log(scales ** 2 + eps)
    loss = torch.mean(weight * loss) / weight.mean() if weight is not None else loss.mean()
    return {'total': loss, 'nll': loss}

def heteroscedastic_double_exponential(mean_and_scales, target, weight=None, cache={}, eps=0.05):
    ''' Laplace (double-exponential) negative log-likelihood with a predicted
        per-element scale. '''
    mu, scales = mean_and_scales
    loss = torch.abs(mu - target) / (scales + eps) + torch.log(2.0 * (scales + eps))
    loss = torch.mean(weight * loss) / weight.mean() if weight is not None else loss.mean()
    return {'total': loss, 'nll': loss}

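# Quick numerical check of heteroscedastic_normal (a sketch, not part of the
# training code): per element the loss is (mu - y)^2 / (s^2 + eps) + log(s^2 + eps),
# i.e. a Gaussian negative log-likelihood up to constants, with eps=0.01.
def _check_heteroscedastic_normal():
    mu = torch.zeros(4)
    scales = torch.ones(4)
    target = torch.ones(4)
    out = heteroscedastic_normal((mu, scales), target)
    expected = 1.0 / 1.01 + torch.log(torch.tensor(1.01))
    assert torch.allclose(out['total'], expected, atol=1e-6)
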
def weighted_mse_loss(inputs, target, weight=None, cache={}):
    cache['predictions'] = inputs
    cache['labels'] = target
    if weight is not None:
        loss = torch.mean(weight * (inputs - target) ** 2) / torch.mean(weight)
    else:
        loss = F.mse_loss(inputs, target)
    return {'total': loss, 'mse': loss}

def weighted_l1_loss(inputs, target, weight=None, cache={}):
    target = target.float()
    if weight is not None:
        loss = torch.mean(weight * torch.abs(inputs - target)) / torch.mean(weight)
    else:
        loss = F.l1_loss(inputs, target)
    return {'total': loss, 'l1': loss}

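# Sanity sketch for the weighted losses: dividing by weight.mean() makes the
# weighting scale-invariant, so a uniform weight reproduces the unweighted loss.
def _check_weighted_mse():
    inputs, target = torch.randn(8, 3), torch.randn(8, 3)
    w = torch.full_like(inputs, 2.0)
    weighted = weighted_mse_loss(inputs, target, weight=w)['total']
    assert torch.allclose(weighted, F.mse_loss(inputs, target), atol=1e-6)
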
def perceptual_l1_loss(decoder_path, bake_decodings):
    task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
    decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=(task in FEED_FORWARD_TASKS))
    checkpoint = torch.load(decoder_path)
    decoder.load_state_dict(checkpoint['state_dict'])
    decoder.cuda()
    decoder.eval()
    print(f'Loaded decoder from {decoder_path} for perceptual loss')

    def runner(inputs, target, weight=None, cache={}):
        inputs_decoded = decoder(inputs)
        targets_decoded = target if bake_decodings else decoder(target)
        cache['predictions'] = inputs_decoded
        cache['labels'] = targets_decoded
        if weight is not None:
            loss = torch.mean(weight * torch.abs(inputs_decoded - targets_decoded)) / torch.mean(weight)
        else:
            loss = F.l1_loss(inputs_decoded, targets_decoded)
        return {'total': loss, 'perceptual_l1': loss}
    return runner

def perceptual_l2_loss(decoder_path, bake_decodings):
    task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
    decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=(task in FEED_FORWARD_TASKS))
    checkpoint = torch.load(decoder_path)
    decoder.load_state_dict(checkpoint['state_dict'])
    decoder.cuda()
    decoder.eval()
    print(f'Loaded decoder from {decoder_path} for perceptual loss')

    def runner(inputs, target, weight=None, cache={}):
        inputs_decoded = decoder(inputs)
        targets_decoded = target if bake_decodings else decoder(target)
        cache['predictions'] = inputs_decoded
        cache['labels'] = targets_decoded
        if weight is not None:
            loss = torch.mean(weight * (inputs_decoded - targets_decoded) ** 2) / torch.mean(weight)
        else:
            loss = F.mse_loss(inputs_decoded, targets_decoded)
        return {'total': loss, 'perceptual_mse': loss}
    return runner

def dense_softmax_cross_entropy_loss(inputs, targets, cache={}):
    batch_size, _ = targets.shape
    losses = {}
    losses['final'] = -1.0 * torch.sum(torch.softmax(targets.float(), dim=1)
                                       * F.log_softmax(inputs.float(), dim=1)) / batch_size
    losses['standard'] = losses['final']
    return losses

def dense_cross_entropy_loss_(inputs, targets):
    batch_size, _ = targets.shape
    return -1.0 * torch.sum(targets * F.log_softmax(inputs, dim=1)) / batch_size

def dense_softmax_cross_entropy(inputs, targets, weight=None, cache={}):
    assert weight is None
    cache['predictions'] = inputs
    cache['labels'] = targets
    batch_size, _ = targets.shape
    loss = -1.0 * torch.sum(torch.softmax(targets.detach(), dim=1)
                            * F.log_softmax(inputs, dim=1)) / batch_size
    return {'total': loss, 'xentropy': loss}

def dense_cross_entropy(inputs, targets, weight=None, cache={}):
    assert weight is None
    cache['predictions'] = inputs
    cache['labels'] = targets
    batch_size, _ = targets.shape
    loss = -1.0 * torch.sum(targets.detach() * F.log_softmax(inputs, dim=1)) / batch_size
    return {'total': loss, 'xentropy': loss}

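# The dense variants accept a full distribution over classes as the target.
# With one-hot targets they reduce to the standard cross-entropy, which gives
# a quick consistency check (a sketch, not part of the training code):
def _check_dense_cross_entropy():
    logits = torch.randn(5, 10)
    labels = torch.randint(0, 10, (5,))
    one_hot = F.one_hot(labels, num_classes=10).float()
    dense = dense_cross_entropy(logits, one_hot)['total']
    assert torch.allclose(dense, F.cross_entropy(logits, labels), atol=1e-6)
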
def perceptual_cross_entropy_loss(decoder_path, bake_decodings):
    task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
    decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=(task in FEED_FORWARD_TASKS))
    checkpoint = torch.load(decoder_path)
    decoder.load_state_dict(checkpoint['state_dict'])
    decoder.cuda()
    decoder.eval()
    print(f'Loaded decoder from {decoder_path} for perceptual loss')

    def runner(inputs, target, weight=None, cache={}):
        inputs_decoded = decoder(inputs)
        targets_decoded = target if bake_decodings else decoder(target)
        cache['predictions'] = inputs_decoded
        cache['labels'] = targets_decoded
        return dense_softmax_cross_entropy_loss(inputs_decoded, targets_decoded)
    return runner

def identity_regularizer(loss_fn, model):
    def runner(inputs, target, weight=None, cache={}):
        return loss_fn(inputs, target, weight, cache)
    return runner

def transfer_regularizer(loss_fn, model, reg_loss_fn='F.l1_loss', coef=0.001):
    def runner(inputs, target, weight=None, cache={}):
        orig_losses = loss_fn(inputs, target, weight, cache)
        if type(model).__name__ == 'PolicyWithBase':
            assert 'base_encoding' in cache and 'transfered_encoding' in cache, \
                f'cache is missing keys {cache.keys()}'
            regularization_loss = 0
            for base_encoding, transfered_encoding in zip(cache['base_encoding'],
                                                          cache['transfered_encoding']):
                regularization_loss += eval(reg_loss_fn)(
                    model.base.perception_unit.sidetuner.net.transfer_network(base_encoding),
                    transfered_encoding)
        else:
            assert isinstance(model.side_output, torch.Tensor), \
                'Cannot regularize side network if it is not used'
            regularization_loss = eval(reg_loss_fn)(
                model.transfer_network(model.base_encoding), model.transfered_encoding)
        orig_losses.update({'total': orig_losses['total'] + coef * regularization_loss,
                            'weight_tying': regularization_loss})
        return orig_losses
    return runner

def perceptual_regularizer(loss_fn, model, coef=0.001, decoder_path=None,
                           use_transfer=True, reg_loss_fn='F.mse_loss'):
    assert decoder_path is not None, \
        'Pass in a decoder to which to transform our parameters and regularize on'
    task = [t for t in SINGLE_IMAGE_TASKS if t in decoder_path][0]
    decoder = TaskonomyDecoder(TASKS_TO_CHANNELS[task], feed_forward=(task in FEED_FORWARD_TASKS))
    checkpoint = torch.load(decoder_path)
    decoder.load_state_dict(checkpoint['state_dict'])
    decoder.cuda()
    decoder.eval()
    if task in FEED_FORWARD_TASKS:
        reg_loss_fn = 'dense_softmax_cross_entropy_loss'
    else:
        reg_loss_fn = 'F.l1_loss'
    print(f'Loaded decoder from {decoder_path} for perceptual loss')

    def runner(inputs, target, weight=None, cache={}):
        orig_losses = loss_fn(inputs, target, weight, cache)
        if type(model).__name__ == 'PolicyWithBase':
            assert 'base_encoding' in cache, f'cache is missing base {cache.keys()}'
            if use_transfer:
                assert 'transfered_encoding' in cache, f'cache is missing tied {cache.keys()}'
                tied_encodings = cache['transfered_encoding']
            else:
                assert 'merged_encoding' in cache, f'cache is missing tied {cache.keys()}'
                tied_encodings = cache['merged_encoding']
            regularization_loss = 0
            for base_encoding, tied_encoding in zip(cache['base_encoding'], tied_encodings):
                regularization_loss += eval(reg_loss_fn)(decoder(base_encoding), decoder(tied_encoding))
        else:
            assert isinstance(model.side_output, torch.Tensor), \
                'Cannot regularize side network if it is not used'
            tied_encoding = model.transfered_encoding if use_transfer else model.merged_encoding
            # reg_loss_fn is a name, so it must be eval'd before being called
            regularization_loss = eval(reg_loss_fn)(decoder(model.base_encoding), decoder(tied_encoding))
        orig_losses.update({'total': orig_losses['total'] + coef * regularization_loss,
                            'weight_tying': regularization_loss})
        return orig_losses
    return runner

def cfg_to_md(cfg, uuid):
    ''' Because tensorboard uses markdown '''
    return (uuid + '\n\n ') + pprint.pformat(cfg).replace('\n', ' \n').replace("\n '", "\n '") + ''

def count_trainable_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def count_total_parameters(model):
    return sum(p.numel() for p in model.parameters())

def is_interactive():
    try:
        ip = get_ipython()
        return ip.has_trait('kernel')
    except NameError:  # get_ipython is only defined inside IPython
        return False

def is_cuda(model):
    return next(model.parameters()).is_cuda

class Bunch(object):
    def __init__(self, adict):
        self.__dict__.update(adict)
        self._keys, self._vals = zip(*adict.items())
        self._keys, self._vals = list(self._keys), list(self._vals)

    def keys(self):
        return self._keys

    def vals(self):
        return self._vals

def compute_weight_norm(parameters):
    ''' no grads! '''
    total = 0.0
    count = 0
    for p in parameters:
        total += torch.sum(p.data ** 2)
        count += p.numel()
    return total / count

def get_number(name):
    ''' Use a regex to get the first integer in the name;
        if none exists, return -1.
    '''
    try:
        num = int(re.findall('[0-9]+', name)[0])
    except IndexError:  # no digits found
        num = -1
    return num

def append_dict(d, u, stop_recurse_keys=[]):
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping) and k not in stop_recurse_keys:
            d[k] = append_dict(d.get(k, {}), v, stop_recurse_keys=stop_recurse_keys)
        else:
            if k not in d:
                d[k] = []
            d[k].append(v)
    return d

def update_dict_deepcopy(d, u):
    # Recursively merges u into d (in place), preferring values from u.
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
            d[k] = update_dict_deepcopy(d.get(k, {}), v)
        else:
            d[k] = v
    return d

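# Behavior sketch for the two dict helpers above: append_dict accumulates leaf
# values into lists (useful for merging per-run metrics), while
# update_dict_deepcopy recursively merges the second dict into the first,
# preferring the second's values.
def _demo_dict_helpers():
    acc = {}
    append_dict(acc, {'a': 1, 'b': {'c': 2}})
    append_dict(acc, {'a': 3, 'b': {'c': 4}})
    assert acc == {'a': [1, 3], 'b': {'c': [2, 4]}}

    base = {'x': {'y': 1}, 'z': 0}
    update_dict_deepcopy(base, {'x': {'y': 2}})
    assert base == {'x': {'y': 2}, 'z': 0}
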
def eval_dict_values(d):
    for k in d.keys():
        if isinstance(d[k], collections.abc.Mapping):
            d[k] = eval_dict_values(d[k])
        elif isinstance(d[k], str):
            d[k] = eval(d[k].replace('---', "'"))
    return d

def search_and_replace_dict(model_kwargs, task_initial):
    for k, v in model_kwargs.items():
        if isinstance(v, collections.abc.Mapping):
            search_and_replace_dict(v, task_initial)
        elif isinstance(v, str) and 'encoder' in v and task_initial not in v:
            new_pth = v.replace('curvature', task_initial)
            warnings.warn(f'BE CAREFUL - CHANGING ENCODER PATH: {v} is being replaced for {new_pth}')
            model_kwargs[k] = new_pth
    return

class _CustomDataParallel(nn.Module):
    def __init__(self, model, device_ids):
        super(_CustomDataParallel, self).__init__()
        self.model = nn.DataParallel(model, device_ids=device_ids)
        self.model.to(device)
        num_devices = torch.cuda.device_count() if device_ids is None else len(device_ids)
        print(f'{type(model)} using {num_devices} GPUs!')

    def forward(self, *input, **kwargs):
        return self.model(*input, **kwargs)

    def __getattr__(self, name):
        try:
            return super().__getattr__(name)
        except AttributeError:
            # Fall back to the wrapped module so attribute access is transparent.
            return getattr(self.model.module, name)

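# Usage sketch (assumes the module-level `device` and at least one GPU, since
# __init__ moves the wrapped model to `device`): attribute access falls through
# the DataParallel wrapper to the underlying module.
#
#   net = _CustomDataParallel(nn.Linear(4, 2), device_ids=None)
#   out = net(torch.randn(8, 4).to(device))  # forwarded through nn.DataParallel
#   w = net.weight                           # resolved on the wrapped Linear module
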
class Profiler(object):
    def __init__(self, name, logger=None, level=logging.INFO):
        self.name = name
        self.logger = logger
        self.level = level

    def step(self, name):
        ''' Returns the duration and stepname since last step/start '''
        duration = self.summarize_step(start=self.step_start, step_name=name, level=self.level)
        now = time.time()
        self.step_start = now
        return duration

    def __enter__(self):
        self.start = time.time()
        self.step_start = time.time()
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.summarize_step(self.start, step_name='complete')

    def summarize_step(self, start, step_name='', level=None):
        duration = time.time() - start
        step_semicolon = ':' if step_name else ''
        if self.logger:
            level = level or self.level
            self.logger.log(level, '{name}{step}: {secs} seconds'.format(
                name=self.name, step=step_semicolon + step_name, secs=duration))
        else:
            print('{name}{step}: {secs} seconds'.format(
                name=self.name, step=step_semicolon + step_name, secs=duration))
        return duration

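# Usage sketch: time a few stages of work; with no logging.Logger attached the
# durations are printed to stdout.
def _demo_profiler():
    with Profiler('preprocess') as prof:
        time.sleep(0.01)
        prof.step('load')     # prints 'preprocess:load: ... seconds'
        time.sleep(0.01)
        prof.step('augment')  # prints 'preprocess:augment: ... seconds'
    # on exit prints 'preprocess:complete: ... seconds', measured from __enter__
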
class RAdam(Optimizer):
    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        if amsgrad:
            warnings.warn('amsgrad is not used')
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # Rectification term from the RAdam paper: only use the
                    # adaptive step once the variance estimate is tractable.
                    if N_sma >= 5:
                        step_size = group['lr'] * math.sqrt(
                            (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2)
                            / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
                p.data.copy_(p_data_fp32)
        return loss

class PlainRAdam(Optimizer):
    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                state['step'] += 1
                beta2_t = beta2 ** state['step']
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])
                if N_sma >= 5:
                    step_size = group['lr'] * math.sqrt(
                        (1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2)
                        / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    step_size = group['lr'] / (1 - beta1 ** state['step'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size)
                p.data.copy_(p_data_fp32)
        return loss

class AdamW(Optimizer):
    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, warmup=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, warmup=warmup)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)

    def step(self, closure=None):
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                if group['warmup'] > state['step']:
                    scheduled_lr = 1e-08 + state['step'] * group['lr'] / group['warmup']
                else:
                    scheduled_lr = group['lr']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                if group['weight_decay'] != 0:
                    # Decoupled weight decay, applied at the (possibly warmed-up) lr.
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * scheduled_lr)
                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                p.data.copy_(p_data_fp32)
        return loss

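# Usage sketch: all three optimizers are drop-in replacements for
# torch.optim.Adam. RAdam rectifies the adaptive step early in training;
# AdamW decouples weight decay and supports a linear warmup via `warmup`.
def _demo_optimizers():
    model = nn.Linear(4, 2)
    opt = RAdam(model.parameters(), lr=1e-3)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    loss.backward()
    opt.step()
    opt.zero_grad()
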
def set_seed(seed):
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    np.random.seed(seed)
    random.seed(seed)

def compute_optimal_imgs(img_paths, use_pool=False):
    median_time, mean_time, pil_time = 0, 0, 0
    img_paths = [path for path in img_paths if '.png' in path]
    mean_meter = ValueSummaryMeter()
    median_meter = MedianImageMeter(bit_depth=8, im_shape=(256, 256, 3), device='cuda')
    p = Pool(6)
    for img_paths_chunk in tqdm(chunk(img_paths, 64)):
        t0 = time.time()
        if use_pool:
            imgs = p.map(Image.open, img_paths_chunk)
        else:
            imgs = [Image.open(img_path) for img_path in img_paths_chunk]
        t1 = time.time()
        for img in imgs:
            median_meter.add(pil_to_np(img))
        t2 = time.time()
        for img in imgs:
            mean_meter.add(pil_to_np(img).astype(np.float32))
            img.close()
        t3 = time.time()
        median_time += t2 - t1
        mean_time += t3 - t2
        pil_time += t1 - t0
    p.close()
    print('median', median_time, 'mean', mean_time, 'pil', pil_time)
    return (np_to_pil(mean_meter.value()[0]), np_to_pil(median_meter.value()))

class SimpleRLEnv(habitat.RLEnv):
    def get_reward_range(self):
        return [-1, 1]

    def get_reward(self, observations):
        return 0

    def get_done(self, observations):
        return self.habitat_env.episode_over

    def get_info(self, observations):
        return self.habitat_env.get_metrics()

def draw_top_down_map(info, heading, output_size):
    top_down_map = maps.colorize_topdown_map(info['top_down_map']['map'])
    original_map_size = top_down_map.shape[:2]
    map_scale = np.array((1, original_map_size[1] * 1.0 / original_map_size[0]))
    new_map_size = np.round(output_size * map_scale).astype(np.int32)
    top_down_map = cv2.resize(top_down_map, (new_map_size[1], new_map_size[0]))
    map_agent_pos = info['top_down_map']['agent_map_coord']
    map_agent_pos = np.round(map_agent_pos * new_map_size / original_map_size).astype(np.int32)
    top_down_map = maps.draw_agent(top_down_map, map_agent_pos, heading - np.pi / 2,
                                   agent_radius_px=top_down_map.shape[0] / 40)
    return top_down_map

def get_logger(cfg, uuid):
    if cfg['saving']['logging_type'] == 'visdom':
        mlog = tnt.logger.VisdomMeterLogger(title=uuid, env=uuid,
                                            server=cfg['saving']['visdom_server'],
                                            port=cfg['saving']['visdom_port'],
                                            log_to_filename=cfg['saving']['visdom_log_file'])
    elif cfg['saving']['logging_type'] == 'tensorboard':
        mlog = tnt.logger.TensorboardMeterLogger(env=uuid,
                                                 log_dir=cfg['saving']['log_dir'],
                                                 plotstylecombined=True,
                                                 train_only=cfg['training']['train_only'])
    else:
        assert False, 'no proper logger!'
    return mlog

def maybe_bake_decodings(cfg, logger):
    task = cfg['training']['taskonomy_encoder']
    need_encodings = cfg['training']['baked_encoding'] and not os.path.isdir(
        os.path.join(cfg['training']['data_dir'], f'{task}_encoding'))
    need_decodings = cfg['training']['baked_decoding'] and not os.path.isdir(
        os.path.join(cfg['training']['data_dir'], f'{task}_decoding'))
    split_to_use = eval(cfg['training']['split_to_use'])
    folders_to_convert = set(split_to_use['train'] + split_to_use['val'] + split_to_use['test'])
    if not (need_decodings or need_encodings
            or need_to_save(task=task,
                            folders_to_convert=folders_to_convert,
                            data_dir=cfg['training']['data_dir'],
                            save_dir=cfg['training']['data_dir'],
                            store_representation=cfg['training']['baked_encoding'],
                            store_prediction=cfg['training']['baked_decoding'])):
        return
    logger.info(f'Requiring at least one of baked encodings ({need_encodings}) '
                f'or decodings ({need_decodings}). Baking...')
    from tlkit.get_reprs import save_reprs as base_decodings
    base_decodings(cfg['training']['taskonomy_encoder'],
                   model_base_path=cfg['training']['encoding_base_path'],
                   folders_to_convert=folders_to_convert,
                   split_to_convert=None,
                   data_dir=cfg['training']['data_dir'],
                   save_dir=cfg['training']['data_dir'],
                   store_representation=cfg['training']['baked_encoding'],
                   store_prediction=cfg['training']['baked_decoding'],
                   n_dataloader_workers=cfg['training']['num_workers'],
                   batch_size=cfg['training']['batch_size_val'],
                   skip_done_folders=True)

@ex.main
def train(cfg, uuid):
    logger.setLevel(logging.INFO)
    logger.info(cfg)
    logger.debug(f'Loaded Torch version: {torch.__version__}')
    logger.debug(f'Using device: {device}')
    task = cfg['training']['taskonomy_encoder']
    start_epoch = 0

    logger.debug('Starting data loaders')
    maybe_bake_decodings(cfg, logger)
    set_seed(cfg['training']['seed'])
    data_subfolders = ['rgb']
    if cfg['training']['baked_encoding']:
        data_subfolders.append(f'{task}_encoding')
    if cfg['training']['baked_decoding']:
        data_subfolders.append(f'{task}_decoding')
    dataloaders = get_dataloaders(cfg['training']['data_dir'], data_subfolders,
                                  batch_size=cfg['training']['batch_size'],
                                  batch_size_val=cfg['training']['batch_size_val'],
                                  zip_file_name=False,
                                  train_folders=eval(cfg['training']['split_to_use'])['train'],
                                  val_folders=eval(cfg['training']['split_to_use'])['val'],
                                  test_folders=eval(cfg['training']['split_to_use'])['test'],
                                  num_workers=cfg['training']['num_workers'],
                                  load_to_mem=cfg['training']['load_to_mem'],
                                  pin_memory=cfg['training']['pin_memory'])

    logger.debug('Setting up student model')
    set_seed(cfg['training']['seed'])
    student = eval(cfg['learner']['model'])(**cfg['learner']['model_kwargs'])
    if cfg['training']['resume_from_checkpoint_path'] is not None:
        ckpt_fpath = cfg['training']['resume_from_checkpoint_path']
        checkpoint = torch.load(ckpt_fpath)
        student.load_state_dict(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
        logger.info(f"Loaded student (epoch {start_epoch if 'epoch' in checkpoint else 'unknown'}) from {ckpt_fpath}")
    student.to(device)

    logger.debug('Setting up teacher')
    set_seed(cfg['training']['seed'])
    out_channels = TASKS_TO_CHANNELS[task] if task in TASKS_TO_CHANNELS else None
    teacher = TaskonomyNetwork(out_channels=out_channels)
    if cfg['training']['baked_encoding']:
        teacher.encoder = None
    else:
        teacher.load_encoder(os.path.join(cfg['training']['encoding_base_path'], f'{task}_encoder.dat'))
    if cfg['training']['baked_decoding'] and not (cfg['training']['loss_type'].upper() == 'PERCEPTION'):
        teacher.decoder = None
    else:
        assert out_channels is not None, f'Decoder needed for config, but unknown decoder format for task {task}.'
        teacher.load_decoder(os.path.join(cfg['training']['encoding_base_path'], f'{task}_decoder.dat'))
    teacher.eval()
    teacher.to(device)

    if torch.cuda.device_count() > 1:
        logger.info(f'Using {torch.cuda.device_count()} GPUs!')
        teacher.encoder = torch.nn.DataParallel(teacher.encoder)
        teacher.decoder = torch.nn.DataParallel(teacher.decoder)
        student = torch.nn.DataParallel(student)

    if cfg['training']['loss_fn'] == 'L2':
        loss_fn = nn.MSELoss()
    elif cfg['training']['loss_fn'] == 'L1':
        loss_fn = nn.L1Loss()
    else:
        logger.warning('Using default L2/MSE loss')
        loss_fn = nn.MSELoss()

    flog = tnt.logger.FileLogger(cfg['saving']['results_log_file'], overwrite=True)
    mlog = get_logger(cfg, uuid)
    mlog.add_meter('decoded_image', tnt.meter.ValueSummaryMeter(), ptype='image')
    if cfg['training']['train']:
        mlog.add_meter('teacher_histogram', tnt.meter.ValueSummaryMeter(), ptype='histogram')
        mlog.add_meter('student_histogram', tnt.meter.ValueSummaryMeter(), ptype='histogram')
    mlog.add_meter('loss', tnt.meter.ValueSummaryMeter())
    mlog.add_meter('image', tnt.meter.ValueSummaryMeter(), ptype='image')

    optimizer = optim.Adam(student.parameters(), lr=cfg['learner']['lr'])
    scheduler = None
    if cfg['learner']['lr_scheduler_method'] is not None:
        scheduler = eval(cfg['learner']['lr_scheduler_method'])(optimizer,
                                                                **cfg['learner']['lr_scheduler_method_kwargs'])

    logger.info('Starting training...')
    train_model(cfg, student, teacher, dataloaders, loss_fn, optimizer,
                start_epoch=start_epoch,
                num_epochs=cfg['training']['num_epochs'],
                save_epochs=cfg['saving']['save_interval'],
                scheduler=scheduler, mlog=mlog, flog=flog)
    if cfg['training']['test']:
        raise NotImplementedError()

def train_model(cfg, student, teacher, dataloaders, loss_fn, optimizer, start_epoch=0,
                num_epochs=250, save_epochs=25, scheduler=None, mlog=None, flog=None):
    checkpoint_dir = os.path.join(cfg['saving']['log_dir'], cfg['saving']['save_dir'])
    run_kwargs = {'baked_encoding': cfg['training']['baked_encoding'],
                  'baked_decoding': cfg['training']['baked_decoding'],
                  'mlog': mlog, 'flog': flog,
                  'optimizer': optimizer,
                  'loss_type': cfg['training']['loss_type'],
                  'loss_fn': loss_fn,
                  'student': student, 'teacher': teacher, 'decoder': teacher.decoder,
                  'cfg': cfg}
    loss = 99999
    for epoch in range(start_epoch, start_epoch + num_epochs):
        if (epoch % save_epochs == save_epochs - 1) or (epoch == 0):
            checkpoints.save_checkpoint({'state_dict': student.state_dict(), 'epoch': epoch},
                                        directory=checkpoint_dir, step_num=epoch)
        _ = run_one_epoch(dataloader=dataloaders['train'], epoch=epoch, train=True, **run_kwargs)
        if scheduler is not None:
            try:
                scheduler.step(loss)  # ReduceLROnPlateau needs the metric
            except Exception:
                scheduler.step()      # other schedulers do not take one
        loss = run_one_epoch(dataloader=dataloaders['val'], epoch=epoch, train=False, **run_kwargs)
    checkpoints.save_checkpoint({'state_dict': student.state_dict(), 'epoch': epoch},
                                directory=checkpoint_dir, step_num=epoch)
    return student

def run_one_epoch(student, teacher, decoder, dataloader, loss_fn, loss_type, optimizer,
                  epoch, baked_encoding, baked_decoding, mlog, flog, train, cfg):
    student.train(train)
    phase = 'train' if train else 'val'
    with torch.set_grad_enabled(train), Profiler(f'Epoch {epoch} ({phase})', logger) as prof:
        running_loss = 0.0
        for batch_tuple in tqdm(dataloader):
            batch_tuple = [x.to(device, non_blocking=True) for x in batch_tuple]
            x = batch_tuple[0]
            if baked_encoding:
                encoding_label = batch_tuple[1]
            if baked_decoding:
                decoding_label = batch_tuple[-1]
            if train:
                optimizer.zero_grad()
            student_encoding = student(x)
            if loss_type.upper() == 'PERCEPTION':
                if not baked_encoding:  # compute the encoding label only if it is not baked
                    encoding_label = teacher.encoder(x)
                if not baked_decoding:
                    with torch.no_grad():
                        decoding_label = decoder(encoding_label)
                prediction, label = decoder(student_encoding), decoding_label
            else:
                prediction, label = student_encoding, encoding_label
            loss = loss_fn(prediction, label)
            if train:
                loss.backward()
                if cfg['learner']['max_grad_norm'] is not None:
                    torch.nn.utils.clip_grad_norm_(student.parameters(), cfg['learner']['max_grad_norm'])
                optimizer.step()
            mlog.update_meter(loss.detach().item(), meters={'loss'}, phase=phase)
        if decoder is not None:
            if len(label.shape) == 4 and label.shape[1] == 2:
                # Pad 2-channel outputs with a zero channel so they can be shown as RGB.
                zeros = torch.zeros(label.shape[0], 1, label.shape[2], label.shape[3]).to(label.device)
                label = torch.cat([label, zeros], dim=1)
                prediction = torch.cat([prediction, zeros], dim=1)
            im_samples = torch.cat([x, prediction.expand_as(x), label.expand_as(x)], dim=3)
            im_samples = tvutils.make_grid(im_samples.detach().cpu(), nrow=1, padding=2)
            log(mlog, 'decoded_image', im_samples, phase=phase)
        logs = mlog.peek_meter(phase=phase)
        logger.info(phase + ' loss: {0:.6f}'.format(logs['loss'].item()))
        tlkit.utils.log(mlog, 'image', var_to_numpy(x[0]), phase=phase)
        tlkit.utils.reset_log(mlog, flog=flog, epoch=epoch, phase=phase)
    return logs['loss'].item()

@ex.config
def cfg_base():
    uuid = 'basic'
    cfg = {}
    cfg['learner'] = {
        'model': 'atari_residual',
        'model_kwargs': {},
        'eps': 1e-05,
        'lr': 0.001,
        'lr_scheduler_method': None,
        'lr_scheduler_method_kwargs': {},
        'max_grad_norm': 1,
        'test': False,
        'scheduler': 'plateau',
    }
    cfg['training'] = {
        'baked_encoding': False,
        'baked_decoding': True,
        'batch_size': 64,
        'batch_size_val': 64,
        'cuda': True,
        'data_dir': '/mnt/hdd2/taskonomy_reps',
        'epochs': 100,
        'encoding_base_path': '/root/tlkit/taskonomy/taskbank/pytorch',
        'loss_fn': 'L1',
        'loss_type': 'perception',
        'load_to_mem': False,
        'num_workers': 8,
        'num_epochs': 1,
        'pin_memory': True,
        'resume_from_checkpoint_path': None,
        'seed': random.randint(0, 1000),
        'split_to_use': 'splits.taskonomy_no_midlevel["debug"]',
        'taskonomy_encoder': 'autoencoding',
        'train': True,
        'train_only': False,
        'test': False,
    }
    cfg['saving'] = {
        'log_dir': LOG_DIR,
        'log_interval': 1,
        'logging_type': 'tensorboard',
        'results_log_file': os.path.join(LOG_DIR, 'result_log.pkl'),
        'reward_log_file': os.path.join(LOG_DIR, 'rewards.pkl'),
        'save_interval': 1,
        'save_dir': 'checkpoints',
        'visdom_log_file': os.path.join(LOG_DIR, 'visdom_logs.json'),
        'visdom_server': 'r2d2.eecs.berkeley.edu',
        'visdom_port': '8097',
    }

@ex.named_config
def model_fcn5():
    cfg = {'learner': {'model': 'FCN5Residual',
                       'model_kwargs': {'num_groups': 2, 'use_residual': False, 'normalize_output': False}}}


@ex.named_config
def model_fcn5_residual():
    cfg = {'learner': {'model': 'FCN5Residual',
                       'model_kwargs': {'num_groups': 2, 'use_residual': True, 'normalize_output': False}}}


@ex.named_config
def model_fcn3():
    cfg = {'learner': {'model': 'FCN3',
                       'model_kwargs': {'num_groups': 2, 'normalize_output': False}}}


@ex.named_config
def student_taskonomy_encoder_penultimate():
    cfg = {'learner': {'model': 'TaskonomyEncoder',
                       'model_kwargs': {'train': True, 'eval_only': False}}}


@ex.named_config
def student_taskonomy_encoder():
    cfg = {'learner': {'model': 'TaskonomyEncoder',
                       'model_kwargs': {'train_penultimate': True, 'eval_only': False}}}


@ex.named_config
def scheduler_reduce_on_plateau():
    cfg = {'learner': {'lr_scheduler_method': 'lr_scheduler.ReduceLROnPlateau',
                       'lr_scheduler_method_kwargs': {'factor': 0.1, 'patience': 5}}}


@ex.named_config
def scheduler_step_lr():
    cfg = {'learner': {'lr_scheduler_method': 'lr_scheduler.StepLR',
                       'lr_scheduler_method_kwargs': {'lr_decay_epochs': 30, 'gamma': 0.1}}}


@ex.named_config
def cfg_eval():
    uuid = 'eval'
    cfg = {}
    cfg['learner'] = {'model': 'FCN5', 'test': True}
    cfg['training'] = {'train': False}

def save_as_png(file_path, decoding):
    # Map [-1, 1] float predictions to the full uint16 range.
    decoding = 0.5 * decoding + 0.5
    decoding *= 2 ** 16 - 1
    decoding = decoding.astype(np.uint16)
    if decoding.shape[0] == 2:
        # Pad 2-channel outputs with a zero channel so they save as RGB.
        zeros = np.zeros((1, decoding.shape[1], decoding.shape[2]), dtype=np.uint16)
        decoding = np.vstack((decoding, zeros))
    decoding = np.transpose(decoding, (1, 2, 0))
    if decoding.shape[2] > 1:
        cv2.imwrite(file_path, cv2.cvtColor(decoding, cv2.COLOR_RGB2BGR))
    else:
        cv2.imwrite(file_path, decoding)
    return

def save_to_file(arr, original_image_fname, new_root, subfolder, filetype='.npy'):
    abspath = os.path.abspath(original_image_fname)
    base_name = os.path.basename(abspath).replace('.png', filetype)
    parent_name = get_parent_dirname(abspath)
    file_path = os.path.join(new_root, subfolder, parent_name, base_name)
    os.makedirs(os.path.join(new_root, subfolder, parent_name), exist_ok=True)
    if filetype == '.npy':
        np.save(file_path, arr)
    elif filetype == '.npz':
        np.savez_compressed(file_path, arr)
    elif filetype == '.png':
        save_as_png(file_path, arr)
    else:
        raise NotImplementedError('Cannot save {}. Unrecognized filetype {}.'.format(file_path, filetype))

def save_mappable(x):
    # Unpacks a tuple of arguments so save_to_file can be used with Pool.map.
    return save_to_file(*x)

def remove_done_folders(task, folders_to_convert, data_dir, save_dir, store_prediction, store_representation):
    rgb_dir = os.path.join(data_dir, 'rgb')
    encoding_dir = os.path.join(save_dir, f'{task}_encoding')
    decoding_dir = os.path.join(save_dir, f'{task}_decoding')
    folders_to_use = set()
    for folder in folders_to_convert:
        rgb_folder = os.path.join(rgb_dir, folder)
        decoding_folder = os.path.join(decoding_dir, folder)
        encoding_folder = os.path.join(encoding_dir, folder)
        if not os.path.exists(rgb_folder):
            print(f'Skipping {folder} because no rgb folder (but is that true? This is probably caused by a bug somewhere)')
            continue
        if store_representation and not os.path.exists(encoding_folder):
            folders_to_use.add(folder)
        elif store_representation and len(os.listdir(encoding_folder)) != len(os.listdir(rgb_folder)):
            folders_to_use.add(folder)
        if store_prediction and not os.path.exists(decoding_folder):
            folders_to_use.add(folder)
        elif store_prediction and len(os.listdir(decoding_folder)) != len(os.listdir(rgb_folder)):
            folders_to_use.add(folder)
    return list(folders_to_use)

def need_to_save(task, folders_to_convert, data_dir, save_dir, store_prediction, store_representation):
    folders_to_convert = remove_done_folders(task, folders_to_convert, data_dir, save_dir,
                                             store_prediction, store_representation)
    return len(folders_to_convert) != 0

def save_reprs(task, model_base_path, folders_to_convert, split_to_convert, data_dir, save_dir,
               store_representation=True, store_prediction=True, n_dataloader_workers=4,
               batch_size=64, skip_done_folders=True):
    logger.info(f'Setting up model of {task} with {model_base_path}')
    out_channels = TASKS_TO_CHANNELS[task] if task in TASKS_TO_CHANNELS else None
    feed_forward = task in FEED_FORWARD_TASKS
    model = TaskonomyNetwork(out_channels=out_channels, feed_forward=feed_forward)
    model.load_encoder(os.path.join(model_base_path, f'{task}_encoder.dat'))
    if store_prediction:
        if out_channels is None:
            raise NotImplementedError(f'Unknown decoder format for task {task}')
        model.load_decoder(os.path.join(model_base_path, f'{task}_decoder.dat'))
    if torch.cuda.device_count() > 1:
        logger.info(f'Using {torch.cuda.device_count()} GPUs!')
        model.encoder = torch.nn.DataParallel(model.encoder)
        model.decoder = torch.nn.DataParallel(model.decoder) if store_prediction else model.decoder
    model.eval()
    model.to(device)

    if folders_to_convert is None and split_to_convert is not None:
        split_to_convert = eval(split_to_convert)
        logger.info(f'Converting from split {split_to_convert}')
        folders_to_convert = sorted(list(set(split_to_convert['train'] + split_to_convert['val'] + split_to_convert['test'])))
    assert folders_to_convert is not None, 'No folders to convert. Aborting'
    if skip_done_folders:
        folders_to_convert = remove_done_folders(task, folders_to_convert, data_dir, save_dir,
                                                 store_prediction, store_representation)
    logger.info(f'Converting folders {str(folders_to_convert)}')

    if task not in SINGLE_IMAGE_TASKS:
        raise NotImplementedError('Distillation is currently implemented only for single-image-input tasks.')
    dataloader = get_dataloaders(data_path=data_dir, tasks='rgb',
                                 batch_size=batch_size, batch_size_val=batch_size,
                                 num_workers=n_dataloader_workers,
                                 train_folders=None, val_folders=folders_to_convert, test_folders=None,
                                 zip_file_name=True)['val']
    pred_format = '.npy' if feed_forward else '.png'
    pool = Pool(n_dataloader_workers)
    for fpaths, x in tqdm(dataloader):
        dirname = get_parent_dirname(fpaths[0])
        x = x.to(device)
        with torch.no_grad():
            encodings = model.encoder(x)
            if store_representation:
                encodings_np = encodings.cpu().detach().numpy()
                pool.map(save_mappable, zip(encodings_np, fpaths,
                                            [save_dir] * batch_size,
                                            [f'{task}_encoding'] * batch_size,
                                            ['.npy'] * batch_size))
            if store_prediction:
                decodings = model.decoder(encodings)
                decodings_np = decodings.cpu().detach().numpy()
                pool.map(save_mappable, zip(decodings_np, fpaths,
                                            [save_dir] * batch_size,
                                            [f'{task}_decoding'] * batch_size,
                                            [pred_format] * batch_size))

@ex.main
def run_cfg(cfg):
    save_reprs(task=cfg['task'],
               model_base_path=cfg['model_base_path'],
               folders_to_convert=cfg['folders_to_convert'],
               split_to_convert=cfg['split_to_convert'],
               data_dir=cfg['data_dir'],
               save_dir=cfg['save_dir'],
               store_representation=cfg['store_representation'],
               store_prediction=cfg['store_prediction'],
               n_dataloader_workers=cfg['n_dataloader_workers'],
               batch_size=cfg['batch_size'])

@ex.config
def cfg_base():
    task = 'autoencoding'
    model_base_path = '/mnt/models/'
    store_representation = True
    store_prediction = True
    folders_to_convert = None
    split_to_convert = None
    batch_size = 64
    n_dataloader_workers = 8
    data_dir = '/mnt/data'
    save_dir = '/mnt/data'

@ex.named_config
def cfg_docker():
    cfg = {'task': 'keypoints3d',
           'model_base_path': '/mnt/models/',
           'store_representation': False,
           'store_prediction': True,
           'split_to_convert': 'splits.taskonomy_no_midlevel["fullplus"]',
           'data_dir': '/mnt/data',
           'save_dir': '/mnt/data',
           'folders_to_convert': None,
           'batch_size': 64,
           'n_dataloader_workers': 8}

def save_as_png(file_path, decoding):
    decoding = 0.5 * decoding + 0.5
    decoding *= 2 ** 16 - 1
    decoding = decoding.astype(np.uint16)
    decoding = np.transpose(decoding, (1, 2, 0))
    if decoding.shape[2] > 1:
        cv2.imwrite(file_path, cv2.cvtColor(decoding, cv2.COLOR_RGB2BGR))
    else:
        cv2.imwrite(file_path, decoding.astype(np.uint8))
    return

def save_to_file(arr, original_image_fname, new_root, subfolder, filetype='.npy'):
    abspath = os.path.abspath(original_image_fname)
    base_name = os.path.basename(abspath).replace('.png', filetype)
    parent_name = get_parent_dirname(abspath).replace(SOURCE_TASK, 'mask_valid')
    file_path = os.path.join(new_root, subfolder, parent_name, base_name)
    os.makedirs(os.path.join(new_root, subfolder, parent_name), exist_ok=True)
    if filetype == '.npy':
        np.save(file_path, arr)
    elif filetype == '.npz':
        np.savez_compressed(file_path, arr)
    elif filetype == '.png':
        cv2.imwrite(file_path, np.uint8(arr[0]))
    else:
        raise NotImplementedError('Cannot save {}. Unrecognized filetype {}.'.format(file_path, filetype))

def save_mappable(x):
    return save_to_file(*x)

def build_mask(target, val=65000):
    # Pixels at/above `val` are invalid; max-pooling dilates the invalid region
    # (and halves the resolution) before inverting, so valid pixels become 255.
    mask = target >= val
    mask = F.max_pool2d(mask.float(), 5, padding=2, stride=2) == 0
    return mask * 255

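# Sketch of build_mask on a toy depth-style tensor: one saturated pixel marks a
# region invalid; the 5x5 max-pool (stride 2) dilates that region at half
# resolution before the mask is inverted, so valid pixels come out as 255.
def _demo_build_mask():
    target = torch.zeros(1, 1, 8, 8)
    target[0, 0, 0, 0] = 65535.0  # one invalid (saturated) pixel
    mask = build_mask(target)
    assert mask.shape == (1, 1, 4, 4)
    assert mask[0, 0, 0, 0] == 0 and mask[0, 0, 3, 3] == 255
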
@ex.main
def make_mask(folders_to_convert, split_to_convert, data_dir, save_dir,
              n_dataloader_workers=4, batch_size=64):
    if folders_to_convert is None and split_to_convert is not None:
        split_to_convert = eval(split_to_convert)
        logger.info(f'Converting from split {split_to_convert}')
        folders_to_convert = sorted(list(set(split_to_convert['train'] + split_to_convert['val'] + split_to_convert['test'])))
    if folders_to_convert is None:
        logger.info(f'Converting all folders in {data_dir}')
    else:
        logger.info(f'Converting folders {str(folders_to_convert)}')
    dataloader = get_dataloaders(data_path=data_dir, tasks=SOURCE_TASK,
                                 batch_size=batch_size, batch_size_val=batch_size,
                                 num_workers=n_dataloader_workers,
                                 train_folders=None, val_folders=folders_to_convert, test_folders=None,
                                 zip_file_name=True,
                                 transform=transforms.Compose([transforms.ToTensor()]))['val']
    pool = Pool(n_dataloader_workers)
    for fpaths, x in tqdm(dataloader):
        dirname = get_parent_dirname(fpaths[0])
        with torch.no_grad():
            x = build_mask(x)
        pool.map(save_mappable, zip(x, fpaths,
                                    [save_dir] * batch_size,
                                    ['mask_valid'] * batch_size,
                                    ['.png'] * batch_size))

@ex.config
def cfg_base():
    folders_to_convert = None
    split_to_convert = None
    batch_size = 64
    n_dataloader_workers = 8
    data_dir = '/mnt/data'
    save_dir = '/mnt/data'

def save_to_file(arr, original_image_fname, new_root, subfolder, filetype='.npy'):
    abspath = os.path.abspath(original_image_fname)
    base_name = os.path.basename(abspath).replace('.png', filetype)
    parent_name = get_parent_dirname(abspath)
    file_path = os.path.join(new_root, subfolder, parent_name, base_name)
    os.makedirs(os.path.join(new_root, subfolder, parent_name), exist_ok=True)
    if filetype == '.npy':
        np.save(file_path, arr)
    elif filetype == '.npz':
        np.savez_compressed(file_path, arr)
    elif filetype == '.png':
        cv2.imwrite(file_path, cv2.cvtColor(np.uint8(arr[0]), cv2.COLOR_RGB2BGR))
    else:
        raise NotImplementedError('Cannot save {}. Unrecognized filetype {}.'.format(file_path, filetype))

def shrink_file(original_fpath, new_fpath):
    with open(original_fpath, 'rb') as f:
        img = Image.open(f)
        img = img.convert('RGB')
        img = transforms.Resize((256, 256), Image.BICUBIC)(img)
    with open(new_fpath, 'wb') as f:
        img.save(f)

def save_mappable(x):
    return shrink_file(*x)

@ex.main
def make_mask(folders_to_convert, split_to_convert, data_dir, save_dir,
              n_dataloader_workers=4, batch_size=64):
    if folders_to_convert is None and split_to_convert is not None:
        split_to_convert = eval(split_to_convert)
        logger.info(f'Converting from split {split_to_convert}')
        folders_to_convert = sorted(list(set(split_to_convert['train'] + split_to_convert['val'] + split_to_convert['test'])))
    assert folders_to_convert is not None, 'No folders to convert. Aborting'
    logger.info(f'Converting folders {str(folders_to_convert)}')
    assert len(folders_to_convert) == 1
    pool = Pool(n_dataloader_workers)
    for fpath in tqdm(os.listdir(os.path.join(data_dir, SOURCE_TASK, folders_to_convert[0], SOURCE_TASK))):
        dirname = get_parent_dirname(fpath)
        source_path = os.path.join(data_dir, SOURCE_TASK, folders_to_convert[0], SOURCE_TASK, fpath)
        target_path = os.path.join(save_dir, SOURCE_TASK, folders_to_convert[0], fpath)
        pool.apply_async(shrink_file, args=(source_path, target_path))
    pool.close()
    pool.join()
    print(target_path)

@ex.config
def cfg_base():
    folders_to_convert = None
    split_to_convert = None
    batch_size = 64
    n_dataloader_workers = 8
    data_dir = '/mnt/data'
    save_dir = '/mnt/data'

@ex.command
def run_hps(cfg, uuid):
    print(cfg)
    argv_plus_hps = sys.argv
    script_name = argv_plus_hps[0]
    script_name = script_name.replace('.py', '').replace('/', '.')
    script_name = script_name[1:] if script_name.startswith('.') else script_name
    for hp, hp_range in flatten(cfg['hps_kwargs']['hp']).items():
        hp_val = np.power(10, np.random.uniform(*hp_range))
        argv_plus_hps.append(f'cfg.{hp}={hp_val}')
    argv_plus_hps = [a.replace('run_hps', cfg['hps_kwargs']['script_name']) for a in argv_plus_hps]
    argv_plus_hps.append(f'uuid={uuid}_hps_run')
    print(f"python -m {script_name} {LOG_DIR} {' '.join(argv_plus_hps[1:])}")
    ex.run_commandline(argv=argv_plus_hps)

@ex.named_config
def cfg_hps():
    uuid = 'hps'
    cfg = {}
    cfg['hps_kwargs'] = {
        # Each hyperparameter maps to a (low, high) exponent range; run_hps
        # samples values log-uniformly as 10 ** uniform(low, high).
        'hp': {'learner': {'lr': (-5, -3),
                           'optimizer_kwargs': {'weight_decay': (-6, -4)}}},
        'script_name': 'train',
        'add_time_to_logdir': True,
    }

@ex.command
def prologue(cfg, uuid):
    os.makedirs(LOG_DIR, exist_ok=True)
    assert not (cfg['saving']['obliterate_logs'] and cfg['training']['resume_training']), \
        'Cannot obliterate logs and resume training'
    if cfg['saving']['obliterate_logs']:
        assert LOG_DIR, 'LOG_DIR cannot be empty'
        subprocess.call(f'rm -rf {LOG_DIR}', shell=True)
    if cfg['training']['resume_training']:
        checkpoints.archive_current_run(LOG_DIR, uuid)

@ex.main
def train(cfg, uuid):
    set_seed(cfg['training']['seed'])
    logger.setLevel(logging.INFO)
    logger.info(pprint.pformat(cfg))
    logger.debug(f'Loaded Torch version: {torch.__version__}')
    logger.debug(f'Using device: {device}')
    logger.info('Training following tasks: ')
    for i, (s, t) in enumerate(zip(cfg['training']['sources'], cfg['training']['targets'])):
        logger.info(f'  Task {i}: {s} -> {t}')

    logger.debug('Starting data loaders')
    logger.debug('Setting up model')
    search_and_replace_dict(cfg['learner']['model_kwargs'], cfg['training']['targets'][0][0])
    model = eval(cfg['learner']['model'])(**cfg['learner']['model_kwargs'])
    logger.info(f'Created model. Number of trainable parameters: {count_trainable_parameters(model)}. '
                f'Number of total parameters: {count_total_parameters(model)}')
    try:
        # Per-component parameter counts (only available on models that expose these attributes)
        logger.info(f'Number of trainable transfer parameters: {count_trainable_parameters(model.transfers)}. '
                    f'Number of total transfer parameters: {count_total_parameters(model.transfers)}')
        if isinstance(model.base, nn.Module):
            logger.info(f'Number of trainable encoder parameters: {count_trainable_parameters(model.base)}. '
                        f'Number of total encoder parameters: {count_total_parameters(model.base)}')
        if isinstance(model.sides, nn.Module):
            logger.info(f'Number of trainable side parameters: {count_trainable_parameters(model.sides)}. '
                        f'Number of total side parameters: {count_total_parameters(model.sides)}')
        if isinstance(model.merge_operators, nn.Module):
            logger.info(f'Number of trainable merge (alpha) parameters: {count_trainable_parameters(model.merge_operators)}. '
                        f'Number of total merge (alpha) parameters: {count_total_parameters(model.merge_operators)}')
    except AttributeError:
        pass

    # Optionally resume model/optimizer state from a checkpoint
    ckpt_fpath = cfg['training']['resume_from_checkpoint_path']
    loaded_optimizer = None
    start_epoch = 0
    checkpoint = {}  # stays empty if no checkpoint is loaded, so resume logic below is safe
    if ckpt_fpath is not None and not cfg['training']['resume_training']:
        warnings.warn('Checkpoint path provided but resume_training is set to False, are you sure?')
    if ckpt_fpath is not None and cfg['training']['resume_training']:
        if not os.path.exists(ckpt_fpath):
            logger.warning(f'Trying to resume training, but checkpoint path {ckpt_fpath} does not exist. '
                           'Starting training from the beginning...')
        else:
            model, checkpoint = load_state_dict_from_path(model, ckpt_fpath)
            start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
            logger.info(f"Loaded model (epoch {start_epoch if 'epoch' in checkpoint else 'unknown'}) from {ckpt_fpath}")
            if 'optimizer' in checkpoint:
                loaded_optimizer = checkpoint['optimizer']
            else:
                warnings.warn('No optimizer in checkpoint, are you sure?')
            checkpoint.pop('state_dict', None)  # drop the (large) raw weights; they are already in the model

    model.to(device)
    if torch.cuda.device_count() > 1:
        logger.info(f'Using {torch.cuda.device_count()} GPUs!')
        assert cfg['learner']['model'] != 'ConstantModel', 'ConstantModel (blind) does not operate with multiple devices'
        model = nn.DataParallel(model, range(torch.cuda.device_count()))
        model.to(device)

    # Data loading
    for key in ['sources', 'targets', 'masks']:
        cfg['training']['dataloader_fn_kwargs'][key] = cfg['training'][key]
    dataloaders = eval(cfg['training']['dataloader_fn'])(**cfg['training']['dataloader_fn_kwargs'])
    if cfg['training']['resume_training']:
        if 'curr_iter_idx' in checkpoint and checkpoint['curr_iter_idx'] == -1:
            warnings.warn(f'curr_iter_idx is -1, guessing curr_iter_idx to be start_epoch {start_epoch}')
            dataloaders['train'].start_dl = start_epoch
        elif 'curr_iter_idx' in checkpoint:
            logger.info(f"Starting dataloader at {checkpoint['curr_iter_idx']}")
            dataloaders['train'].start_dl = checkpoint['curr_iter_idx']
        else:
            warnings.warn(f'Guessing curr_iter_idx to be start_epoch {start_epoch}')
            dataloaders['train'].start_dl = start_epoch

    # Loss functions (one per task; a single entry is broadcast to all tasks)
    loss_fn_lst = cfg['training']['loss_fn']
    loss_kwargs_lst = cfg['training']['loss_kwargs']
    if not isinstance(loss_fn_lst, list):
        loss_fn_lst = [loss_fn_lst]
        loss_kwargs_lst = [loss_kwargs_lst]
    elif isinstance(loss_kwargs_lst, dict):
        loss_kwargs_lst = [loss_kwargs_lst for _ in range(len(loss_fn_lst))]
    assert len(loss_fn_lst) == len(loss_kwargs_lst), 'number of loss fns/kwargs not the same'
    loss_fns = []
    for loss_fn, loss_kwargs in zip(loss_fn_lst, loss_kwargs_lst):
        if loss_fn == 'perceptual_l1':
            loss_fn = perceptual_l1_loss(cfg['training']['loss_kwargs']['decoder_path'],
                                         cfg['training']['loss_kwargs']['bake_decodings'])
        elif loss_fn == 'perceptual_l2':
            loss_fn = perceptual_l2_loss(cfg['training']['loss_kwargs']['decoder_path'],
                                         cfg['training']['loss_kwargs']['bake_decodings'])
        elif loss_fn == 'perceptual_cross_entropy':
            loss_fn = perceptual_cross_entropy_loss(cfg['training']['loss_kwargs']['decoder_path'],
                                                    cfg['training']['loss_kwargs']['bake_decodings'])
        else:
            loss_fn = functools.partial(eval(loss_fn), **loss_kwargs)
        loss_fns.append(loss_fn)
    if len(loss_fns) == 1 and len(cfg['training']['sources']) > 1:
        loss_fns = [loss_fns[0] for _ in range(len(cfg['training']['sources']))]

    if 'regularizer_fn' in cfg['training'] and cfg['training']['regularizer_fn'] is not None:
        assert torch.cuda.device_count() <= 1, 'Regularization does not support multi-GPU, unable to access model attributes from DataParallel wrapper'
        bare_model = model.module if torch.cuda.device_count() > 1 else model
        loss_fns = [eval(cfg['training']['regularizer_fn'])(loss_fn=loss_fn, model=bare_model,
                                                            **cfg['training']['regularizer_kwargs'])
                    for loss_fn in loss_fns]

    # Logging
    flog = tnt.logger.FileLogger(cfg['saving']['results_log_file'], overwrite=True)
    mlog = get_logger(cfg, uuid)
    mlog.add_meter('config', tnt.meter.SingletonMeter(), ptype='text')
    mlog.update_meter(cfg_to_md(cfg, uuid), meters={'config'}, phase='train')
    for task, _ in enumerate(cfg['training']['targets']):
        mlog.add_meter(f'alpha/task_{task}', tnt.meter.ValueSummaryMeter())
        mlog.add_meter(f'output/task_{task}', tnt.meter.ValueSummaryMeter(), ptype='image')
        mlog.add_meter(f'input/task_{task}', tnt.meter.ValueSummaryMeter(), ptype='image')
        mlog.add_meter(f'weight_histogram/task_{task}', tnt.meter.ValueSummaryMeter(), ptype='histogram')  # was missing the f-prefix
        for loss in cfg['training']['loss_list']:
            mlog.add_meter(f'losses/{loss}_{task}', tnt.meter.ValueSummaryMeter())
        if cfg['training']['task_is_classification'][task]:
            mlog.add_meter(f'accuracy_top1/task_{task}', tnt.meter.ClassErrorMeter(topk=[1], accuracy=True))
            mlog.add_meter(f'accuracy_top5/task_{task}', tnt.meter.ClassErrorMeter(topk=[5], accuracy=True))
            mlog.add_meter(f'perplexity_pred/task_{task}', tnt.meter.ValueSummaryMeter())
            mlog.add_meter(f'perplexity_label/task_{task}', tnt.meter.ValueSummaryMeter())

    # Training
    try:
        if cfg['training']['train']:
            if cfg['training']['resume_training'] and loaded_optimizer is not None:
                optimizer = loaded_optimizer
            else:
                # No weight decay on the merge-operator (alpha) and context parameters
                optimizer = eval(cfg['learner']['optimizer_class'])([
                    {'params': [param for name, param in model.named_parameters()
                                if 'merge_operator' in name or 'context' in name or 'alpha' in name],
                     'weight_decay': 0.0},
                    {'params': [param for name, param in model.named_parameters()
                                if 'merge_operator' not in name and 'context' not in name and 'alpha' not in name]},
                ], lr=cfg['learner']['lr'], **cfg['learner']['optimizer_kwargs'])
            scheduler = None
            if cfg['learner']['lr_scheduler_method'] is not None:
                scheduler = eval(cfg['learner']['lr_scheduler_method'])(optimizer, **cfg['learner']['lr_scheduler_method_kwargs'])
            model.start_training()
            if cfg['training']['amp']:
                from apex import amp
                model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
            logger.info('Starting training...')
            context = train_model(cfg, model, dataloaders, loss_fns, optimizer,
                                  start_epoch=start_epoch,
                                  num_epochs=cfg['training']['num_epochs'],
                                  save_epochs=cfg['saving']['save_interval'],
                                  scheduler=scheduler, mlog=mlog, flog=flog)
    finally:
        print(psutil.virtual_memory())
        GPUtil.showUtilization(all=True)

    # Testing
    if cfg['training']['test']:
        run_kwargs = {'cfg': cfg, 'mlog': mlog, 'flog': flog, 'optimizer': None,
                      'loss_fns': loss_fns, 'model': model,
                      'use_thread': cfg['saving']['in_background']}
        context, _ = run_one_epoch(dataloader=dataloaders['val'], epoch=0, train=False, **run_kwargs)

    logger.info('Waiting up to 10 minutes for all files to save...')
    mlog.flush()
    [c.join(600) for c in context]
    logger.info('All saving is finished.')
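# For reference, a minimal sketch of the config structure that train() consumes.
# The key names below all appear in the code above; the values are illustrative
# assumptions (this is not an exhaustive or authoritative default config).
_EXAMPLE_TRAIN_CFG = {
    'training': {
        'seed': 42,
        'sources': [['rgb']], 'targets': [['normal']], 'masks': [None],
        'train': True, 'test': True, 'num_epochs': 100,
        'resume_training': False, 'resume_from_checkpoint_path': None,
        'dataloader_fn': 'get_dataloaders',          # assumed name of a dataloader factory
        'dataloader_fn_kwargs': {},
        'loss_fn': 'weighted_l1_loss',               # assumed loss name; or 'perceptual_l1', etc.
        'loss_kwargs': {}, 'loss_list': ['total'],
        'task_is_classification': [False],
        'regularizer_fn': None,
        'amp': False,
    },
    'learner': {
        'model': 'LifelongSidetuneNetwork', 'model_kwargs': {},
        'optimizer_class': 'optim.Adam', 'optimizer_kwargs': {}, 'lr': 1e-4,
        'lr_scheduler_method': None, 'lr_scheduler_method_kwargs': {},
        'use_feedback': False, 'max_grad_norm': None,
    },
    'saving': {
        'log_dir': '/tmp/logs', 'results_log_file': '/tmp/logs/result_log.pkl',
        'save_dir': 'checkpoints', 'save_interval': 25,
        'log_interval': 1, 'ticks_per_epoch': 1, 'in_background': False,
    },
}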
def train_model(cfg, model, dataloaders, loss_fns, optimizer, start_epoch=0, num_epochs=250,
                save_epochs=25, scheduler=None, mlog=None, flog=None):
    '''
    Main training loop. Multiple tasks might happen in the same epoch.
    Epoch labeling convention:
        epoch 0 -> 1: random validation only
        epoch 1 -> 2: train task 0, labeled as epoch 2, then validate all tasks
        epoch i -> i+1: train task i-1, labeled as epoch i+1
    '''
    checkpoint_dir = os.path.join(cfg['saving']['log_dir'], cfg['saving']['save_dir'])
    run_kwargs = {'cfg': cfg, 'mlog': mlog, 'flog': flog, 'optimizer': optimizer,
                  'loss_fns': loss_fns, 'model': model,
                  'use_thread': cfg['saving']['in_background']}
    context = []
    log_interval = cfg['saving']['log_interval']
    log_interval = int(log_interval) if log_interval > 1 else log_interval
    end_epoch = start_epoch + num_epochs
    print(f'training for {num_epochs} epochs')
    for epoch in range(start_epoch, end_epoch):
        torch.cuda.empty_cache()
        if epoch == 0 or (epoch % save_epochs) == (save_epochs - 1):
            context += save_checkpoint(model, optimizer, epoch, dataloaders, checkpoint_dir,
                                       use_thread=cfg['saving']['in_background'])
        should_run_validation = epoch == 0 or log_interval <= 1 or (epoch % log_interval) == (log_interval - 1)
        if should_run_validation:
            assert math.isnan(mlog.peek_meter()['losses/total_0']), \
                'Loggers are not empty at the beginning of evaluation. Were training logs cleared?'
            context1, loss_dict = run_one_epoch(dataloader=dataloaders['val'], epoch=epoch, train=False, **run_kwargs)
            context += context1
            if scheduler is not None:
                try:
                    scheduler.step(loss_dict['total'])  # e.g. ReduceLROnPlateau wants a metric
                except TypeError:
                    scheduler.step()
        context1, _ = run_one_epoch(dataloader=dataloaders['train'], epoch=epoch + 1, train=True, **run_kwargs)
        context += context1
        post_training_epoch(dataloader=dataloaders['train'], epoch=epoch, **run_kwargs)
    # Final validation and checkpoint
    context1, _ = run_one_epoch(dataloader=dataloaders['val'], epoch=end_epoch, train=False, **run_kwargs)
    context += context1
    context += save_checkpoint(model, optimizer, end_epoch, dataloaders, checkpoint_dir,
                               use_thread=cfg['saving']['in_background'])
    return context
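# Worked example of the scheduling above (a sketch, derived from the loop body):
# with start_epoch=0, num_epochs=2, save_epochs=25, log_interval=1, train_model
# executes roughly:
#   save_checkpoint(epoch=0)
#   run_one_epoch(val,   epoch=0)   # initial random validation
#   run_one_epoch(train, epoch=1)
#   run_one_epoch(val,   epoch=1)
#   run_one_epoch(train, epoch=2)
#   run_one_epoch(val,   epoch=2)   # final validation at end_epoch
#   save_checkpoint(epoch=2)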
def post_training_epoch(dataloader=None, epoch=(-1), model=None, loss_fns=None, **kwargs):
    # Run the current task's hook first so it can populate the shared cache,
    # then let all other tasks' loss fns react to the finished epoch.
    post_training_cache = {}
    if hasattr(loss_fns[dataloader.curr_iter_idx], 'post_training_epoch'):
        loss_fns[dataloader.curr_iter_idx].post_training_epoch(model, dataloader, post_training_cache, **kwargs)
    for i, loss_fn in enumerate(loss_fns):
        if hasattr(loss_fn, 'post_training_epoch') and i != dataloader.curr_iter_idx:
            loss_fn.post_training_epoch(model, dataloader, post_training_cache, **kwargs)
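# The hook above lets a loss function update per-task state once a training
# epoch finishes. A minimal sketch of a compatible regularizer; the class is
# hypothetical (EWC-style parameter anchoring, stripped of the Fisher terms),
# but it matches the (model, dataloader, post_training_cache, **kwargs) hook
# signature and the loss_fn(prediction, label, masks, cache) call convention:
class _ExampleParamAnchorRegularizer:
    def __init__(self, loss_fn, model, **kwargs):
        self.loss_fn = loss_fn
        self.model = model
        self.anchors = {}  # parameter snapshot from the end of the last epoch

    def __call__(self, prediction, label, masks, cache):
        # Delegate to the wrapped loss; a real regularizer would add a penalty
        # pulling parameters toward self.anchors.
        return self.loss_fn(prediction, label, masks, cache)

    def post_training_epoch(self, model, dataloader, post_training_cache, **kwargs):
        self.anchors = {name: p.detach().clone() for name, p in model.named_parameters()}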
def run_one_epoch(model: LifelongSidetuneNetwork, dataloader, loss_fns, optimizer, epoch, cfg, mlog, flog,
                  train=True, use_thread=False) -> (list, dict):
    # Train or evaluate the model for a single epoch
    start_time = time.time()
    model.train(train)
    params_with_grad = model.parameters()
    phase = 'train' if train else 'val'
    sources = cfg['training']['sources']
    targets = cfg['training']['targets']
    tasks = [t for t in SINGLE_IMAGE_TASKS if len([tt for tt in cfg['training']['targets'] if t in tt]) > 0]
    cache = {'phase': phase, 'sources': sources, 'targets': targets, 'tasks': tasks}
    context = []
    losses = {x: [] for x in cfg['training']['loss_list']}

    # A fractional log_interval (< 1) means logging several times per epoch, at these steps
    log_steps = []
    log_interval = cfg['saving']['log_interval']
    log_interval = int(log_interval) if log_interval >= 1 else log_interval
    if log_interval < 1 and train:
        num_logs_per_epoch = int(1 // log_interval)
        log_steps = [i * int(len(dataloader) / num_logs_per_epoch) for i in range(1, num_logs_per_epoch)]

    if cfg['training']['post_aggregation_transform_fn'] is not None:
        post_agg_transform = eval(cfg['training']['post_aggregation_transform_fn'])

    if cfg['learner']['use_feedback']:
        num_passes = cfg['learner']['feedback_kwargs']['num_feedback_iter']
        backward_kwargs = {'retain_graph': True}
    else:
        num_passes = 1
        backward_kwargs = {}

    if isinstance(model, _CustomDataParallel):
        warnings.warn('DataParallel does not allow you to put part of the model on CPU')
        model.cuda()

    with torch.set_grad_enabled(train):
        seen = set()
        for i, (task_idx, batch_tuple) in enumerate(tqdm(dataloader, desc=f'Epoch {epoch} ({phase})')):
            if cfg['training']['post_aggregation_transform_fn'] is not None:
                batch_tuple = post_agg_transform(batch_tuple, **cfg['training']['post_aggregation_transform_fn_kwargs'])

            # Detect the first batch of a new task this epoch
            old_size = len(seen)
            seen.add(task_idx)
            if len(seen) > old_size:
                logger.info(f'Moving to task: {task_idx}')
                model.start_task(task_idx, train, print_alpha=True)

            x, label, masks = tlkit.utils.process_batch_tuple(batch_tuple, task_idx, cfg)
            for pass_i in range(num_passes):
                prediction = model(x, task_idx=task_idx, pass_i=pass_i)
                loss_dict = loss_fns[task_idx](prediction, label, masks, cache)
                if train:
                    optimizer.zero_grad()
                    loss_dict['total'].backward(**backward_kwargs)
                    if cfg['learner']['max_grad_norm'] is not None:
                        torch.nn.utils.clip_grad_norm_(params_with_grad, cfg['learner']['max_grad_norm'])
                    optimizer.step()

            # Per-batch logging
            mlog.update_meter(model.merge_operator.param, meters={f'alpha/task_{task_idx}'}, phase=phase)
            for loss in cfg['training']['loss_list']:
                assert loss in loss_dict.keys(), f'Promised to report loss {loss}, but it is missing from loss_dict'
                mlog.update_meter(loss_dict[loss].detach().item(), meters={f'losses/{loss}_{task_idx}'}, phase=phase)
            if cfg['training']['task_is_classification'][task_idx]:
                add_classification_specific_logging(cache, mlog, task_idx, phase)
            if len(seen) > old_size:
                log_image(mlog, task_idx, cfg, x, label, prediction, masks=masks, cache=cache)

            # Mid-epoch logging (only when log_interval < 1)
            if i in log_steps:
                step = epoch + i / len(dataloader)
                step = int(np.floor(step * cfg['saving']['ticks_per_epoch']))
                for loss in cfg['training']['loss_list']:
                    losses[loss].append(mlog.peek_meter(phase=phase)[f'losses/{loss}_{task_idx}'].item())
                context += write_logs(mlog, flog, task_idx, step, cfg, cache, to_print=False)

        # End-of-epoch logging
        for loss in cfg['training']['loss_list']:
            losses[loss].append(mlog.peek_meter(phase=phase)[f'losses/{loss}_{task_idx}'].item())
        if log_interval <= 1 or (epoch % log_interval) == (log_interval - 1) or epoch == 0:
            step = epoch + (len(dataloader) - 1) / len(dataloader)
            step = int(np.floor(step * cfg['saving']['ticks_per_epoch']))
            context += write_logs(mlog, flog, task_idx, step, cfg, cache, to_print=True)

    assert len(losses['total']) > 0, 'Need to report loss'
    for k in losses.keys():
        losses[k] = sum(losses[k]) / len(losses[k])
    loss_str = ''.join([' | ' + k + ' loss: {0:.6f} '.format(v) for k, v in losses.items()])
    duration = int(time.time() - start_time)
    logger.info(f'End of epoch {epoch} ({phase}) ({duration // 60}m {duration % 60}s) {loss_str}')
    return context, losses
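# Worked example of the fractional log_interval arithmetic above (a sketch):
# with log_interval=0.25 and len(dataloader)=100,
#   num_logs_per_epoch = int(1 // 0.25) = 4
#   log_steps = [25, 50, 75]
# i.e. three mid-epoch log flushes plus the end-of-epoch one. The global step
# written to the loggers is int(floor((epoch + i/len(dataloader)) * ticks_per_epoch)).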
def save_checkpoint(model, optimizer, epoch, dataloaders, checkpoint_dir, use_thread=False):
    dict_to_save = {'state_dict': model.state_dict(),
                    'epoch': epoch,
                    'model': model,
                    'optimizer': optimizer,
                    'curr_iter_idx': dataloaders['train'].curr_iter_idx}
    checkpoints.save_checkpoint(dict_to_save, checkpoint_dir, epoch)
    return []
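# A checkpoint written by save_checkpoint above can be restored later. A minimal
# sketch, reusing this repo's checkpoint_name helper and mirroring the keys that
# train() reads on resume ('epoch', 'optimizer', 'curr_iter_idx'); it assumes the
# checkpoints module torch-saves the dict (run_training loads it with torch.load):
def _example_restore(model, checkpoint_dir):
    ckpt = torch.load(checkpoint_name(checkpoint_dir))  # resolves to ckpt-latest.dat
    model.load_state_dict(ckpt['state_dict'])
    return model, ckpt.get('optimizer'), ckpt.get('epoch', 0), ckpt.get('curr_iter_idx', -1)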
@ex.command
def prologue(cfg, uuid):
    os.makedirs(LOG_DIR, exist_ok=True)
    assert not (cfg['saving']['obliterate_logs'] and cfg['training']['resumable']), \
        'cannot obliterate logs and resume training'
    if cfg['saving']['obliterate_logs']:
        assert LOG_DIR, 'LOG_DIR cannot be empty'
        subprocess.call(f'rm -rf {LOG_DIR}', shell=True)
    if cfg['training']['resumable']:
        archive_current_run(LOG_DIR, uuid)
@ex.main
def run_training(cfg, uuid, override={}):
    try:
        logger.info('-------------\nStarting with configuration:\n' + pprint.pformat(cfg))
        logger.info('UUID: ' + uuid)
        torch.set_num_threads(1)
        set_seed(cfg['training']['seed'])

        # Find a fresh directory if the requested log dir is already in use
        old_log_dir = cfg['saving']['log_dir']
        changed_log_dir = False
        existing_log_paths = []
        if os.path.exists(old_log_dir) and cfg['saving']['autofix_log_dir']:
            LOG_DIR, existing_log_paths = evkit.utils.logging.unused_dir_name(old_log_dir)
            os.makedirs(LOG_DIR, exist_ok=False)
            cfg['saving']['log_dir'] = LOG_DIR
            cfg['saving']['results_log_file'] = os.path.join(LOG_DIR, 'result_log.pkl')
            cfg['saving']['reward_log_file'] = os.path.join(LOG_DIR, 'rewards.pkl')
            cfg['saving']['visdom_log_file'] = os.path.join(LOG_DIR, 'visdom_logs.json')
            changed_log_dir = True

        # Load checkpoint (agent, model, and optionally the old run's config)
        agent = None
        if cfg['training']['resumable']:
            if cfg['saving']['checkpoint']:
                prev_run_path = cfg['saving']['checkpoint']
                if cfg['saving']['checkpoint_num'] is None:
                    ckpt_fpath = os.path.join(prev_run_path, 'checkpoints', 'ckpt-latest.dat')
                else:
                    ckpt_fpath = os.path.join(prev_run_path, 'checkpoints', f"ckpt-{cfg['saving']['checkpoint_num']}.dat")
                if cfg['saving']['checkpoint_configs']:
                    prev_run_metadata_paths = [os.path.join(prev_run_path, f)
                                               for f in os.listdir(prev_run_path) if f.endswith('metadata')]
                    prev_run_config_path = os.path.join(prev_run_metadata_paths[0], 'config.json')
                    with open(prev_run_config_path) as f:
                        config = json.load(f)
                    true_log_dir = cfg['saving']['log_dir']
                    cfg = update_dict_deepcopy(cfg, config['cfg'])
                    uuid = config['uuid']
                    logger.warning('Reusing config from {}'.format(prev_run_config_path))
                    # ...but do not reuse the old run's log locations
                    cfg['saving']['log_dir'] = true_log_dir
                    cfg['saving']['results_log_file'] = os.path.join(true_log_dir, 'result_log.pkl')
                    cfg['saving']['reward_log_file'] = os.path.join(true_log_dir, 'rewards.pkl')
                    cfg['saving']['visdom_log_file'] = os.path.join(true_log_dir, 'visdom_logs.json')
                if ckpt_fpath is not None and os.path.exists(ckpt_fpath):
                    checkpoint_obj = torch.load(ckpt_fpath)
                    start_epoch = checkpoint_obj['epoch']
                    logger.info('Loaded learner (epoch {}) from {}'.format(start_epoch, ckpt_fpath))
                    if cfg['learner']['algo'] == 'imitation_learning':
                        actor_critic = checkpoint_obj['model']
                        try:
                            actor_critic = actor_critic.module  # unwrap DataParallel, if present
                        except AttributeError:
                            pass
                    else:
                        agent = checkpoint_obj['agent']
                        actor_critic = agent.actor_critic
                else:
                    logger.warning('No checkpoint found at {}'.format(ckpt_fpath))
        cfg = update_dict_deepcopy(cfg, override)
        logger.info('-------------\n Running with configuration:\n' + pprint.pformat(cfg))

        # Sanity check: a baked-in taskonomy transform must match the network base
        try:
            taskonomy_transform = cfg['env']['transform_fn_post_aggregation_kwargs']['names_to_transforms']['taskonomy']
            taskonomy_encoder = cfg['learner']['perception_network_kwargs']['extra_kwargs']['sidetune_kwargs']['base_weights_path']
            assert taskonomy_encoder in taskonomy_transform, \
                f'Taskonomy PostTransform and perception network base need to match. {taskonomy_encoder} != {taskonomy_transform}'
        except KeyError:
            pass

        if cfg['training']['gpu_devices'] is None:
            cfg['training']['gpu_devices'] = list(range(torch.cuda.device_count()))
        assert not (len(cfg['training']['gpu_devices']) > 1 and 'attributes' in cfg['learner']['cache_kwargs']), \
            'Cannot utilize cache with more than one model GPU'
        simulator, scenario = cfg['env']['env_name'].split('_')

        # Set up environment
        transform_pre_aggregation = None
        if cfg['env']['transform_fn_pre_aggregation'] is not None:
            logger.warning('Using deprecated config transform_fn_pre_aggregation')
            transform_pre_aggregation = eval(cfg['env']['transform_fn_pre_aggregation'].replace('---', "'"))
        elif 'transform_fn_pre_aggregation_fn' in cfg['env'] and cfg['env']['transform_fn_pre_aggregation_fn'] is not None:
            pre_aggregation_kwargs = copy.deepcopy(cfg['env']['transform_fn_pre_aggregation_kwargs'])
            transform_pre_aggregation = eval(cfg['env']['transform_fn_pre_aggregation_fn'].replace('---', "'"))(
                **eval_dict_values(pre_aggregation_kwargs))

        if 'debug_mode' in cfg['env']['env_specific_kwargs'] and cfg['env']['env_specific_kwargs']['debug_mode']:
            assert cfg['env']['num_processes'] == 1, 'Using debug mode requires you to only use one process'

        envs = EnvFactory.vectorized(
            cfg['env']['env_name'], cfg['training']['seed'], cfg['env']['num_processes'],
            cfg['saving']['log_dir'], cfg['env']['add_timestep'],
            env_specific_kwargs=cfg['env']['env_specific_kwargs'],
            num_val_processes=cfg['env']['num_val_processes'],
            preprocessing_fn=transform_pre_aggregation,
            addl_repeat_count=cfg['env']['additional_repeat_count'],
            sensors=cfg['env']['sensors'],
            vis_interval=cfg['saving']['vis_interval'],
            visdom_server=cfg['saving']['visdom_server'],
            visdom_port=cfg['saving']['visdom_port'],
            visdom_log_file=cfg['saving']['visdom_log_file'],
            visdom_name=uuid)

        transform_post_aggregation = None
        if 'transform_fn_post_aggregation' in cfg['env'] and cfg['env']['transform_fn_post_aggregation'] is not None:
            logger.warning('Using deprecated config transform_fn_post_aggregation')
            transform_post_aggregation = eval(cfg['env']['transform_fn_post_aggregation'].replace('---', "'"))
        elif 'transform_fn_post_aggregation_fn' in cfg['env'] and cfg['env']['transform_fn_post_aggregation_fn'] is not None:
            post_aggregation_kwargs = copy.deepcopy(cfg['env']['transform_fn_post_aggregation_kwargs'])
            transform_post_aggregation = eval(cfg['env']['transform_fn_post_aggregation_fn'].replace('---', "'"))(
                **eval_dict_values(post_aggregation_kwargs))
        if transform_post_aggregation is not None:
            transform, space = transform_post_aggregation(envs.observation_space)
            envs = ProcessObservationWrapper(envs, transform, space)

        action_space = envs.action_space
        observation_space = envs.observation_space
        retained_obs_shape = {k: v.shape for k, v in observation_space.spaces.items() if k in cfg['env']['sensors']}
        logger.info(f'Action space: {action_space}')
        logger.info(f'Observation space: {observation_space}')
        logger.info('Retaining: {}'.format(set(observation_space.spaces.keys()).intersection(cfg['env']['sensors'].keys())))

        # Set up model and agent (unless already loaded from checkpoint)
        if agent is None and cfg['learner']['algo'] == 'ppo':
            perception_model = eval(cfg['learner']['perception_network'])(
                cfg['learner']['num_stack'], **cfg['learner']['perception_network_kwargs'])
            base = NaivelyRecurrentACModule(
                perception_unit=perception_model,
                use_gru=cfg['learner']['recurrent_policy'],
                internal_state_size=cfg['learner']['internal_state_size'])
            actor_critic = PolicyWithBase(
                base, action_space, num_stacks=cfg['learner']['num_stack'],
                takeover=None, loss_kwargs=cfg['learner']['loss_kwargs'],
                gpu_devices=cfg['training']['gpu_devices'])
            if cfg['learner']['use_replay']:
                agent = evkit.rl.algo.PPOReplay(
                    actor_critic,
                    cfg['learner']['clip_param'],
                    cfg['learner']['ppo_epoch'],
                    cfg['learner']['num_mini_batch'],
                    cfg['learner']['value_loss_coef'],
                    cfg['learner']['entropy_coef'],
                    cfg['learner']['on_policy_epoch'],
                    cfg['learner']['off_policy_epoch'],
                    cfg['learner']['num_steps'],
                    cfg['learner']['num_stack'],
                    lr=cfg['learner']['lr'],
                    eps=cfg['learner']['eps'],
                    max_grad_norm=cfg['learner']['max_grad_norm'],
                    gpu_devices=cfg['training']['gpu_devices'],
                    loss_kwargs=cfg['learner']['loss_kwargs'],
                    cache_kwargs=cfg['learner']['cache_kwargs'],
                    optimizer_class=cfg['learner']['optimizer_class'],
                    optimizer_kwargs=cfg['learner']['optimizer_kwargs'])
            else:
                agent = evkit.rl.algo.PPO(
                    actor_critic,
                    cfg['learner']['clip_param'],
                    cfg['learner']['ppo_epoch'],
                    cfg['learner']['num_mini_batch'],
                    cfg['learner']['value_loss_coef'],
                    cfg['learner']['entropy_coef'],
                    lr=cfg['learner']['lr'],
                    eps=cfg['learner']['eps'],
                    max_grad_norm=cfg['learner']['max_grad_norm'])
            start_epoch = 0
            if torch.cuda.device_count() > 1 and (cfg['training']['gpu_devices'] is None or len(cfg['training']['gpu_devices']) > 1):
                actor_critic.data_parallel(cfg['training']['gpu_devices'])
        elif agent is None and cfg['learner']['algo'] == 'slam':
            assert cfg['learner']['slam_class'] is not None, 'Must define SLAM agent class'
            actor_critic = eval(cfg['learner']['slam_class'])(**cfg['learner']['slam_kwargs'])
            start_epoch = 0
        elif cfg['learner']['algo'] == 'expert':
            actor_critic = eval(cfg['learner']['algo_class'])(**cfg['learner']['algo_kwargs'])
            start_epoch = 0

        if cfg['learner']['algo'] == 'expert':
            assert 'debug_mode' in cfg['env']['env_specific_kwargs'] and cfg['env']['env_specific_kwargs']['debug_mode'], \
                'need to use debug mode with expert algo'

        if cfg['learner']['perception_network_reinit'] and cfg['learner']['algo'] == 'ppo':
            logger.info('Reinitializing perception network, use with caution')
            old_perception_unit = actor_critic.base.perception_unit
            new_perception_unit = eval(cfg['learner']['perception_network'])(
                cfg['learner']['num_stack'], **cfg['learner']['perception_network_kwargs'])
            new_perception_unit.main_perception = old_perception_unit
            actor_critic.base.perception_unit = new_perception_unit
            # Re-wrap for multi-GPU, since the new perception unit is not parallelized yet
            if (actor_critic.gpu_devices is None or len(actor_critic.gpu_devices) == 1) and len(cfg['training']['gpu_devices']) > 1:
                actor_critic.data_parallel(cfg['training']['gpu_devices'])
            actor_critic.gpu_devices = cfg['training']['gpu_devices']
            agent.gpu_devices = cfg['training']['gpu_devices']

        # Machinery for storing observations and rollouts
        num_train_processes = cfg['env']['num_processes'] - cfg['env']['num_val_processes']
        num_val_processes = cfg['env']['num_val_processes']
        assert cfg['learner']['test'] or cfg['env']['num_val_processes'] < cfg['env']['num_processes'], \
            "Can't train without some training processes!"
        current_obs = StackedSensorDictStorage(cfg['env']['num_processes'], cfg['learner']['num_stack'], retained_obs_shape)
        if not cfg['learner']['test']:
            current_train_obs = StackedSensorDictStorage(num_train_processes, cfg['learner']['num_stack'], retained_obs_shape)
        logger.debug(f'Stacked obs shape {current_obs.obs_shape}')

        if cfg['learner']['use_replay'] and not cfg['learner']['test']:
            rollouts = RolloutSensorDictReplayBuffer(
                cfg['learner']['num_steps'], num_train_processes, current_obs.obs_shape, action_space,
                cfg['learner']['internal_state_size'], actor_critic,
                cfg['learner']['use_gae'], cfg['learner']['gamma'], cfg['learner']['tau'],
                cfg['learner']['replay_buffer_size'],
                batch_multiplier=cfg['learner']['rollout_value_batch_multiplier'])
        else:
            rollouts = RolloutSensorDictStorage(
                cfg['learner']['num_steps'], num_train_processes, current_obs.obs_shape,
                action_space, cfg['learner']['internal_state_size'])

        # Set up logging
        if cfg['saving']['logging_type'] == 'visdom':
            mlog = tnt.logger.VisdomMeterLogger(
                title=uuid, env=uuid, server=cfg['saving']['visdom_server'],
                port=cfg['saving']['visdom_port'],
                log_to_filename=cfg['saving']['visdom_log_file'])
        elif cfg['saving']['logging_type'] == 'tensorboard':
            mlog = tnt.logger.TensorboardMeterLogger(
                env=uuid, log_dir=cfg['saving']['log_dir'], plotstylecombined=True)
        else:
            raise NotImplementedError(f"Unknown logger type: ({cfg['saving']['logging_type']})")  # was missing the f-prefix

        # Add metrics and logging to TB/Visdom
        loggable_metrics = ['metrics/rewards', 'diagnostics/dist_perplexity', 'diagnostics/lengths',
                            'diagnostics/max_importance_weight', 'diagnostics/value',
                            'losses/action_loss', 'losses/dist_entropy', 'losses/value_loss',
                            'introspect/alpha']
        if 'intrinsic_loss_types' in cfg['learner']['loss_kwargs']:
            for iloss in cfg['learner']['loss_kwargs']['intrinsic_loss_types']:
                loggable_metrics.append(f'losses/{iloss}')
        core_metrics = ['metrics/rewards', 'diagnostics/lengths']
        debug_metrics = ['debug/input_images']
        if 'habitat' in cfg['env']['env_name'].lower():
            for metric in ['metrics/collisions', 'metrics/spl', 'metrics/success']:
                loggable_metrics.append(metric)
                core_metrics.append(metric)
        for meter in loggable_metrics:
            mlog.add_meter(meter, tnt.meter.ValueSummaryMeter())
        for debug_meter in debug_metrics:
            mlog.add_meter(debug_meter, tnt.meter.SingletonMeter(), ptype='image')
        try:
            for attr in cfg['learner']['perception_network_kwargs']['extra_kwargs']['attrs_to_remember']:
                mlog.add_meter(f'diagnostics/{attr}', tnt.meter.ValueSummaryMeter(), ptype='histogram')
        except KeyError:
            pass
        mlog.add_meter('config', tnt.meter.SingletonMeter(), ptype='text')
        mlog.update_meter(cfg_to_md(cfg, uuid), meters={'config'}, phase='train')

        # File loggers
        flog = tnt.logger.FileLogger(cfg['saving']['results_log_file'], overwrite=True)
        try:
            flog_keys_to_remove = [f'diagnostics/{k}'
                                   for k in cfg['learner']['perception_network_kwargs']['extra_kwargs']['attrs_to_remember']]
        except KeyError:
            warnings.warn('Unable to find flog keys to remove')
            flog_keys_to_remove = []
        reward_only_flog = tnt.logger.FileLogger(cfg['saving']['reward_log_file'], overwrite=True)
        if changed_log_dir:
            evkit.utils.logging.replay_logs(existing_log_paths, mlog)
            evkit.utils.logging.move_metadata_file(old_log_dir, cfg['saving']['log_dir'], uuid)

        # Move everything to GPU
        if cfg['training']['cuda']:
            if not cfg['learner']['test']:
                current_train_obs = current_train_obs.cuda(device=cfg['training']['gpu_devices'][0])
            current_obs = current_obs.cuda(device=cfg['training']['gpu_devices'][0])
            try:
                actor_critic.cuda(device=cfg['training']['gpu_devices'][0])
            except UnboundLocalError as e:
                logger.error(f'Cannot put actor critic on cuda. Are you using a checkpoint and is it being found/initialized properly? {e}')
                raise e

        episode_rewards = torch.zeros([cfg['env']['num_processes'], 1])
        episode_lengths = torch.zeros([cfg['env']['num_processes'], 1])
        episode_tracker = evkit.utils.logging.EpisodeTracker(cfg['env']['num_processes'])

        if cfg['learner']['test']:
            all_episodes = []
            actor_critic.eval()
            try:
                actor_critic.base.perception_unit.sidetuner.attrs_to_remember = []  # do not cache activations at test time
            except AttributeError:
                pass

        # First observation
        obs = envs.reset()
        current_obs.insert(obs)
        mask_done = torch.FloatTensor([[0.0] for _ in range(cfg['env']['num_processes'])]).cuda(
            device=cfg['training']['gpu_devices'][0], non_blocking=True)
        states = torch.zeros(cfg['env']['num_processes'], cfg['learner']['internal_state_size']).cuda(
            device=cfg['training']['gpu_devices'][0], non_blocking=True)
        try:
            actor_critic.reset(envs=envs)
        except TypeError:
            actor_critic.reset()

        # Main loop
        start_time = time.time()
        n_episodes_completed = 0
        num_updates = int(cfg['training']['num_frames']) // (cfg['learner']['num_steps'] * cfg['env']['num_processes'])
        if cfg['learner']['test']:
            logger.info(f"Running {cfg['learner']['test_k_episodes']} test episodes")
        else:
            logger.info(f'Running until num updates == {num_updates}')
        for j in range(start_epoch, num_updates, 1):
            for step in range(cfg['learner']['num_steps']):
                obs_unpacked = {k: current_obs.peek()[k].peek() for k in current_obs.peek()}
                if j == start_epoch and step < 10:
                    log_input_images(obs_unpacked, mlog, num_stack=cfg['learner']['num_stack'],
                                     key_names=['rgb_filled', 'map'],
                                     meter_name='debug/input_images', step_num=step)

                # Sample actions
                with torch.no_grad():
                    value, action, action_log_prob, states = actor_critic.act(
                        obs_unpacked, states, mask_done, cfg['learner']['deterministic'])
                cpu_actions = list(action.squeeze(1).cpu().numpy())

                # Observe reward and next obs
                obs, reward, done, info = envs.step(cpu_actions)
                mask_done_cpu = torch.FloatTensor([[0.0] if done_ else [1.0] for done_ in done])
                mask_done = mask_done_cpu.cuda(device=cfg['training']['gpu_devices'][0], non_blocking=True)
                reward = torch.from_numpy(np.expand_dims(np.stack(reward), 1)).float()
                episode_tracker.append(obs, cpu_actions)

                if cfg['learner']['test']:
                    try:
                        mlog.update_meter(actor_critic.perplexity.cpu(), meters={'diagnostics/dist_perplexity'}, phase='val')
                        mlog.update_meter(actor_critic.entropy.cpu(), meters={'losses/dist_entropy'}, phase='val')
                        mlog.update_meter(value.cpu(), meters={'diagnostics/value'}, phase='val')
                    except AttributeError:
                        pass

                # Bookkeeping for finished episodes
                episode_rewards += reward
                episode_lengths += (1 + cfg['env']['additional_repeat_count'])
                for i, (r, l, done_) in enumerate(zip(episode_rewards, episode_lengths, done)):
                    if done_:
                        n_episodes_completed += 1
                        if cfg['learner']['test']:
                            info[i]['reward'] = r.item()
                            info[i]['length'] = l.item()
                            if 'debug_mode' in cfg['env']['env_specific_kwargs'] and cfg['env']['env_specific_kwargs']['debug_mode']:
                                info[i]['scene_id'] = envs.env.env.env._env.current_episode.scene_id
                                info[i]['episode_id'] = envs.env.env.env._env.current_episode.episode_id
                            all_episodes.append({'info': info[i], 'history': episode_tracker.episodes[i][:-1]})
                        episode_tracker.clear_episode(i)
                        phase = 'train' if i < num_train_processes else 'val'
                        mlog.update_meter(r.item(), meters={'metrics/rewards'}, phase=phase)
                        mlog.update_meter(l.item(), meters={'diagnostics/lengths'}, phase=phase)
                        if 'habitat' in cfg['env']['env_name'].lower():
                            mlog.update_meter(info[i]['collisions'], meters={'metrics/collisions'}, phase=phase)
                            if scenario == 'PointNav':
                                mlog.update_meter(info[i]['spl'], meters={'metrics/spl'}, phase=phase)
                                mlog.update_meter(info[i]['success'], meters={'metrics/success'}, phase=phase)
                        if 'debug_mode' in cfg['env']['env_specific_kwargs'] and cfg['env']['env_specific_kwargs']['debug_mode']:
                            obs = envs.reset()
                            try:
                                actor_critic.reset(envs=envs)
                            except TypeError:
                                actor_critic.reset()
                episode_rewards *= mask_done_cpu
                episode_lengths *= mask_done_cpu

                # Insert the new observation into rollout storage
                current_obs.insert(obs, mask_done)
                if not cfg['learner']['test']:
                    for k in obs:
                        if k in current_train_obs.sensor_names:
                            current_train_obs[k].insert(obs[k][:num_train_processes], mask_done[:num_train_processes])
                    rollouts.insert(current_train_obs.peek(),
                                    states[:num_train_processes],
                                    action[:num_train_processes],
                                    action_log_prob[:num_train_processes],
                                    value[:num_train_processes],
                                    reward[:num_train_processes],
                                    mask_done[:num_train_processes])
                    mlog.update_meter(value[:num_train_processes].mean().item(), meters={'diagnostics/value'}, phase='train')

            # Training update
            if not cfg['learner']['test']:
                if not cfg['learner']['use_replay']:
                    with torch.no_grad():
                        next_value = actor_critic.get_value(
                            rollouts.observations.at(-1), rollouts.states[-1], rollouts.masks[-1]).detach()
                    rollouts.compute_returns(next_value, cfg['learner']['use_gae'], cfg['learner']['gamma'], cfg['learner']['tau'])
                value_loss, action_loss, dist_entropy, max_importance_weight, info = agent.update(rollouts)
                rollouts.after_update()

                mlog.update_meter(dist_entropy, meters={'losses/dist_entropy'})
                mlog.update_meter(np.exp(dist_entropy), meters={'diagnostics/dist_perplexity'})
                mlog.update_meter(value_loss, meters={'losses/value_loss'})
                mlog.update_meter(action_loss, meters={'losses/action_loss'})
                mlog.update_meter(max_importance_weight, meters={'diagnostics/max_importance_weight'})
                if 'intrinsic_loss_types' in cfg['learner']['loss_kwargs'] and len(cfg['learner']['loss_kwargs']['intrinsic_loss_types']) > 0:
                    for iloss in cfg['learner']['loss_kwargs']['intrinsic_loss_types']:
                        mlog.update_meter(info[iloss], meters={f'losses/{iloss}'})
                try:
                    for attr in cfg['learner']['perception_network_kwargs']['extra_kwargs']['attrs_to_remember']:
                        mlog.update_meter(info[attr].cpu(), meters={f'diagnostics/{attr}'})
                except KeyError:
                    pass

            # Track the sidetuning alpha blending parameter, if the model has one
            try:
                if hasattr(actor_critic, 'module'):
                    alpha = [param for name, param in actor_critic.module.named_parameters() if 'alpha' in name][0]
                else:
                    alpha = [param for name, param in actor_critic.named_parameters() if 'alpha' in name][0]
                mlog.update_meter(torch.sigmoid(alpha).detach().item(), meters={'introspect/alpha'})
            except IndexError:
                pass

            # Print log
            if j % cfg['saving']['log_interval'] == 0:
                torch.cuda.empty_cache()
                GPUtil.showUtilization()
                count_open()
                num_relevant_processes = num_val_processes if cfg['learner']['test'] else num_train_processes
                n_steps_since_logging = cfg['saving']['log_interval'] * num_relevant_processes * cfg['learner']['num_steps']
                total_num_steps = (j + 1) * num_relevant_processes * cfg['learner']['num_steps']

                logger.info('Update {}, num timesteps {}, FPS {}'.format(
                    j + 1, total_num_steps, int(n_steps_since_logging / (time.time() - start_time))))
                logger.info(f'Completed episodes: {n_episodes_completed}')
                viable_modes = ['val'] if cfg['learner']['test'] else ['train', 'val']
                for metric in core_metrics:
                    for mode in viable_modes:
                        if metric in core_metrics or mode == 'train':
                            mlog.print_meter(mode, total_num_steps, meterlist={metric})
                if not cfg['learner']['test']:
                    for mode in viable_modes:
                        results = mlog.peek_meter(phase=mode)
                        reward_only_flog.log(mode, {metric: results[metric] for metric in core_metrics})
                        if mode == 'train':
                            results_to_log = {}
                            results['step_num'] = j + 1
                            results_to_log['step_num'] = results['step_num']
                            for k, v in results.items():
                                if k in flog_keys_to_remove:
                                    warnings.warn(f'Removing {k} from results_log.pkl due to large size')
                                else:
                                    results_to_log[k] = v
                            flog.log('all_results', results_to_log)
                        mlog.reset_meter(total_num_steps, mode=mode)
                start_time = time.time()

            # Save checkpoint
            if not cfg['learner']['test'] and j % cfg['saving']['save_interval'] == 0:
                save_dir_absolute = os.path.join(cfg['saving']['log_dir'], cfg['saving']['save_dir'])
                save_checkpoint({'agent': agent, 'epoch': j}, save_dir_absolute, j)

            # Stop testing once enough episodes have completed
            if 'test_k_episodes' in cfg['learner'] and n_episodes_completed >= cfg['learner']['test_k_episodes']:
                torch.save(all_episodes, os.path.join(cfg['saving']['log_dir'], 'validation.pth'))
                all_episodes = all_episodes[:cfg['learner']['test_k_episodes']]
                spl_mean = np.mean([episode['info']['spl'] for episode in all_episodes])
                success_mean = np.mean([episode['info']['success'] for episode in all_episodes])
                reward_mean = np.mean([episode['info']['reward'] for episode in all_episodes])
                logger.info('------------ done with testing -------------')
                logger.info(f'SPL: {spl_mean} --- Success: {success_mean} --- Reward: {reward_mean}')
                for metric in mlog.meter['val'].keys():
                    mlog.print_meter('val', -1, meterlist={metric})
                break
    finally:
        print(psutil.virtual_memory())
        GPUtil.showUtilization(all=True)
        try:
            logger.info('### Done - Killing envs.')
            if isinstance(envs, list):
                [env.close() for env in envs]
            else:
                envs.close()
            logger.info('Killed envs.')
        except UnboundLocalError:
            logger.info('No envs to kill!')
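# The episodes dumped to validation.pth at test time can be analyzed offline.
# A minimal sketch (assumes a Habitat PointNav run, so each episode's 'info'
# dict carries 'spl' and 'success' in addition to 'reward' and 'length'):
def _example_read_validation(log_dir):
    episodes = torch.load(os.path.join(log_dir, 'validation.pth'))
    print('mean SPL:', np.mean([ep['info']['spl'] for ep in episodes]))
    print('mean success:', np.mean([ep['info']['success'] for ep in episodes]))
    print('mean reward:', np.mean([ep['info']['reward'] for ep in episodes]))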
class ExpertData(data.Dataset):
    def __init__(self, data_path, keys, num_frames, split='train', transform: dict = {},
                 load_to_mem=False, remove_last_step_in_traj=True, removed_actions=[]):
        '''
        Expected data layout:
            /path/to/data/
                scenek/
                    trajj/
                        rgb_1.png
                        map_1.npz
                        action_1.npz
                        ...
                        rgb_24.png
                        map_24.npz
                        action_24.npz
        '''
        # Note: the original `assert 'bad directory'` was a no-op (non-empty strings are truthy)
        assert os.path.isdir(data_path), f'bad directory: {data_path}'
        self.keys = keys
        self.urls = {k: [] for k in self.keys}
        print(f'Loading {split} data')
        for scene in tqdm(sorted(os.listdir(os.path.join(data_path, split)))):
            for traj in sorted(os.listdir(os.path.join(data_path, split, scene))):
                for step in sorted(os.listdir(os.path.join(data_path, split, scene, traj))):
                    path = os.path.join(data_path, split, scene, traj, step)
                    key = [k for k in self.keys if k in path]
                    if len(key) != 1:
                        continue
                    self.urls[key[0]].append(path)
                if remove_last_step_in_traj:
                    for k in self.keys:
                        self.urls[k].pop()  # drop the final step of each trajectory
        lens = [len(v) for k, v in self.urls.items()]
        assert max(lens) == min(lens), f'should have same number of each key: {keys} with lens {lens}'

        self.load_to_mem = load_to_mem
        if self.load_to_mem:
            print('Loading trajectories to memory')
            self.cached_data = {}
            for k, objs in self.urls.items():
                if 'rgb' in k:
                    self.cached_data[k] = [np.asarray(Image.open(obj)) for obj in objs]
                else:
                    self.cached_data[k] = [np.load(obj) for obj in objs]

        self.num_frames = num_frames
        self.transform = transform
        for k in self.transform.keys():
            assert k in self.keys, f'transform {k} not in keys {self.keys}'
        self.removed_actions = removed_actions

    def __len__(self):
        return len(self.urls[self.keys[0]])

    def __getitem__(self, index):
        episode_num = self._episode_num(index)
        ret = [[] for _ in self.keys]
        for i in range(self.num_frames):
            if episode_num == self._episode_num(index - i):
                for key_idx, frame_data in enumerate(self._get_index(index - i)):
                    ret[key_idx].append(frame_data)
            else:
                # Ran off the beginning of the episode: zero-pad the frame stack
                for key_idx in range(len(self.keys)):
                    ret[key_idx].append(np.zeros_like(ret[key_idx][0]))
        for i in range(len(self.keys)):
            if i == self.keys.index('action'):
                ret[i] = ret[i][0]  # keep only the most recent action
                if isinstance(ret[i], list) or (isinstance(ret[i], np.ndarray) and len(ret[i].shape) > 0):
                    # One-hot action: resample removed actions uniformly, then drop their slots
                    num_acts = len(ret[i])
                    while np.argmax(ret[i]) in self.removed_actions:
                        rand_act = np.zeros(num_acts, dtype=np.uint8)
                        rand_act[np.random.randint(num_acts)] = 1
                        ret[i] = rand_act
                    keep_indices = [idx for idx in range(num_acts) if idx not in self.removed_actions]
                    ret[i] = ret[i][keep_indices]
                elif ret[i] in self.removed_actions:
                    ret[i] = np.array(np.random.randint(min(self.removed_actions)))
            else:
                # Stack the frames oldest-to-newest along the channel dimension
                ret[i] = np.concatenate(ret[i][::-1], axis=0)
        return ret

    def _episode_num(self, index):
        return self.urls[self.keys[0]][index].split('/')[-2]

    def _get_index(self, index):
        if self.load_to_mem:
            ret = [self.cached_data[k][index] for k in self.keys]  # was self.keys(), which raises TypeError
        else:
            ret = []
            for k in self.keys:
                path = self.urls[k][index]
                if 'rgb' in k:
                    with open(path, 'rb') as f:
                        img = Image.open(f)
                        img = img.convert(img.mode)  # force a load (and keep the copy) before the file handle closes
                    ret.append(img)
                else:
                    ret.append(np.load(path)['arr_0'])
        for k, t in self.transform.items():
            idx = self.keys.index(k)
            ret[idx] = t(ret[idx])
        return ret
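# Example usage of ExpertData (a sketch; the path is hypothetical and the layout
# must match the docstring above, with each key appearing in the step filenames):
#
#   expert_train = ExpertData('/path/to/data', keys=['rgb', 'map', 'action'],
#                             num_frames=4, split='train',
#                             transform={}, removed_actions=[])
#   loader = data.DataLoader(expert_train, batch_size=32, shuffle=True, num_workers=4)
#   rgb_stack, map_stack, action = next(iter(loader))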