idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
231,400
def is_provided(self, name):
    """Capability check: return True if this evaluator can provide the given value.

    Mirrors the lookup order of ``get``: stored values, registered providers,
    then ``rollout:``-prefixed tensors served from the rollout.
    """
    if name in self._storage:
        return True
    elif name in self._providers:
        return True
    elif name.startswith('rollout:'):
        # BUG FIX: the original computed the rollout key here and then fell
        # through, implicitly returning None - even though `get` can serve
        # every 'rollout:' key via rollout.batch_tensor. Report capability.
        return True
    else:
        return False
Capability check if evaluator provides given value
59
10
231,401
def get(self, name):
    """Return a value from this evaluator, computing and caching it when needed.

    Lookup order: already-stored values, registered provider functions
    (result is cached), then ``rollout:``-prefixed tensors pulled from the
    rollout (also cached). Raises RuntimeError for unknown keys.
    """
    if name in self._storage:
        return self._storage[name]

    if name in self._providers:
        computed = self._providers[name](self)
        self._storage[name] = computed
        return computed

    if name.startswith('rollout:'):
        tensor = self.rollout.batch_tensor(name[8:])
        self._storage[name] = tensor
        return tensor

    raise RuntimeError(f"Key {name} is not provided by this evaluator")
Return a value from this evaluator .
127
9
231,402
def create(model_config, batch_size, normalize=True, num_workers=0, augmentations=None):
    """Create a MNIST dataset, optionally normalized.

    :param model_config: project config object providing data_dir()
    :param batch_size: batch size for the data loaders
    :param normalize: compute train-set mean/std and add a Normalize augmentation
    :param num_workers: number of data-loading workers
    :param augmentations: optional extra augmentations, applied after ToArray
    """
    path = model_config.data_dir('mnist')
    train_dataset = datasets.MNIST(path, train=True, download=True)
    test_dataset = datasets.MNIST(path, train=False, download=True)
    # Prepend raw->array conversion before any user-supplied augmentations;
    # note this builds a new list, the caller's list is not mutated
    augmentations = [ToArray()] + (augmentations if augmentations is not None else [])
    if normalize:
        # NOTE(review): `train_data` is the legacy torchvision attribute
        # (newer versions expose `.data`) - confirm the pinned torchvision version
        train_data = train_dataset.train_data
        mean_value = (train_data.double() / 255).mean().item()
        std_value = (train_data.double() / 255).std().item()
        augmentations.append(Normalize(mean=mean_value, std=std_value, tags=['train', 'val']))
    augmentations.append(ToTensor())
    return TrainingData(
        train_dataset,
        test_dataset,
        num_workers=num_workers,
        batch_size=batch_size,
        augmentations=augmentations
    )
Create a MNIST dataset normalized
253
6
231,403
def reset(self, configuration: dict) -> None:
    """Whether or not anything was stored in the database, purge previous
    state and start a new training process from scratch."""
    # clean(0) removes every checkpoint with epoch index greater than 0
    self.clean(0)
    self.backend.store_config(configuration)
Whether or not anything was stored in the database, purge the previous state and start a new training process from scratch .
28
21
231,404
def load(self, train_info: TrainingInfo) -> (dict, dict):
    """Resume the learning process.

    Loads the model checkpoint and the hidden (training) state for the start
    epoch, lets the checkpoint strategy and train_info restore themselves from
    it, and returns both state dictionaries.
    """
    epoch_idx = train_info.start_epoch_idx
    model_state = torch.load(self.checkpoint_filename(epoch_idx))
    hidden_state = torch.load(self.checkpoint_hidden_filename(epoch_idx))
    self.checkpoint_strategy.restore(hidden_state)
    train_info.restore(hidden_state)
    return model_state, hidden_state
Resume learning process and return loaded hidden state dictionary
108
10
231,405
def clean(self, global_epoch_idx):
    """Clean old checkpoints: remove every checkpoint file whose epoch index
    is greater than ``global_epoch_idx``. Idempotent - runs at most once.
    """
    if self.cleaned:
        return
    self.cleaned = True
    self.backend.clean(global_epoch_idx)
    self._make_sure_dir_exists()
    # The original repeated identical match-and-remove logic for each of the
    # three checkpoint file families; iterate over the patterns instead.
    # The patterns are mutually exclusive (digits must follow the prefix).
    patterns = [
        r'checkpoint_(\d+)\.data',
        r'checkpoint_hidden_(\d+)\.data',
        r'checkpoint_best_(\d+)\.data',
    ]
    checkpoint_dir = self.model_config.checkpoint_dir()
    for filename in os.listdir(checkpoint_dir):
        for pattern in patterns:
            match = re.match(pattern, filename)
            if match and int(match[1]) > global_epoch_idx:
                os.remove(os.path.join(checkpoint_dir, filename))
                break  # file is gone; no need to test remaining patterns
Clean old checkpoints
287
3
231,406
def checkpoint(self, epoch_info: EpochInfo, model: Model):
    """When an epoch is done, persist the training state.

    Saves the latest model checkpoint and hidden (training) state, optionally
    deletes the previous epoch's pair, and maintains a separate 'best'
    checkpoint according to the checkpoint strategy. Finally stores the epoch
    result in the backend.
    """
    # Remove any checkpoints newer than the one we are about to write
    self.clean(epoch_info.global_epoch_idx - 1)
    self._make_sure_dir_exists()
    # Checkpoint latest
    torch.save(model.state_dict(), self.checkpoint_filename(epoch_info.global_epoch_idx))
    hidden_state = epoch_info.state_dict()
    self.checkpoint_strategy.write_state_dict(hidden_state)
    torch.save(hidden_state, self.checkpoint_hidden_filename(epoch_info.global_epoch_idx))
    # Optionally drop the previous epoch's checkpoint pair
    if epoch_info.global_epoch_idx > 1 and self.checkpoint_strategy.should_delete_previous_checkpoint(epoch_info.global_epoch_idx):
        prev_epoch_idx = epoch_info.global_epoch_idx - 1
        os.remove(self.checkpoint_filename(prev_epoch_idx))
        os.remove(self.checkpoint_hidden_filename(prev_epoch_idx))
    # Track the best checkpoint seen so far (old best is removed first)
    if self.checkpoint_strategy.should_store_best_checkpoint(epoch_info.global_epoch_idx, epoch_info.result):
        best_checkpoint_idx = self.checkpoint_strategy.current_best_checkpoint_idx
        if best_checkpoint_idx is not None:
            os.remove(self.checkpoint_best_filename(best_checkpoint_idx))
        torch.save(model.state_dict(), self.checkpoint_best_filename(epoch_info.global_epoch_idx))
        self.checkpoint_strategy.store_best_checkpoint_idx(epoch_info.global_epoch_idx)
    self.backend.store(epoch_info.result)
When epoch is done we persist the training state
416
9
231,407
def _persisted_last_epoch ( self ) -> int : epoch_number = 0 self . _make_sure_dir_exists ( ) for x in os . listdir ( self . model_config . checkpoint_dir ( ) ) : match = re . match ( 'checkpoint_(\\d+)\\.data' , x ) if match : idx = int ( match [ 1 ] ) if idx > epoch_number : epoch_number = idx return epoch_number
Return number of last epoch already calculated
105
7
231,408
def _make_sure_dir_exists ( self ) : filename = self . model_config . checkpoint_dir ( ) pathlib . Path ( filename ) . mkdir ( parents = True , exist_ok = True )
Make sure directory exists
48
4
231,409
def clip_gradients(batch_result, model, max_grad_norm):
    """Clip gradients of the model's trainable parameters to a maximum norm.

    Records the (pre-clip) gradient norm under ``batch_result['grad_norm']``;
    when ``max_grad_norm`` is None no clipping happens and 0.0 is recorded.
    """
    if max_grad_norm is None:
        batch_result['grad_norm'] = 0.0
        return
    trainable = (p for p in model.parameters() if p.requires_grad)
    batch_result['grad_norm'] = torch.nn.utils.clip_grad_norm_(
        trainable, max_norm=max_grad_norm
    )
Clip gradients to a given maximum length
99
9
231,410
def sample_trajectories(self, rollout_length, batch_info) -> Trajectories:
    """Sample a batch of trajectories from the replay backend and return them."""
    indexes = self.backend.sample_batch_trajectories(rollout_length)
    raw_tensors = self.backend.get_trajectories(indexes, rollout_length)
    as_torch = {key: torch.from_numpy(array) for key, array in raw_tensors.items()}
    return Trajectories(
        num_steps=rollout_length,
        num_envs=self.backend.num_envs,
        environment_information=None,
        transition_tensors=as_torch,
        rollout_tensors={},
    )
Sample batch of trajectories and return them
138
8
231,411
def conjugate_gradient_method(matrix_vector_operator, loss_gradient, nsteps, rdotr_tol=1e-10):
    """Conjugate gradient algorithm.

    Approximately solves ``A x = b`` where ``A`` is only available through the
    ``matrix_vector_operator`` and ``b`` is ``loss_gradient``. Stops after
    ``nsteps`` iterations or when the squared residual drops below
    ``rdotr_tol``.
    """
    solution = torch.zeros_like(loss_gradient)
    residual = loss_gradient.clone()
    direction = loss_gradient.clone()
    residual_norm_sq = torch.dot(residual, residual)
    for _ in range(nsteps):
        a_dot_p = matrix_vector_operator(direction)
        step_size = residual_norm_sq / torch.dot(direction, a_dot_p)
        solution += step_size * direction
        residual -= step_size * a_dot_p
        new_residual_norm_sq = torch.dot(residual, residual)
        # Polak-style update of the search direction
        direction = residual + (new_residual_norm_sq / residual_norm_sq) * direction
        residual_norm_sq = new_residual_norm_sq
        if residual_norm_sq < rdotr_tol:
            break
    return solution
Conjugate gradient algorithm
182
5
231,412
def line_search(self, model, rollout, original_policy_loss, original_policy_params, original_parameter_vec,
                full_step, expected_improvement_full):
    """Find the right stepsize (by backtracking) to make sure the policy improves.

    Returns ``(success, ratio, actual_improvement, new_loss, kl_divergence)``.
    On failure the model parameters are reverted to the original vector and
    zero tensors are returned for the metrics.
    """
    current_parameter_vec = original_parameter_vec.clone()
    for idx in range(self.line_search_iters):
        # Backtracking: halve the step on every iteration
        stepsize = 0.5 ** idx
        new_parameter_vec = current_parameter_vec + stepsize * full_step
        # Update model parameters
        v2p(new_parameter_vec, model.policy_parameters())
        # Calculate new loss
        with torch.no_grad():
            policy_params = model.policy(rollout.batch_tensor('observations'))
            policy_entropy = torch.mean(model.entropy(policy_params))
            kl_divergence = torch.mean(model.kl_divergence(original_policy_params, policy_params))
            new_loss = self.calc_policy_loss(model, policy_params, policy_entropy, rollout)
        actual_improvement = original_policy_loss - new_loss
        expected_improvement = expected_improvement_full * stepsize
        ratio = actual_improvement / expected_improvement
        # NOTE(review): attribute is spelled `mak_kl` - presumably the maximum
        # KL-divergence setting (`max_kl`); confirm against the class definition.
        if kl_divergence.item() > self.mak_kl * 1.5:
            # KL divergence bound exceeded
            continue
        elif ratio < expected_improvement:
            # Not enough loss improvement
            # NOTE(review): comparing the improvement *ratio* against the raw
            # expected-improvement value looks odd - a fixed acceptance-ratio
            # threshold would be more usual; confirm intent.
            continue
        else:
            # Optimization successful
            return True, ratio, actual_improvement, new_loss, kl_divergence
    # Optimization failed, revert to initial parameters
    v2p(original_parameter_vec, model.policy_parameters())
    return False, torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0), torch.tensor(0.0)
Find the right stepsize to make sure policy improves
418
10
231,413
def fisher_vector_product(self, vector, kl_divergence_gradient, model):
    """Calculate the product of the Hessian of the KL divergence (the Fisher
    matrix) with the given vector, plus damping.

    Uses the double-backward trick: grad of (vector . grad KL) w.r.t. the
    policy parameters equals the Hessian-vector product.
    """
    assert not vector.requires_grad, "Vector must not propagate gradient"
    dot_product = vector @ kl_divergence_gradient
    # at least one dimension spans across two contiguous subspaces
    double_gradient = torch.autograd.grad(dot_product, model.policy_parameters(), retain_graph=True)
    fvp = p2v(x.contiguous() for x in double_gradient)
    # cg_damping adds a multiple of the identity for numerical stability
    return fvp + vector * self.cg_damping
Calculate product Hessian
127
6
231,414
def value_loss(self, model, observations, discounted_rewards):
    """Loss of the value estimator: half the MSE between the model's value
    predictions for the observations and the discounted rewards."""
    predictions = model.value(observations)
    return 0.5 * F.mse_loss(predictions, discounted_rewards)
Loss of value estimator
57
6
231,415
def calc_policy_loss(self, model, policy_params, policy_entropy, rollout):
    """Policy gradient (surrogate) loss calculated from the probability
    distribution, with an entropy bonus.

    Uses the importance-sampling ratio between the model's current log-probs
    and the log-probs recorded during rollout.
    """
    actions = rollout.batch_tensor('actions')
    advantages = rollout.batch_tensor('advantages')
    fixed_logprobs = rollout.batch_tensor('action:logprobs')
    model_logprobs = model.logprob(actions, policy_params)
    # Normalize advantages
    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
    # Negative sign: the optimizer minimizes, but we want to maximize the
    # surrogate objective
    likelihood_ratio = torch.exp(model_logprobs - fixed_logprobs)
    surrogate = -(advantages * likelihood_ratio).mean()
    return surrogate - policy_entropy * self.entropy_coef
Policy gradient loss - calculate from probability distribution
174
8
231,416
def shuffled_batches(self, batch_size):
    """Generate randomized batches of transition data."""
    if batch_size >= self.size:
        # Not enough data to split - the whole set is one batch
        yield self
        return
    num_splits = math_util.divide_ceiling(self.size, batch_size)
    shuffled_indices = list(range(self.size))
    np.random.shuffle(shuffled_indices)
    for chunk in np.array_split(shuffled_indices, num_splits):
        yield Transitions(
            size=len(chunk),
            environment_information=None,
            # Dont use it in batches for a moment, can be uncommented later if needed
            # environment_information=[info[chunk.tolist()] for info in self.environment_information]
            transition_tensors={k: v[chunk] for k, v in self.transition_tensors.items()}
            # extra_data does not go into batches
        )
Generate randomized batches of data
195
6
231,417
def to_transitions(self) -> 'Transitions':
    """Convert this rollout to flat Transitions (step and env dims merged)."""
    # No need to propagate 'rollout_tensors' as they won't mean anything
    if self.environment_information is None:
        flat_info = None
    else:
        flat_info = [info for step_infos in self.environment_information for info in step_infos]
    merged_tensors = {name: tensor_util.merge_first_two_dims(tensor)
                      for name, tensor in self.transition_tensors.items()}
    return Transitions(
        size=self.num_steps * self.num_envs,
        environment_information=flat_info,
        transition_tensors=merged_tensors,
        extra_data=self.extra_data
    )
Convert given rollout to Transitions
138
7
231,418
def shuffled_batches(self, batch_size):
    """Generate randomized batches of data - only sample whole trajectories.

    Trajectories are split along the environment axis, so every batch keeps
    complete (num_steps long) trajectories.
    """
    if batch_size >= self.num_envs * self.num_steps:
        yield self
    else:
        # How many whole trajectories (environment columns) fit in one batch
        rollouts_in_batch = batch_size // self.num_steps
        batch_splits = math_util.divide_ceiling(self.num_envs, rollouts_in_batch)
        indices = list(range(self.num_envs))
        np.random.shuffle(indices)
        for sub_indices in np.array_split(indices, batch_splits):
            yield Trajectories(
                num_steps=self.num_steps,
                num_envs=len(sub_indices),
                # Dont use it in batches for a moment, can be uncommented later if needed
                # environment_information=[x[sub_indices.tolist()] for x in self.environment_information],
                environment_information=None,
                # transition tensors are indexed (step, env, ...) - pick env columns
                transition_tensors={k: x[:, sub_indices] for k, x in self.transition_tensors.items()},
                rollout_tensors={k: x[sub_indices] for k, x in self.rollout_tensors.items()},
                # extra_data does not go into batches
            )
Generate randomized batches of data - only sample whole trajectories
280
12
231,419
def episode_information(self):
    """List of information records about finished episodes.

    Scans per-step environment info lists and collects every 'episode' entry.
    """
    episodes = []
    for step_infos in self.environment_information:
        for info in step_infos:
            if 'episode' in info:
                episodes.append(info.get('episode'))
    return episodes
List of information about finished episodes
41
6
231,420
def forward_state(self, sequence, state=None):
    """Forward propagate a sequence through the network, accounting for the
    recurrent state.

    :param sequence: input batch of sequences
    :param state: concatenated hidden state for all recurrent layers, or None
                  to start from the zero state
    :return: (output, concatenated hidden state after processing the sequence)
    """
    if state is None:
        state = self.zero_state(sequence.size(0))
    data = self.input_block(sequence)
    state_outputs = []
    # for layer_length, layer in zip(self.hidden_layers, self.recurrent_layers):
    for idx in range(len(self.recurrent_layers)):
        layer_length = self.recurrent_layers[idx].state_dim
        # Partition hidden state, for each layer we have layer_length of h state and layer_length of c state
        current_state = state[:, :, :layer_length]
        # Remaining slices belong to the following layers
        state = state[:, :, layer_length:]
        # Propagate through the GRU state
        data, new_h = self.recurrent_layers[idx](data, current_state)
        if self.dropout_layers:
            data = self.dropout_layers[idx](data)
        state_outputs.append(new_h)
    output_data = self.output_activation(self.output_layer(data))
    # Stitch per-layer output states back into a single tensor
    concatenated_hidden_output = torch.cat(state_outputs, dim=2)
    return output_data, concatenated_hidden_output
Forward propagate a sequence through the network accounting for the state
282
11
231,421
def loss_value(self, x_data, y_true, y_pred):
    """Calculate the loss value: negative log-likelihood over flattened
    (batch x time) predictions and integer targets."""
    num_classes = y_pred.size(2)
    flat_pred = y_pred.view(-1, num_classes)
    flat_true = y_true.view(-1).to(torch.long)
    return F.nll_loss(flat_pred, flat_true)
Calculate a value of loss function
78
8
231,422
def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None):
    """Prepare the model for training: restore a given state dict, or start
    from freshly reset weights when none is provided."""
    if model_state is not None:
        self.model.load_state_dict(model_state)
    else:
        self.model.reset_weights()
Prepare for training
59
4
231,423
def run_epoch(self, epoch_info: EpochInfo, source: 'vel.api.Source'):
    """Run a full epoch of learning: a training pass followed by a validation
    pass, freezing results under 'train' and 'val' respectively."""
    epoch_info.on_epoch_begin()
    # The last param group's learning rate is shown in the header line
    lr = epoch_info.optimizer.param_groups[-1]['lr']
    print("|-------- Epoch {:06} Lr={:.6f} ----------|".format(epoch_info.global_epoch_idx, lr))
    self.train_epoch(epoch_info, source)
    epoch_info.result_accumulator.freeze_results('train')
    self.validation_epoch(epoch_info, source)
    epoch_info.result_accumulator.freeze_results('val')
    epoch_info.on_epoch_end()
Run full epoch of learning
176
5
231,424
def train_epoch(self, epoch_info, source: 'vel.api.Source', interactive=True):
    """Run a single training epoch over the source's train loader.

    :param interactive: when True wrap the loader in a tqdm progress bar and
                        display the running loss
    """
    self.train()
    if interactive:
        iterator = tqdm.tqdm(source.train_loader(), desc="Training", unit="iter", file=sys.stdout)
    else:
        iterator = source.train_loader()
    for batch_idx, (data, target) in enumerate(iterator):
        batch_info = BatchInfo(epoch_info, batch_idx)
        batch_info.on_batch_begin()
        self.train_batch(batch_info, data, target)
        batch_info.on_batch_end()
        # BUG FIX: set_postfix exists only on the tqdm wrapper; calling it on
        # a plain data loader in non-interactive mode raised AttributeError.
        if interactive:
            iterator.set_postfix(loss=epoch_info.result_accumulator.intermediate_value('loss'))
Run a single training epoch
182
5
231,425
def validation_epoch(self, epoch_info, source: 'vel.api.Source'):
    """Run a single evaluation epoch over the validation loader, without
    computing gradients."""
    self.eval()
    progress = tqdm.tqdm(source.val_loader(), desc="Validation", unit="iter", file=sys.stdout)
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(progress):
            batch_info = BatchInfo(epoch_info, batch_idx)
            batch_info.on_validation_batch_begin()
            self.feed_batch(batch_info, data, target)
            batch_info.on_validation_batch_end()
Run a single evaluation epoch
152
5
231,426
def feed_batch(self, batch_info, data, target):
    """Run a single batch of data through the model and return the loss.

    The batch tensors and model output are stashed on ``batch_info`` so that
    callbacks and metrics can access them.
    """
    data = data.to(self.device)
    target = target.to(self.device)
    output, loss = self.model.loss(data, target)
    # Store extra batch information for calculation of the statistics
    batch_info['data'] = data
    batch_info['target'] = target
    batch_info['output'] = output
    batch_info['loss'] = loss
    return loss
Run single batch of data
103
5
231,427
def train_batch(self, batch_info, data, target):
    """Train on a single batch: zero gradients, forward, backward, optional
    gradient clipping, optimizer step."""
    batch_info.optimizer.zero_grad()
    loss = self.feed_batch(batch_info, data, target)
    loss.backward()
    if self.max_grad_norm is not None:
        trainable_params = [p for p in self.model.parameters() if p.requires_grad]
        batch_info['grad_norm'] = torch.nn.utils.clip_grad_norm_(
            trainable_params, max_norm=self.max_grad_norm
        )
    batch_info.optimizer.step()
Train single batch of data
129
5
231,428
def process_environment_settings(default_dictionary: dict, settings: typing.Optional[dict] = None,
                                 presets: typing.Optional[dict] = None):
    """Process a dictionary of per-environment settings.

    For every environment key present in either the defaults or the presets,
    build a dict of defaults overridden by the global ``settings`` and then by
    the per-environment ``presets``.
    """
    settings = {} if settings is None else settings
    presets = {} if presets is None else presets
    result_dict = {}
    for env_key in sorted(set(default_dictionary.keys()) | set(presets.keys())):
        merged = dict(default_dictionary.get(env_key, {}))
        merged.update(settings)
        merged.update(presets.get(env_key, {}))
        result_dict[env_key] = merged
    return result_dict
Process a dictionary of env settings
168
6
231,429
def roll_out_and_store(self, batch_info):
    """Roll out the environment and store the result in the replay buffer.

    When the buffer is not yet ready for sampling, keep rolling out until it
    is (the initial memory population phase), showing a progress bar.
    """
    self.model.train()
    if self.env_roller.is_ready_for_sampling():
        rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device)
        # Store some information about the rollout, no training phase
        batch_info['frames'] = rollout.frames()
        batch_info['episode_infos'] = rollout.episode_information()
    else:
        frames = 0
        episode_infos = []
        with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar:
            while not self.env_roller.is_ready_for_sampling():
                rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device)
                new_frames = rollout.frames()
                frames += new_frames
                episode_infos.extend(rollout.episode_information())
                pbar.update(new_frames)
        # Store some information about the rollout, no training phase
        batch_info['frames'] = frames
        batch_info['episode_infos'] = episode_infos
Roll out environment and store result in the replay buffer
301
10
231,430
def train_on_replay_memory(self, batch_info):
    """Train the agent on batches sampled from the replay buffer."""
    self.model.train()
    # Algo will aggregate data into this list:
    batch_info['sub_batch_data'] = []
    for _ in range(self.settings.training_rounds):
        sampled_rollout = self.env_roller.sample(batch_info, self.model, self.settings.training_steps)
        batch_result = self.algo.optimizer_step(
            batch_info=batch_info,
            device=self.device,
            model=self.model,
            rollout=sampled_rollout.to_device(self.device)
        )
        # Let the roller (e.g. prioritized replay) react to the training result
        self.env_roller.update(rollout=sampled_rollout, batch_info=batch_result)
        batch_info['sub_batch_data'].append(batch_result)
    batch_info.aggregate_key('sub_batch_data')
Train agent on a memory gotten from replay buffer
201
9
231,431
def conv3x3(in_channels, out_channels, stride=1):
    """3x3 convolution with padding.

    Bias is turned off because a following batch-norm layer would remove the
    bias either way.
    """
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
3x3 convolution with padding . Original code has had bias turned off because Batch Norm would remove the bias either way
57
25
231,432
def load(config_path, run_number=0, device='cuda:0'):
    """Load a ModelConfig from a config file path."""
    return ModelConfig.from_file(config_path, run_number, device=device)
Load a ModelConfig from filename
50
6
231,433
def restore(self, hidden_state):
    """Restore state from a checkpoint: let every callback pull its state,
    then keep the stored optimizer state (if any) for later loading."""
    for cb in self.callbacks:
        cb.load_state_dict(self, hidden_state)
    if 'optimizer' in hidden_state:
        self.optimizer_initial_state = hidden_state['optimizer']
Restore any state from checkpoint - currently not implemented but possible to do so in the future
60
18
231,434
def result(self):
    """Return the epoch result: the frozen metric values plus the epoch index."""
    summary = {'epoch_idx': self.global_epoch_idx}
    summary.update(self.frozen_results)
    return summary
Return the epoch result
56
4
231,435
def state_dict(self) -> dict:
    """Calculate the hidden-state dictionary: optimizer state (when an
    optimizer is present) plus whatever each callback wants to persist."""
    state = {}
    if self.optimizer is not None:
        state['optimizer'] = self.optimizer.state_dict()
    for cb in self.callbacks:
        cb.write_state_dict(self.training_info, state)
    return state
Calculate hidden state dictionary
75
6
231,436
def on_epoch_end(self):
    """Finish epoch processing: freeze the epoch result, fire callbacks, then
    record the result in the training history."""
    self.freeze_epoch_result()
    for cb in self.callbacks:
        cb.on_epoch_end(self)
    self.training_info.history.add(self.result)
Finish epoch processing
54
3
231,437
def aggregate_key(self, aggregate_key):
    """Aggregate values stored under ``aggregate_key`` and put them into the
    top-level data dictionary, averaging each statistic across sub-batches."""
    sub_batches = self.data_dict[aggregate_key]  # List of dictionaries of numpy arrays/scalars
    all_keys = set()
    for sub_batch in sub_batches:
        all_keys.update(sub_batch.keys())
    for key in all_keys:
        # Just average all the statistics from the loss function
        stacked = np.stack([sb[key] for sb in sub_batches], axis=0)
        self.data_dict[key] = np.mean(stacked, axis=0)
Aggregate values from key and put them into the top - level dictionary
123
14
231,438
def run ( self ) : device = self . model_config . torch_device ( ) # Reinforcer is the learner for the reinforcement learning model reinforcer = self . reinforcer . instantiate ( device ) optimizer = self . optimizer_factory . instantiate ( reinforcer . model ) # All callbacks used for learning callbacks = self . gather_callbacks ( optimizer ) # Metrics to track through this training metrics = reinforcer . metrics ( ) training_info = self . resume_training ( reinforcer , callbacks , metrics ) reinforcer . initialize_training ( training_info ) training_info . on_train_begin ( ) if training_info . optimizer_initial_state : optimizer . load_state_dict ( training_info . optimizer_initial_state ) global_epoch_idx = training_info . start_epoch_idx + 1 while training_info [ 'frames' ] < self . total_frames : epoch_info = EpochInfo ( training_info , global_epoch_idx = global_epoch_idx , batches_per_epoch = self . batches_per_epoch , optimizer = optimizer , ) reinforcer . train_epoch ( epoch_info ) if self . openai_logging : self . _openai_logging ( epoch_info . result ) self . storage . checkpoint ( epoch_info , reinforcer . model ) global_epoch_idx += 1 training_info . on_train_end ( ) return training_info
Run reinforcement learning algorithm
334
4
231,439
def resume_training(self, reinforcer, callbacks, metrics) -> TrainingInfo:
    """Possibly resume training from a saved state in the storage.

    When continuation is requested and a checkpoint exists, the model and
    hidden state are loaded from storage; otherwise the storage is reset and a
    fresh TrainingInfo is initialized.
    """
    if self.model_config.continue_training:
        start_epoch = self.storage.last_epoch_idx()
    else:
        start_epoch = 0
    training_info = TrainingInfo(
        start_epoch_idx=start_epoch,
        run_name=self.model_config.run_name,
        metrics=metrics,
        callbacks=callbacks
    )
    if start_epoch == 0:
        # Fresh run: wipe storage and start from scratch
        self.storage.reset(self.model_config.render_configuration())
        training_info.initialize()
        reinforcer.initialize_training(training_info)
    else:
        # Resume: restore the model and hidden state from the checkpoint
        model_state, hidden_state = self.storage.load(training_info)
        reinforcer.initialize_training(training_info, model_state, hidden_state)
    return training_info
Possibly resume training from a saved state from the storage
188
11
231,440
def _openai_logging(self, epoch_result):
    """Log the epoch result using OpenAI baselines' tabular logger."""
    for key in sorted(epoch_result.keys()):
        value = epoch_result[key]
        if key == 'fps':
            # Not super elegant, but I like nicer display of FPS
            value = int(value)
        openai_logger.record_tabular(key, value)
    openai_logger.dump_tabular()
Use OpenAI logging facilities for the same type of logging
105
11
231,441
def module_broadcast(m, broadcast_fn, *args, **kwargs):
    """Call the given function in all submodules of ``m`` with the given
    parameters."""
    def _broadcast(submodule):
        module_apply_broadcast(submodule, broadcast_fn, args, kwargs)
    apply_leaf(m, _broadcast)
Call given function in all submodules with given parameters
51
10
231,442
def _select_phase_left_bound ( self , epoch_number ) : idx = bisect . bisect_left ( self . ladder , epoch_number ) if idx >= len ( self . ladder ) : return len ( self . ladder ) - 1 elif self . ladder [ idx ] > epoch_number : return idx - 1 else : return idx
Return number of current phase . Return index of first phase not done after all up to epoch_number were done .
80
23
231,443
def wrapped_env_maker(environment_id, seed, serial_id, disable_reward_clipping=False, disable_episodic_life=False,
                      monitor=False, allow_early_resets=False, scale_float_frames=False, max_episode_frames=10000,
                      frame_stack=None):
    """Wrap an atari environment so that it's nicer to learn RL algorithms.

    Applies the DeepMind-style preprocessing stack in order: episode length
    clipping, monitoring, episodic life, fire-on-reset, 84x84 frame warping,
    optional float scaling, reward clipping and frame stacking.
    """
    env = env_maker(environment_id)
    # Per-instance seed so parallel environments don't correlate
    env.seed(seed + serial_id)
    if max_episode_frames is not None:
        env = ClipEpisodeLengthWrapper(env, max_episode_length=max_episode_frames)
    # Monitoring the env
    if monitor:
        logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id))
    else:
        logdir = None
    env = Monitor(env, logdir, allow_early_resets=allow_early_resets)
    if not disable_episodic_life:
        # Make end-of-life == end-of-episode, but only reset on true game over.
        # Done by DeepMind for the DQN and co. since it helps value estimation.
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        # Take action on reset for environments that are fixed until firing.
        if disable_episodic_life:
            env = FireEpisodicLifeEnv(env)
        else:
            env = FireResetEnv(env)
    # Warp frames to 84x84 as done in the Nature paper and later work.
    env = WarpFrame(env)
    if scale_float_frames:
        env = ScaledFloatFrame(env)
    if not disable_reward_clipping:
        # Bin reward to {+1, 0, -1} by its sign.
        env = ClipRewardEnv(env)
    if frame_stack is not None:
        env = FrameStack(env, frame_stack)
    return env
Wrap atari environment so that it's nicer to learn RL algorithms
419
14
231,444
def instantiate(self, seed=0, serial_id=0, preset='default', extra_args=None) -> gym.Env:
    """Make a single environment compatible with the experiments."""
    # NOTE: extra_args is currently unused, kept for interface compatibility
    preset_settings = self.get_preset(preset)
    return wrapped_env_maker(self.envname, seed, serial_id, **preset_settings)
Make a single environment compatible with the experiments
67
8
231,445
def visdom_send_metrics(vis, metrics, update='replace'):
    """Send a set of metrics (a DataFrame of metric columns) to visdom.

    Metrics are grouped by their original name into shared windows; each
    series also gets its own window when the group contains several series.
    """
    visited = {}
    sorted_metrics = sorted(metrics.columns, key=_column_original_name)
    for metric_basename, metric_list in it.groupby(sorted_metrics, key=_column_original_name):
        metric_list = list(metric_list)
        for metric in metric_list:
            # BUG FIX: the original assigned back onto the `update` parameter,
            # so the mode chosen for one metric window leaked into all later
            # metrics. Keep the caller's requested mode intact and compute a
            # per-window mode instead.
            if vis.win_exists(metric_basename) and (not visited.get(metric, False)):
                update_mode = update
            elif not vis.win_exists(metric_basename):
                # Window doesn't exist yet - it must be created by this call
                update_mode = None
            else:
                update_mode = 'append'
            # NOTE(review): `visited` is read but never written - it looks
            # like it should be set per metric after the first send; confirm.
            vis.line(
                metrics[metric].values, metrics.index.values,
                win=metric_basename, name=metric,
                opts={'title': metric_basename, 'showlegend': True},
                update=update_mode
            )
            if metric_basename != metric and len(metric_list) > 1:
                update_mode = update if vis.win_exists(metric) else None
                vis.line(
                    metrics[metric].values, metrics.index.values,
                    win=metric, name=metric,
                    opts={'title': metric, 'showlegend': True},
                    update=update_mode
                )
Send set of metrics to visdom
284
7
231,446
def restore(self, training_info: TrainingInfo, local_batch_idx: int, model: Model, hidden_state: dict):
    """Restore learning from an intermediate state.

    Intentionally a no-op in this base implementation; subclasses that hold
    restorable state should override it.
    """
    pass
Restore learning from intermediate state .
33
7
231,447
def update_priority(self, tree_idx_list, priority_list):
    """Update priorities of the sampled elements, one segment tree per env."""
    triples = zip(tree_idx_list, priority_list, self.segment_trees)
    for tree_idx, priority, segment_tree in triples:
        segment_tree.update(tree_idx, priority)
Update priorities of the elements in the tree
65
8
231,448
def _sample_batch_prioritized ( self , segment_tree , batch_size , history , forward_steps = 1 ) : p_total = segment_tree . total ( ) segment = p_total / batch_size # Get batch of valid samples batch = [ self . _get_sample_from_segment ( segment_tree , segment , i , history , forward_steps ) for i in range ( batch_size ) ] probs , idxs , tree_idxs = zip ( * batch ) return np . array ( probs ) , np . array ( idxs ) , np . array ( tree_idxs )
Return indexes of the next sample from the prioritized distribution
136
11
231,449
def take_along_axis(large_array, indexes):
    """Take along axis 0, reshaping the index array so it broadcasts against
    the trailing dimensions of ``large_array``."""
    rank_gap = len(large_array.shape) - len(indexes.shape)
    if rank_gap > 0:
        # Append singleton dims so indexes broadcast across trailing axes
        indexes = indexes.reshape(indexes.shape + (1,) * rank_gap)
    return np.take_along_axis(large_array, indexes, axis=0)
Take along axis
95
3
231,450
def get_transition(self, frame_idx, env_idx):
    """Single transition with the given (frame, env) index, including any
    extra per-transition data stored in the buffer."""
    past_frame, future_frame = self.get_frame_with_future(frame_idx, env_idx)
    transition = {
        'observations': past_frame,
        'observations_next': future_frame,
        'actions': self.action_buffer[frame_idx, env_idx],
        'rewards': self.reward_buffer[frame_idx, env_idx],
        'dones': self.dones_buffer[frame_idx, env_idx],
    }
    transition.update({name: buf[frame_idx, env_idx] for name, buf in self.extra_data.items()})
    return transition
Single transition with given index
180
5
231,451
def get_transitions_forward_steps(self, indexes, forward_steps, discount_factor):
    """Get a dictionary of transition data where the target of each transition
    is ``forward_steps`` steps forward along the trajectory.

    Rewards are aggregated according to the discount factor; aggregation stops
    when the trajectory is done.

    :param indexes: 2D array of frame indexes, one row per batch entry and one
                    column per environment
    """
    # Observation batches carry frame_history stacked frames in the last dim
    frame_batch_shape = (
        [indexes.shape[0], indexes.shape[1]]
        + list(self.state_buffer.shape[2:-1])
        + [self.state_buffer.shape[-1] * self.frame_history]
    )
    simple_batch_shape = [indexes.shape[0], indexes.shape[1]]
    past_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
    future_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
    reward_buffer = np.zeros(simple_batch_shape, dtype=np.float32)
    dones_buffer = np.zeros(simple_batch_shape, dtype=bool)
    for buffer_idx, frame_row in enumerate(indexes):
        for env_idx, frame_idx in enumerate(frame_row):
            past_frame, future_frame, reward, done = self.get_frame_with_future_forward_steps(
                frame_idx, env_idx, forward_steps=forward_steps, discount_factor=discount_factor
            )
            past_frame_buffer[buffer_idx, env_idx] = past_frame
            future_frame_buffer[buffer_idx, env_idx] = future_frame
            reward_buffer[buffer_idx, env_idx] = reward
            dones_buffer[buffer_idx, env_idx] = done
    actions = take_along_axis(self.action_buffer, indexes)
    transition_tensors = {
        'observations': past_frame_buffer,
        'actions': actions,
        'rewards': reward_buffer,
        'observations_next': future_frame_buffer,
        'dones': dones_buffer.astype(np.float32),
    }
    for name in self.extra_data:
        transition_tensors[name] = take_along_axis(self.extra_data[name], indexes)
    return transition_tensors
Get dictionary of a transition data - where the target of a transition is n steps forward along the trajectory . Rewards are properly aggregated according to the discount factor and the process stops when trajectory is done .
491
40
231,452
def sample_batch_trajectories(self, rollout_length):
    """Return indexes of the next random rollout, one column per environment."""
    per_env = [self.sample_rollout_single_env(rollout_length) for _ in range(self.num_envs)]
    return np.stack(per_env, axis=-1)
Return indexes of next random rollout
65
6
231,453
def sample_frame_single_env(self, batch_size, forward_steps=1):
    """
    Return indexes of a random batch of frames (sampled without replacement) that
    have enough future frames ahead of them and whose frame history is not in the
    region currently being overwritten.

    :param batch_size: number of frame indexes to sample
    :param forward_steps: how many future frames each sampled index must have available
    """
    # Whole idea of this function is to make sure that sample we take is far away from the point which we are
    # currently writing to the buffer, which is 'discontinuous'
    if self.current_size < self.buffer_capacity:
        # Sample from up to total size of the buffer
        # -1 because we cannot take the last one
        return np.random.choice(self.current_size - forward_steps, batch_size, replace=False)
    else:
        candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False)
        # Frames around the current write pointer may have part of their history overwritten
        forbidden_ones = (
            np.arange(self.current_idx - forward_steps + 1, self.current_idx + self.frame_history)
            % self.buffer_capacity
        )

        # Exclude these frames for learning as they may have some part of history overwritten
        # (rejection sampling: redraw the whole batch until it avoids the forbidden region)
        while any(x in candidate for x in forbidden_ones):
            candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False)

        return candidate
Return an in index of a random set of frames from a buffer that have enough history and future
240
19
231,454
def record_take(self, model, env_instance, device, take_number):
    """
    Record a single episode of `model` acting in `env_instance` and write it to
    disk as an MJPG-encoded movie under the model's 'videos' output directory.

    :param model: policy model; may be recurrent (hidden state is threaded through steps)
    :param env_instance: gym-like environment supporting 'rgb_array' rendering
    :param device: torch device to run the model on
    :param take_number: index used to format the output video file name
    """
    frames = []

    observation = env_instance.reset()

    if model.is_recurrent:
        # Recurrent models carry a hidden state between steps; batch size is 1 here
        hidden_state = model.zero_state(1).to(device)

    frames.append(env_instance.render('rgb_array'))

    print("Evaluating environment...")

    while True:
        # Add a batch dimension before feeding the observation to the model
        observation_array = np.expand_dims(np.array(observation), axis=0)
        observation_tensor = torch.from_numpy(observation_array).to(device)

        if model.is_recurrent:
            output = model.step(observation_tensor, hidden_state, **self.sample_args)
            hidden_state = output['state']
            actions = output['actions']
        else:
            actions = model.step(observation_tensor, **self.sample_args)['actions']

        actions = actions.detach().cpu().numpy()

        observation, reward, done, epinfo = env_instance.step(actions[0])
        frames.append(env_instance.render('rgb_array'))

        if 'episode' in epinfo:
            # End of an episode
            break

    takename = self.model_config.output_dir('videos', self.model_config.run_name, self.videoname.format(take_number))
    pathlib.Path(os.path.dirname(takename)).mkdir(parents=True, exist_ok=True)

    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    # Frame size is (width, height) for OpenCV, hence the swapped shape indexes
    video = cv2.VideoWriter(takename, fourcc, self.fps, (frames[0].shape[1], frames[0].shape[0]))

    for i in tqdm.trange(len(frames), file=sys.stdout):
        # OpenCV expects BGR channel order
        video.write(cv2.cvtColor(frames[i], cv2.COLOR_RGB2BGR))

    video.release()
    print("Written {}".format(takename))
Record a single movie and store it on hard drive
485
10
231,455
def reset_training_state(self, dones, batch_info):
    """ React to finished episodes: reset the noise process of each environment that is done """
    for process_idx, done_flag in enumerate(dones):
        if done_flag > 0.5:
            self.processes[process_idx].reset()
A hook for a model to react when during training episode is finished
48
13
231,456
def forward(self, actions, batch_info):
    """
    Add Ornstein-Uhlenbeck exploration noise to `actions` and clamp the result
    to the action-space bounds.

    One noise process is maintained per environment (per row of `actions`);
    processes are created lazily as the batch grows.
    """
    while len(self.processes) < actions.shape[0]:
        len_action_space = self.action_space.shape[-1]
        self.processes.append(
            OrnsteinUhlenbeckNoiseProcess(np.zeros(len_action_space), float(self.std_dev) * np.ones(len_action_space))
        )

    # Draw one noise sample per process and move it onto the actions' device
    noise = torch.from_numpy(np.stack([x() for x in self.processes])).float().to(actions.device)

    # Clamp noisy actions into [low, high]
    return torch.min(torch.max(actions + noise, self.low_tensor), self.high_tensor)
Return model step after applying noise
157
6
231,457
def interpolate_logscale(start, end, steps):
    """
    Interpolate a series between `start` and `end` in `steps` points, evenly
    spaced on a logarithmic scale.

    Both endpoints must be positive for a log scale. Previously only `start`
    was guarded; a non-positive `end` made np.log10 produce -inf/NaN. Both are
    now clamped to a small epsilon with a warning.

    :param start: first value of the series (must be positive)
    :param end: last value of the series (must be positive)
    :param steps: number of points to generate
    :return: numpy array of `steps` log-spaced values
    """
    if start <= 0.0:
        warnings.warn("Start of logscale interpolation must be positive!")
        start = 1e-5

    if end <= 0.0:
        warnings.warn("End of logscale interpolation must be positive!")
        end = 1e-5

    return np.logspace(np.log10(float(start)), np.log10(float(end)), steps)
Interpolate series between start and end in given number of steps - logscale interpolation
73
18
231,458
def interpolate_series(start, end, steps, how='linear'):
    """ Interpolate a series between start and end in the given number of steps """
    interpolator = INTERP_DICT[how]
    return interpolator(start, end, steps)
Interpolate series between start and end in given number of steps
35
13
231,459
def interpolate_single(start, end, coefficient, how='linear'):
    """ Interpolate a single value between start and end at the given progress coefficient """
    interpolator = INTERP_SINGLE_DICT[how]
    return interpolator(start, end, coefficient)
Interpolate single value between start and end in given number of steps
39
14
231,460
def run(self, *args):
    """ Print a model summary, including input shapes when a data source is available """
    if self.source is None:
        self.model.summary()
        return

    # Pull one batch from the training loader to discover the input shape
    x_data, y_data = next(iter(self.source.train_loader()))
    self.model.summary(input_size=x_data.shape[1:])
Print model summary
68
3
231,461
def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None):
    """ Prepare model and algorithm for training, optionally restoring saved model weights """
    if model_state is None:
        # Fresh run - start from re-initialized weights
        self.model.reset_weights()
    else:
        self.model.load_state_dict(model_state)

    self.algo.initialize(
        training_info=training_info,
        model=self.model,
        environment=self.env_roller.environment,
        device=self.device
    )
Prepare models for training
97
5
231,462
def convolutional_layer_series(initial_size, layer_sequence):
    """ Apply a series of convolutional layer size transformations to an initial size """
    current_size = initial_size

    for filter_size, padding, stride in layer_sequence:
        current_size = convolution_size_equation(current_size, filter_size, padding, stride)

    return current_size
Execute a series of convolutional layer transformations to the size number
58
14
231,463
def train(self, mode=True):
    """ Set the module in training (or evaluation) mode; propagate train-mode to leaf modules """
    super().train(mode)

    if not mode:
        return self

    mu.apply_leaf(self, mu.set_train_mode)
    return self
Sets the module in training mode .
39
8
231,464
def summary(self, input_size=None, hashsummary=False):
    """ Print a model summary; optionally include per-layer parameter checksums """
    if input_size is not None:
        summary(self, input_size)
    else:
        print(self)
        print("-" * 120)
        total_params = sum(p.numel() for p in self.model.parameters())
        print("Number of model parameters: {:,}".format(total_params))
        print("-" * 120)

    if not hashsummary:
        return

    for idx, hashvalue in enumerate(self.hashsummary()):
        print(f"{idx}: {hashvalue}")
Print a model summary
131
4
231,465
def hashsummary(self):
    """ Return SHA-256 checksums of the parameters of each direct child module """
    digests = []

    for child in list(self.children()):
        for param in child.parameters():
            digests.append(hashlib.sha256(param.detach().cpu().numpy().tobytes()).hexdigest())

    return digests
Print a model summary - checksums of each layer parameters
76
11
231,466
def zero_state(self, batch_size):
    """ Return an all-zeros initial hidden state of shape (batch_size, state_dim) """
    state_shape = (batch_size, self.state_dim)
    return torch.zeros(*state_shape, dtype=torch.float32)
Initial state of the network
36
5
231,467
def loss(self, x_data, y_true):
    """ Run a forward pass and return predictions together with the computed loss value """
    y_pred = self(x_data)
    loss_result = self.loss_value(x_data, y_true, y_pred)
    return y_pred, loss_result
Forward propagate network and return a value of loss function
47
10
231,468
def metrics(self):
    """ Default set of metrics tracked for this model: loss and accuracy """
    from vel.metrics.loss_metric import Loss
    from vel.metrics.accuracy import Accuracy

    tracked = [Loss(), Accuracy()]
    return tracked
Set of metrics for this model
35
6
231,469
def one_hot_encoding(input_tensor, num_labels):
    """ One-hot encode integer labels, appending a new trailing dimension of size num_labels """
    flat_labels = input_tensor.view(-1, 1).to(torch.long)
    encoded = torch.zeros(flat_labels.size(0), num_labels, device=input_tensor.device, dtype=torch.float)
    encoded.scatter_(1, flat_labels, 1)
    # Restore the original leading shape, with the one-hot axis appended
    return encoded.view(list(input_tensor.shape) + [-1])
One-hot encode labels from input
112
7
231,470
def merge_first_two_dims(tensor):
    """
    Flatten the first two dimensions of a tensor into one: (a, b, *rest) -> (a*b, *rest).

    Uses `reshape` instead of `view` so that non-contiguous tensors (e.g. results of
    permute/transpose, common for (time, batch) rollout tensors) are handled too;
    `view` raises a RuntimeError on non-contiguous input.
    """
    shape = tensor.shape
    merged_size = shape[0] * shape[1]
    return tensor.reshape(merged_size, *shape[2:])
Reshape tensor to merge first two dimensions
64
10
231,471
def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv:
    """ Create a vectorized set of environments, optionally stacking frame history """
    env_fns = [self._creation_function(i, seed, preset) for i in range(parallel_envs)]
    envs = DummyVecEnv(env_fns)

    if self.frame_history is None:
        return envs

    return VecFrameStack(envs, self.frame_history)
Create vectorized environments
91
4
231,472
def instantiate_single(self, seed=0, preset='default'):
    """ Create a single (non-vectorized) environment instance """
    env = self.env.instantiate(seed=seed, serial_id=0, preset=preset)

    if self.frame_history is None:
        return env

    return FrameStack(env, self.frame_history)
Create a new Env instance - single
67
8
231,473
def _creation_function ( self , idx , seed , preset ) : return lambda : self . env . instantiate ( seed = seed , serial_id = idx , preset = preset )
Helper function to create a proper closure around supplied values
41
10
231,474
def policy(self, observations):
    """ Evaluate only the policy head (action distribution parameters) for given observations """
    hidden = self.input_block(observations)
    hidden = self.policy_backbone(hidden)
    return self.action_head(hidden)
Calculate only action head for given state
57
9
231,475
def _init_cycle_dict ( self ) : dict_arr = np . zeros ( self . epochs , dtype = int ) length_arr = np . zeros ( self . epochs , dtype = int ) start_arr = np . zeros ( self . epochs , dtype = int ) c_len = self . cycle_len idx = 0 for i in range ( self . cycles ) : current_start = idx for j in range ( c_len ) : dict_arr [ idx ] = i length_arr [ idx ] = c_len start_arr [ idx ] = current_start idx += 1 c_len *= self . cycle_mult return dict_arr , length_arr , start_arr
Populate a cycle dict
164
5
231,476
def on_batch_begin(self, batch_info: BatchInfo):
    """ Set the learning rate for the current batch according to the cyclic schedule """
    # Cycle geometry for the current epoch (epoch numbers are 1-based)
    cycle_length = self.cycle_lengths[batch_info.local_epoch_number - 1]
    cycle_start = self.cycle_starts[batch_info.local_epoch_number - 1]

    # Progress within the current cycle, measured in batches
    numerator = (batch_info.local_epoch_number - cycle_start - 1) * batch_info.batches_per_epoch + batch_info.batch_number
    denominator = cycle_length * batch_info.batches_per_epoch

    interpolation_number = numerator / denominator

    if cycle_start == 0 and numerator < self.init_iter:
        # Warmup period at the very beginning of training uses a fixed initial LR
        lr = self.init_lr
    else:
        if isinstance(self.max_lr, list):
            # Per-parameter-group learning rates
            lr = [
                interp.interpolate_single(max_lr, min_lr, interpolation_number, how=self.interpolate)
                for max_lr, min_lr in zip(self.max_lr, self.min_lr)
            ]
        else:
            lr = interp.interpolate_single(self.max_lr, self.min_lr, interpolation_number, how=self.interpolate)

    self.set_lr(lr)
Set proper learning rate
274
4
231,477
def set_lr(self, lr):
    """ Set the learning rate(s) on all optimizer parameter groups """
    groups = self.optimizer.param_groups

    if isinstance(lr, list):
        # One learning rate per parameter group
        for group_lr, group in zip(lr, groups):
            group['lr'] = group_lr
    else:
        for group in groups:
            group['lr'] = lr
Set a learning rate for the optimizer
84
8
231,478
def parameter_constructor(cls, loader, node):
    """ Construct a parameter instance from a YAML scalar node """
    value = loader.construct_scalar(node)

    # String values of the form 'name=value' carry an inline default
    if isinstance(value, str) and '=' in value:
        varname, varvalue = Parser.parse_equality(value)
        return cls(varname, varvalue)

    return cls(value)
Construct variable instance from yaml node
84
7
231,479
def register(cls):
    """ Register YAML constructors for the !param and !env tags (idempotent) """
    if cls.IS_LOADED:
        return

    cls.IS_LOADED = True
    yaml.add_constructor('!param', Parameter.parameter_constructor, Loader=yaml.SafeLoader)
    yaml.add_constructor('!env', EnvironmentVariable.parameter_constructor, Loader=yaml.SafeLoader)
Register variable handling in YAML
86
7
231,480
def parse_equality(cls, equality_string):
    """ Parse a simple 'name=value' statement into a (name, parsed_value) pair """
    cls.register()

    assert '=' in equality_string, "There must be an '=' sign in the equality"

    # Split only on the first '=' so values may themselves contain '='
    left_side, right_side = equality_string.split('=', 1)

    left_side_value = yaml.safe_load(left_side.strip())
    right_side_value = yaml.safe_load(right_side.strip())

    assert isinstance(left_side_value, str), "Left side of equality must be a string"

    return left_side_value, right_side_value
Parse some simple equality statements
137
6
231,481
def clean(self, initial_epoch):
    """ Remove metric entries after `initial_epoch` that would otherwise be overwritten """
    query = {'run_name': self.model_config.run_name, 'epoch_idx': {'$gt': initial_epoch}}
    self.db.metrics.delete_many(query)
Remove entries from database that would get overwritten
59
9
231,482
def store_config(self, configuration):
    """ Store model configuration in the database, replacing any previous one for this run """
    run_name = self.model_config.run_name

    # Drop any previously stored configuration for this run
    self.db.configs.delete_many({'run_name': run_name})

    # Copy so the caller's dictionary is not mutated
    document = configuration.copy()
    document['run_name'] = run_name

    self.db.configs.insert_one(document)
Store model parameters in the database
84
6
231,483
def get_frame(self):
    """ Fetch all metrics stored for this run as a pandas DataFrame indexed by epoch """
    cursor = self.db.metrics.find({'run_name': self.model_config.run_name}).sort('epoch_idx')
    metric_items = list(cursor)

    if not metric_items:
        # No data yet - return an empty frame with the expected column
        return pd.DataFrame(columns=['run_name'])

    frame = pd.DataFrame(metric_items)
    return frame.drop(['_id', 'model_name'], axis=1).set_index('epoch_idx')
Get a dataframe of metrics from this storage
125
9
231,484
def _get_transitions(self, probs, indexes, tree_idxs, batch_info, forward_steps=1, discount_factor=1.0):
    """
    Fetch a batch of transitions for the given buffer indexes and attach
    prioritized-replay importance-sampling weights.

    :param probs: unnormalized priorities of the sampled indexes (steps x envs)
    :param indexes: sampled frame indexes (steps x envs)
    :param tree_idxs: segment-tree indexes, passed through for later priority updates
    :param batch_info: used to read training 'progress' for priority-weight annealing
    :param forward_steps: if > 1, use n-step targets aggregated with `discount_factor`
    :return: batch of transitions with a 'weights' tensor added
    """
    if forward_steps > 1:
        transition_arrays = self.backend.get_transitions_forward_steps(indexes, forward_steps, discount_factor)
    else:
        transition_arrays = self.backend.get_transitions(indexes)

    # Importance-sampling exponent, annealed over training progress
    priority_weight = self.priority_weight.value(batch_info['progress'])

    # Normalize by sum of all probs
    probs = probs / np.array([s.total() for s in self.backend.segment_trees], dtype=float).reshape(1, -1)

    capacity = self.backend.current_size
    weights = (capacity * probs) ** (-priority_weight)
    # Normalize so the largest weight per environment column is exactly 1
    weights = weights / weights.max(axis=0, keepdims=True)

    transition_arrays['weights'] = weights

    transition_tensors = {k: torch.from_numpy(v) for k, v in transition_arrays.items()}

    transitions = Trajectories(
        num_steps=indexes.shape[0],
        num_envs=indexes.shape[1],
        environment_information=None,
        transition_tensors=transition_tensors,
        rollout_tensors={},
        # tree_idxs are carried along so priorities can be updated after learning
        extra_data={
            'tree_idxs': tree_idxs
        }
    )

    return transitions.to_transitions()
Return batch of frames for given indexes
332
7
231,485
def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> Rollout:
    """
    Roll out the environment under the given model for `number_of_steps` steps
    and return the collected rollout. Must be implemented by subclasses.
    """
    raise NotImplementedError
Roll out the environment and return it
35
8
231,486
def train_epoch(self, epoch_info: EpochInfo, interactive=True):
    """ Train the model for one epoch consisting of a fixed number of batch updates """
    epoch_info.on_epoch_begin()

    if interactive:
        # Progress bar for interactive sessions
        batch_range = tqdm.trange(epoch_info.batches_per_epoch, file=sys.stdout, desc="Training", unit="batch")
    else:
        batch_range = range(epoch_info.batches_per_epoch)

    for batch_idx in batch_range:
        batch_info = BatchInfo(epoch_info, batch_idx)

        batch_info.on_batch_begin()
        self.train_batch(batch_info)
        batch_info.on_batch_end()

    epoch_info.result_accumulator.freeze_results()
    epoch_info.on_epoch_end()
Train model on an epoch of a fixed number of batch updates
176
12
231,487
def train_batch(self, batch_info: BatchInfo):
    """ Single most atomic learning step: one on-policy rollout plus optional replay updates """
    batch_info['sub_batch_data'] = []

    self.on_policy_train_batch(batch_info)

    if self.settings.experience_replay > 0 and self.env_roller.is_ready_for_sampling():
        if self.settings.stochastic_experience_replay:
            # Randomize the number of replay updates around the configured mean
            replay_count = np.random.poisson(self.settings.experience_replay)
        else:
            replay_count = self.settings.experience_replay

        for _ in range(replay_count):
            self.off_policy_train_batch(batch_info)

    # Even with all the experience replay, we count the single rollout as a single batch
    batch_info.aggregate_key('sub_batch_data')
Single most atomic step of learning this reinforcer can perform
189
11
231,488
def on_policy_train_batch(self, batch_info: BatchInfo):
    """ Evaluate the environment on-policy and perform a single optimization step """
    self.model.train()

    rollout = self.env_roller.rollout(batch_info, self.model, self.settings.number_of_steps).to_device(self.device)

    batch_result = self.algo.optimizer_step(
        batch_info=batch_info,
        device=self.device,
        model=self.model,
        rollout=rollout
    )

    batch_info['sub_batch_data'].append(batch_result)
    batch_info['frames'] = rollout.frames()
    batch_info['episode_infos'] = rollout.episode_information()
Perform an on-policy training step of evaluating an env and a single backpropagation step
154
20
231,489
def off_policy_train_batch(self, batch_info: BatchInfo):
    """ Sample the replay buffer and perform a single gradient-descent step on it """
    self.model.train()

    rollout = self.env_roller.sample(batch_info, self.model, self.settings.number_of_steps).to_device(self.device)

    batch_result = self.algo.optimizer_step(
        batch_info=batch_info,
        device=self.device,
        model=self.model,
        rollout=rollout
    )

    batch_info['sub_batch_data'].append(batch_result)
Perform an off-policy training step of sampling the replay buffer and gradient descent
121
16
231,490
def should_store_best_checkpoint(self, epoch_idx, metrics) -> bool:
    """ Decide whether the current epoch's checkpoint is the best seen so far """
    if not self.store_best:
        return False

    candidate = metrics[self.metric]

    if not better(self._current_best_metric_value, candidate, self.metric_mode):
        return False

    # New best value - remember it for future comparisons
    self._current_best_metric_value = candidate
    return True
Should we store current checkpoint as the best
81
8
231,491
def create(model_config, batch_size, vectors=None):
    """
    Create an IMDB sentiment-classification dataset wrapped in a TextData object.

    :param model_config: model configuration supplying the data directory and torch device
    :param batch_size: batch size for the bucketed iterators
    :param vectors: optional pre-trained word vectors to load into the vocabulary
    """
    path = model_config.data_dir('imdb')

    # spaCy tokenization, lowercased text, batch-first tensors
    text_field = data.Field(lower=True, tokenize='spacy', batch_first=True)
    label_field = data.LabelField(is_target=True)

    train_source, test_source = IMDBCached.splits(
        root=path, text_field=text_field, label_field=label_field
    )

    # Vocabulary is built from the training split only
    text_field.build_vocab(train_source, max_size=25_000, vectors=vectors)
    label_field.build_vocab(train_source)

    train_iterator, test_iterator = data.BucketIterator.splits(
        (train_source, test_source),
        batch_size=batch_size,
        device=model_config.torch_device(),
        shuffle=True
    )

    return TextData(
        train_source, test_source, train_iterator, test_iterator, text_field, label_field
    )
Create an IMDB dataset
222
5
231,492
def run(self):
    """
    Visualize data augmentations: for a few randomly chosen training samples,
    plot the original image next to several independently augmented versions.
    """
    dataset = self.source.train_dataset()
    num_samples = len(dataset)

    fig, ax = plt.subplots(self.cases, self.samples + 1)

    # Pick distinct sample indexes, sorted for stable row ordering
    selected_sample = np.sort(np.random.choice(num_samples, self.cases, replace=False))

    for i in range(self.cases):
        raw_image, _ = dataset.get_raw(selected_sample[i])
        ax[i, 0].imshow(raw_image)
        ax[i, 0].set_title("Original image")

        for j in range(self.samples):
            # Each access re-applies the (random) augmentation pipeline
            augmented_image, _ = dataset[selected_sample[i]]
            augmented_image = dataset.denormalize(augmented_image)
            ax[i, j + 1].imshow(augmented_image)

    plt.show()
Run the visualization
197
3
231,493
def env_maker(environment_id, seed, serial_id, monitor=False, allow_early_resets=False):
    """ Create a classic-control gym environment wrapped in a Monitor """
    env = gym.make(environment_id)
    env.seed(seed + serial_id)

    # Monitoring the env
    if monitor:
        logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id))
    else:
        logdir = None

    return Monitor(env, logdir, allow_early_resets=allow_early_resets)
Create a classic control environment with basic set of wrappers
119
11
231,494
def freeze(self, number=None):
    """ Freeze the first `number` child layers (defaults to the model's head layer count) """
    limit = self.head_layers if number is None else number

    for idx, child in enumerate(self.model.children()):
        if idx < limit:
            mu.freeze_layer(child)
Freeze given number of layers in the model
55
9
231,495
def unfreeze(self):
    """ Unfreeze all child layers of the model so their parameters are trainable again """
    # The original enumerate() produced an index that was never used
    for child in self.model.children():
        mu.unfreeze_layer(child)
Unfreeze model layers
36
5
231,496
def update_average_model(self, model):
    """ Update the average (target) model as an exponential moving average of `model` """
    alpha = self.average_model_alpha

    for model_param, average_param in zip(model.parameters(), self.average_model.parameters()):
        # new_avg = alpha * old_avg + (1 - alpha) * current
        average_param.data.mul_(alpha).add_(model_param.data * (1 - alpha))
Update weights of the average model with new model observation
85
10
231,497
def retrace(self, rewards, dones, q_values, state_values, rho, final_values):
    """ Compute Retrace Q targets by iterating the trajectory backwards """
    # Truncated importance weights: min(rho, rho_cap)
    rho_bar = torch.min(torch.ones_like(rho) * self.retrace_rho_cap, rho)

    q_retraced_buffer = torch.zeros_like(rewards)
    next_value = final_values

    for step in reversed(range(rewards.size(0))):
        q_retraced = rewards[step] + self.discount_factor * next_value * (1.0 - dones[step])

        # Next iteration
        next_value = rho_bar[step] * (q_retraced - q_values[step]) + state_values[step]

        q_retraced_buffer[step] = q_retraced

    return q_retraced_buffer
Calculate Q retraced targets
191
8
231,498
def logprob(self, action_sample, pd_params):
    """ Log-likelihood of `action_sample` under a diagonal gaussian parameterized by pd_params """
    means = pd_params[:, :, 0]
    log_std = pd_params[:, :, 1]

    std = torch.exp(log_std)
    z_score = (action_sample - means) / std

    # Gaussian log density, summed over the action dimensions
    quadratic_term = 0.5 * ((z_score ** 2 + self.LOG2PI).sum(dim=-1))
    return -(quadratic_term + log_std.sum(dim=-1))
Log-likelihood
111
3
231,499
def logprob(self, actions, action_logits):
    """ Log-probability of the chosen actions under the given per-action log-probabilities """
    negative_log_prob = F.nll_loss(action_logits, actions, reduction='none')
    return -negative_log_prob
Logarithm of probability of given sample
50
9