idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
43,500
def train_batch(self, batch_info, data, target):
    """Train on a single batch of data: forward, backward, optional clipping, optimizer step."""
    batch_info.optimizer.zero_grad()
    loss = self.feed_batch(batch_info, data, target)
    loss.backward()
    if self.max_grad_norm is not None:
        # Clip only the trainable parameters; the returned (pre-clip) norm is stored for logging
        batch_info['grad_norm'] = torch.nn.utils.clip_grad_norm_(
            filter(lambda p: p.requires_grad, self.model.parameters()),
            max_norm=self.max_grad_norm
        )
    batch_info.optimizer.step()
Train single batch of data
43,501
def process_environment_settings(default_dictionary: dict, settings: typing.Optional[dict] = None,
                                 presets: typing.Optional[dict] = None):
    """Merge per-environment defaults with global settings and per-environment presets.

    For every environment key present in the defaults or the presets, start from a
    copy of its defaults, overlay the shared settings, then overlay the preset.
    Returns a new dict; inputs are not mutated.
    """
    settings = {} if settings is None else settings
    presets = {} if presets is None else presets

    result_dict = {}
    for key in sorted(set(default_dictionary) | set(presets)):
        merged = dict(default_dictionary.get(key, {}))
        merged.update(settings)
        merged.update(presets.get(key, {}))
        result_dict[key] = merged

    return result_dict
Process a dictionary of env settings
43,502
def roll_out_and_store(self, batch_info):
    """Roll out the environment and store the result in the replay buffer.

    If the buffer is not yet ready for sampling, keep rolling out (showing a
    progress bar) until the initial memory has been populated.
    """
    self.model.train()

    if self.env_roller.is_ready_for_sampling():
        rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device)
        batch_info['frames'] = rollout.frames()
        batch_info['episode_infos'] = rollout.episode_information()
    else:
        frames = 0
        episode_infos = []
        with tqdm.tqdm(desc="Populating memory", total=self.env_roller.initial_memory_size_hint()) as pbar:
            while not self.env_roller.is_ready_for_sampling():
                rollout = self.env_roller.rollout(batch_info, self.model, self.settings.rollout_steps).to_device(self.device)
                new_frames = rollout.frames()
                frames += new_frames
                episode_infos.extend(rollout.episode_information())
                pbar.update(new_frames)
        batch_info['frames'] = frames
        batch_info['episode_infos'] = episode_infos
Roll out environment and store result in the replay buffer
43,503
def train_on_replay_memory(self, batch_info):
    """Train the agent on batches sampled from the replay buffer.

    Runs `training_rounds` optimizer steps, feeding each sub-batch result back
    to the env roller (e.g. for priority updates) and aggregating the metrics.
    """
    self.model.train()
    batch_info['sub_batch_data'] = []
    for i in range(self.settings.training_rounds):
        sampled_rollout = self.env_roller.sample(batch_info, self.model, self.settings.training_steps)
        batch_result = self.algo.optimizer_step(
            batch_info=batch_info, device=self.device, model=self.model,
            rollout=sampled_rollout.to_device(self.device)
        )
        self.env_roller.update(rollout=sampled_rollout, batch_info=batch_result)
        batch_info['sub_batch_data'].append(batch_result)
    batch_info.aggregate_key('sub_batch_data')
Train agent on a batch sampled from the replay buffer
43,504
def conv3x3(in_channels, out_channels, stride=1):
    """Build a padded 3x3 convolution layer.

    Bias is disabled because a following BatchNorm layer would cancel it anyway.
    """
    layer = nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer
3x3 convolution with padding . Original code has had bias turned off because Batch Norm would remove the bias either way
43,505
def load(config_path, run_number=0, device='cuda:0'):
    """Load a ModelConfig from a configuration file path."""
    model_config = ModelConfig.from_file(config_path, run_number, device=device)
    return model_config
Load a ModelConfig from filename
43,506
def restore(self, hidden_state):
    """Restore training state from a checkpoint's hidden-state dict.

    Lets every callback load its own state, then stashes the optimizer state
    (if present) for later application to the optimizer.
    """
    for callback in self.callbacks:
        callback.load_state_dict(self, hidden_state)
    if 'optimizer' in hidden_state:
        self.optimizer_initial_state = hidden_state['optimizer']
Restore any state from checkpoint - currently not implemented but possible to do so in the future
43,507
def result(self):
    """Return the epoch result: the epoch index plus all frozen metric values."""
    summary = {'epoch_idx': self.global_epoch_idx}
    summary.update(self.frozen_results)
    return summary
Return the epoch result
43,508
def state_dict(self) -> dict:
    """Build the hidden-state dictionary for checkpointing.

    Includes the optimizer state (if an optimizer is set) and lets every
    callback write its own entries.
    """
    hidden_state = {}
    if self.optimizer is not None:
        hidden_state['optimizer'] = self.optimizer.state_dict()
    for callback in self.callbacks:
        callback.write_state_dict(self.training_info, hidden_state)
    return hidden_state
Calculate hidden state dictionary
43,509
def on_epoch_end(self):
    """Finish epoch processing: freeze results, notify callbacks, record history."""
    self.freeze_epoch_result()
    for callback in self.callbacks:
        callback.on_epoch_end(self)
    # NOTE(review): `self.result` is passed without calling it — presumably it is
    # declared as a @property elsewhere; confirm, otherwise the bound method object
    # would be stored in the history instead of the result dict.
    self.training_info.history.add(self.result)
Finish epoch processing
43,510
def aggregate_key(self, aggregate_key):
    """Aggregate sub-batch values stored under `aggregate_key` into top-level keys.

    Each key appearing in any sub-batch dict is averaged (elementwise mean over
    sub-batches) and written into the top-level data dict.
    """
    # NOTE(review): the parameter shadows the method name; keyword callers may rely
    # on it, so it is kept unchanged.
    aggregation = self.data_dict[aggregate_key]
    data_dict_keys = {y for x in aggregation for y in x.keys()}
    for key in data_dict_keys:
        stacked = np.stack([d[key] for d in aggregation], axis=0)
        self.data_dict[key] = np.mean(stacked, axis=0)
Aggregate values from key and put them into the top - level dictionary
43,511
def run(self):
    """Run the reinforcement learning training loop until `total_frames` is reached.

    Instantiates the reinforcer/optimizer/callbacks, possibly resumes from a
    checkpoint, trains epoch by epoch, checkpoints after each epoch, and returns
    the final TrainingInfo.
    """
    device = self.model_config.torch_device()
    reinforcer = self.reinforcer.instantiate(device)
    optimizer = self.optimizer_factory.instantiate(reinforcer.model)
    callbacks = self.gather_callbacks(optimizer)
    metrics = reinforcer.metrics()
    training_info = self.resume_training(reinforcer, callbacks, metrics)
    reinforcer.initialize_training(training_info)
    training_info.on_train_begin()
    # Apply optimizer state saved in a checkpoint, if any
    if training_info.optimizer_initial_state:
        optimizer.load_state_dict(training_info.optimizer_initial_state)
    global_epoch_idx = training_info.start_epoch_idx + 1
    while training_info['frames'] < self.total_frames:
        epoch_info = EpochInfo(
            training_info,
            global_epoch_idx=global_epoch_idx,
            batches_per_epoch=self.batches_per_epoch,
            optimizer=optimizer,
        )
        reinforcer.train_epoch(epoch_info)
        if self.openai_logging:
            self._openai_logging(epoch_info.result)
        self.storage.checkpoint(epoch_info, reinforcer.model)
        global_epoch_idx += 1
    training_info.on_train_end()
    return training_info
Run reinforcement learning algorithm
43,512
def resume_training(self, reinforcer, callbacks, metrics) -> TrainingInfo:
    """Possibly resume training from a state saved in the storage.

    Starts fresh (resetting storage) when no continuation is requested or no
    previous epoch exists; otherwise loads model/hidden state from storage.
    """
    if self.model_config.continue_training:
        start_epoch = self.storage.last_epoch_idx()
    else:
        start_epoch = 0
    training_info = TrainingInfo(
        start_epoch_idx=start_epoch,
        run_name=self.model_config.run_name,
        metrics=metrics,
        callbacks=callbacks
    )
    if start_epoch == 0:
        self.storage.reset(self.model_config.render_configuration())
        training_info.initialize()
        reinforcer.initialize_training(training_info)
    else:
        model_state, hidden_state = self.storage.load(training_info)
        reinforcer.initialize_training(training_info, model_state, hidden_state)
    return training_info
Possibly resume training from a saved state from the storage
43,513
def _openai_logging(self, epoch_result):
    """Log the epoch result through the OpenAI baselines logger.

    Keys are emitted in sorted order; 'fps' is coerced to int for readability.
    """
    for key in sorted(epoch_result.keys()):
        if key == 'fps':
            openai_logger.record_tabular(key, int(epoch_result[key]))
        else:
            openai_logger.record_tabular(key, epoch_result[key])
    openai_logger.dump_tabular()
Use OpenAI logging facilities for the same type of logging
43,514
def module_broadcast(m, broadcast_fn, *args, **kwargs):
    """Call `broadcast_fn` with the given parameters in all leaf submodules of `m`."""
    apply_leaf(m, lambda x: module_apply_broadcast(x, broadcast_fn, args, kwargs))
Call given function in all submodules with given parameters
43,515
def _select_phase_left_bound ( self , epoch_number ) : idx = bisect . bisect_left ( self . ladder , epoch_number ) if idx >= len ( self . ladder ) : return len ( self . ladder ) - 1 elif self . ladder [ idx ] > epoch_number : return idx - 1 else : return idx
Return number of current phase . Return index of first phase not done after all up to epoch_number were done .
43,516
def wrapped_env_maker(environment_id, seed, serial_id, disable_reward_clipping=False,
                      disable_episodic_life=False, monitor=False, allow_early_resets=False,
                      scale_float_frames=False, max_episode_frames=10000, frame_stack=None):
    """Wrap an Atari environment with the standard preprocessing stack for RL training.

    Applies (in order): episode-length clipping, monitoring, episodic-life handling,
    FIRE-on-reset for games that need it, frame warping, optional float scaling,
    reward clipping and frame stacking.
    """
    env = env_maker(environment_id)
    # Offset the seed by serial id so parallel env copies differ
    env.seed(seed + serial_id)
    if max_episode_frames is not None:
        env = ClipEpisodeLengthWrapper(env, max_episode_length=max_episode_frames)
    if monitor:
        logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id))
    else:
        logdir = None
    env = Monitor(env, logdir, allow_early_resets=allow_early_resets)
    if not disable_episodic_life:
        env = EpisodicLifeEnv(env)
    if 'FIRE' in env.unwrapped.get_action_meanings():
        # Some games require pressing FIRE to start an episode
        if disable_episodic_life:
            env = FireEpisodicLifeEnv(env)
        else:
            env = FireResetEnv(env)
    env = WarpFrame(env)
    if scale_float_frames:
        env = ScaledFloatFrame(env)
    if not disable_reward_clipping:
        env = ClipRewardEnv(env)
    if frame_stack is not None:
        env = FrameStack(env, frame_stack)
    return env
Wrap an Atari environment so that it's nicer for learning RL algorithms
43,517
def instantiate(self, seed=0, serial_id=0, preset='default', extra_args=None) -> gym.Env:
    """Make a single wrapped environment using settings from the named preset."""
    settings = self.get_preset(preset)
    return wrapped_env_maker(self.envname, seed, serial_id, **settings)
Make a single environment compatible with the experiments
43,518
def visdom_send_metrics(vis, metrics, update='replace'):
    """Send a DataFrame of metric series to visdom.

    Metrics sharing a base name are plotted into one window (plus, for grouped
    metrics, a per-metric window). The `update` argument is the visdom update
    mode used for windows that already exist.

    BUG FIX: the original rebound the `update` parameter inside the loop, so
    every metric after the first used the previous metric's computed mode
    instead of the caller-supplied one. The mode is now computed into a local
    variable per metric.
    """
    visited = {}
    sorted_metrics = sorted(metrics.columns, key=_column_original_name)
    for metric_basename, metric_list in it.groupby(sorted_metrics, key=_column_original_name):
        metric_list = list(metric_list)
        for metric in metric_list:
            # Choose the update mode for the shared (base-name) window
            if vis.win_exists(metric_basename) and not visited.get(metric, False):
                update_mode = update
            elif not vis.win_exists(metric_basename):
                update_mode = None  # window must be created first
            else:
                update_mode = 'append'
            vis.line(
                metrics[metric].values, metrics.index.values,
                win=metric_basename, name=metric,
                opts={'title': metric_basename, 'showlegend': True},
                update=update_mode
            )
            # Grouped metrics additionally get their own per-metric window
            if metric_basename != metric and len(metric_list) > 1:
                single_mode = update_mode if vis.win_exists(metric) else None
                vis.line(
                    metrics[metric].values, metrics.index.values,
                    win=metric, name=metric,
                    opts={'title': metric, 'showlegend': True},
                    update=single_mode
                )
Send set of metrics to visdom
43,519
def restore(self, training_info: TrainingInfo, local_batch_idx: int, model: Model, hidden_state: dict):
    """Restore learning from an intermediate state. No-op in this implementation."""
    pass
Restore learning from intermediate state .
43,520
def update_priority(self, tree_idx_list, priority_list):
    """Update priorities of the given elements, one (idx, priority) pair per segment tree."""
    for tree_idx, priority, segment_tree in zip(tree_idx_list, priority_list, self.segment_trees):
        segment_tree.update(tree_idx, priority)
Update priorities of the elements in the tree
43,521
def _sample_batch_prioritized(self, segment_tree, batch_size, history, forward_steps=1):
    """Sample a batch of indexes from the prioritized distribution.

    The total priority mass is split into `batch_size` equal segments and one
    sample is drawn from each. Returns (probs, idxs, tree_idxs) arrays.
    """
    p_total = segment_tree.total()
    segment = p_total / batch_size
    batch = [
        self._get_sample_from_segment(segment_tree, segment, i, history, forward_steps)
        for i in range(batch_size)
    ]
    probs, idxs, tree_idxs = zip(*batch)
    return np.array(probs), np.array(idxs), np.array(tree_idxs)
Return indexes of the next sample drawn from the prioritized distribution
43,522
def take_along_axis(large_array, indexes):
    """Take elements of `large_array` along axis 0 using `indexes`.

    If `indexes` has lower rank than the array, trailing singleton dimensions
    are appended so numpy can broadcast the gather over the remaining axes.
    """
    rank_gap = len(large_array.shape) - len(indexes.shape)
    if rank_gap > 0:
        indexes = indexes.reshape(indexes.shape + (1,) * rank_gap)
    return np.take_along_axis(large_array, indexes, axis=0)
Take along axis
43,523
def get_transition(self, frame_idx, env_idx):
    """Return a single transition dict (obs, next obs, action, reward, done, extras) for the given index."""
    past_frame, future_frame = self.get_frame_with_future(frame_idx, env_idx)
    data_dict = {
        'observations': past_frame,
        'observations_next': future_frame,
        'actions': self.action_buffer[frame_idx, env_idx],
        'rewards': self.reward_buffer[frame_idx, env_idx],
        'dones': self.dones_buffer[frame_idx, env_idx],
    }
    for name in self.extra_data:
        data_dict[name] = self.extra_data[name][frame_idx, env_idx]
    return data_dict
Single transition with given index
43,524
def get_transitions_forward_steps(self, indexes, forward_steps, discount_factor):
    """Build n-step transitions for a 2-D array of (frame, env) indexes.

    The target of each transition is `forward_steps` ahead along the trajectory;
    rewards are discount-aggregated and accumulation stops at episode end.
    """
    # Frame batch shape: index grid + spatial dims + (channels * frame_history)
    frame_batch_shape = (
        [indexes.shape[0], indexes.shape[1]]
        + list(self.state_buffer.shape[2:-1])
        + [self.state_buffer.shape[-1] * self.frame_history]
    )
    simple_batch_shape = [indexes.shape[0], indexes.shape[1]]
    past_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
    future_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype)
    reward_buffer = np.zeros(simple_batch_shape, dtype=np.float32)
    dones_buffer = np.zeros(simple_batch_shape, dtype=bool)
    for buffer_idx, frame_row in enumerate(indexes):
        for env_idx, frame_idx in enumerate(frame_row):
            past_frame, future_frame, reward, done = self.get_frame_with_future_forward_steps(
                frame_idx, env_idx, forward_steps=forward_steps, discount_factor=discount_factor
            )
            past_frame_buffer[buffer_idx, env_idx] = past_frame
            future_frame_buffer[buffer_idx, env_idx] = future_frame
            reward_buffer[buffer_idx, env_idx] = reward
            dones_buffer[buffer_idx, env_idx] = done
    actions = take_along_axis(self.action_buffer, indexes)
    transition_tensors = {
        'observations': past_frame_buffer,
        'actions': actions,
        'rewards': reward_buffer,
        'observations_next': future_frame_buffer,
        'dones': dones_buffer.astype(np.float32),
    }
    for name in self.extra_data:
        transition_tensors[name] = take_along_axis(self.extra_data[name], indexes)
    return transition_tensors
Get dictionary of a transition data - where the target of a transition is n steps forward along the trajectory . Rewards are properly aggregated according to the discount factor and the process stops when trajectory is done .
43,525
def sample_batch_trajectories(self, rollout_length):
    """Sample one random rollout per environment and stack the indexes along the last axis."""
    results = []
    for i in range(self.num_envs):
        results.append(self.sample_rollout_single_env(rollout_length))
    return np.stack(results, axis=-1)
Return indexes of next random rollout
43,526
def sample_frame_single_env(self, batch_size, forward_steps=1):
    """Sample `batch_size` distinct frame indexes that have enough history and future.

    When the circular buffer is full, indexes too close to the write head
    (which lack valid future/history frames) are rejected and resampled.
    """
    if self.current_size < self.buffer_capacity:
        # Buffer not full yet - just avoid the last `forward_steps` frames
        return np.random.choice(self.current_size - forward_steps, batch_size, replace=False)
    else:
        candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False)
        # Indexes around the write pointer straddle the oldest/newest boundary
        forbidden_ones = (
            np.arange(self.current_idx - forward_steps + 1, self.current_idx + self.frame_history)
            % self.buffer_capacity
        )
        # Rejection-sample whole batches until none of the candidates is forbidden
        while any(x in candidate for x in forbidden_ones):
            candidate = np.random.choice(self.buffer_capacity, batch_size, replace=False)
        return candidate
Return indexes of a random set of frames from a buffer that have enough history and future
43,527
def record_take(self, model, env_instance, device, take_number):
    """Record a single episode as a movie and store it on disk.

    Steps the model (handling recurrent hidden state if needed) until the
    episode ends, collecting rendered RGB frames, then writes them out as an
    MJPG-encoded video file.
    """
    frames = []
    observation = env_instance.reset()
    if model.is_recurrent:
        hidden_state = model.zero_state(1).to(device)
    frames.append(env_instance.render('rgb_array'))
    print("Evaluating environment...")
    while True:
        observation_array = np.expand_dims(np.array(observation), axis=0)
        observation_tensor = torch.from_numpy(observation_array).to(device)
        if model.is_recurrent:
            output = model.step(observation_tensor, hidden_state, **self.sample_args)
            hidden_state = output['state']
            actions = output['actions']
        else:
            actions = model.step(observation_tensor, **self.sample_args)['actions']
        actions = actions.detach().cpu().numpy()
        observation, reward, done, epinfo = env_instance.step(actions[0])
        frames.append(env_instance.render('rgb_array'))
        # Monitor wrapper attaches 'episode' info once the episode finishes
        if 'episode' in epinfo:
            break
    takename = self.model_config.output_dir('videos', self.model_config.run_name, self.videoname.format(take_number))
    pathlib.Path(os.path.dirname(takename)).mkdir(parents=True, exist_ok=True)
    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    video = cv2.VideoWriter(takename, fourcc, self.fps, (frames[0].shape[1], frames[0].shape[0]))
    for i in tqdm.trange(len(frames), file=sys.stdout):
        # OpenCV expects BGR channel order
        video.write(cv2.cvtColor(frames[i], cv2.COLOR_RGB2BGR))
    video.release()
    print("Written {}".format(takename))
Record a single movie and store it on hard drive
43,528
def reset_training_state(self, dones, batch_info):
    """Reset the per-environment noise process for every episode that just finished."""
    for idx, done in enumerate(dones):
        if done <= 0.5:
            continue
        # done flag > 0.5 means this environment's episode terminated
        self.processes[idx].reset()
A hook for a model to react when during training episode is finished
43,529
def forward(self, actions, batch_info):
    """Apply Ornstein-Uhlenbeck exploration noise to actions, clipped to the action range.

    Noise processes are created lazily, one per environment in the batch.
    """
    while len(self.processes) < actions.shape[0]:
        len_action_space = self.action_space.shape[-1]
        self.processes.append(
            OrnsteinUhlenbeckNoiseProcess(
                np.zeros(len_action_space),
                float(self.std_dev) * np.ones(len_action_space)
            )
        )
    noise = torch.from_numpy(np.stack([x() for x in self.processes])).float().to(actions.device)
    # Clamp the noisy actions into [low, high]
    return torch.min(torch.max(actions + noise, self.low_tensor), self.high_tensor)
Return model step after applying noise
43,530
def interpolate_logscale(start, end, steps):
    """Interpolate from `start` to `end` over `steps` points, spaced on a log scale.

    A non-positive start is invalid for a log scale; it triggers a warning and
    is replaced with a small positive epsilon.
    """
    if start <= 0.0:
        warnings.warn("Start of logscale interpolation must be positive!")
        start = 1e-5
    start_exponent = np.log10(float(start))
    end_exponent = np.log10(float(end))
    return np.logspace(start_exponent, end_exponent, steps)
Interpolate series between start and end in given number of steps - logscale interpolation
43,531
def interpolate_series(start, end, steps, how='linear'):
    """Interpolate a series between `start` and `end` in `steps` points, dispatching on `how`."""
    return INTERP_DICT[how](start, end, steps)
Interpolate series between start and end in given number of steps
43,532
def interpolate_single(start, end, coefficient, how='linear'):
    """Interpolate a single value between `start` and `end` at `coefficient`, dispatching on `how`."""
    return INTERP_SINGLE_DICT[how](start, end, coefficient)
Interpolate single value between start and end in given number of steps
43,533
def run(self, *args):
    """Print a model summary, using a sample input shape from the data source if available."""
    if self.source is None:
        self.model.summary()
    else:
        # Use one training batch only to discover the input shape
        x_data, y_data = next(iter(self.source.train_loader()))
        self.model.summary(input_size=x_data.shape[1:])
Print model summary
43,534
def initialize_training(self, training_info: TrainingInfo, model_state=None, hidden_state=None):
    """Prepare the model for training: load checkpointed weights or reset, then init the algo."""
    if model_state is not None:
        self.model.load_state_dict(model_state)
    else:
        self.model.reset_weights()
    self.algo.initialize(
        training_info=training_info, model=self.model,
        environment=self.env_roller.environment, device=self.device
    )
Prepare models for training
43,535
def convolutional_layer_series(initial_size, layer_sequence):
    """Apply a series of (filter_size, padding, stride) conv transformations to a size number."""
    size = initial_size
    for filter_size, padding, stride in layer_sequence:
        size = convolution_size_equation(size, filter_size, padding, stride)
    return size
Execute a series of convolutional layer transformations to the size number
43,536
def train(self, mode=True):
    r"""Set the module in training mode and apply train-mode setup to all leaf modules."""
    # (The stray `r` in the flattened source was the raw-docstring prefix.)
    super().train(mode)
    if mode:
        mu.apply_leaf(self, mu.set_train_mode)
    return self
Sets the module in training mode.
43,537
def summary(self, input_size=None, hashsummary=False):
    """Print a model summary; with `input_size` use the layer-by-layer summary, optionally with per-layer hashes."""
    if input_size is None:
        print(self)
        print("-" * 120)
        number = sum(p.numel() for p in self.model.parameters())
        print("Number of model parameters: {:,}".format(number))
        print("-" * 120)
    else:
        summary(self, input_size)
    if hashsummary:
        for idx, hashvalue in enumerate(self.hashsummary()):
            print(f"{idx}: {hashvalue}")
Print a model summary
43,538
def hashsummary(self):
    """Return SHA-256 checksums of each child module's parameter tensors."""
    children = list(self.children())
    result = []
    for child in children:
        result.extend(
            hashlib.sha256(x.detach().cpu().numpy().tobytes()).hexdigest()
            for x in child.parameters()
        )
    return result
Print a model summary - checksums of each layer parameters
43,539
def zero_state(self, batch_size):
    """Return the all-zeros initial hidden state, shape (batch_size, state_dim), float32."""
    shape = (batch_size, self.state_dim)
    return torch.zeros(shape, dtype=torch.float32)
Initial state of the network
43,540
def loss(self, x_data, y_true):
    """Forward-propagate the network and return (predictions, loss value)."""
    y_pred = self(x_data)
    return y_pred, self.loss_value(x_data, y_true, y_pred)
Forward propagate network and return a value of loss function
43,541
def metrics(self):
    """Return the set of metrics tracked for this model: loss and accuracy."""
    # Imported locally to avoid a circular import at module load time
    from vel.metrics.loss_metric import Loss
    from vel.metrics.accuracy import Accuracy
    return [Loss(), Accuracy()]
Set of metrics for this model
43,542
def one_hot_encoding(input_tensor, num_labels):
    """One-hot encode an integer label tensor.

    Returns a float tensor with the input's shape plus a trailing dimension of
    size `num_labels`, placed on the same device as the input.
    """
    flat_labels = input_tensor.view(-1, 1).to(torch.long)
    encoded = torch.zeros(flat_labels.size(0), num_labels, device=input_tensor.device, dtype=torch.float)
    encoded.scatter_(1, flat_labels, 1)
    return encoded.view(list(input_tensor.shape) + [-1])
One - hot encode labels from input
43,543
def merge_first_two_dims(tensor):
    """Collapse the first two dimensions of a tensor into a single dimension."""
    first, second, *rest = tensor.shape
    return tensor.view(first * second, *rest)
Reshape tensor to merge first two dimensions
43,544
def instantiate(self, parallel_envs, seed=0, preset='default') -> VecEnv:
    """Create a vectorized environment of `parallel_envs` copies, with optional frame stacking."""
    envs = DummyVecEnv([self._creation_function(i, seed, preset) for i in range(parallel_envs)])
    if self.frame_history is not None:
        envs = VecFrameStack(envs, self.frame_history)
    return envs
Create vectorized environments
43,545
def instantiate_single(self, seed=0, preset='default'):
    """Create a single (non-vectorized) environment instance, with optional frame stacking."""
    env = self.env.instantiate(seed=seed, serial_id=0, preset=preset)
    if self.frame_history is not None:
        env = FrameStack(env, self.frame_history)
    return env
Create a new Env instance - single
43,546
def _creation_function(self, idx, seed, preset):
    """Return a thunk creating one env; closes over the supplied values so each copy gets its own serial id."""
    return lambda: self.env.instantiate(seed=seed, serial_id=idx, preset=preset)
Helper function to create a proper closure around supplied values
43,547
def policy(self, observations):
    """Calculate only the action head (policy parameters) for the given observations."""
    input_data = self.input_block(observations)
    policy_base_output = self.policy_backbone(input_data)
    policy_params = self.action_head(policy_base_output)
    return policy_params
Calculate only action head for given state
43,548
def _init_cycle_dict ( self ) : dict_arr = np . zeros ( self . epochs , dtype = int ) length_arr = np . zeros ( self . epochs , dtype = int ) start_arr = np . zeros ( self . epochs , dtype = int ) c_len = self . cycle_len idx = 0 for i in range ( self . cycles ) : current_start = idx for j in range ( c_len ) : dict_arr [ idx ] = i length_arr [ idx ] = c_len start_arr [ idx ] = current_start idx += 1 c_len *= self . cycle_mult return dict_arr , length_arr , start_arr
Populate a cycle dict
43,549
def on_batch_begin(self, batch_info: BatchInfo):
    """Set the learning rate for this batch according to the cyclic schedule."""
    cycle_length = self.cycle_lengths[batch_info.local_epoch_number - 1]
    cycle_start = self.cycle_starts[batch_info.local_epoch_number - 1]
    # Progress within the current cycle, in [0, 1)
    numerator = (
        (batch_info.local_epoch_number - cycle_start - 1) * batch_info.batches_per_epoch
        + batch_info.batch_number
    )
    denominator = cycle_length * batch_info.batches_per_epoch
    interpolation_number = numerator / denominator
    if cycle_start == 0 and numerator < self.init_iter:
        # Warm-up phase at the very beginning of training
        lr = self.init_lr
    else:
        if isinstance(self.max_lr, list):
            # Per-parameter-group learning rates
            lr = [
                interp.interpolate_single(max_lr, min_lr, interpolation_number, how=self.interpolate)
                for max_lr, min_lr in zip(self.max_lr, self.min_lr)
            ]
        else:
            lr = interp.interpolate_single(self.max_lr, self.min_lr, interpolation_number, how=self.interpolate)
    self.set_lr(lr)
Set proper learning rate
43,550
def set_lr(self, lr):
    """Assign a learning rate to the optimizer - a list sets one rate per parameter group."""
    groups = self.optimizer.param_groups
    if isinstance(lr, list):
        for group_value, group in zip(lr, groups):
            group['lr'] = group_value
    else:
        for group in groups:
            group['lr'] = lr
Set a learning rate for the optimizer
43,551
def parameter_constructor(cls, loader, node):
    """Construct a variable instance from a YAML scalar node.

    A string of the form 'name=value' yields cls(name, value); anything else
    yields cls(value).
    """
    value = loader.construct_scalar(node)
    if isinstance(value, str):
        if '=' in value:
            (varname, varvalue) = Parser.parse_equality(value)
            return cls(varname, varvalue)
        else:
            return cls(value)
    else:
        return cls(value)
Construct variable instance from yaml node
43,552
def register(cls):
    """Register the !param and !env YAML constructors (idempotent via IS_LOADED flag)."""
    if not cls.IS_LOADED:
        cls.IS_LOADED = True
        yaml.add_constructor('!param', Parameter.parameter_constructor, Loader=yaml.SafeLoader)
        yaml.add_constructor('!env', EnvironmentVariable.parameter_constructor, Loader=yaml.SafeLoader)
Register variable handling in YAML
43,553
def parse_equality(cls, equality_string):
    """Parse a simple 'name=value' equality, YAML-loading both sides.

    Returns (left, right); the left side must parse to a string.
    """
    cls.register()
    assert '=' in equality_string, "There must be an '=' sign in the equality"
    # Split on the first '=' only, so values may themselves contain '='
    [left_side, right_side] = equality_string.split('=', 1)
    left_side_value = yaml.safe_load(left_side.strip())
    right_side_value = yaml.safe_load(right_side.strip())
    assert isinstance(left_side_value, str), "Left side of equality must be a string"
    return left_side_value, right_side_value
Parse some simple equality statements
43,554
def clean(self, initial_epoch):
    """Remove metric entries after `initial_epoch` that would otherwise be overwritten on resume."""
    self.db.metrics.delete_many({'run_name': self.model_config.run_name, 'epoch_idx': {'$gt': initial_epoch}})
Remove entries from database that would get overwritten
43,555
def store_config(self, configuration):
    """Store model configuration in the database, replacing any previous entry for this run."""
    run_name = self.model_config.run_name
    self.db.configs.delete_many({'run_name': self.model_config.run_name})
    # Copy before mutating so the caller's dict is untouched
    configuration = configuration.copy()
    configuration['run_name'] = run_name
    self.db.configs.insert_one(configuration)
Store model parameters in the database
43,556
def get_frame(self):
    """Return a DataFrame of this run's metrics indexed by epoch (empty frame if no metrics)."""
    metric_items = list(
        self.db.metrics.find({'run_name': self.model_config.run_name}).sort('epoch_idx')
    )
    if len(metric_items) == 0:
        return pd.DataFrame(columns=['run_name'])
    else:
        return pd.DataFrame(metric_items).drop(['_id', 'model_name'], axis=1).set_index('epoch_idx')
Get a dataframe of metrics from this storage
43,557
def _get_transitions(self, probs, indexes, tree_idxs, batch_info, forward_steps=1, discount_factor=1.0):
    """Build a batch of transitions for the given sampled indexes, with importance-sampling weights."""
    if forward_steps > 1:
        transition_arrays = self.backend.get_transitions_forward_steps(indexes, forward_steps, discount_factor)
    else:
        transition_arrays = self.backend.get_transitions(indexes)
    # Importance-sampling exponent is annealed according to training progress
    priority_weight = self.priority_weight.value(batch_info['progress'])
    # Normalize sampled priorities by each tree's total mass to get probabilities
    probs = probs / np.array([s.total() for s in self.backend.segment_trees], dtype=float).reshape(1, -1)
    capacity = self.backend.current_size
    weights = (capacity * probs) ** (-priority_weight)
    # Normalize weights so the maximum per column is 1
    weights = weights / weights.max(axis=0, keepdims=True)
    transition_arrays['weights'] = weights
    transition_tensors = {k: torch.from_numpy(v) for k, v in transition_arrays.items()}
    transitions = Trajectories(
        num_steps=indexes.shape[0],
        num_envs=indexes.shape[1],
        environment_information=None,
        transition_tensors=transition_tensors,
        rollout_tensors={},
        extra_data={'tree_idxs': tree_idxs}
    )
    return transitions.to_transitions()
Return batch of frames for given indexes
43,558
def rollout(self, batch_info: BatchInfo, model: Model, number_of_steps: int) -> Rollout:
    """Roll out the environment and return the result. Must be implemented by subclasses."""
    raise NotImplementedError
Roll - out the environment and return it
43,559
def train_epoch(self, epoch_info: EpochInfo, interactive=True):
    """Train the model for one epoch of a fixed number of batch updates.

    When `interactive`, shows a tqdm progress bar over the batches.
    """
    epoch_info.on_epoch_begin()
    if interactive:
        iterator = tqdm.trange(epoch_info.batches_per_epoch, file=sys.stdout, desc="Training", unit="batch")
    else:
        iterator = range(epoch_info.batches_per_epoch)
    for batch_idx in iterator:
        batch_info = BatchInfo(epoch_info, batch_idx)
        batch_info.on_batch_begin()
        self.train_batch(batch_info)
        batch_info.on_batch_end()
    epoch_info.result_accumulator.freeze_results()
    epoch_info.on_epoch_end()
Train model on an epoch of a fixed number of batch updates
43,560
def train_batch(self, batch_info: BatchInfo):
    """Perform the single most atomic learning step: one on-policy update plus optional replay updates."""
    batch_info['sub_batch_data'] = []
    self.on_policy_train_batch(batch_info)
    if self.settings.experience_replay > 0 and self.env_roller.is_ready_for_sampling():
        if self.settings.stochastic_experience_replay:
            # Randomize the number of replay updates around the configured mean
            experience_replay_count = np.random.poisson(self.settings.experience_replay)
        else:
            experience_replay_count = self.settings.experience_replay
        for i in range(experience_replay_count):
            self.off_policy_train_batch(batch_info)
    batch_info.aggregate_key('sub_batch_data')
Single most atomic step of learning this reinforcer can perform
43,561
def on_policy_train_batch(self, batch_info: BatchInfo):
    """On-policy step: roll out the environment and run a single optimizer step on the result."""
    self.model.train()
    rollout = self.env_roller.rollout(batch_info, self.model, self.settings.number_of_steps).to_device(self.device)
    batch_result = self.algo.optimizer_step(
        batch_info=batch_info, device=self.device, model=self.model, rollout=rollout
    )
    batch_info['sub_batch_data'].append(batch_result)
    batch_info['frames'] = rollout.frames()
    batch_info['episode_infos'] = rollout.episode_information()
Perform an on - policy training step of evaluating an env and a single backpropagation step
43,562
def off_policy_train_batch(self, batch_info: BatchInfo):
    """Off-policy step: sample the replay buffer and run a single optimizer step on the sample."""
    self.model.train()
    rollout = self.env_roller.sample(batch_info, self.model, self.settings.number_of_steps).to_device(self.device)
    batch_result = self.algo.optimizer_step(
        batch_info=batch_info, device=self.device, model=self.model, rollout=rollout
    )
    batch_info['sub_batch_data'].append(batch_result)
Perform an off - policy training step of sampling the replay buffer and gradient descent
43,563
def should_store_best_checkpoint(self, epoch_idx, metrics) -> bool:
    """Decide whether the current checkpoint is the best so far for the tracked metric.

    Updates the running best value as a side effect when the new metric is better.
    """
    if not self.store_best:
        return False
    metric = metrics[self.metric]
    if better(self._current_best_metric_value, metric, self.metric_mode):
        self._current_best_metric_value = metric
        return True
    return False
Should we store current checkpoint as the best
43,564
def create(model_config, batch_size, vectors=None):
    """Create the IMDB sentiment dataset with torchtext fields, vocab and bucket iterators."""
    path = model_config.data_dir('imdb')
    text_field = data.Field(lower=True, tokenize='spacy', batch_first=True)
    label_field = data.LabelField(is_target=True)
    train_source, test_source = IMDBCached.splits(root=path, text_field=text_field, label_field=label_field)
    # Vocabulary is built from the training split only
    text_field.build_vocab(train_source, max_size=25_000, vectors=vectors)
    label_field.build_vocab(train_source)
    train_iterator, test_iterator = data.BucketIterator.splits(
        (train_source, test_source),
        batch_size=batch_size,
        device=model_config.torch_device(),
        shuffle=True
    )
    return TextData(train_source, test_source, train_iterator, test_iterator, text_field, label_field)
Create an IMDB dataset
43,565
def run(self):
    """Visualize data augmentations: plot original images next to several augmented samples."""
    dataset = self.source.train_dataset()
    num_samples = len(dataset)
    fig, ax = plt.subplots(self.cases, self.samples + 1)
    selected_sample = np.sort(np.random.choice(num_samples, self.cases, replace=False))
    for i in range(self.cases):
        raw_image, _ = dataset.get_raw(selected_sample[i])
        ax[i, 0].imshow(raw_image)
        ax[i, 0].set_title("Original image")
        for j in range(self.samples):
            # Each indexing of the dataset re-applies random augmentation
            augmented_image, _ = dataset[selected_sample[i]]
            augmented_image = dataset.denormalize(augmented_image)
            ax[i, j + 1].imshow(augmented_image)
    plt.show()
Run the visualization
43,566
def env_maker(environment_id, seed, serial_id, monitor=False, allow_early_resets=False):
    """Build a classic-control gym environment wrapped with a Monitor."""
    env = gym.make(environment_id)

    # Offset the seed so parallel workers don't share RNG streams
    env.seed(seed + serial_id)

    if monitor:
        logdir = logger.get_dir() and os.path.join(logger.get_dir(), str(serial_id))
    else:
        logdir = None

    return Monitor(env, logdir, allow_early_resets=allow_early_resets)
Create a classic control environment with basic set of wrappers
43,567
def freeze(self, number=None):
    """Freeze the first ``number`` child layers (defaults to ``self.head_layers``)."""
    limit = self.head_layers if number is None else number

    for position, layer in enumerate(self.model.children()):
        if position >= limit:
            break
        mu.freeze_layer(layer)
Freeze given number of layers in the model
43,568
def unfreeze(self):
    """Unfreeze all layers of the model so their parameters are trainable again."""
    # The original enumerate() produced an index that was never used
    for child in self.model.children():
        mu.unfreeze_layer(child)
Unfreeze model layers
43,569
def update_average_model(self, model):
    """Polyak-average the given model's parameters into the running average model."""
    alpha = self.average_model_alpha

    for source_param, target_param in zip(model.parameters(), self.average_model.parameters()):
        # target <- alpha * target + (1 - alpha) * source, updated in place
        target_param.data.mul_(alpha).add_(source_param.data * (1 - alpha))
Update weights of the average model with new model observation
43,570
def retrace(self, rewards, dones, q_values, state_values, rho, final_values):
    """Calculate Q(retraced) targets (Retrace(lambda)-style off-policy correction).

    All tensor arguments are per-timestep rollout buffers; ``rho`` holds the
    importance-sampling ratios and ``final_values`` bootstraps past the rollout end.
    """
    # Truncate importance weights at self.retrace_rho_cap
    rho_bar = torch.min(torch.ones_like(rho) * self.retrace_rho_cap, rho)

    q_retraced_buffer = torch.zeros_like(rewards)

    next_value = final_values

    # Backward recursion over the rollout; a 'done' flag zeroes the bootstrap term
    for i in reversed(range(rewards.size(0))):
        q_retraced = rewards[i] + self.discount_factor * next_value * (1.0 - dones[i])
        # Correction for the next (earlier) step, weighted by the truncated IS ratio
        next_value = rho_bar[i] * (q_retraced - q_values[i]) + state_values[i]
        q_retraced_buffer[i] = q_retraced

    return q_retraced_buffer
Calculate Q retraced targets
43,571
def logprob(self, action_sample, pd_params):
    """Log-density of a diagonal Gaussian (parameterized by mean and log-std) at the sample."""
    means = pd_params[:, :, 0]
    log_std = pd_params[:, :, 1]

    normalized = (action_sample - means) / torch.exp(log_std)

    # Independent dimensions: sum the per-dimension Gaussian log densities
    quadratic_term = 0.5 * (normalized ** 2 + self.LOG2PI).sum(dim=-1)
    return -(quadratic_term + log_std.sum(dim=-1))
Log - likelihood
43,572
def logprob(self, actions, action_logits):
    """Log-probability of the selected actions under the given (log-)policy outputs."""
    # nll_loss extracts -log p(action) per row; negate to recover the log-probability
    return -F.nll_loss(action_logits, actions, reduction='none')
Logarithm of probability of given sample
43,573
def _value_function ( self , x_input , y_true , y_pred ) : if len ( y_true . shape ) == 1 : return y_pred . argmax ( 1 ) . eq ( y_true ) . double ( ) . mean ( ) . item ( ) else : raise NotImplementedError
Return classification accuracy of input
43,574
def on_epoch_end(self, epoch_info):
    """Push the latest epoch metrics to the visdom dashboard."""
    frame = pd.DataFrame([epoch_info.result]).set_index('epoch_idx')
    is_first = epoch_info.global_epoch_idx == 1
    visdom_append_metrics(self.vis, frame, first_epoch=is_first)
Update data in visdom on push
43,575
def on_batch_end(self, batch_info):
    """Optionally stream the current learning rate to visdom after every batch."""
    if not self.settings.stream_lr:
        return

    # Fractional epoch index used as the x-axis coordinate
    position = (
        float(batch_info.epoch_number)
        + float(batch_info.batch_number) / batch_info.batches_per_epoch
    )

    current_lr = batch_info.optimizer.param_groups[-1]['lr']
    frame = pd.DataFrame([current_lr], index=[position], columns=['lr'])

    visdom_append_metrics(
        self.vis, frame,
        first_epoch=(batch_info.epoch_number == 1) and (batch_info.batch_number == 0)
    )
Stream LR to visdom
43,576
def main():
    """Paperboy entry point - parse the arguments and run a command."""
    parser = argparse.ArgumentParser(description='Paperboy deep learning launcher')

    parser.add_argument('config', metavar='FILENAME', help='Configuration file for the run')
    parser.add_argument('command', metavar='COMMAND', help='A command to run')
    parser.add_argument('varargs', nargs='*', metavar='VARARGS', help='Extra options to the command')
    parser.add_argument('-r', '--run_number', type=int, default=0, help="A run number")
    parser.add_argument('-d', '--device', default='cuda', help="A device to run the model on")
    parser.add_argument('-s', '--seed', type=int, default=None, help="Random seed for the project")
    parser.add_argument(
        '-p', '--param', type=str, metavar='NAME=VALUE', action='append', default=[],
        help="Configuration parameters"
    )
    parser.add_argument(
        '--continue', action='store_true', default=False,
        help="Continue previously started learning process"
    )
    parser.add_argument('--profile', type=str, default=None, help="Profiler output")

    args = parser.parse_args()

    # 'continue' is a Python keyword, so it has to be read via getattr
    model_config = ModelConfig.from_file(
        args.config, args.run_number,
        continue_training=getattr(args, 'continue'),
        device=args.device,
        seed=args.seed,
        params={k: v for (k, v) in (Parser.parse_equality(eq) for eq in args.param)}
    )

    # Make project-local modules importable
    if model_config.project_dir not in sys.path:
        sys.path.append(model_config.project_dir)

    multiprocessing_setting = model_config.provide_with_default('multiprocessing', default=None)

    if multiprocessing_setting:
        # Must be done before any pools/processes are created
        multiprocessing.set_start_method(multiprocessing_setting)

    # Set random seed in python std lib, numpy and pytorch
    from vel.util.random import set_seed
    set_seed(model_config.seed)

    model_config.banner(args.command)

    if args.profile:
        print("[PROFILER] Running Vel in profiling mode, output filename={}".format(args.profile))

        import cProfile
        import pstats

        profiler = cProfile.Profile()
        profiler.enable()

        model_config.run_command(args.command, args.varargs)

        profiler.disable()
        profiler.dump_stats(args.profile)
        profiler.print_stats(sort='tottime')

        print("======================================================================")
        pstats.Stats(profiler).strip_dirs().sort_stats('tottime').print_stats(30)
        print("======================================================================")
        pstats.Stats(profiler).strip_dirs().sort_stats('cumtime').print_stats(30)
    else:
        model_config.run_command(args.command, args.varargs)

    model_config.quit_banner()
Paperboy entry point - parse the arguments and run a command
43,577
def set_seed(seed: int):
    """Seed the python, numpy and pytorch random number generators."""
    for seeder in (random.seed, np.random.seed, torch.random.manual_seed):
        seeder(seed)
Set random seed for python numpy and pytorch RNGs
43,578
def better(old_value, new_value, mode):
    """Check if new value is better than the old value.

    :param old_value: previous best metric; may be None or NaN when nothing was recorded yet
    :param new_value: candidate metric value; None/NaN is never considered an improvement
    :param mode: 'min' when lower is better, 'max' when higher is better
    :return: True when new_value should replace old_value
    :raises RuntimeError: for an unsupported mode
    """
    old_invalid = old_value is None or np.isnan(old_value)
    new_invalid = new_value is None or np.isnan(new_value)

    # An invalid candidate can never improve on anything.
    # (The original fell through to a '<' comparison and raised TypeError for None.)
    if new_invalid:
        return False

    # Any valid candidate beats a missing/NaN previous best
    if old_invalid:
        return True

    if mode == 'min':
        return new_value < old_value
    elif mode == 'max':
        return new_value > old_value
    else:
        raise RuntimeError(f"Mode '{mode}' value is not supported")
Check if new value is better than the old value
43,579
def reset_weights(self):
    """Reinitialize the final linear layer: small uniform weights, zero bias."""
    # Small uniform range keeps the initial outputs close to zero
    bound = 3e-3
    init.uniform_(self.linear.weight, -bound, bound)
    init.zeros_(self.linear.bias)
Initialize weights to sane defaults
43,580
def discount_bootstrap(rewards_buffer, dones_buffer, final_values, discount_factor, number_of_steps):
    """Compute discounted returns, bootstrapping from the value of the post-rollout state."""
    result = torch.zeros_like(rewards_buffer)
    running_value = final_values

    # Walk backwards through the rollout; a 'done' flag cuts the bootstrap
    # off at episode boundaries
    for step in reversed(range(number_of_steps)):
        running_value = (
            rewards_buffer[step]
            + discount_factor * running_value * (1.0 - dones_buffer[step])
        )
        result[step] = running_value

    return result
Calculate state values bootstrapping off the following state values
43,581
def find_project_directory(start_path) -> str:
    """Walk upwards from start_path until a directory containing the project file is found."""
    current = os.path.realpath(start_path)

    candidate = os.path.join(current, ModelConfig.PROJECT_FILE_NAME)
    if os.path.exists(candidate):
        return current

    parent = os.path.realpath(os.path.join(current, '..'))

    # Hitting the filesystem root (parent == self) means the project file doesn't exist
    if os.path.realpath(current) == parent:
        raise RuntimeError(f"Couldn't find project file starting from {current}")

    return ModelConfig.find_project_directory(parent)
Locate top - level project directory
43,582
def from_file(cls, filename: str, run_number: int, continue_training: bool = False,
              seed: int = None, device: str = 'cuda', params=None):
    """Create model config from file.

    Parses the model YAML-like config, locates the enclosing project directory,
    parses the project-level config and merges the two (model settings win).
    """
    with open(filename, 'r') as fp:
        model_config_contents = Parser.parse(fp)

    project_config_path = ModelConfig.find_project_directory(
        os.path.dirname(os.path.abspath(filename))
    )

    with open(os.path.join(project_config_path, cls.PROJECT_FILE_NAME), 'r') as fp:
        project_config_contents = Parser.parse(fp)

    # Model config takes precedence over the project config on key collisions
    aggregate_dictionary = {**project_config_contents, **model_config_contents}

    return ModelConfig(
        filename=filename,
        configuration=aggregate_dictionary,
        run_number=run_number,
        project_dir=project_config_path,
        continue_training=continue_training,
        seed=seed,
        device=device,
        parameters=params
    )
Create model config from file
43,583
def from_memory(cls, model_data: dict, run_number: int, project_dir: str,
                continue_training=False, seed: int = None, device: str = 'cuda', params=None):
    """Create a model config directly from an in-memory configuration dictionary."""
    # "[memory]" is a sentinel filename marking configs that never lived on disk
    return ModelConfig(
        filename="[memory]",
        configuration=model_data,
        run_number=run_number,
        project_dir=project_dir,
        continue_training=continue_training,
        seed=seed,
        device=device,
        parameters=params
    )
Create model config from supplied data
43,584
def run_command(self, command_name, varargs):
    """Look up the named command and execute it with the supplied arguments."""
    return self.get_command(command_name).run(*varargs)
Run the named command with the given arguments
43,585
def project_data_dir(self, *args) -> str:
    """Return a normalized path inside the project's data directory."""
    joined = os.path.join(self.project_dir, 'data', *args)
    return os.path.normpath(joined)
Directory where to store data
43,586
def output_dir(self, *args) -> str:
    """Return a path inside the project's output directory."""
    parts = (self.project_dir, 'output') + args
    return os.path.join(*parts)
Directory where to store output
43,587
def project_top_dir(self, *args) -> str:
    """Return a path relative to the project's top-level directory."""
    base = self.project_dir
    return os.path.join(base, *args)
Project top - level directory
43,588
def provide_with_default(self, name, default=None):
    """Return a dependency-injected instance by name, or the default when not configured."""
    return self.provider.instantiate_by_name_with_default(
        name, default_value=default
    )
Return a dependency - injected instance
43,589
def benchmark_method(f):
    """Decorator turning ``f`` into a factory of named Benchmark objects."""
    @wraps(f)
    def factory(name, *args, **kwargs):
        # Arguments are captured and deferred; f runs later, during benchmarking
        return Benchmark(name, f, args, kwargs)

    return factory
decorator to turn f into a factory of benchmarks
43,590
def bench(participants=participants, benchmarks=benchmarks, bench_time=BENCH_TIME):
    """Run every benchmark against every participant for ~bench_time seconds each,
    collecting per-benchmark timing means and standard deviations.

    :return: (means, stddevs) - one list of per-benchmark values per participant
    """
    # One live client per participant
    mcs = [p.factory() for p in participants]
    means = [[] for p in participants]
    stddevs = [[] for p in participants]

    last_fn = None

    for benchmark_name, fn, args, kwargs in benchmarks:
        logger.info('')
        logger.info('%s', benchmark_name)

        for i, (participant, mc) in enumerate(zip(participants, mcs)):
            # 'get' benchmarks need data in place, so replay the previous benchmark first
            # NOTE(review): assumes a 'get' benchmark is always preceded by a setter - confirm
            if 'get' in fn.__name__:
                last_fn(mc, *args, **kwargs)

            sw = Stopwatch()

            # Keep re-running the operation until the time budget is exhausted
            while sw.total() < bench_time:
                with sw.timing():
                    fn(mc, *args, **kwargs)

            means[i].append(sw.mean())
            stddevs[i].append(sw.stddev())

            logger.info(u'%76s: %s', participant.name, sw)

        last_fn = fn

    return means, stddevs
Run each benchmark against every participant and collect timing statistics
43,591
def strip_datetime(value):
    """Converts value to datetime if string or int.

    Returns None (implicitly) when the value is of another type or fails to parse.
    """
    if isinstance(value, basestring):
        try:
            return parse_datetime(value)
        except ValueError:
            return
    elif isinstance(value, integer_types):
        try:
            # value / 1e3: integer is treated as epoch milliseconds
            return datetime.datetime.utcfromtimestamp(value / 1e3)
        except (ValueError, OverflowError, OSError):
            return
Converts value to datetime if string or int .
43,592
def set_session_token(self, session_token):
    """Store the session token and record the current time as login time."""
    self._login_time = datetime.datetime.now()
    self.session_token = session_token
Sets session token and new login time .
43,593
def get_password(self):
    """If password is not provided, look it up in the environment under username + 'password'.

    :raises PasswordError: when no password is set and none is found in the environment
    """
    if self.password is None:
        # Single environment read (the original queried os.environ twice)
        env_password = os.environ.get(self.username + 'password')
        if env_password:
            self.password = env_password
        else:
            raise PasswordError(self.username)
If password is not provided will look in environment variables for username + password .
43,594
def get_app_key(self):
    """If app_key is not provided, look it up in the environment under the username.

    :raises AppKeyError: when no app key is set and none is found in the environment
    """
    if self.app_key is None:
        # Single environment read (the original queried os.environ twice)
        env_app_key = os.environ.get(self.username)
        if env_app_key:
            self.app_key = env_app_key
        else:
            raise AppKeyError(self.username)
If app_key is not provided will look in environment variables for username .
43,595
def session_expired(self):
    """Return True if login_time is not set or the session is older than 200 minutes.

    The original implementation returned None (not False) for a live session;
    this version always returns an explicit bool.
    """
    if not self._login_time:
        return True
    # 12000 seconds == 200 minutes session lifetime
    elapsed = (datetime.datetime.now() - self._login_time).total_seconds()
    return elapsed > 12000
Returns True if login_time not set or seconds since login time is greater than 200 mins .
43,596
def check_status_code(response, codes=None):
    """Raise StatusCodeError unless response.status_code is one of the accepted codes (default: 200)."""
    accepted = codes if codes else [200]
    if response.status_code not in accepted:
        raise StatusCodeError(response.status_code)
Checks response . status_code is in codes .
43,597
def list_runner_book(self, market_id, selection_id, handicap=None, price_projection=None,
                     order_projection=None, match_projection=None, include_overall_position=None,
                     partition_matched_by_strategy_ref=None, customer_strategy_refs=None,
                     currency_code=None, matched_since=None, bet_ids=None, locale=None,
                     session=None, lightweight=None):
    """Returns a list of dynamic data about a market and a specified runner.

    Dynamic data includes prices, the status of the market, the status of selections,
    the traded volume, and the status of any orders you have placed in the market.
    """
    # clean_locals(locals()) turns this call's keyword arguments into the request
    # payload - no intermediate locals may be introduced before this line
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'listRunnerBook')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.MarketBook, elapsed_time, lightweight)
Returns a list of dynamic data about a market and a specified runner . Dynamic data includes prices the status of the market the status of selections the traded volume and the status of any orders you have placed in the market
43,598
def list_current_orders(self, bet_ids=None, market_ids=None, order_projection=None,
                        customer_order_refs=None, customer_strategy_refs=None,
                        date_range=time_range(), order_by=None, sort_dir=None,
                        from_record=None, record_count=None, session=None, lightweight=None):
    """Returns a list of your current orders.

    NOTE(review): the ``date_range=time_range()`` default is evaluated once at
    definition time - confirm time_range() returns a constant/immutable value.
    """
    # clean_locals(locals()) turns this call's keyword arguments into the request
    # payload - no intermediate locals may be introduced before this line
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'listCurrentOrders')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.CurrentOrders, elapsed_time, lightweight)
Returns a list of your current orders .
43,599
def list_cleared_orders(self, bet_status='SETTLED', event_type_ids=None, event_ids=None,
                        market_ids=None, runner_ids=None, bet_ids=None,
                        customer_order_refs=None, customer_strategy_refs=None, side=None,
                        settled_date_range=time_range(), group_by=None,
                        include_item_description=None, locale=None, from_record=None,
                        record_count=None, session=None, lightweight=None):
    """Returns a list of settled bets based on the bet status, ordered by settled date.

    NOTE(review): the ``settled_date_range=time_range()`` default is evaluated once
    at definition time - confirm time_range() returns a constant/immutable value.
    """
    # clean_locals(locals()) turns this call's keyword arguments into the request
    # payload - no intermediate locals may be introduced before this line
    params = clean_locals(locals())
    method = '%s%s' % (self.URI, 'listClearedOrders')
    (response, elapsed_time) = self.request(method, params, session)
    return self.process_response(response, resources.ClearedOrders, elapsed_time, lightweight)
Returns a list of settled bets based on the bet status ordered by settled date .