idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
238,700
def variable_summaries(vars_, groups=None, scope='weights'):
  """Create histogram summaries for the provided variables.

  Args:
    vars_: Iterable of variables to summarize.
    groups: Mapping from summary names to regexes matched against variable
        names; defaults to a single 'all' group matching every variable.
    scope: Name scope prefix for the created summaries.

  Returns:
    Merged summary operation.
  """
  groups = groups or {r'all': r'.*'}
  grouped = collections.defaultdict(list)
  for variable in vars_:
    for group_name, pattern in groups.items():
      if re.match(pattern, variable.name):
        key = re.sub(pattern, group_name, variable.name)
        grouped[key].append(variable)
  for group_name in groups:
    if group_name not in grouped:
      tf.logging.warn("No variables matching '{}' group.".format(group_name))
  summaries = []
  # pylint: disable=redefined-argument-from-local
  for key, variables in grouped.items():
    flat = tf.concat([tf.reshape(v, [-1]) for v in variables], 0)
    summaries.append(tf.summary.histogram(scope + '/' + key, flat))
  return tf.summary.merge(summaries)
Create histogram summaries for the provided variables .
238
10
238,701
def set_dimension(tensor, axis, value):
  """Set the length of a tensor along the specified dimension.

  Args:
    tensor: Tensor whose static shape should be amended.
    axis: Index of the dimension to set.
    value: New length for that dimension.

  Raises:
    ValueError: If the dimension is already set to an incompatible value.
  """
  shape = tensor.shape.as_list()
  current = shape[axis]
  if current not in (value, None):
    message = 'Cannot set dimension {} of tensor {} to {}; is already {}.'
    raise ValueError(message.format(axis, tensor.name, value, current))
  shape[axis] = value
  tensor.set_shape(shape)
Set the length of a tensor along the specified dimension .
95
12
238,702
def default():
  """Default configuration for PPO.

  Returns:
    Dictionary of hyper parameters.
  """
  return dict(
      # General
      algorithm=algorithms.PPO,
      num_agents=30,
      eval_episodes=30,
      use_gpu=False,
      # Environment
      normalize_ranges=True,
      # Network
      network=networks.feed_forward_gaussian,
      weight_summaries=dict(
          all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*'),
      policy_layers=(200, 100),
      value_layers=(200, 100),
      init_output_factor=0.1,
      init_std=0.35,
      # Optimization
      update_every=30,
      update_epochs=25,
      optimizer=tf.train.AdamOptimizer,
      learning_rate=1e-4,
      # Losses
      discount=0.995,
      kl_target=1e-2,
      kl_cutoff_factor=2,
      kl_cutoff_coef=1000,
      kl_init_penalty=1,
  )
Default configuration for PPO .
200
6
238,703
def pendulum():
  """Configuration for the pendulum classic control task.

  Returns:
    Dictionary of hyper parameters based on the defaults.
  """
  config = default()
  config.update(
      # Environment
      env='Pendulum-v0',
      max_length=200,
      steps=1e6,  # 1M
      # Optimization
      batch_size=20,
      chunk_length=50,
  )
  return config
Configuration for the pendulum classic control task .
58
9
238,704
def cartpole():
  """Configuration for the cart pole classic control task.

  Returns:
    Dictionary of hyper parameters based on the defaults.
  """
  config = default()
  config.update(
      # Environment
      env='CartPole-v1',
      max_length=500,
      steps=2e5,  # 200k
      normalize_ranges=False,  # The env reports wrong ranges.
      # Network
      network=networks.feed_forward_categorical,
  )
  return config
Configuration for the cart pole classic control task .
72
9
238,705
def reacher():
  """Configuration for MuJoCo's reacher task.

  Returns:
    Dictionary of hyper parameters based on the defaults.
  """
  config = default()
  config.update(
      # Environment
      env='Reacher-v2',
      max_length=1000,
      steps=5e6,  # 5M
      discount=0.985,
      update_every=60,
  )
  return config
Configuration for MuJoCo's reacher task.
54
10
238,706
def bullet_ant():
  """Configuration for PyBullet's ant task.

  Returns:
    Dictionary of hyper parameters based on the defaults.
  """
  # Registers the Bullet environments with Gym as a side effect.
  import pybullet_envs  # noqa pylint: disable=unused-import
  config = default()
  config.update(
      # Environment
      env='AntBulletEnv-v0',
      max_length=1000,
      steps=3e7,  # 30M
      update_every=60,
  )
  return config
Configuration for PyBullet's ant task.
73
9
238,707
def step(self, actions):
  """Forward a batch of actions to the wrapped environments.

  Args:
    actions: Batch of actions, one per environment.

  Raises:
    ValueError: If an action is not contained in its environment's
        action space.

  Returns:
    Tuple of stacked observations, rewards, done flags, and a tuple of
    per-environment info dicts.
  """
  for index, (env, action) in enumerate(zip(self._envs, actions)):
    if not env.action_space.contains(action):
      message = 'Invalid action at index {}: {}'
      raise ValueError(message.format(index, action))
  if self._blocking:
    transitions = [
        env.step(action)
        for env, action in zip(self._envs, actions)]
  else:
    # Dispatch all steps first, then resolve the returned promises.
    promises = [
        env.step(action, blocking=False)
        for env, action in zip(self._envs, actions)]
    transitions = [promise() for promise in promises]
  observs, rewards, dones, infos = zip(*transitions)
  return np.stack(observs), np.stack(rewards), np.stack(dones), tuple(infos)
Forward a batch of actions to the wrapped environments .
199
10
238,708
def call(self, name, *args, **kwargs):
  """Asynchronously call a method of the external environment.

  Args:
    name: Name of the method to call.
    *args: Positional arguments to forward to the method.
    **kwargs: Keyword arguments to forward to the method.

  Returns:
    Promise object that blocks and returns the result when called.
  """
  self._conn.send((self._CALL, (name, args, kwargs)))
  return self._receive
Asynchronously call a method of the external environment .
49
11
238,709
def close(self):
  """Send a close message to the external process and join it."""
  try:
    self._conn.send((self._CLOSE, None))
    self._conn.close()
  except IOError:
    # The connection was already closed.
    pass
  self._process.join()
Send a close message to the external process and join it .
52
12
238,710
def step(self, action, blocking=True):
  """Step the environment.

  Args:
    action: The action to apply to the environment.
    blocking: Whether to wait for the result.

  Returns:
    Transition tuple when blocking, otherwise a promise callable that
    returns the transition tuple.
  """
  promise = self.call('step', action)
  return promise() if blocking else promise
Step the environment .
35
4
238,711
def _receive ( self ) : message , payload = self . _conn . recv ( ) # Re-raise exceptions in the main process. if message == self . _EXCEPTION : stacktrace = payload raise Exception ( stacktrace ) if message == self . _RESULT : return payload raise KeyError ( 'Received message of unexpected type {}' . format ( message ) )
Wait for a message from the worker process and return its payload .
81
13
238,712
def _worker(self, constructor, conn):
  """The process waits for actions and sends back environment results.

  Args:
    constructor: Callable that creates the environment inside the process.
    conn: Connection to the main process, used to receive command messages
        and send back results or a stack trace on failure.
  """
  try:
    env = constructor()
    while True:
      try:
        # Only block for short times to have keyboard exceptions be raised.
        if not conn.poll(0.1):
          continue
        message, payload = conn.recv()
      except (EOFError, KeyboardInterrupt):
        # The other end hung up or the user interrupted; stop the loop.
        break
      if message == self._ACCESS:
        # Attribute access: payload is the attribute name.
        name = payload
        result = getattr(env, name)
        conn.send((self._RESULT, result))
        continue
      if message == self._CALL:
        # Method call: payload is (name, args, kwargs).
        name, args, kwargs = payload
        result = getattr(env, name)(*args, **kwargs)
        conn.send((self._RESULT, result))
        continue
      if message == self._CLOSE:
        assert payload is None
        break
      raise KeyError('Received message of unknown type {}'.format(message))
  except Exception:  # pylint: disable=broad-except
    # Forward any failure to the main process so it can re-raise it.
    stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
    tf.logging.error('Error in environment process: {}'.format(stacktrace))
    conn.send((self._EXCEPTION, stacktrace))
  conn.close()
The process waits for actions and sends back environment results .
270
11
238,713
def step(self, action):
  """Forward action to the wrapped environment.

  Args:
    action: Action to apply to the environment.

  Returns:
    Converted observation, converted reward, done flag, and info dict.
  """
  observ, reward, done, info = self._env.step(action)
  converted_observ = self._convert_observ(observ)
  converted_reward = self._convert_reward(reward)
  return converted_observ, converted_reward, done, info
Forward action to the wrapped environment .
59
7
238,714
def _convert_observ ( self , observ ) : if not np . isfinite ( observ ) . all ( ) : raise ValueError ( 'Infinite observation encountered.' ) if observ . dtype == np . float64 : return observ . astype ( np . float32 ) if observ . dtype == np . int64 : return observ . astype ( np . int32 ) return observ
Convert the observation to 32 bits .
85
8
238,715
def _convert_reward ( self , reward ) : if not np . isfinite ( reward ) . all ( ) : raise ValueError ( 'Infinite reward encountered.' ) return np . array ( reward , dtype = np . float32 )
Convert the reward to 32 bits .
54
8
238,716
def value(self):
  """The current value of the streaming mean."""
  count = tf.cast(self._count, self._dtype)
  return self._sum / count
The current value of the mean .
27
7
238,717
def submit(self, value):
  """Submit a single or batch tensor to refine the streaming mean.

  Args:
    value: Tensor of the same shape as the mean, or a batch thereof.

  Returns:
    Operation that updates the sum and count.
  """
  # Add a batch dimension if necessary.
  if value.shape.ndims == self._sum.shape.ndims:
    value = value[None, ...]
  sum_update = self._sum.assign_add(tf.reduce_sum(value, 0))
  count_update = self._count.assign_add(tf.shape(value)[0])
  return tf.group(sum_update, count_update)
Submit a single or batch tensor to refine the streaming mean .
90
13
238,718
def clear(self):
  """Return the mean estimate and reset the streaming statistics.

  Returns:
    Tensor holding the mean computed before the reset.
  """
  value = self._sum / tf.cast(self._count, self._dtype)
  # Read the mean before resetting the accumulators.
  with tf.control_dependencies([value]):
    reset_ops = [
        self._sum.assign(tf.zeros_like(self._sum)),
        self._count.assign(0)]
  with tf.control_dependencies(reset_ops):
    return tf.identity(value)
Return the mean estimate and reset the streaming statistics .
104
10
238,719
def zip_(*structures, **kwargs):
  """Combine corresponding elements in multiple nested structures to tuples.

  Args:
    *structures: Nested structures to combine.
    **kwargs: Supports the keyword `flatten` to flatten the result.

  Returns:
    Nested structure where leaves are tuples of the corresponding leaves,
    or the single leaf when only one structure was provided.
  """
  # pylint: disable=differing-param-doc,missing-param-doc
  # Named keyword arguments are not allowed after *args in Python 2.
  flatten = kwargs.pop('flatten', False)
  assert not kwargs, 'zip() got unexpected keyword arguments.'
  combine = lambda *leaves: leaves if len(leaves) > 1 else leaves[0]
  return map(combine, *structures, flatten=flatten)
Combine corresponding elements in multiple nested structures into tuples.
107
12
238,720
def map_(function, *structures, **kwargs):
  """Apply a function to every element in a nested structure.

  Structures may be arbitrary nestings of tuples, lists, namedtuples, and
  dicts. When multiple structures are given, their corresponding leaves are
  passed to the function together; all structures must have the same layout.

  Args:
    function: Function applied to one leaf from each structure.
    *structures: One or more nested structures.
    **kwargs: Supports the keyword `flatten` to flatten the result.

  Raises:
    ValueError: Structures have mismatching container lengths or dict keys.

  Returns:
    Nested structure of the same layout with the function results as leaves.
  """
  # pylint: disable=differing-param-doc,missing-param-doc
  # Named keyword arguments are not allowed after *args in Python 2.
  flatten = kwargs.pop('flatten', False)
  assert not kwargs, 'map() got unexpected keyword arguments.'

  def impl(function, *structures):
    # Recursive worker that mirrors the nesting of the inputs.
    if len(structures) == 0:  # pylint: disable=len-as-condition
      return structures
    if all(isinstance(s, (tuple, list)) for s in structures):
      if len(set(len(x) for x in structures)) > 1:
        raise ValueError('Cannot merge tuples or lists of different length.')
      args = tuple((impl(function, *x) for x in _builtin_zip(*structures)))
      if hasattr(structures[0], '_fields'):  # namedtuple
        return type(structures[0])(*args)
      else:  # tuple, list
        return type(structures[0])(args)
    if all(isinstance(s, dict) for s in structures):
      if len(set(frozenset(x.keys()) for x in structures)) > 1:
        raise ValueError('Cannot merge dicts with different keys.')
      merged = {
          k: impl(function, *(s[k] for s in structures))
          for k in structures[0]}
      return type(structures[0])(merged)
    # Leaves: apply the function to the corresponding elements.
    return function(*structures)

  result = impl(function, *structures)
  if flatten:
    result = flatten_(result)
  return result
Apply a function to every element in a nested structure .
363
11
238,721
def flatten_(structure):
  """Combine all leaves of a nested structure into a tuple.

  The nested structure can consist of any combination of tuples, lists, and
  dicts. Dictionary keys are discarded; their values are ordered by the
  sorted keys.

  Args:
    structure: Nested structure.

  Returns:
    Flat tuple of the leaf values.
  """
  if isinstance(structure, dict):
    # Replace the dict by its values ordered by key. The previous
    # `zip(*sorted(structure.items()))[1]` broke on Python 3, where zip
    # returns a non-subscriptable iterator; this form also handles the
    # empty dict without a special case.
    structure = tuple(value for _, value in sorted(structure.items()))
  if isinstance(structure, (tuple, list)):
    result = []
    for element in structure:
      result += flatten_(element)
    return tuple(result)
  return (structure,)
Combine all leaves of a nested structure into a tuple .
107
12
238,722
def filter_(predicate, *structures, **kwargs):
  """Select elements of a nested structure based on a predicate function.

  When multiple structures are provided, the corresponding leaves are passed
  to the predicate together. Containers that become empty after filtering
  are removed, except for namedtuple fields, which become None instead so
  the namedtuple type can still be constructed.

  Args:
    predicate: Function returning whether a leaf should be kept.
    *structures: One or more nested structures.
    **kwargs: Supports the keyword `flatten` to flatten the result.

  Raises:
    ValueError: Structures have mismatching container lengths or dict keys.

  Returns:
    Nested structure containing only the selected leaves.
  """
  # pylint: disable=differing-param-doc,missing-param-doc, too-many-branches
  # Named keyword arguments are not allowed after *args in Python 2.
  flatten = kwargs.pop('flatten', False)
  assert not kwargs, 'filter() got unexpected keyword arguments.'

  def impl(predicate, *structures):
    # Recursive worker that mirrors the nesting of the inputs.
    if len(structures) == 0:  # pylint: disable=len-as-condition
      return structures
    if all(isinstance(s, (tuple, list)) for s in structures):
      if len(set(len(x) for x in structures)) > 1:
        raise ValueError('Cannot merge tuples or lists of different length.')
      # Only wrap in tuples if more than one structure provided.
      if len(structures) > 1:
        filtered = (impl(predicate, *x) for x in _builtin_zip(*structures))
      else:
        filtered = (impl(predicate, x) for x in structures[0])
      # Remove empty containers and construct result structure.
      if hasattr(structures[0], '_fields'):  # namedtuple
        filtered = (x if x != () else None for x in filtered)
        return type(structures[0])(*filtered)
      else:  # tuple, list
        filtered = (
            x for x in filtered
            if not isinstance(x, (tuple, list, dict)) or x)
        return type(structures[0])(filtered)
    if all(isinstance(s, dict) for s in structures):
      if len(set(frozenset(x.keys()) for x in structures)) > 1:
        raise ValueError('Cannot merge dicts with different keys.')
      # Only wrap in tuples if more than one structure provided.
      if len(structures) > 1:
        filtered = {
            k: impl(predicate, *(s[k] for s in structures))
            for k in structures[0]}
      else:
        filtered = {k: impl(predicate, v) for k, v in structures[0].items()}
      # Remove empty containers and construct result structure.
      filtered = {
          k: v for k, v in filtered.items()
          if not isinstance(v, (tuple, list, dict)) or v}
      return type(structures[0])(filtered)
    # Leaves: keep or drop according to the predicate.
    if len(structures) > 1:
      return structures if predicate(*structures) else ()
    else:
      return structures[0] if predicate(structures[0]) else ()

  result = impl(predicate, *structures)
  if flatten:
    result = flatten_(result)
  return result
Select elements of a nested structure based on a predicate function .
580
12
238,723
def add_phase(self, name, done, score, summary, steps,
              report_every=None, log_every=None, checkpoint_every=None,
              feed=None):
  """Add a phase to the loop protocol.

  Args:
    name: Name of the phase; also used for its summary sub-directory.
    done: Tensor indicating whether the score is valid this step.
    score: Scalar or batch tensor holding the current score.
    summary: String tensor holding a summary to write, if any.
    steps: Duration of the phase in steps.
    report_every: Interval in steps for reporting the mean score.
    log_every: Interval in steps for requesting summaries.
    checkpoint_every: Interval in steps for writing checkpoints.
    feed: Additional feed dictionary for the session run call.

  Raises:
    ValueError: If the rank of the done or score tensor is unknown.
  """
  done = tf.convert_to_tensor(done, tf.bool)
  score = tf.convert_to_tensor(score, tf.float32)
  summary = tf.convert_to_tensor(summary, tf.string)
  feed = feed or {}
  if done.shape.ndims is None or score.shape.ndims is None:
    raise ValueError("Rank of 'done' and 'score' tensors must be known.")
  # Only create a summary writer when a log directory was provided.
  writer = self._logdir and tf.summary.FileWriter(
      os.path.join(self._logdir, name),
      tf.get_default_graph(), flush_secs=60)
  op = self._define_step(done, score, summary)
  batch = 1 if score.shape.ndims == 0 else score.shape[0].value
  self._phases.append(_Phase(
      name, writer, op, batch, int(steps), feed,
      report_every, log_every, checkpoint_every))
Add a phase to the loop protocol .
263
8
238,724
def run(self, sess, saver, max_step=None):
  """Run the loop schedule for a specified number of steps.

  Cycles through the registered phases, running each phase's operation and
  handling logging, reporting, and checkpointing at the configured
  intervals.

  Args:
    sess: Session to use to run the phase operations.
    saver: Saver used for checkpointing.
    max_step: Stop once the global step reaches this limit; run forever
        if None.

  Yields:
    Mean score at every reporting interval.
  """
  global_step = sess.run(self._step)
  steps_made = 1
  while True:
    if max_step and global_step >= max_step:
      break
    phase, epoch, steps_in = self._find_current_phase(global_step)
    phase_step = epoch * phase.steps + steps_in
    # Announce the phase when it just started.
    if steps_in % phase.steps < steps_made:
      message = '\n' + ('-' * 50) + '\n'
      message += 'Phase {} (phase step {}, global step {}).'
      tf.logging.info(message.format(phase.name, phase_step, global_step))
    # Populate book keeping tensors.
    phase.feed[self._reset] = (steps_in < steps_made)
    phase.feed[self._log] = (
        phase.writer and
        self._is_every_steps(phase_step, phase.batch, phase.log_every))
    phase.feed[self._report] = (
        self._is_every_steps(phase_step, phase.batch, phase.report_every))
    summary, mean_score, global_step, steps_made = sess.run(
        phase.op, phase.feed)
    if self._is_every_steps(phase_step, phase.batch, phase.checkpoint_every):
      self._store_checkpoint(sess, saver, global_step)
    if self._is_every_steps(phase_step, phase.batch, phase.report_every):
      yield mean_score
    if summary and phase.writer:
      # We want smaller phases to catch up at the beginning of each epoch so
      # that their graphs are aligned.
      longest_phase = max(phase.steps for phase in self._phases)
      summary_step = epoch * longest_phase + steps_in
      phase.writer.add_summary(summary, summary_step)
Run the loop schedule for a specified number of steps .
433
11
238,725
def _is_every_steps ( self , phase_step , batch , every ) : if not every : return False covered_steps = range ( phase_step , phase_step + batch ) return any ( ( step + 1 ) % every == 0 for step in covered_steps )
Determine whether a periodic event should happen at this step .
60
13
238,726
def _find_current_phase ( self , global_step ) : epoch_size = sum ( phase . steps for phase in self . _phases ) epoch = int ( global_step // epoch_size ) steps_in = global_step % epoch_size for phase in self . _phases : if steps_in < phase . steps : return phase , epoch , steps_in steps_in -= phase . steps
Determine the current phase based on the global step .
89
12
238,727
def _define_step(self, done, score, summary):
  """Combine operations of a phase.

  Keeps track of a streaming mean of the scores of finished episodes and
  advances the global step by the batch size.

  Args:
    done: Tensor indicating whether the score is valid this step.
    score: Scalar or batch tensor holding the current score.
    summary: String tensor holding a summary to write, if any.

  Returns:
    Tuple of summary tensor, mean score (zero on non-reporting steps),
    the new global step, and the number of steps made.
  """
  if done.shape.ndims == 0:
    done = done[None]
  if score.shape.ndims == 0:
    score = score[None]
  score_mean = streaming_mean.StreamingMean((), tf.float32)
  with tf.control_dependencies([done, score, summary]):
    # Only submit scores of episodes that finished this step.
    done_score = tf.gather(score, tf.where(done)[:, 0])
    submit_score = tf.cond(
        tf.reduce_any(done), lambda: score_mean.submit(done_score), tf.no_op)
  with tf.control_dependencies([submit_score]):
    # `float` yields 0.0 when not reporting; `clear` also resets the mean.
    mean_score = tf.cond(self._report, score_mean.clear, float)
    steps_made = tf.shape(score)[0]
    next_step = self._step.assign_add(steps_made)
  with tf.control_dependencies([mean_score, next_step]):
    return tf.identity(summary), mean_score, next_step, steps_made
Combine operations of a phase .
248
7
238,728
def _store_checkpoint ( self , sess , saver , global_step ) : if not self . _logdir or not saver : return tf . gfile . MakeDirs ( self . _logdir ) filename = os . path . join ( self . _logdir , 'model.ckpt' ) saver . save ( sess , filename , global_step )
Store a checkpoint if a log directory was provided to the constructor .
83
13
238,729
def _define_loop(graph, logdir, train_steps, eval_steps):
  """Create and configure a training loop with training and evaluation phases.

  Args:
    graph: Object providing graph elements via attributes.
    logdir: Log directory for storing checkpoints and summaries.
    train_steps: Number of training steps per epoch.
    eval_steps: Number of evaluation steps per epoch.

  Returns:
    Loop object.
  """
  loop = tools.Loop(
      logdir, graph.step, graph.should_log, graph.do_report,
      graph.force_reset)
  loop.add_phase(
      'train', graph.done, graph.score, graph.summary, train_steps,
      report_every=train_steps,
      log_every=train_steps // 2,
      checkpoint_every=None,
      feed={graph.is_training: True})
  loop.add_phase(
      'eval', graph.done, graph.score, graph.summary, eval_steps,
      report_every=eval_steps,
      log_every=eval_steps // 2,
      checkpoint_every=10 * eval_steps,
      feed={graph.is_training: False})
  return loop
Create and configure a training loop with training and evaluation phases .
181
12
238,730
def train(config, env_processes):
  """Training and evaluation entry point yielding scores.

  Builds the batch environment, simulation graph, and loop, then runs the
  schedule inside a session. Graph construction is pinned to the CPU.

  Args:
    config: Object providing configurations via attributes.
    env_processes: Whether to step environments in separate processes.

  Yields:
    Evaluation scores.
  """
  tf.reset_default_graph()
  if config.update_every % config.num_agents:
    tf.logging.warn('Number of agents should divide episodes per update.')
  with tf.device('/cpu:0'):
    batch_env = utility.define_batch_env(
        lambda: _create_environment(config),
        config.num_agents, env_processes)
    graph = utility.define_simulation_graph(
        batch_env, config.algorithm, config)
    loop = _define_loop(
        graph, config.logdir,
        config.update_every * config.max_length,
        config.eval_episodes * config.max_length)
    # Total steps account for both the training and evaluation phases.
    total_steps = int(
        config.steps / config.update_every *
        (config.update_every + config.eval_episodes))
  # Exclude episode related variables since the Python state of environments is
  # not checkpointed and thus new episodes start after resuming.
  saver = utility.define_saver(exclude=(r'.*_temporary.*',))
  sess_config = tf.ConfigProto(allow_soft_placement=True)
  sess_config.gpu_options.allow_growth = True
  with tf.Session(config=sess_config) as sess:
    utility.initialize_variables(sess, saver, config.logdir)
    for score in loop.run(sess, saver, total_steps):
      yield score
  batch_env.close()
Training and evaluation entry point yielding scores .
332
8
238,731
def main(_):
  """Create or load configuration and launch the trainer."""
  utility.set_up_logging()
  if not FLAGS.config:
    raise KeyError('You must specify a configuration.')
  if FLAGS.logdir:
    run_name = '{}-{}'.format(FLAGS.timestamp, FLAGS.config)
    logdir = os.path.expanduser(os.path.join(FLAGS.logdir, run_name))
  else:
    logdir = FLAGS.logdir
  try:
    config = utility.load_config(logdir)
  except IOError:
    # No stored configuration found; build and save a fresh one.
    config = tools.AttrDict(getattr(configs, FLAGS.config)())
    config = utility.save_config(config, logdir)
  for score in train(config, FLAGS.env_processes):
    tf.logging.info('Score {}.'.format(score))
Create or load configuration and launch the trainer .
177
9
238,732
def iterate_sequences(
    consumer_fn, output_template, sequences, length, chunk_length=None,
    batch_size=None, num_epochs=1, padding_value=0):
  """Iterate over batches of chunks of sequences for multiple epochs.

  Args:
    consumer_fn: Function applied to every batch the iterator yields.
    output_template: Templates for the scan outputs of the consumer.
    sequences: Nested structure of sequence tensors.
    length: Batch tensor of valid sequence lengths; its batch size must be
        statically known.
    chunk_length: Split sequences into fixed-size chunks of this length;
        None keeps full sequences.
    batch_size: Batch size for the iterator; defaults to all sequences.
    num_epochs: Number of times to repeat the data.
    padding_value: Value used to pad the last chunk of each sequence.

  Raises:
    ValueError: If the batch size of the length tensor is not set.

  Returns:
    Tensor of the stacked consumer outputs over all batches.
  """
  if not length.shape[0].value:
    raise ValueError('Batch size of length tensor must be set.')
  num_sequences = length.shape[0].value
  sequences = dict(sequence=sequences, length=length)
  dataset = tf.data.Dataset.from_tensor_slices(sequences)
  dataset = dataset.repeat(num_epochs)
  if chunk_length:
    # Strip padding, then split each sequence into fixed-size chunks.
    dataset = dataset.map(remove_padding).flat_map(
        # pylint: disable=g-long-lambda
        lambda x: tf.data.Dataset.from_tensor_slices(
            chunk_sequence(x, chunk_length, padding_value)))
    num_chunks = tf.reduce_sum((length - 1) // chunk_length + 1)
  else:
    num_chunks = num_sequences
  if batch_size:
    dataset = dataset.shuffle(num_sequences // 2)
  dataset = dataset.batch(batch_size or num_sequences)
  dataset = dataset.prefetch(num_epochs)
  iterator = dataset.make_initializable_iterator()
  with tf.control_dependencies([iterator.initializer]):
    num_batches = num_epochs * num_chunks // (batch_size or num_sequences)
    return tf.scan(
        # pylint: disable=g-long-lambda
        lambda _1, index: consumer_fn(iterator.get_next()),
        tf.range(num_batches), output_template, parallel_iterations=1)
Iterate over batches of chunks of sequences for multiple epochs .
391
13
238,733
def chunk_sequence(sequence, chunk_length=200, padding_value=0):
  """Split a nested dict of sequence tensors into a batch of chunks.

  The last chunk is padded with `padding_value` up to `chunk_length`. The
  result contains a `length` entry holding the valid length of each chunk.

  Args:
    sequence: Nested dict of tensors with a leading time dimension; may
        contain a `length` entry with the valid sequence length.
    chunk_length: Size of the chunks the sequence is split into.
    padding_value: Value used to pad the last chunk.

  Returns:
    Nested dict of tensors with a leading chunk dimension.
  """
  if 'length' in sequence:
    length = sequence.pop('length')
  else:
    # Fall back to the full time dimension of the first tensor.
    length = tf.shape(tools.nested.flatten(sequence)[0])[0]
  num_chunks = (length - 1) // chunk_length + 1
  padding_length = chunk_length * num_chunks - length
  padded = tools.nested.map(
      # pylint: disable=g-long-lambda
      lambda tensor: tf.concat(
          [tensor, 0 * tensor[:padding_length] + padding_value], 0),
      sequence)
  chunks = tools.nested.map(
      # pylint: disable=g-long-lambda
      lambda tensor: tf.reshape(
          tensor, [num_chunks, chunk_length] + tensor.shape[1:].as_list()),
      padded)
  # All chunks are full except possibly the last one.
  chunks['length'] = tf.concat([
      chunk_length * tf.ones((num_chunks - 1,), dtype=tf.int32),
      [chunk_length - padding_length]], 0)
  return chunks
Split a nested dict of sequence tensors into a batch of chunks .
261
14
238,734
def remove_padding(sequence):
  """Select the used frames of a sequence up to its length.

  Args:
    sequence: Nested dict of tensors with a `length` entry giving the
        number of valid frames.

  Returns:
    Nested dict without the `length` entry, with all tensors truncated to
    the valid length.
  """
  length = sequence.pop('length')
  return tools.nested.map(lambda tensor: tensor[:length], sequence)
Select the used frames of a sequence up to its length.
41
13
238,735
def transform(self, value):
  """Normalize a single or batch tensor.

  Applies whichever of centering, scaling, and clipping were enabled in the
  constructor, using the current mean and variance estimates.

  Args:
    value: Element of the same shape as the mean, or a batch thereof.

  Returns:
    Filtered tensor, checked for numerical validity.
  """
  with tf.name_scope(self._name + '/transform'):
    no_batch_dim = value.shape.ndims == self._mean.shape.ndims
    if no_batch_dim:
      # Add a batch dimension if necessary.
      value = value[None, ...]
    if self._center:
      value -= self._mean[None, ...]
    if self._scale:
      # We cannot scale before seeing at least two samples.
      value /= tf.cond(
          self._count > 1, lambda: self._std() + 1e-8,
          lambda: tf.ones_like(self._var_sum))[None]
    if self._clip:
      value = tf.clip_by_value(value, -self._clip, self._clip)
    # Remove batch dimension if necessary.
    if no_batch_dim:
      value = value[0]
    return tf.check_numerics(value, 'value')
Normalize a single or batch tensor .
217
9
238,736
def update(self, value):
  """Update the mean and variance estimates.

  Uses a streaming update: the count is advanced first, then the mean and
  the sum of squared deviations are updated from the submitted batch.

  Args:
    value: Element of the same shape as the mean, or a batch thereof.

  Returns:
    Summary tensor of the submitted value.
  """
  with tf.name_scope(self._name + '/update'):
    if value.shape.ndims == self._mean.shape.ndims:
      # Add a batch dimension if necessary.
      value = value[None, ...]
    count = tf.shape(value)[0]
    with tf.control_dependencies([self._count.assign_add(count)]):
      step = tf.cast(self._count, tf.float32)
      mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
      new_mean = self._mean + mean_delta / step
      # Fall back to the raw value while only one sample has been seen.
      new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
      var_delta = (
          value - self._mean[None, ...]) * (value - new_mean[None, ...])
      new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
      with tf.control_dependencies([new_mean, new_var_sum]):
        update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
        with tf.control_dependencies(update):
          if value.shape.ndims == 1:
            value = tf.reduce_mean(value)
          return self._summary('value', tf.reduce_mean(value))
Update the mean and variance estimates .
337
7
238,737
def reset(self):
  """Reset the estimates of mean and variance.

  Returns:
    Operation that zeroes the count, mean, and variance sum.
  """
  with tf.name_scope(self._name + '/reset'):
    reset_ops = [
        self._count.assign(0),
        self._mean.assign(tf.zeros_like(self._mean)),
        self._var_sum.assign(tf.zeros_like(self._var_sum))]
    return tf.group(*reset_ops)
Reset the estimates of mean and variance .
84
9
238,738
def summary(self):
  """Summary string of mean and standard deviation.

  The summaries are empty strings until enough samples were submitted:
  one for the mean, two for the standard deviation.

  Returns:
    Merged summary tensor.
  """
  with tf.name_scope(self._name + '/summary'):
    has_mean = self._count > 0
    has_std = self._count > 1
    mean_summary = tf.cond(
        has_mean, lambda: self._summary('mean', self._mean), str)
    std_summary = tf.cond(
        has_std, lambda: self._summary('stddev', self._std()), str)
    return tf.summary.merge([mean_summary, std_summary])
Summary string of mean and standard deviation .
112
8
238,739
def _std(self):
  """Compute the current estimate of the standard deviation.

  Yields NaN until at least two samples were submitted.

  Returns:
    Tensor of the standard deviation.
  """
  variance = tf.cond(
      self._count > 1,
      lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
      lambda: tf.ones_like(self._var_sum) * float('nan'))
  # The epsilon corrects for small negative variance values caused by
  # the algorithm. It was empirically chosen to work with all environments
  # tested.
  return tf.sqrt(variance + 1e-4)
Computes the current estimate of the standard deviation .
113
10
238,740
def _summary(self, name, tensor):
  """Create a scalar or histogram summary matching the rank of the tensor.

  Args:
    name: Name of the summary.
    tensor: Scalar or higher-rank tensor to summarize.

  Returns:
    Summary tensor.
  """
  if tensor.shape.ndims == 0:
    return tf.summary.scalar(name, tensor)
  return tf.summary.histogram(name, tensor)
Create a scalar or histogram summary matching the rank of the tensor .
52
16
238,741
def length(self, rows=None):
  """Tensor holding the current length of episodes.

  Args:
    rows: Episodes to select lengths from; defaults to all.

  Returns:
    Batch tensor of sequence lengths.
  """
  if rows is None:
    rows = tf.range(self._capacity)
  return tf.gather(self._length, rows)
Tensor holding the current length of episodes .
39
9
238,742
def append(self, transitions, rows=None):
  """Append a batch of transitions to rows of the memory.

  Args:
    transitions: Tuple of transition quantities with batch dimension.
    rows: Episodes to append to; defaults to all.

  Returns:
    Operation that increments the episode lengths.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  assert rows.shape.ndims == 1
  # Ensure the target rows exist and are not already full.
  assert_capacity = tf.assert_less(
      rows, self._capacity, message='capacity exceeded')
  with tf.control_dependencies([assert_capacity]):
    assert_max_length = tf.assert_less(
        tf.gather(self._length, rows),
        self._max_length, message='max length exceeded')
  with tf.control_dependencies([assert_max_length]):
    # Write each transition at its episode's current timestep.
    timestep = tf.gather(self._length, rows)
    indices = tf.stack([rows, timestep], 1)
    append_ops = tools.nested.map(
        lambda var, val: tf.scatter_nd_update(var, indices, val),
        self._buffers, transitions, flatten=True)
  with tf.control_dependencies(append_ops):
    # Increment lengths of the touched episodes only.
    episode_mask = tf.reduce_sum(tf.one_hot(
        rows, self._capacity, dtype=tf.int32), 0)
    return self._length.assign_add(episode_mask)
Append a batch of transitions to rows of the memory .
269
12
238,743
def replace(self, episodes, length, rows=None):
  """Replace full episodes.

  Args:
    episodes: Tuple of transition quantities with batch and time dimensions.
    length: Batch of valid sequence lengths for the new episodes.
    rows: Episodes to replace; defaults to all.

  Returns:
    Operation that stores the new lengths.
  """
  rows = tf.range(self._capacity) if rows is None else rows
  assert rows.shape.ndims == 1
  # Ensure the target rows exist and the new episodes fit.
  assert_capacity = tf.assert_less(
      rows, self._capacity, message='capacity exceeded')
  with tf.control_dependencies([assert_capacity]):
    assert_max_length = tf.assert_less_equal(
        length, self._max_length, message='max length exceeded')
  with tf.control_dependencies([assert_max_length]):
    replace_ops = tools.nested.map(
        lambda var, val: tf.scatter_update(var, rows, val),
        self._buffers, episodes, flatten=True)
  with tf.control_dependencies(replace_ops):
    return tf.scatter_update(self._length, rows, length)
Replace full episodes .
198
5
238,744
def data(self, rows=None):
  """Access a batch of episodes from the memory.

  Args:
    rows: Episodes to select; defaults to all.

  Returns:
    Tuple of episode buffers and a batch tensor of lengths.
  """
  if rows is None:
    rows = tf.range(self._capacity)
  assert rows.shape.ndims == 1
  episode = tools.nested.map(
      lambda var: tf.gather(var, rows), self._buffers)
  return episode, tf.gather(self._length, rows)
Access a batch of episodes from the memory .
80
9
238,745
def clear(self, rows=None):
  """Reset episodes in the memory.

  Only the length variable is reset; buffer contents are left in place to
  be overwritten by future appends.

  Args:
    rows: Episodes to clear; defaults to all.

  Returns:
    Update operation.
  """
  if rows is None:
    rows = tf.range(self._capacity)
  assert rows.shape.ndims == 1
  return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
Reset episodes in the memory .
61
7
238,746
def _parse_shape(self, space):
  """Get a tensor shape from an OpenAI Gym space.

  Args:
    space: Gym space.

  Raises:
    NotImplementedError: For spaces other than Discrete and Box.

  Returns:
    Shape tuple.
  """
  if isinstance(space, gym.spaces.Discrete):
    return ()
  if isinstance(space, gym.spaces.Box):
    return space.shape
  raise NotImplementedError()
Get a tensor shape from an OpenAI Gym space.
53
12
238,747
def _parse_dtype(self, space):
  """Get a tensor dtype from an OpenAI Gym space.

  Args:
    space: Gym space.

  Raises:
    NotImplementedError: For spaces other than Discrete and Box.

  Returns:
    TensorFlow dtype.
  """
  if isinstance(space, gym.spaces.Discrete):
    return tf.int32
  if isinstance(space, gym.spaces.Box):
    return tf.float32
  raise NotImplementedError()
Get a tensor dtype from an OpenAI Gym space.
57
13
238,748
def begin_episode(self, agent_indices):
  """Reset the recurrent states and stored episode.

  Args:
    agent_indices: Tensor containing current batch indices.

  Returns:
    Summary tensor (an empty string constant).
  """
  with tf.name_scope('begin_episode/'):
    if self._last_state is None:
      reset_state = tf.no_op()
    else:
      reset_state = utility.reinit_nested_vars(
          self._last_state, agent_indices)
    reset_buffer = self._current_episodes.clear(agent_indices)
    with tf.control_dependencies([reset_state, reset_buffer]):
      return tf.constant('')
Reset the recurrent states and stored episode .
123
9
238,749
def perform(self, agent_indices, observ):
  """Compute batch of actions and a summary for a batch of observations.

  Args:
    agent_indices: Tensor containing current batch indices.
    observ: Batch tensor of observations.

  Returns:
    Tuple of action batch tensor and a summary tensor.
  """
  with tf.name_scope('perform/'):
    observ = self._observ_filter.transform(observ)
    if self._last_state is None:
      state = None
    else:
      state = tools.nested.map(
          lambda x: tf.gather(x, agent_indices), self._last_state)
    with tf.device('/gpu:0' if self._use_gpu else '/cpu:0'):
      output = self._network(
          observ[:, None], tf.ones(observ.shape[0]), state)
    # Sample during training, take the mode during evaluation.
    action = tf.cond(
        self._is_training, output.policy.sample, output.policy.mode)
    logprob = output.policy.log_prob(action)[:, 0]
    # pylint: disable=g-long-lambda
    summary = tf.cond(self._should_log, lambda: tf.summary.merge([
        tf.summary.histogram('mode', output.policy.mode()[:, 0]),
        tf.summary.histogram('action', action[:, 0]),
        tf.summary.histogram('logprob', logprob)]), str)
    # Remember current policy to append to memory in the experience callback.
    if self._last_state is None:
      assign_state = tf.no_op()
    else:
      assign_state = utility.assign_nested_vars(
          self._last_state, output.state, agent_indices)
    remember_last_action = tf.scatter_update(
        self._last_action, agent_indices, action[:, 0])
    policy_params = tools.nested.filter(
        lambda x: isinstance(x, tf.Tensor), output.policy.parameters)
    assert policy_params, 'Policy has no parameters to store.'
    remember_last_policy = tools.nested.map(
        lambda var, val: tf.scatter_update(var, agent_indices, val[:, 0]),
        self._last_policy, policy_params, flatten=True)
    with tf.control_dependencies(
        (assign_state, remember_last_action) + remember_last_policy):
      return action[:, 0], tf.identity(summary)
Compute a batch of actions and a summary for a batch of observations.
519
14
238,750
def experience(
    self, agent_indices, observ, action, reward, unused_done, unused_nextob):
  """Process the transition tuple of the current step.

  During training, dispatches to `_define_experience`; otherwise this is a
  no-op that returns an empty summary string.

  Args:
    agent_indices: Tensor containing current batch indices.
    observ: Batch tensor of observations.
    action: Batch tensor of actions.
    reward: Batch tensor of rewards.
    unused_done: Batch tensor of done flags.
    unused_nextob: Batch tensor of successor observations.

  Returns:
    Summary tensor.
  """
  with tf.name_scope('experience/'):
    return tf.cond(
        self._is_training,
        # pylint: disable=g-long-lambda
        lambda: self._define_experience(
            agent_indices, observ, action, reward),
        str)
Process the transition tuple of the current step .
88
9
238,751
def end_episode(self, agent_indices):
  """Add episodes to the memory and perform update steps if memory is full.

  During training, dispatches to `_define_end_episode`; otherwise this is a
  no-op that returns an empty summary string.

  Args:
    agent_indices: Tensor holding batch indices of the episodes that ended.

  Returns:
    Summary tensor.
  """
  with tf.name_scope('end_episode/'):
    return tf.cond(
        self._is_training,
        lambda: self._define_end_episode(agent_indices), str)
Add episodes to the memory and perform update steps if memory is full .
59
14
238,752
def _initialize_policy ( self ) : with tf . device ( '/gpu:0' if self . _use_gpu else '/cpu:0' ) : network = functools . partial ( self . _config . network , self . _config , self . _batch_env . action_space ) self . _network = tf . make_template ( 'network' , network ) output = self . _network ( tf . zeros_like ( self . _batch_env . observ ) [ : , None ] , tf . ones ( len ( self . _batch_env ) ) ) if output . policy . event_shape != self . _batch_env . action . shape [ 1 : ] : message = 'Policy event shape {} does not match action shape {}.' message = message . format ( output . policy . event_shape , self . _batch_env . action . shape [ 1 : ] ) raise ValueError ( message ) self . _policy_type = type ( output . policy ) is_tensor = lambda x : isinstance ( x , tf . Tensor ) policy_params = tools . nested . filter ( is_tensor , output . policy . parameters ) set_batch_dim = lambda x : utility . set_dimension ( x , 0 , len ( self . _batch_env ) ) tools . nested . map ( set_batch_dim , policy_params ) if output . state is not None : tools . nested . map ( set_batch_dim , output . state ) return policy_params , output . state
Initialize the policy .
329
5
238,753
def _initialize_memory ( self , policy_params ) : # We store observation, action, policy parameters, and reward. template = ( self . _batch_env . observ [ 0 ] , self . _batch_env . action [ 0 ] , tools . nested . map ( lambda x : x [ 0 , 0 ] , policy_params ) , self . _batch_env . reward [ 0 ] ) with tf . variable_scope ( 'ppo_temporary' ) : self . _current_episodes = parts . EpisodeMemory ( template , len ( self . _batch_env ) , self . _config . max_length , 'episodes' ) self . _finished_episodes = parts . EpisodeMemory ( template , self . _config . update_every , self . _config . max_length , 'memory' ) self . _num_finished_episodes = tf . Variable ( 0 , False )
Initialize temporary and permanent memory .
196
7
238,754
def _training ( self ) : with tf . device ( '/gpu:0' if self . _use_gpu else '/cpu:0' ) : with tf . name_scope ( 'training' ) : assert_full = tf . assert_equal ( self . _num_finished_episodes , self . _config . update_every ) with tf . control_dependencies ( [ assert_full ] ) : data = self . _finished_episodes . data ( ) ( observ , action , old_policy_params , reward ) , length = data # We set padding frames of the parameters to ones to prevent Gaussians # with zero variance. This would result in an infinite KL divergence, # which, even if masked out, would result in NaN gradients. old_policy_params = tools . nested . map ( lambda param : self . _mask ( param , length , 1 ) , old_policy_params ) with tf . control_dependencies ( [ tf . assert_greater ( length , 0 ) ] ) : length = tf . identity ( length ) observ = self . _observ_filter . transform ( observ ) reward = self . _reward_filter . transform ( reward ) update_summary = self . _perform_update_steps ( observ , action , old_policy_params , reward , length ) with tf . control_dependencies ( [ update_summary ] ) : penalty_summary = self . _adjust_penalty ( observ , old_policy_params , length ) with tf . control_dependencies ( [ penalty_summary ] ) : clear_memory = tf . group ( self . _finished_episodes . clear ( ) , self . _num_finished_episodes . assign ( 0 ) ) with tf . control_dependencies ( [ clear_memory ] ) : weight_summary = utility . variable_summaries ( tf . trainable_variables ( ) , self . _config . weight_summaries ) return tf . summary . merge ( [ update_summary , penalty_summary , weight_summary ] )
Perform multiple training iterations of both policy and value baseline .
439
12
238,755
def _perform_update_steps ( self , observ , action , old_policy_params , reward , length ) : return_ = utility . discounted_return ( reward , length , self . _config . discount ) value = self . _network ( observ , length ) . value if self . _config . gae_lambda : advantage = utility . lambda_advantage ( reward , value , length , self . _config . discount , self . _config . gae_lambda ) else : advantage = return_ - value mean , variance = tf . nn . moments ( advantage , axes = [ 0 , 1 ] , keep_dims = True ) advantage = ( advantage - mean ) / ( tf . sqrt ( variance ) + 1e-8 ) advantage = tf . Print ( advantage , [ tf . reduce_mean ( return_ ) , tf . reduce_mean ( value ) ] , 'return and value: ' ) advantage = tf . Print ( advantage , [ tf . reduce_mean ( advantage ) ] , 'normalized advantage: ' ) episodes = ( observ , action , old_policy_params , reward , advantage ) value_loss , policy_loss , summary = parts . iterate_sequences ( self . _update_step , [ 0. , 0. , '' ] , episodes , length , self . _config . chunk_length , self . _config . batch_size , self . _config . update_epochs , padding_value = 1 ) print_losses = tf . group ( tf . Print ( 0 , [ tf . reduce_mean ( value_loss ) ] , 'value loss: ' ) , tf . Print ( 0 , [ tf . reduce_mean ( policy_loss ) ] , 'policy loss: ' ) ) with tf . control_dependencies ( [ value_loss , policy_loss , print_losses ] ) : return summary [ self . _config . update_epochs // 2 ]
Perform multiple update steps of value function and policy .
414
11
238,756
def _update_step ( self , sequence ) : observ , action , old_policy_params , reward , advantage = sequence [ 'sequence' ] length = sequence [ 'length' ] old_policy = self . _policy_type ( * * old_policy_params ) value_loss , value_summary = self . _value_loss ( observ , reward , length ) network = self . _network ( observ , length ) policy_loss , policy_summary = self . _policy_loss ( old_policy , network . policy , action , advantage , length ) network_loss = network . get ( 'loss' , 0.0 ) loss = policy_loss + value_loss + tf . reduce_mean ( network_loss ) gradients , variables = ( zip ( * self . _optimizer . compute_gradients ( loss ) ) ) optimize = self . _optimizer . apply_gradients ( zip ( gradients , variables ) ) summary = tf . summary . merge ( [ value_summary , policy_summary , tf . summary . histogram ( 'network_loss' , network_loss ) , tf . summary . scalar ( 'avg_network_loss' , tf . reduce_mean ( network_loss ) ) , tf . summary . scalar ( 'gradient_norm' , tf . global_norm ( gradients ) ) , utility . gradient_summaries ( zip ( gradients , variables ) ) ] ) with tf . control_dependencies ( [ optimize ] ) : return [ tf . identity ( x ) for x in ( value_loss , policy_loss , summary ) ]
Compute the current combined loss and perform a gradient update step .
341
13
238,757
def _value_loss ( self , observ , reward , length ) : with tf . name_scope ( 'value_loss' ) : value = self . _network ( observ , length ) . value return_ = utility . discounted_return ( reward , length , self . _config . discount ) advantage = return_ - value value_loss = 0.5 * self . _mask ( advantage ** 2 , length ) summary = tf . summary . merge ( [ tf . summary . histogram ( 'value_loss' , value_loss ) , tf . summary . scalar ( 'avg_value_loss' , tf . reduce_mean ( value_loss ) ) ] ) value_loss = tf . reduce_mean ( value_loss ) return tf . check_numerics ( value_loss , 'value_loss' ) , summary
Compute the loss function for the value baseline .
178
10
238,758
def _policy_loss ( self , old_policy , policy , action , advantage , length ) : with tf . name_scope ( 'policy_loss' ) : kl = tf . contrib . distributions . kl_divergence ( old_policy , policy ) # Infinite values in the KL, even for padding frames that we mask out, # cause NaN gradients since TensorFlow computes gradients with respect to # the whole input tensor. kl = tf . check_numerics ( kl , 'kl' ) kl = tf . reduce_mean ( self . _mask ( kl , length ) , 1 ) policy_gradient = tf . exp ( policy . log_prob ( action ) - old_policy . log_prob ( action ) ) surrogate_loss = - tf . reduce_mean ( self . _mask ( policy_gradient * tf . stop_gradient ( advantage ) , length ) , 1 ) surrogate_loss = tf . check_numerics ( surrogate_loss , 'surrogate_loss' ) kl_penalty = self . _penalty * kl cutoff_threshold = self . _config . kl_target * self . _config . kl_cutoff_factor cutoff_count = tf . reduce_sum ( tf . cast ( kl > cutoff_threshold , tf . int32 ) ) with tf . control_dependencies ( [ tf . cond ( cutoff_count > 0 , lambda : tf . Print ( 0 , [ cutoff_count ] , 'kl cutoff! ' ) , int ) ] ) : kl_cutoff = ( self . _config . kl_cutoff_coef * tf . cast ( kl > cutoff_threshold , tf . float32 ) * ( kl - cutoff_threshold ) ** 2 ) policy_loss = surrogate_loss + kl_penalty + kl_cutoff entropy = tf . reduce_mean ( policy . entropy ( ) , axis = 1 ) if self . _config . entropy_regularization : policy_loss -= self . _config . entropy_regularization * entropy summary = tf . summary . merge ( [ tf . summary . histogram ( 'entropy' , entropy ) , tf . summary . histogram ( 'kl' , kl ) , tf . summary . histogram ( 'surrogate_loss' , surrogate_loss ) , tf . summary . histogram ( 'kl_penalty' , kl_penalty ) , tf . summary . histogram ( 'kl_cutoff' , kl_cutoff ) , tf . summary . histogram ( 'kl_penalty_combined' , kl_penalty + kl_cutoff ) , tf . summary . histogram ( 'policy_loss' , policy_loss ) , tf . summary . scalar ( 'avg_surr_loss' , tf . reduce_mean ( surrogate_loss ) ) , tf . summary . 
scalar ( 'avg_kl_penalty' , tf . reduce_mean ( kl_penalty ) ) , tf . summary . scalar ( 'avg_policy_loss' , tf . reduce_mean ( policy_loss ) ) ] ) policy_loss = tf . reduce_mean ( policy_loss , 0 ) return tf . check_numerics ( policy_loss , 'policy_loss' ) , summary
Compute the policy loss composed of multiple components .
729
10
238,759
def _adjust_penalty ( self , observ , old_policy_params , length ) : old_policy = self . _policy_type ( * * old_policy_params ) with tf . name_scope ( 'adjust_penalty' ) : network = self . _network ( observ , length ) print_penalty = tf . Print ( 0 , [ self . _penalty ] , 'current penalty: ' ) with tf . control_dependencies ( [ print_penalty ] ) : kl_change = tf . reduce_mean ( self . _mask ( tf . contrib . distributions . kl_divergence ( old_policy , network . policy ) , length ) ) kl_change = tf . Print ( kl_change , [ kl_change ] , 'kl change: ' ) maybe_increase = tf . cond ( kl_change > 1.3 * self . _config . kl_target , # pylint: disable=g-long-lambda lambda : tf . Print ( self . _penalty . assign ( self . _penalty * 1.5 ) , [ 0 ] , 'increase penalty ' ) , float ) maybe_decrease = tf . cond ( kl_change < 0.7 * self . _config . kl_target , # pylint: disable=g-long-lambda lambda : tf . Print ( self . _penalty . assign ( self . _penalty / 1.5 ) , [ 0 ] , 'decrease penalty ' ) , float ) with tf . control_dependencies ( [ maybe_increase , maybe_decrease ] ) : return tf . summary . merge ( [ tf . summary . scalar ( 'kl_change' , kl_change ) , tf . summary . scalar ( 'penalty' , self . _penalty ) ] )
Adjust the KL policy between the behavioral and current policy .
402
11
238,760
def _mask ( self , tensor , length , padding_value = 0 ) : with tf . name_scope ( 'mask' ) : range_ = tf . range ( tensor . shape [ 1 ] . value ) mask = range_ [ None , : ] < length [ : , None ] if tensor . shape . ndims > 2 : for _ in range ( tensor . shape . ndims - 2 ) : mask = mask [ ... , None ] mask = tf . tile ( mask , [ 1 , 1 ] + tensor . shape [ 2 : ] . as_list ( ) ) masked = tf . where ( mask , tensor , padding_value * tf . ones_like ( tensor ) ) return tf . check_numerics ( masked , 'masked' )
Set padding elements of a batch of sequences to a constant .
170
12
238,761
def main ( self , * args , * * kwargs ) : self . start ( * args , * * kwargs ) try : while 1 : body , message = yield self . receive ( ) handler = self . get_handler ( message ) handler ( body , message ) finally : self . stop ( * args , * * kwargs )
Implement the actor main loop by waiting forever for messages .
74
12
238,762
def send ( self , method , args = { } , to = None , nowait = False , * * kwargs ) : if to is None : to = self . routing_key r = self . call_or_cast ( method , args , routing_key = to , nowait = nowait , * * kwargs ) if not nowait : return r . get ( )
Call method on agent listening to routing_key .
83
10
238,763
def throw ( self , method , args = { } , nowait = False , * * kwargs ) : r = self . call_or_cast ( method , args , type = ACTOR_TYPE . RR , nowait = nowait , * * kwargs ) if not nowait : return r
Call method on one of the agents in round robin .
66
12
238,764
def scatter ( self , method , args = { } , nowait = False , timeout = None , * * kwargs ) : timeout = timeout if timeout is not None else self . default_timeout r = self . call_or_cast ( method , args , type = ACTOR_TYPE . SCATTER , nowait = nowait , timeout = timeout , * * kwargs ) if not nowait : return r . gather ( timeout = timeout , * * kwargs )
Broadcast method to all agents .
103
7
238,765
def call_or_cast ( self , method , args = { } , nowait = False , * * kwargs ) : return ( nowait and self . cast or self . call ) ( method , args , * * kwargs )
Apply remote method asynchronously or synchronously depending on the value of nowait .
52
17
238,766
def cast ( self , method , args = { } , declare = None , retry = None , retry_policy = None , type = None , exchange = None , * * props ) : retry = self . retry if retry is None else retry body = { 'class' : self . name , 'method' : method , 'args' : args } _retry_policy = self . retry_policy if retry_policy : # merge default and custom policies. _retry_policy = dict ( _retry_policy , * * retry_policy ) if type and type not in self . types : raise ValueError ( 'Unsupported type: {0}' . format ( type ) ) elif not type : type = ACTOR_TYPE . DIRECT props . setdefault ( 'routing_key' , self . routing_key ) props . setdefault ( 'serializer' , self . serializer ) exchange = exchange or self . type_to_exchange [ type ] ( ) declare = ( maybe_list ( declare ) or [ ] ) + [ exchange ] with producers [ self . _connection ] . acquire ( block = True ) as producer : return producer . publish ( body , exchange = exchange , declare = declare , retry = retry , retry_policy = retry_policy , * * props )
Send message to actor . Discarding replies .
287
9
238,767
def handle_call ( self , body , message ) : try : r = self . _DISPATCH ( body , ticket = message . properties [ 'reply_to' ] ) except self . Next : # don't reply, delegate to other agents. pass else : self . reply ( message , r )
Handle call message .
64
4
238,768
def _on_message ( self , body , message ) : if message . properties . get ( 'reply_to' ) : handler = self . handle_call else : handler = self . handle_cast def handle ( ) : # Do not ack the message if an exceptional error occurs, # but do ack the message if SystemExit or KeyboardInterrupt # is raised, as this is probably intended. try : handler ( body , message ) except Exception : raise except BaseException : message . ack ( ) raise else : message . ack ( ) handle ( )
What to do when a message is received .
119
9
238,769
def parse_options ( self , prog_name , arguments ) : # Don't want to load configuration to just print the version, # so we handle --version manually here. if '--version' in arguments : self . exit_status ( self . version , fh = sys . stdout ) parser = self . create_parser ( prog_name ) options , args = parser . parse_args ( arguments ) return options , args
Parse the available options .
90
6
238,770
def get ( self , * * kwargs ) : kwargs . setdefault ( 'limit' , 1 ) return self . _first ( self . gather ( * * kwargs ) )
What kind of arguments should be pass here
42
8
238,771
def _gather ( self , * args , * * kwargs ) : propagate = kwargs . pop ( 'propagate' , True ) return ( self . to_python ( reply , propagate = propagate ) for reply in self . actor . _collect_replies ( * args , * * kwargs ) )
Generator over the results
69
5
238,772
def to_python ( self , reply , propagate = True ) : try : return reply [ 'ok' ] except KeyError : error = self . Error ( * reply . get ( 'nok' ) or ( ) ) if propagate : raise error return error
Extracts the value out of the reply message .
54
11
238,773
def spawn ( self , cls , kwargs = { } , nowait = False ) : actor_id = uuid ( ) if str ( qualname ( cls ) ) == '__builtin__.unicode' : name = cls else : name = qualname ( cls ) res = self . call ( 'spawn' , { 'cls' : name , 'id' : actor_id , 'kwargs' : kwargs } , type = ACTOR_TYPE . RR , nowait = nowait ) return ActorProxy ( name , actor_id , res , agent = self , connection = self . connection , * * kwargs )
Spawn a new actor on a celery worker by sending a remote command to the worker .
143
18
238,774
def select ( self , cls , * * kwargs ) : name = qualname ( cls ) id = first_reply ( self . scatter ( 'select' , { 'cls' : name } , limit = 1 ) , cls ) return ActorProxy ( name , id , agent = self , connection = self . connection , * * kwargs )
Get the id of already spawned actor
78
7
238,775
def process_message ( self , actor , body , message ) : if actor is not self and self . is_green ( ) : self . pool . spawn_n ( actor . _on_message , body , message ) else : if not self . is_green ( ) and message . properties . get ( 'reply_to' ) : warn ( 'Starting a blocking call (%s) on actor (%s) ' 'when greenlets are disabled.' , itemgetter ( 'method' ) ( body ) , actor . __class__ ) actor . _on_message ( body , message )
Process actor message depending depending on the the worker settings .
125
11
238,776
def get_outer_frame_variables ( ) : cur_filename = inspect . getframeinfo ( inspect . currentframe ( ) ) . filename outer_frame = next ( f for f in inspect . getouterframes ( inspect . currentframe ( ) ) if f . filename != cur_filename ) variables = { } variables . update ( outer_frame . frame . f_globals ) variables . update ( outer_frame . frame . f_locals ) return variables
Get a dict of local and global variables of the first outer frame from another file .
100
17
238,777
def extract_table_names ( query ) : # a good old fashioned regex. turns out this worked better than actually parsing the code tables_blocks = re . findall ( r'(?:FROM|JOIN)\s+(\w+(?:\s*,\s*\w+)*)' , query , re . IGNORECASE ) tables = [ tbl for block in tables_blocks for tbl in re . findall ( r'\w+' , block ) ] return set ( tables )
Extract table names from an SQL query .
108
9
238,778
def write_table ( df , tablename , conn ) : with catch_warnings ( ) : filterwarnings ( 'ignore' , message = 'The provided table name \'%s\' is not found exactly as such in the database' % tablename ) to_sql ( df , name = tablename , con = conn , index = not any ( name is None for name in df . index . names ) )
Write a dataframe to the database .
90
8
238,779
def make_benchark ( n_train , n_test , n_dim = 2 ) : X_train = np . random . rand ( n_train , n_dim ) y_train = np . random . rand ( n_train ) X_test = np . random . rand ( n_test , n_dim ) res = { } for variogram_model in VARIOGRAM_MODELS : tic = time ( ) OK = OrdinaryKriging ( X_train [ : , 0 ] , X_train [ : , 1 ] , y_train , variogram_model = 'linear' , verbose = False , enable_plotting = False ) res [ 't_train_{}' . format ( variogram_model ) ] = time ( ) - tic # All the following tests are performed with the linear variogram model for backend in BACKENDS : for n_closest_points in N_MOVING_WINDOW : if backend == 'vectorized' and n_closest_points is not None : continue # this is not supported tic = time ( ) OK . execute ( 'points' , X_test [ : , 0 ] , X_test [ : , 1 ] , backend = backend , n_closest_points = n_closest_points ) res [ 't_test_{}_{}' . format ( backend , n_closest_points ) ] = time ( ) - tic return res
Compute the benchmarks for Ordianry Kriging
320
11
238,780
def print_benchmark ( n_train , n_test , n_dim , res ) : print ( '=' * 80 ) print ( ' ' * 10 , 'N_dim={}, N_train={}, N_test={}' . format ( n_dim , n_train , n_test ) ) print ( '=' * 80 ) print ( '\n' , '# Training the model' , '\n' ) print ( '|' . join ( [ '{:>11} ' . format ( el ) for el in [ 't_train (s)' ] + VARIOGRAM_MODELS ] ) ) print ( '-' * ( 11 + 2 ) * ( len ( VARIOGRAM_MODELS ) + 1 ) ) print ( '|' . join ( [ '{:>11} ' . format ( 'Training' ) ] + [ '{:>11.2} ' . format ( el ) for el in [ res [ 't_train_{}' . format ( mod ) ] for mod in VARIOGRAM_MODELS ] ] ) ) print ( '\n' , '# Predicting kriging points' , '\n' ) print ( '|' . join ( [ '{:>11} ' . format ( el ) for el in [ 't_test (s)' ] + BACKENDS ] ) ) print ( '-' * ( 11 + 2 ) * ( len ( BACKENDS ) + 1 ) ) for n_closest_points in N_MOVING_WINDOW : timing_results = [ res . get ( 't_test_{}_{}' . format ( mod , n_closest_points ) , '' ) for mod in BACKENDS ] print ( '|' . join ( [ '{:>11} ' . format ( 'N_nn=' + str ( n_closest_points ) ) ] + [ '{:>11.2} ' . format ( el ) for el in timing_results ] ) )
Print the benchmarks
442
3
238,781
def display_variogram_model ( self ) : fig = plt . figure ( ) ax = fig . add_subplot ( 111 ) ax . plot ( self . lags , self . semivariance , 'r*' ) ax . plot ( self . lags , self . variogram_function ( self . variogram_model_parameters , self . lags ) , 'k-' ) plt . show ( )
Displays variogram model with the actual binned data .
92
12
238,782
def plot_epsilon_residuals ( self ) : fig = plt . figure ( ) ax = fig . add_subplot ( 111 ) ax . scatter ( range ( self . epsilon . size ) , self . epsilon , c = 'k' , marker = '*' ) ax . axhline ( y = 0.0 ) plt . show ( )
Plots the epsilon residuals for the variogram fit .
85
14
238,783
def print_statistics ( self ) : print ( "Q1 =" , self . Q1 ) print ( "Q2 =" , self . Q2 ) print ( "cR =" , self . cR )
Prints out the Q1 Q2 and cR statistics for the variogram fit . NOTE that ideally Q1 is close to zero Q2 is close to 1 and cR is as small as possible .
48
42
238,784
def _adjust_for_anisotropy ( X , center , scaling , angle ) : center = np . asarray ( center ) [ None , : ] angle = np . asarray ( angle ) * np . pi / 180 X -= center Ndim = X . shape [ 1 ] if Ndim == 1 : raise NotImplementedError ( 'Not implemnented yet?' ) elif Ndim == 2 : stretch = np . array ( [ [ 1 , 0 ] , [ 0 , scaling [ 0 ] ] ] ) rot_tot = np . array ( [ [ np . cos ( - angle [ 0 ] ) , - np . sin ( - angle [ 0 ] ) ] , [ np . sin ( - angle [ 0 ] ) , np . cos ( - angle [ 0 ] ) ] ] ) elif Ndim == 3 : stretch = np . array ( [ [ 1. , 0. , 0. ] , [ 0. , scaling [ 0 ] , 0. ] , [ 0. , 0. , scaling [ 1 ] ] ] ) rotate_x = np . array ( [ [ 1. , 0. , 0. ] , [ 0. , np . cos ( - angle [ 0 ] ) , - np . sin ( - angle [ 0 ] ) ] , [ 0. , np . sin ( - angle [ 0 ] ) , np . cos ( - angle [ 0 ] ) ] ] ) rotate_y = np . array ( [ [ np . cos ( - angle [ 1 ] ) , 0. , np . sin ( - angle [ 1 ] ) ] , [ 0. , 1. , 0. ] , [ - np . sin ( - angle [ 1 ] ) , 0. , np . cos ( - angle [ 1 ] ) ] ] ) rotate_z = np . array ( [ [ np . cos ( - angle [ 2 ] ) , - np . sin ( - angle [ 2 ] ) , 0. ] , [ np . sin ( - angle [ 2 ] ) , np . cos ( - angle [ 2 ] ) , 0. ] , [ 0. , 0. , 1. ] ] ) rot_tot = np . dot ( rotate_z , np . dot ( rotate_y , rotate_x ) ) else : raise ValueError ( "Adjust for anisotropy function doesn't " "support ND spaces where N>3" ) X_adj = np . dot ( stretch , np . dot ( rot_tot , X . T ) ) . T X_adj += center return X_adj
Adjusts data coordinates to take into account anisotropy . Can also be used to take into account data scaling . Angles are CCW about specified axes . Scaling is applied in rotated coordinate system .
541
42
238,785
def _calculate_variogram_model ( lags , semivariance , variogram_model , variogram_function , weight ) : if variogram_model == 'linear' : x0 = [ ( np . amax ( semivariance ) - np . amin ( semivariance ) ) / ( np . amax ( lags ) - np . amin ( lags ) ) , np . amin ( semivariance ) ] bnds = ( [ 0. , 0. ] , [ np . inf , np . amax ( semivariance ) ] ) elif variogram_model == 'power' : x0 = [ ( np . amax ( semivariance ) - np . amin ( semivariance ) ) / ( np . amax ( lags ) - np . amin ( lags ) ) , 1.1 , np . amin ( semivariance ) ] bnds = ( [ 0. , 0.001 , 0. ] , [ np . inf , 1.999 , np . amax ( semivariance ) ] ) else : x0 = [ np . amax ( semivariance ) - np . amin ( semivariance ) , 0.25 * np . amax ( lags ) , np . amin ( semivariance ) ] bnds = ( [ 0. , 0. , 0. ] , [ 10. * np . amax ( semivariance ) , np . amax ( lags ) , np . amax ( semivariance ) ] ) # use 'soft' L1-norm minimization in order to buffer against # potential outliers (weird/skewed points) res = least_squares ( _variogram_residuals , x0 , bounds = bnds , loss = 'soft_l1' , args = ( lags , semivariance , variogram_function , weight ) ) return res . x
Function that fits a variogram model when parameters are not specified . Returns variogram model parameters that minimize the RMSE between the specified variogram function and the actual calculated variogram points .
418
37
238,786
def _krige ( X , y , coords , variogram_function , variogram_model_parameters , coordinates_type ) : zero_index = None zero_value = False # calculate distance between points... need a square distance matrix # of inter-measurement-point distances and a vector of distances between # measurement points (X) and the kriging point (coords) if coordinates_type == 'euclidean' : d = squareform ( pdist ( X , metric = 'euclidean' ) ) bd = np . squeeze ( cdist ( X , coords [ None , : ] , metric = 'euclidean' ) ) # geographic coordinate distances still calculated in the old way... # assume X[:, 0] ('x') => lon, X[:, 1] ('y') => lat # also assume problem is 2D; check done earlier in initializing variogram elif coordinates_type == 'geographic' : x1 , x2 = np . meshgrid ( X [ : , 0 ] , X [ : , 0 ] , sparse = True ) y1 , y2 = np . meshgrid ( X [ : , 1 ] , X [ : , 1 ] , sparse = True ) d = great_circle_distance ( x1 , y1 , x2 , y2 ) bd = great_circle_distance ( X [ : , 0 ] , X [ : , 1 ] , coords [ 0 ] * np . ones ( X . shape [ 0 ] ) , coords [ 1 ] * np . ones ( X . shape [ 0 ] ) ) # this check is done when initializing variogram, but kept here anyways... else : raise ValueError ( "Specified coordinate type '%s' " "is not supported." % coordinates_type ) # check if kriging point overlaps with measurement point if np . any ( np . absolute ( bd ) <= 1e-10 ) : zero_value = True zero_index = np . where ( bd <= 1e-10 ) [ 0 ] [ 0 ] # set up kriging matrix n = X . shape [ 0 ] a = np . zeros ( ( n + 1 , n + 1 ) ) a [ : n , : n ] = - variogram_function ( variogram_model_parameters , d ) np . fill_diagonal ( a , 0.0 ) a [ n , : ] = 1.0 a [ : , n ] = 1.0 a [ n , n ] = 0.0 # set up RHS b = np . zeros ( ( n + 1 , 1 ) ) b [ : n , 0 ] = - variogram_function ( variogram_model_parameters , bd ) if zero_value : b [ zero_index , 0 ] = 0.0 b [ n , 0 ] = 1.0 # solve res = np . linalg . solve ( a , b ) zinterp = np . sum ( res [ : n , 0 ] * y ) sigmasq = np . 
sum ( res [ : , 0 ] * - b [ : , 0 ] ) return zinterp , sigmasq
Sets up and solves the ordinary kriging system for the given coordinate pair . This function is only used for the statistics calculations .
682
27
238,787
def _find_statistics ( X , y , variogram_function , variogram_model_parameters , coordinates_type ) : delta = np . zeros ( y . shape ) sigma = np . zeros ( y . shape ) for i in range ( y . shape [ 0 ] ) : # skip the first value in the kriging problem if i == 0 : continue else : k , ss = _krige ( X [ : i , : ] , y [ : i ] , X [ i , : ] , variogram_function , variogram_model_parameters , coordinates_type ) # if the estimation error is zero, it's probably because # the evaluation point X[i, :] is really close to one of the # kriging system points in X[:i, :]... # in the case of zero estimation error, the results are not stored if np . absolute ( ss ) < eps : continue delta [ i ] = y [ i ] - k sigma [ i ] = np . sqrt ( ss ) # only use non-zero entries in these arrays... sigma is used to pull out # non-zero entries in both cases because it is guaranteed to be positive, # whereas delta can be either positive or negative delta = delta [ sigma > eps ] sigma = sigma [ sigma > eps ] epsilon = delta / sigma return delta , sigma , epsilon
Calculates variogram fit statistics . Returns the delta sigma and epsilon values for the variogram fit . These arrays are used for statistics calculations .
307
32
238,788
def fit ( self , p , x , y ) : self . regression_model . fit ( p , y ) ml_pred = self . regression_model . predict ( p ) print ( 'Finished learning regression model' ) # residual=y-ml_pred self . krige . fit ( x = x , y = y - ml_pred ) print ( 'Finished kriging residuals' )
fit the regression method and also Krige the residual
88
11
238,789
def score ( self , p , x , y , sample_weight = None ) : return r2_score ( y_pred = self . predict ( p , x ) , y_true = y , sample_weight = sample_weight )
Overloading default regression score method
51
6
238,790
def _exec_loop_moving_window ( self , a_all , bd_all , mask , bd_idx ) : import scipy . linalg . lapack npt = bd_all . shape [ 0 ] n = bd_idx . shape [ 1 ] kvalues = np . zeros ( npt ) sigmasq = np . zeros ( npt ) for i in np . nonzero ( ~ mask ) [ 0 ] : b_selector = bd_idx [ i ] bd = bd_all [ i ] a_selector = np . concatenate ( ( b_selector , np . array ( [ a_all . shape [ 0 ] - 1 ] ) ) ) a = a_all [ a_selector [ : , None ] , a_selector ] if np . any ( np . absolute ( bd ) <= self . eps ) : zero_value = True zero_index = np . where ( np . absolute ( bd ) <= self . eps ) else : zero_value = False zero_index = None b = np . zeros ( ( n + 1 , 1 ) ) b [ : n , 0 ] = - self . variogram_function ( self . variogram_model_parameters , bd ) if zero_value : b [ zero_index [ 0 ] , 0 ] = 0.0 b [ n , 0 ] = 1.0 x = scipy . linalg . solve ( a , b ) kvalues [ i ] = x [ : n , 0 ] . dot ( self . VALUES [ b_selector ] ) sigmasq [ i ] = - x [ : , 0 ] . dot ( b [ : , 0 ] ) return kvalues , sigmasq
Solves the kriging system by looping over all specified points . Uses only a certain number of closest points . Not very memory intensive but the loop is done in pure Python .
388
37
238,791
def get_frame_info ( tb , context_lines = 7 , simple = False ) : # line numbers / function / variables lineno = tb . tb_lineno function = tb . tb_frame . f_code . co_name variables = tb . tb_frame . f_locals files = { } # get filename if simple : fn = tb . tb_frame . f_code . co_filename else : fn = tb . tb_frame . f_globals . get ( '__file__' ) if not fn : fn = os . path . realpath ( inspect . getsourcefile ( tb ) or inspect . getfile ( tb ) ) if fn [ - 4 : ] in ( '.pyc' , '.pyo' ) : fn = fn [ : - 1 ] #if filename is existed, then just read the file # get loader loader = None if not os . path . exists ( fn ) : loader = tb . tb_frame . f_globals . get ( '__loader__' ) while not loader and tb . tb_next : tb = tb . tb_next loader = tb . tb_frame . f_globals . get ( '__loader__' ) # sourcecode source = '' pre_context , post_context = [ ] , [ ] context_line = raw_context_line = context_lineno = None try : if loader : source = loader . get_source ( fn ) else : if not fn in files : source = open ( fn ) . read ( ) files [ fn ] = source else : source = files [ fn ] except : pass else : try : raw_context_line = source . splitlines ( ) [ lineno - 1 ] . strip ( ) except IndexError : pass if not simple : parsed_source = highlight_python ( source ) lbound = max ( 0 , lineno - context_lines - 1 ) ubound = lineno + context_lines try : context_line = parsed_source [ lineno - 1 ] pre_context = parsed_source [ lbound : lineno - 1 ] post_context = parsed_source [ lineno : ubound ] except IndexError as e : pass context_lineno = lbound if isinstance ( fn , unicode ) : fn = fn . encode ( 'utf-8' ) return { 'tb' : tb , 'filename' : fn , 'basename' : os . path . basename ( fn ) , 'loader' : loader , 'function' : function , 'lineno' : lineno , 'vars' : variables , 'pre_context' : pre_context , 'context_line' : context_line , 'raw_context_line' : raw_context_line , 'post_context' : post_context , 'context_lineno' : context_lineno , 'source' : source }
Return a dict of information about a given traceback .
637
11
238,792
def get_html_output ( self ) : def html_splitlines ( lines ) : # this cool function was taken from trac. # http://projects.edgewall.com/trac/ open_tag_re = re . compile ( r'<(\w+)(\s.*)?[^/]?>' ) close_tag_re = re . compile ( r'</(\w+)>' ) open_tags = [ ] for line in lines : for tag in open_tags : line = tag . group ( 0 ) + line open_tags = [ ] for tag in open_tag_re . finditer ( line ) : open_tags . append ( tag ) open_tags . reverse ( ) for ctag in close_tag_re . finditer ( line ) : for otag in open_tags : if otag . group ( 1 ) == ctag . group ( 1 ) : open_tags . remove ( otag ) break for tag in open_tags : line += '</%s>' % tag . group ( 1 ) yield line if self . error : return escape ( self . raw ) . splitlines ( ) return list ( html_splitlines ( self . out . getvalue ( ) . splitlines ( ) ) )
Return line generator .
270
4
238,793
def get_columns ( model = None , fields = None , meta = None ) : if model : M = get_model ( model ) else : M = None if fields is not None : f = fields if M : if meta and hasattr ( M , meta ) : m = getattr ( model , meta ) if hasattr ( m , 'fields' ) : f = m . fields else : f = M . _fields_list else : f = M . _fields_list columns = [ ] for x in f : if isinstance ( x , str ) : # so x is field_name field_name = x elif isinstance ( x , dict ) : field_name = x [ 'name' ] else : raise UliwebError ( "Field definition is not right, it should be just like str or {'name':xxx}" ) if '.' in field_name : model_name , field_name = field_name . split ( '.' ) M = get_model ( model_name ) if not M : raise UliwebError ( "Model can't be empty, because field name not has `model.` prefix" ) if field_name in M . c : columns . append ( M . c [ field_name ] ) return columns
Get model columns list
270
4
238,794
def get_field ( name , model = None ) : if '.' in name : m , name = name . split ( '.' ) model = get_model ( m ) if model : return getattr ( model , name , None )
get model field according to name the name can be like model . column
50
14
238,795
def get_column ( name , model = None ) : if '.' in name : m , name = name . split ( '.' ) model = get_model ( m ) if model : return model . c . get ( name )
get table column according to name the name can be like model . column
49
14
238,796
def _process_file ( self , obj , fobj , field ) : from uliweb import settings paths = [ ] upload_to = self . upload_to or self . _get_upload_path ( field , 'upload_to' , obj ) if upload_to : self . fileserving . to_path = upload_to upload_to_sub = self . upload_to_sub or self . _get_upload_path ( field , 'upload_to_sub' , obj ) if upload_to_sub : paths . append ( upload_to_sub ) paths . append ( fobj [ 'filename' ] ) return self . fileserving . save_file ( os . path . join ( * paths ) , fobj [ 'file' ] , replace = self . file_replace , convert = self . file_convert )
obj is record object fobj is data field is FileField instance
181
13
238,797
def count ( self , query ) : if self . manual : return self . total if isinstance ( query , Select ) : q = query . with_only_columns ( [ func . count ( ) ] ) . order_by ( None ) . limit ( None ) . offset ( None ) return do_ ( q ) . scalar ( ) return query . count ( )
If query is Select object this function will try to get count of select
79
14
238,798
def get_data ( self , query , fields_convert_map , encoding = 'utf-8' , auto_convert = True , include_hidden = False , header = None ) : fields_convert_map = fields_convert_map or { } d = self . fields_convert_map . copy ( ) d . update ( fields_convert_map ) if isinstance ( query , Select ) : query = do_ ( query ) # def get_value(name, value, record): # convert = d.get(name) # if convert: # value = convert(value, record) # return safe_unicode(value, encoding) for record in query : self . _cal_sum ( record ) row = [ ] record = self . _get_record ( record ) if self . before_record_render : self . before_record_render ( record ) if isinstance ( record , orm . Model ) : model = record . __class__ else : model = None for i , x in enumerate ( self . table_info [ 'fields_list' ] ) : field = get_field ( x [ 'name' ] , model ) if not field : field = { 'name' : x [ 'name' ] } else : field = { 'name' : x [ 'name' ] , 'prop' : field } if not include_hidden and x . get ( 'hidden' ) : continue if isinstance ( record , orm . Model ) : v = make_view_field ( field , record , fields_convert_map = d , auto_convert = auto_convert ) else : v = make_view_field ( field , record , fields_convert_map = d , auto_convert = auto_convert , value = record [ x [ 'name' ] ] ) value = v [ 'display' ] #value = safe_unicode(v['display'], encoding) row . append ( value ) if header : ret = dict ( zip ( header , row ) ) else : ret = row yield ret total = self . _get_sum ( ) if total : row = [ ] for x in total : v = x if isinstance ( x , str ) : v = safe_unicode ( x , encoding ) row . append ( v ) if header : ret = dict ( zip ( header , row ) ) else : ret = row yield ret
If convert = True will convert field value
519
8
238,799
def objects ( self , json_result = False ) : self . rows_num = 0 query = self . query ( ) if not isinstance ( query , ( orm . Result , list , dict ) ) : query = do_ ( query ) for record in query : self . rows_num += 1 r = self . object ( record , json_result ) self . _cal_sum ( record ) yield r total = self . _render_sum ( True ) if total : yield total
Return a generator of all processed data it just like render but it ll not return a table or json format data but just data . And the data will be processed by fields_convert_map if passed .
103
42