idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
222,100
def _save_plot ( self , * args , extension = 'pdf' , * * kwargs ) : import matplotlib . pyplot as plt tmp_path = make_temp_dir ( ) filename = '{}.{}' . format ( str ( uuid . uuid4 ( ) ) , extension . strip ( '.' ) ) filepath = posixpath . join ( tmp_path , filename ) plt . savefig ( filepath , * args , * * kwargs ) return filepath
Save the plot .
112
4
222,101
def add_plot ( self , * args , extension = 'pdf' , * * kwargs ) : add_image_kwargs = { } for key in ( 'width' , 'placement' ) : if key in kwargs : add_image_kwargs [ key ] = kwargs . pop ( key ) filename = self . _save_plot ( * args , extension = extension , * * kwargs ) self . add_image ( filename , * * add_image_kwargs )
Add the current Matplotlib plot to the figure .
110
11
222,102
def add_image ( self , filename , * , width = NoEscape ( r'\linewidth' ) , placement = None ) : super ( ) . add_image ( filename , width = width , placement = placement )
Add an image to the subfigure .
49
8
222,103
def escape ( self ) : if self . _escape is not None : return self . _escape if self . _default_escape is not None : return self . _default_escape return True
Determine whether or not to escape content of this class .
40
13
222,104
def _repr_values ( self ) : def getattr_better ( obj , field ) : try : return getattr ( obj , field ) except AttributeError as e : try : return getattr ( obj , '_' + field ) except AttributeError : raise e return ( getattr_better ( self , attr ) for attr in self . _repr_attributes )
Return values that are to be shown in repr string .
84
11
222,105
def _repr_attributes ( self ) : if self . _repr_attributes_override is None : # Default to init arguments attrs = getfullargspec ( self . __init__ ) . args [ 1 : ] mapping = self . _repr_attributes_mapping if mapping : attrs = [ mapping [ a ] if a in mapping else a for a in attrs ] return attrs return self . _repr_attributes_override
Return attributes that should be part of the repr string .
102
11
222,106
def latex_name ( self ) : star = ( '*' if self . _star_latex_name else '' ) if self . _latex_name is not None : return self . _latex_name + star return self . __class__ . __name__ . lower ( ) + star
Return the name of the class used in LaTeX .
65
11
222,107
def generate_tex ( self , filepath ) : with open ( filepath + '.tex' , 'w' , encoding = 'utf-8' ) as newf : self . dump ( newf )
Generate a . tex file .
44
7
222,108
def dumps_as_content ( self ) : string = self . dumps ( ) if self . separate_paragraph or self . begin_paragraph : string = '\n\n' + string . lstrip ( '\n' ) if self . separate_paragraph or self . end_paragraph : string = string . rstrip ( '\n' ) + '\n\n' return string
Create a string representation of the object as content .
83
10
222,109
def _propagate_packages ( self ) : super ( ) . _propagate_packages ( ) for item in ( self . preamble ) : if isinstance ( item , LatexObject ) : if isinstance ( item , Container ) : item . _propagate_packages ( ) for p in item . packages : self . packages . add ( p )
r Propogate packages .
76
6
222,110
def dumps ( self ) : head = self . documentclass . dumps ( ) + '%\n' head += self . dumps_packages ( ) + '%\n' head += dumps_list ( self . variables ) + '%\n' head += dumps_list ( self . preamble ) + '%\n' return head + '%\n' + super ( ) . dumps ( )
Represent the document as a string in LaTeX syntax .
87
11
222,111
def generate_pdf ( self , filepath = None , * , clean = True , clean_tex = True , compiler = None , compiler_args = None , silent = True ) : if compiler_args is None : compiler_args = [ ] filepath = self . _select_filepath ( filepath ) filepath = os . path . join ( '.' , filepath ) cur_dir = os . getcwd ( ) dest_dir = os . path . dirname ( filepath ) basename = os . path . basename ( filepath ) if basename == '' : basename = 'default_basename' os . chdir ( dest_dir ) self . generate_tex ( basename ) if compiler is not None : compilers = ( ( compiler , [ ] ) , ) else : latexmk_args = [ '--pdf' ] compilers = ( ( 'latexmk' , latexmk_args ) , ( 'pdflatex' , [ ] ) ) main_arguments = [ '--interaction=nonstopmode' , basename + '.tex' ] os_error = None for compiler , arguments in compilers : command = [ compiler ] + arguments + compiler_args + main_arguments try : output = subprocess . check_output ( command , stderr = subprocess . STDOUT ) except ( OSError , IOError ) as e : # Use FileNotFoundError when python 2 is dropped os_error = e if os_error . errno == errno . ENOENT : # If compiler does not exist, try next in the list continue raise except subprocess . CalledProcessError as e : # For all other errors print the output and raise the error print ( e . output . decode ( ) ) raise else : if not silent : print ( output . decode ( ) ) if clean : try : # Try latexmk cleaning first subprocess . check_output ( [ 'latexmk' , '-c' , basename ] , stderr = subprocess . STDOUT ) except ( OSError , IOError , subprocess . CalledProcessError ) as e : # Otherwise just remove some file extensions. extensions = [ 'aux' , 'log' , 'out' , 'fls' , 'fdb_latexmk' ] for ext in extensions : try : os . remove ( basename + '.' + ext ) except ( OSError , IOError ) as e : # Use FileNotFoundError when python 2 is dropped if e . errno != errno . ENOENT : raise rm_temp_dir ( ) if clean_tex : os . remove ( basename + '.tex' ) # Remove generated tex file # Compilation has finished, so no further compilers have to be # tried break else : # Notify user that none of the compilers worked. raise ( CompilerError ( 'No LaTex compiler was found\n' + 'Either specify a LaTex compiler ' + 'or make sure you have latexmk or pdfLaTex installed.' ) ) os . chdir ( cur_dir )
Generate a pdf file from the document .
660
9
222,112
def _select_filepath ( self , filepath ) : if filepath is None : return self . default_filepath else : if os . path . basename ( filepath ) == '' : filepath = os . path . join ( filepath , os . path . basename ( self . default_filepath ) ) return filepath
Make a choice between filepath and self . default_filepath .
72
14
222,113
def add_color ( self , name , model , description ) : if self . color is False : self . packages . append ( Package ( "color" ) ) self . color = True self . preamble . append ( Command ( "definecolor" , arguments = [ name , model , description ] ) )
r Add a color that can be used throughout the document .
65
12
222,114
def change_length ( self , parameter , value ) : self . preamble . append ( UnsafeCommand ( 'setlength' , arguments = [ parameter , value ] ) )
r Change the length of a certain parameter to a certain value .
38
13
222,115
def set_variable ( self , name , value ) : name_arg = "\\" + name variable_exists = False for variable in self . variables : if name_arg == variable . arguments . _positional_args [ 0 ] : variable_exists = True break if variable_exists : renew = Command ( command = "renewcommand" , arguments = [ NoEscape ( name_arg ) , value ] ) self . append ( renew ) else : new = Command ( command = "newcommand" , arguments = [ NoEscape ( name_arg ) , value ] ) self . variables . append ( new )
r Add a variable which can be used inside the document .
134
12
222,116
def change ( self , * * kwargs ) : old_attrs = { } for k , v in kwargs . items ( ) : old_attrs [ k ] = getattr ( self , k , v ) setattr ( self , k , v ) yield self # allows with ... as ... for k , v in old_attrs . items ( ) : setattr ( self , k , v )
Override some attributes of the config in a specific context .
89
11
222,117
def __key ( self ) : return ( self . latex_name , self . arguments , self . options , self . extra_arguments )
Return a hashable key representing the command .
30
9
222,118
def dumps ( self ) : options = self . options . dumps ( ) arguments = self . arguments . dumps ( ) if self . extra_arguments is None : return r'\{command}{options}{arguments}' . format ( command = self . latex_name , options = options , arguments = arguments ) extra_arguments = self . extra_arguments . dumps ( ) return r'\{command}{arguments}{options}{extra_arguments}' . format ( command = self . latex_name , arguments = arguments , options = options , extra_arguments = extra_arguments )
Represent the command as a string in LaTeX syntax .
128
11
222,119
def _format_contents ( self , prefix , separator , suffix ) : params = self . _list_args_kwargs ( ) if len ( params ) <= 0 : return '' string = prefix + dumps_list ( params , escape = self . escape , token = separator ) + suffix return string
Format the parameters .
65
4
222,120
def _list_args_kwargs ( self ) : params = [ ] params . extend ( self . _positional_args ) params . extend ( [ '{k}={v}' . format ( k = k , v = v ) for k , v in self . _key_value_args . items ( ) ] ) return params
Make a list of strings representing al parameters .
73
9
222,121
def dumps_content ( self ) : import numpy as np string = '' shape = self . matrix . shape for ( y , x ) , value in np . ndenumerate ( self . matrix ) : if x : string += '&' string += str ( value ) if x == shape [ 1 ] - 1 and y != shape [ 0 ] - 1 : string += r'\\' + '%\n' super ( ) . dumps_content ( ) return string
Return a string representing the matrix in LaTeX syntax .
100
11
222,122
def change_thickness ( self , element , thickness ) : if element == "header" : self . data . append ( Command ( "renewcommand" , arguments = [ NoEscape ( r"\headrulewidth" ) , str ( thickness ) + 'pt' ] ) ) elif element == "footer" : self . data . append ( Command ( "renewcommand" , arguments = [ NoEscape ( r"\footrulewidth" ) , str ( thickness ) + 'pt' ] ) )
r Change line thickness .
112
5
222,123
def from_str ( cls , coordinate ) : m = cls . _coordinate_str_regex . match ( coordinate ) if m is None : raise ValueError ( 'invalid coordinate string' ) if m . group ( 1 ) == '++' : relative = True else : relative = False return TikZCoordinate ( float ( m . group ( 2 ) ) , float ( m . group ( 4 ) ) , relative = relative )
Build a TikZCoordinate object from a string .
95
11
222,124
def distance_to ( self , other ) : other_coord = self . _arith_check ( other ) return math . sqrt ( math . pow ( self . _x - other_coord . _x , 2 ) + math . pow ( self . _y - other_coord . _y , 2 ) )
Euclidean distance between two coordinates .
68
9
222,125
def dumps ( self ) : ret_str = [ ] ret_str . append ( Command ( 'node' , options = self . options ) . dumps ( ) ) if self . handle is not None : ret_str . append ( '({})' . format ( self . handle ) ) if self . _node_position is not None : ret_str . append ( 'at {}' . format ( str ( self . _position ) ) ) if self . _node_text is not None : ret_str . append ( '{{{text}}};' . format ( text = self . _node_text ) ) else : ret_str . append ( '{};' ) return ' ' . join ( ret_str )
Return string representation of the node .
154
7
222,126
def get_anchor_point ( self , anchor_name ) : if anchor_name in self . _possible_anchors : return TikZNodeAnchor ( self . handle , anchor_name ) else : try : anchor = int ( anchor_name . split ( '_' ) [ 1 ] ) except : anchor = None if anchor is not None : return TikZNodeAnchor ( self . handle , str ( anchor ) ) raise ValueError ( 'Invalid anchor name: "{}"' . format ( anchor_name ) )
Return an anchor point of the node if it exists .
115
11
222,127
def dumps ( self ) : ret_str = self . path_type if self . options is not None : ret_str += self . options . dumps ( ) return ret_str
Return path command representation .
38
5
222,128
def dumps ( self ) : ret_str = [ ] for item in self . _arg_list : if isinstance ( item , TikZUserPath ) : ret_str . append ( item . dumps ( ) ) elif isinstance ( item , TikZCoordinate ) : ret_str . append ( item . dumps ( ) ) elif isinstance ( item , str ) : ret_str . append ( item ) return ' ' . join ( ret_str )
Return representation of the path command .
99
7
222,129
def dumps ( self ) : ret_str = [ Command ( 'path' , options = self . options ) . dumps ( ) ] ret_str . append ( self . path . dumps ( ) ) return ' ' . join ( ret_str ) + ';'
Return a representation for the command .
56
7
222,130
def dumps ( self ) : string = Command ( 'addplot' , options = self . options ) . dumps ( ) if self . coordinates is not None : string += ' coordinates {%\n' if self . error_bar is None : for x , y in self . coordinates : # ie: "(x,y)" string += '(' + str ( x ) + ',' + str ( y ) + ')%\n' else : for ( x , y ) , ( e_x , e_y ) in zip ( self . coordinates , self . error_bar ) : # ie: "(x,y) +- (e_x,e_y)" string += '(' + str ( x ) + ',' + str ( y ) + ') +- (' + str ( e_x ) + ',' + str ( e_y ) + ')%\n' string += '};%\n%\n' elif self . func is not None : string += '{' + self . func + '};%\n%\n' if self . name is not None : string += Command ( 'addlegendentry' , self . name ) . dumps ( ) super ( ) . dumps ( ) return string
Represent the plot as a string in LaTeX syntax .
265
11
222,131
def _cost_gp ( self , x ) : m , _ , _ , _ = self . cost_model . predict_withGradients ( x ) return np . exp ( m )
Predicts the time cost of evaluating the function at x .
40
12
222,132
def _cost_gp_withGradients ( self , x ) : m , _ , dmdx , _ = self . cost_model . predict_withGradients ( x ) return np . exp ( m ) , np . exp ( m ) * dmdx
Predicts the time cost and its gradient of evaluating the function at x .
57
15
222,133
def update_cost_model ( self , x , cost_x ) : if self . cost_type == 'evaluation_time' : cost_evals = np . log ( np . atleast_2d ( np . asarray ( cost_x ) ) . T ) if self . num_updates == 0 : X_all = x costs_all = cost_evals else : X_all = np . vstack ( ( self . cost_model . model . X , x ) ) costs_all = np . vstack ( ( self . cost_model . model . Y , cost_evals ) ) self . num_updates += 1 self . cost_model . updateModel ( X_all , costs_all , None , None )
Updates the GP used to handle the cost .
164
10
222,134
def update_batches ( self , X_batch , L , Min ) : self . X_batch = X_batch if X_batch is not None : self . r_x0 , self . s_x0 = self . _hammer_function_precompute ( X_batch , L , Min , self . model )
Updates the batches internally and pre - computes the
71
11
222,135
def _hammer_function_precompute ( self , x0 , L , Min , model ) : if x0 is None : return None , None if len ( x0 . shape ) == 1 : x0 = x0 [ None , : ] m = model . predict ( x0 ) [ 0 ] pred = model . predict ( x0 ) [ 1 ] . copy ( ) pred [ pred < 1e-16 ] = 1e-16 s = np . sqrt ( pred ) r_x0 = ( m - Min ) / L s_x0 = s / L r_x0 = r_x0 . flatten ( ) s_x0 = s_x0 . flatten ( ) return r_x0 , s_x0
Pre - computes the parameters of a penalizer centered at x0 .
162
15
222,136
def _hammer_function ( self , x , x0 , r_x0 , s_x0 ) : return norm . logcdf ( ( np . sqrt ( ( np . square ( np . atleast_2d ( x ) [ : , None , : ] - np . atleast_2d ( x0 ) [ None , : , : ] ) ) . sum ( - 1 ) ) - r_x0 ) / s_x0 )
Creates the function to define the exclusion zones
101
9
222,137
def _penalized_acquisition ( self , x , model , X_batch , r_x0 , s_x0 ) : fval = - self . acq . acquisition_function ( x ) [ : , 0 ] if self . transform == 'softplus' : fval_org = fval . copy ( ) fval [ fval_org >= 40. ] = np . log ( fval_org [ fval_org >= 40. ] ) fval [ fval_org < 40. ] = np . log ( np . log1p ( np . exp ( fval_org [ fval_org < 40. ] ) ) ) elif self . transform == 'none' : fval = np . log ( fval + 1e-50 ) fval = - fval if X_batch is not None : h_vals = self . _hammer_function ( x , X_batch , r_x0 , s_x0 ) fval += - h_vals . sum ( axis = - 1 ) return fval
Creates a penalized acquisition function using hammer functions around the points collected in the batch
227
17
222,138
def acquisition_function ( self , x ) : return self . _penalized_acquisition ( x , self . model , self . X_batch , self . r_x0 , self . s_x0 )
Returns the value of the acquisition function at x .
47
10
222,139
def d_acquisition_function ( self , x ) : x = np . atleast_2d ( x ) if self . transform == 'softplus' : fval = - self . acq . acquisition_function ( x ) [ : , 0 ] scale = 1. / ( np . log1p ( np . exp ( fval ) ) * ( 1. + np . exp ( - fval ) ) ) elif self . transform == 'none' : fval = - self . acq . acquisition_function ( x ) [ : , 0 ] scale = 1. / fval else : scale = 1. if self . X_batch is None : _ , grad_acq_x = self . acq . acquisition_function_withGradients ( x ) return scale * grad_acq_x else : _ , grad_acq_x = self . acq . acquisition_function_withGradients ( x ) return scale * grad_acq_x - self . _d_hammer_function ( x , self . X_batch , self . r_x0 , self . s_x0 )
Returns the gradient of the acquisition function at x .
243
10
222,140
def acquisition_function_withGradients ( self , x ) : aqu_x = self . acquisition_function ( x ) aqu_x_grad = self . d_acquisition_function ( x ) return aqu_x , aqu_x_grad
Returns the acquisition function and its its gradient at x .
53
11
222,141
def acquisition_function ( self , x ) : f_acqu = self . _compute_acq ( x ) cost_x , _ = self . cost_withGradients ( x ) return - ( f_acqu * self . space . indicator_constraints ( x ) ) / cost_x
Takes an acquisition and weights it so the domain and cost are taken into account .
65
17
222,142
def acquisition_function_withGradients ( self , x ) : f_acqu , df_acqu = self . _compute_acq_withGradients ( x ) cost_x , cost_grad_x = self . cost_withGradients ( x ) f_acq_cost = f_acqu / cost_x df_acq_cost = ( df_acqu * cost_x - f_acqu * cost_grad_x ) / ( cost_x ** 2 ) return - f_acq_cost * self . space . indicator_constraints ( x ) , - df_acq_cost * self . space . indicator_constraints ( x )
Takes an acquisition and it gradient and weights it so the domain and cost are taken into account .
147
20
222,143
def reshape ( x , input_dim ) : x = np . array ( x ) if x . size == input_dim : x = x . reshape ( ( 1 , input_dim ) ) return x
Reshapes x into a matrix with input_dim columns
45
12
222,144
def spawn ( f ) : def fun ( pipe , x ) : pipe . send ( f ( x ) ) pipe . close ( ) return fun
Function for parallel evaluation of the acquisition function
30
8
222,145
def values_to_array ( input_values ) : if type ( input_values ) == tuple : values = np . array ( input_values ) . reshape ( - 1 , 1 ) elif type ( input_values ) == np . ndarray : values = np . atleast_2d ( input_values ) elif type ( input_values ) == int or type ( input_values ) == float or type ( np . int64 ) : values = np . atleast_2d ( np . array ( input_values ) ) else : print ( 'Type to transform not recognized' ) return values
Transforms a values of int float and tuples to a column vector numpy array
133
17
222,146
def merge_values ( values1 , values2 ) : array1 = values_to_array ( values1 ) array2 = values_to_array ( values2 ) if array1 . size == 0 : return array2 if array2 . size == 0 : return array1 merged_array = [ ] for row_array1 in array1 : for row_array2 in array2 : merged_row = np . hstack ( ( row_array1 , row_array2 ) ) merged_array . append ( merged_row ) return np . atleast_2d ( merged_array )
Merges two numpy arrays by calculating all possible combinations of rows
127
13
222,147
def normalize ( Y , normalization_type = 'stats' ) : Y = np . asarray ( Y , dtype = float ) if np . max ( Y . shape ) != Y . size : raise NotImplementedError ( 'Only 1-dimensional arrays are supported.' ) # Only normalize with non null sdev (divide by zero). For only one # data point both std and ptp return 0. if normalization_type == 'stats' : Y_norm = Y - Y . mean ( ) std = Y . std ( ) if std > 0 : Y_norm /= std elif normalization_type == 'maxmin' : Y_norm = Y - Y . min ( ) y_range = np . ptp ( Y ) if y_range > 0 : Y_norm /= y_range # A range of [-1, 1] is more natural for a zero-mean GP Y_norm = 2 * ( Y_norm - 0.5 ) else : raise ValueError ( 'Unknown normalization type: {}' . format ( normalization_type ) ) return Y_norm
Normalize the vector Y using statistics or its range .
238
11
222,148
def get_samples ( self , init_points_count ) : init_points_count = self . _adjust_init_points_count ( init_points_count ) samples = np . empty ( ( init_points_count , self . space . dimensionality ) ) # Use random design to fill non-continuous variables random_design = RandomDesign ( self . space ) random_design . fill_noncontinous_variables ( samples ) if self . space . has_continuous ( ) : X_design = multigrid ( self . space . get_continuous_bounds ( ) , self . data_per_dimension ) samples [ : , self . space . get_continuous_dims ( ) ] = X_design return samples
This method may return less points than requested . The total number of generated points is the smallest closest integer of n^d to the selected amount of points .
162
31
222,149
def get_samples_with_constraints ( self , init_points_count ) : samples = np . empty ( ( 0 , self . space . dimensionality ) ) while samples . shape [ 0 ] < init_points_count : domain_samples = self . get_samples_without_constraints ( init_points_count ) valid_indices = ( self . space . indicator_constraints ( domain_samples ) == 1 ) . flatten ( ) if sum ( valid_indices ) > 0 : valid_samples = domain_samples [ valid_indices , : ] samples = np . vstack ( ( samples , valid_samples ) ) return samples [ 0 : init_points_count , : ]
Draw random samples and only save those that satisfy constraints Finish when required number of samples is generated
162
18
222,150
def fill_noncontinous_variables ( self , samples ) : init_points_count = samples . shape [ 0 ] for ( idx , var ) in enumerate ( self . space . space_expanded ) : if isinstance ( var , DiscreteVariable ) or isinstance ( var , CategoricalVariable ) : sample_var = np . atleast_2d ( np . random . choice ( var . domain , init_points_count ) ) samples [ : , idx ] = sample_var . flatten ( ) # sample in the case of bandit variables elif isinstance ( var , BanditVariable ) : # Bandit variable is represented by a several adjacent columns in the samples array idx_samples = np . random . randint ( var . domain . shape [ 0 ] , size = init_points_count ) bandit_idx = np . arange ( idx , idx + var . domain . shape [ 1 ] ) samples [ : , bandit_idx ] = var . domain [ idx_samples , : ]
Fill sample values to non - continuous variables in place
231
10
222,151
def _get_obj ( self , space ) : obj_func = self . obj_func from . . core . task import SingleObjective return SingleObjective ( obj_func , self . config [ 'resources' ] [ 'cores' ] , space = space , unfold_args = True )
Imports the acquisition function .
64
6
222,152
def _get_space ( self ) : assert 'space' in self . config , 'The search space is NOT configured!' space_config = self . config [ 'space' ] constraint_config = self . config [ 'constraints' ] from . . core . task . space import Design_space return Design_space . fromConfig ( space_config , constraint_config )
Imports the domain .
80
5
222,153
def _get_model ( self ) : from copy import deepcopy model_args = deepcopy ( self . config [ 'model' ] ) del model_args [ 'type' ] from . . models import select_model return select_model ( self . config [ 'model' ] [ 'type' ] ) . fromConfig ( model_args )
Imports the model .
74
5
222,154
def _get_acquisition ( self , model , space ) : from copy import deepcopy acqOpt_config = deepcopy ( self . config [ 'acquisition' ] [ 'optimizer' ] ) acqOpt_name = acqOpt_config [ 'name' ] del acqOpt_config [ 'name' ] from . . optimization import AcquisitionOptimizer acqOpt = AcquisitionOptimizer ( space , acqOpt_name , * * acqOpt_config ) from . . acquisitions import select_acquisition return select_acquisition ( self . config [ 'acquisition' ] [ 'type' ] ) . fromConfig ( model , space , acqOpt , None , self . config [ 'acquisition' ] )
Imports the acquisition
159
4
222,155
def _get_acq_evaluator ( self , acq ) : from . . core . evaluators import select_evaluator from copy import deepcopy eval_args = deepcopy ( self . config [ 'acquisition' ] [ 'evaluator' ] ) del eval_args [ 'type' ] return select_evaluator ( self . config [ 'acquisition' ] [ 'evaluator' ] [ 'type' ] ) ( acq , * * eval_args )
Imports the evaluator
108
6
222,156
def _check_stop ( self , iters , elapsed_time , converged ) : r_c = self . config [ 'resources' ] stop = False if converged == 0 : stop = True if r_c [ 'maximum-iterations' ] != 'NA' and iters >= r_c [ 'maximum-iterations' ] : stop = True if r_c [ 'max-run-time' ] != 'NA' and elapsed_time / 60. >= r_c [ 'max-run-time' ] : stop = True return stop
Defines the stopping criterion .
121
6
222,157
def run ( self ) : space = self . _get_space ( ) obj_func = self . _get_obj ( space ) model = self . _get_model ( ) acq = self . _get_acquisition ( model , space ) acq_eval = self . _get_acq_evaluator ( acq ) from . . experiment_design import initial_design X_init = initial_design ( self . config [ 'initialization' ] [ 'type' ] , space , self . config [ 'initialization' ] [ 'num-eval' ] ) from . . methods import ModularBayesianOptimization bo = ModularBayesianOptimization ( model , space , obj_func , acq , acq_eval , X_init ) bo . run_optimization ( max_iter = self . config [ 'resources' ] [ 'maximum-iterations' ] , max_time = self . config [ 'resources' ] [ 'max-run-time' ] if self . config [ 'resources' ] [ 'max-run-time' ] != "NA" else np . inf , eps = self . config [ 'resources' ] [ 'tolerance' ] , verbosity = True ) return bo
Runs the optimization using the previously loaded elements .
269
10
222,158
def choose_optimizer ( optimizer_name , bounds ) : if optimizer_name == 'lbfgs' : optimizer = OptLbfgs ( bounds ) elif optimizer_name == 'DIRECT' : optimizer = OptDirect ( bounds ) elif optimizer_name == 'CMA' : optimizer = OptCma ( bounds ) else : raise InvalidVariableNameError ( 'Invalid optimizer selected.' ) return optimizer
Selects the type of local optimizer
95
8
222,159
def evaluator_creator ( self , evaluator_type , acquisition , batch_size , model_type , model , space , acquisition_optimizer ) : acquisition_transformation = self . kwargs . get ( 'acquisition_transformation' , 'none' ) if batch_size == 1 or evaluator_type == 'sequential' : return Sequential ( acquisition ) elif batch_size > 1 and ( evaluator_type == 'random' or evaluator_type is None ) : return RandomBatch ( acquisition , batch_size ) elif batch_size > 1 and evaluator_type == 'thompson_sampling' : return ThompsonBatch ( acquisition , batch_size ) elif evaluator_type == 'local_penalization' : if model_type not in [ 'GP' , 'sparseGP' , 'GP_MCMC' , 'warpedGP' ] : raise InvalidConfigError ( 'local_penalization evaluator can only be used with GP models' ) if not isinstance ( acquisition , AcquisitionLP ) : acquisition_lp = AcquisitionLP ( model , space , acquisition_optimizer , acquisition , acquisition_transformation ) return LocalPenalization ( acquisition_lp , batch_size )
Acquisition chooser from the available options . Guide the optimization through sequential or parallel evalutions of the objective .
276
23
222,160
def _compute_acq ( self , x ) : m , s = self . model . predict ( x ) f_acqu = - m + self . exploration_weight * s return f_acqu
Computes the GP - Lower Confidence Bound
43
9
222,161
def _compute_acq_withGradients ( self , x ) : m , s , dmdx , dsdx = self . model . predict_withGradients ( x ) f_acqu = - m + self . exploration_weight * s df_acqu = - dmdx + self . exploration_weight * dsdx return f_acqu , df_acqu
Computes the GP - Lower Confidence Bound and its derivative
81
12
222,162
def create_variable ( descriptor ) : if descriptor [ 'type' ] == 'continuous' : return ContinuousVariable ( descriptor [ 'name' ] , descriptor [ 'domain' ] , descriptor . get ( 'dimensionality' , 1 ) ) elif descriptor [ 'type' ] == 'bandit' : return BanditVariable ( descriptor [ 'name' ] , descriptor [ 'domain' ] , descriptor . get ( 'dimensionality' , None ) ) # bandits variables cannot be repeated elif descriptor [ 'type' ] == 'discrete' : return DiscreteVariable ( descriptor [ 'name' ] , descriptor [ 'domain' ] , descriptor . get ( 'dimensionality' , 1 ) ) elif descriptor [ 'type' ] == 'categorical' : return CategoricalVariable ( descriptor [ 'name' ] , descriptor [ 'domain' ] , descriptor . get ( 'dimensionality' , 1 ) ) else : raise InvalidConfigError ( 'Unknown variable type ' + descriptor [ 'type' ] )
Creates a variable from a dictionary descriptor
214
8
222,163
def expand ( self ) : expanded_variables = [ ] for i in range ( self . dimensionality ) : one_d_variable = deepcopy ( self ) one_d_variable . dimensionality = 1 if self . dimensionality > 1 : one_d_variable . name = '{}_{}' . format ( self . name , i + 1 ) else : one_d_variable . name = self . name one_d_variable . dimensionality_in_model = 1 expanded_variables . append ( one_d_variable ) return expanded_variables
Builds a list of single dimensional variables representing current variable .
123
12
222,164
def round ( self , value_array ) : min_value = self . domain [ 0 ] max_value = self . domain [ 1 ] rounded_value = value_array [ 0 ] if rounded_value < min_value : rounded_value = min_value elif rounded_value > max_value : rounded_value = max_value return [ rounded_value ]
If value falls within bounds just return it otherwise return min or max whichever is closer to the value Assumes an 1d array with a single element as an input .
79
33
222,165
def round ( self , value_array ) : distances = np . linalg . norm ( np . array ( self . domain ) - value_array , axis = 1 ) idx = np . argmin ( distances ) return [ self . domain [ idx ] ]
Rounds a bandit variable by selecting the closest point in the domain Closest here is defined by euclidian distance Assumes an 1d array of the same length as the single variable value
57
40
222,166
def round ( self , value_array ) : value = value_array [ 0 ] rounded_value = self . domain [ 0 ] for domain_value in self . domain : if np . abs ( domain_value - value ) < np . abs ( rounded_value - value ) : rounded_value = domain_value return [ rounded_value ]
Rounds a discrete variable by selecting the closest point in the domain Assumes an 1d array with a single element as an input .
73
27
222,167
def optimize ( self , f = None , df = None , f_df = None , duplicate_manager = None ) : self . f = f self . df = df self . f_df = f_df ## --- Update the optimizer, in case context has beee passed. self . optimizer = choose_optimizer ( self . optimizer_name , self . context_manager . noncontext_bounds ) ## --- Selecting the anchor points and removing duplicates if self . type_anchor_points_logic == max_objective_anchor_points_logic : anchor_points_generator = ObjectiveAnchorPointsGenerator ( self . space , random_design_type , f ) elif self . type_anchor_points_logic == thompson_sampling_anchor_points_logic : anchor_points_generator = ThompsonSamplingAnchorPointsGenerator ( self . space , sobol_design_type , self . model ) ## -- Select the anchor points (with context) anchor_points = anchor_points_generator . get ( duplicate_manager = duplicate_manager , context_manager = self . context_manager ) ## --- Applying local optimizers at the anchor points and update bounds of the optimizer (according to the context) optimized_points = [ apply_optimizer ( self . optimizer , a , f = f , df = None , f_df = f_df , duplicate_manager = duplicate_manager , context_manager = self . context_manager , space = self . space ) for a in anchor_points ] x_min , fx_min = min ( optimized_points , key = lambda t : t [ 1 ] ) #x_min, fx_min = min([apply_optimizer(self.optimizer, a, f=f, df=None, f_df=f_df, duplicate_manager=duplicate_manager, context_manager=self.context_manager, space = self.space) for a in anchor_points], key=lambda t:t[1]) return x_min , fx_min
Optimizes the input function .
459
7
222,168
def evaluate ( self , x ) : if self . n_procs == 1 : f_evals , cost_evals = self . _eval_func ( x ) else : try : f_evals , cost_evals = self . _syncronous_batch_evaluation ( x ) except : if not hasattr ( self , 'parallel_error' ) : print ( 'Error in parallel computation. Fall back to single process!' ) else : self . parallel_error = True f_evals , cost_evals = self . _eval_func ( x ) return f_evals , cost_evals
Performs the evaluation of the objective at x .
135
10
222,169
def _syncronous_batch_evaluation ( self , x ) : from multiprocessing import Process , Pipe # --- parallel evaluation of the function divided_samples = [ x [ i : : self . n_procs ] for i in range ( self . n_procs ) ] pipe = [ Pipe ( ) for i in range ( self . n_procs ) ] proc = [ Process ( target = spawn ( self . _eval_func ) , args = ( c , k ) ) for k , ( p , c ) in zip ( divided_samples , pipe ) ] [ p . start ( ) for p in proc ] [ p . join ( ) for p in proc ] # --- time of evaluation is set to constant (=1). This is one of the hypothesis of synchronous batch methods. f_evals = np . zeros ( ( x . shape [ 0 ] , 1 ) ) cost_evals = np . ones ( ( x . shape [ 0 ] , 1 ) ) i = 0 for ( p , c ) in pipe : f_evals [ i : : self . n_procs ] = p . recv ( ) [ 0 ] # throw away costs i += 1 return f_evals , cost_evals
Evaluates the function a x where x can be a single location or a batch . The evaluation is performed in parallel according to the number of accessible cores .
267
32
222,170
def compute_batch ( self , duplicate_manager = None , context_manager = None ) : x , _ = self . acquisition . optimize ( duplicate_manager = duplicate_manager ) return x
Selects the new location to evaluate the objective .
40
10
222,171
def _compute_acq ( self , x ) : means , stds = self . model . predict ( x ) f_acqu = 0 for m , s in zip ( means , stds ) : f_acqu += - m + self . exploration_weight * s return f_acqu / ( len ( means ) )
Integrated GP - Lower Confidence Bound
69
8
222,172
def _compute_acq_withGradients ( self , x ) : means , stds , dmdxs , dsdxs = self . model . predict_withGradients ( x ) f_acqu = None df_acqu = None for m , s , dmdx , dsdx in zip ( means , stds , dmdxs , dsdxs ) : f = - m + self . exploration_weight * s df = - dmdx + self . exploration_weight * dsdx if f_acqu is None : f_acqu = f df_acqu = df else : f_acqu += f df_acqu += df return f_acqu / ( len ( means ) ) , df_acqu / ( len ( means ) )
Integrated GP - Lower Confidence Bound and its derivative
160
11
222,173
def _expand_config_space ( self ) : self . config_space_expanded = [ ] for variable in self . config_space : variable_dic = variable . copy ( ) if 'dimensionality' in variable_dic . keys ( ) : dimensionality = variable_dic [ 'dimensionality' ] variable_dic [ 'dimensionality' ] = 1 variables_set = [ variable_dic . copy ( ) for d in range ( dimensionality ) ] k = 1 for variable in variables_set : variable [ 'name' ] = variable [ 'name' ] + '_' + str ( k ) k += 1 self . config_space_expanded += variables_set else : self . config_space_expanded += [ variable_dic ]
Expands the config input space into a list of diccionaries one for each variable_dic in which the dimensionality is always one .
168
30
222,174
def _create_variables_dic ( self ) : self . name_to_variable = { } for variable in self . space_expanded : self . name_to_variable [ variable . name ] = variable
Returns the variable by passing its name
47
7
222,175
def _translate_space ( self , space ) : self . space = [ ] self . dimensionality = 0 self . has_types = d = { t : False for t in self . supported_types } for i , d in enumerate ( space ) : descriptor = deepcopy ( d ) descriptor [ 'name' ] = descriptor . get ( 'name' , 'var_' + str ( i ) ) descriptor [ 'type' ] = descriptor . get ( 'type' , 'continuous' ) if 'domain' not in descriptor : raise InvalidConfigError ( 'Domain attribute is missing for variable ' + descriptor [ 'name' ] ) variable = create_variable ( descriptor ) self . space . append ( variable ) self . dimensionality += variable . dimensionality self . has_types [ variable . type ] = True # Check if there are any bandit and non-bandit variables together in the space if any ( v . is_bandit ( ) for v in self . space ) and any ( not v . is_bandit ( ) for v in self . space ) : raise InvalidConfigError ( 'Invalid mixed domain configuration. Bandit variables cannot be mixed with other types.' )
Translates a list of dictionaries into internal list of variables
252
13
222,176
def _expand_space ( self ) : ## --- Expand the config space self . _expand_config_space ( ) ## --- Expand the space self . space_expanded = [ ] for variable in self . space : self . space_expanded += variable . expand ( )
Creates an internal list where the variables with dimensionality larger than one are expanded . This list is the one that is used internally to do the optimization .
60
31
222,177
def objective_to_model ( self , x_objective ) : x_model = [ ] for k in range ( self . objective_dimensionality ) : variable = self . space_expanded [ k ] new_entry = variable . objective_to_model ( x_objective [ 0 , k ] ) x_model += new_entry return x_model
This function serves as interface between objective input vectors and model input vectors
78
13
222,178
def model_to_objective ( self , x_model ) : idx_model = 0 x_objective = [ ] for idx_obj in range ( self . objective_dimensionality ) : variable = self . space_expanded [ idx_obj ] new_entry = variable . model_to_objective ( x_model , idx_model ) x_objective += new_entry idx_model += variable . dimensionality_in_model return x_objective
This function serves as interface between model input vectors and objective input vectors
106
13
222,179
def get_subspace ( self , dims ) : subspace = [ ] k = 0 for variable in self . space_expanded : if k in dims : subspace . append ( variable ) k += variable . dimensionality_in_model return subspace
Extracts subspace from the reference of a list of variables in the inputs of the model .
56
20
222,180
def indicator_constraints ( self , x ) : x = np . atleast_2d ( x ) I_x = np . ones ( ( x . shape [ 0 ] , 1 ) ) if self . constraints is not None : for d in self . constraints : try : exec ( 'constraint = lambda x:' + d [ 'constraint' ] , globals ( ) ) ind_x = ( constraint ( x ) <= 0 ) * 1 I_x *= ind_x . reshape ( x . shape [ 0 ] , 1 ) except : print ( 'Fail to compile the constraint: ' + str ( d ) ) raise return I_x
Returns array of ones and zeros indicating if x is within the constraints
144
14
222,181
def input_dim ( self ) : n_cont = len ( self . get_continuous_dims ( ) ) n_disc = len ( self . get_discrete_dims ( ) ) return n_cont + n_disc
Extracts the input dimension of the domain .
52
10
222,182
def round_optimum ( self , x ) : x = np . array ( x ) if not ( ( x . ndim == 1 ) or ( x . ndim == 2 and x . shape [ 0 ] == 1 ) ) : raise ValueError ( "Unexpected dimentionality of x. Got {}, expected (1, N) or (N,)" . format ( x . ndim ) ) if x . ndim == 2 : x = x [ 0 ] x_rounded = [ ] value_index = 0 for variable in self . space_expanded : var_value = x [ value_index : value_index + variable . dimensionality_in_model ] var_value_rounded = variable . round ( var_value ) x_rounded . append ( var_value_rounded ) value_index += variable . dimensionality_in_model return np . atleast_2d ( np . concatenate ( x_rounded ) )
Rounds some value x to a feasible value in the design space . x is expected to be a vector or an array with a single row
203
28
222,183
def get_continuous_bounds ( self ) : bounds = [ ] for d in self . space : if d . type == 'continuous' : bounds . extend ( [ d . domain ] * d . dimensionality ) return bounds
Extracts the bounds of the continuous variables .
50
10
222,184
def get_continuous_dims ( self ) : continuous_dims = [ ] for i in range ( self . dimensionality ) : if self . space_expanded [ i ] . type == 'continuous' : continuous_dims += [ i ] return continuous_dims
Returns the dimension of the continuous components of the domain .
61
11
222,185
def get_discrete_grid ( self ) : sets_grid = [ ] for d in self . space : if d . type == 'discrete' : sets_grid . extend ( [ d . domain ] * d . dimensionality ) return np . array ( list ( itertools . product ( * sets_grid ) ) )
Computes a Numpy array with the grid of points that results after crossing the possible outputs of the discrete variables
71
22
222,186
def get_discrete_dims ( self ) : discrete_dims = [ ] for i in range ( self . dimensionality ) : if self . space_expanded [ i ] . type == 'discrete' : discrete_dims += [ i ] return discrete_dims
Returns the dimension of the discrete components of the domain .
61
11
222,187
def get_bandit ( self ) : arms_bandit = [ ] for d in self . space : if d . type == 'bandit' : arms_bandit += tuple ( map ( tuple , d . domain ) ) return np . asarray ( arms_bandit )
Extracts the arms of the bandit if any .
60
12
222,188
def predict ( self , X ) : X = np . atleast_2d ( X ) m = np . empty ( shape = ( 0 , 1 ) ) s = np . empty ( shape = ( 0 , 1 ) ) for k in range ( X . shape [ 0 ] ) : preds = [ ] for pred in self . model . estimators_ : preds . append ( pred . predict ( X [ k , : ] ) [ 0 ] ) m = np . vstack ( ( m , np . array ( preds ) . mean ( ) ) ) s = np . vstack ( ( s , np . array ( preds ) . std ( ) ) ) return m , s
Predictions with the model . Returns posterior means and standard deviations at X .
148
15
222,189
def _compute_acq ( self , x ) : means , stds = self . model . predict ( x ) fmins = self . model . get_fmin ( ) f_acqu = 0 for m , s , fmin in zip ( means , stds , fmins ) : _ , Phi , _ = get_quantiles ( self . jitter , fmin , m , s ) f_acqu += Phi return f_acqu / len ( means )
Integrated Expected Improvement
100
5
222,190
def _compute_acq_withGradients ( self , x ) : means , stds , dmdxs , dsdxs = self . model . predict_withGradients ( x ) fmins = self . model . get_fmin ( ) f_acqu = None df_acqu = None for m , s , fmin , dmdx , dsdx in zip ( means , stds , fmins , dmdxs , dsdxs ) : phi , Phi , u = get_quantiles ( self . jitter , fmin , m , s ) f = Phi df = - ( phi / s ) * ( dmdx + dsdx * u ) if f_acqu is None : f_acqu = f df_acqu = df else : f_acqu += f df_acqu += df return f_acqu / ( len ( means ) ) , df_acqu / ( len ( means ) )
Integrated Expected Improvement and its derivative
199
8
222,191
def plot_convergence ( Xdata , best_Y , filename = None ) : n = Xdata . shape [ 0 ] aux = ( Xdata [ 1 : n , : ] - Xdata [ 0 : n - 1 , : ] ) ** 2 distances = np . sqrt ( aux . sum ( axis = 1 ) ) ## Distances between consecutive x's plt . figure ( figsize = ( 10 , 5 ) ) plt . subplot ( 1 , 2 , 1 ) plt . plot ( list ( range ( n - 1 ) ) , distances , '-ro' ) plt . xlabel ( 'Iteration' ) plt . ylabel ( 'd(x[n], x[n-1])' ) plt . title ( 'Distance between consecutive x\'s' ) grid ( True ) # Estimated m(x) at the proposed sampling points plt . subplot ( 1 , 2 , 2 ) plt . plot ( list ( range ( n ) ) , best_Y , '-o' ) plt . title ( 'Value of the best selected sample' ) plt . xlabel ( 'Iteration' ) plt . ylabel ( 'Best y' ) grid ( True ) if filename != None : savefig ( filename ) else : plt . show ( )
Plots to evaluate the convergence of standard Bayesian optimization algorithms
279
12
222,192
def compute_batch ( self , duplicate_manager = None , context_manager = None ) : from . . . acquisitions import AcquisitionLP assert isinstance ( self . acquisition , AcquisitionLP ) self . acquisition . update_batches ( None , None , None ) # --- GET first element in the batch X_batch = self . acquisition . optimize ( ) [ 0 ] k = 1 if self . batch_size > 1 : # ---------- Approximate the constants of the the method L = estimate_L ( self . acquisition . model . model , self . acquisition . space . get_bounds ( ) ) Min = self . acquisition . model . model . Y . min ( ) # --- GET the remaining elements while k < self . batch_size : self . acquisition . update_batches ( X_batch , L , Min ) new_sample = self . acquisition . optimize ( ) [ 0 ] X_batch = np . vstack ( ( X_batch , new_sample ) ) k += 1 # --- Back to the non-penalized acquisition self . acquisition . update_batches ( None , None , None ) return X_batch
Computes the elements of the batch sequentially by penalizing the acquisition .
240
15
222,193
def check_notebooks_for_errors ( notebooks_directory ) : print ( "Checking notebooks in directory {} for errors" . format ( notebooks_directory ) ) failed_notebooks_count = 0 for file in os . listdir ( notebooks_directory ) : if file . endswith ( ".ipynb" ) : print ( "Checking notebook " + file ) full_file_path = os . path . join ( notebooks_directory , file ) output , errors = run_notebook ( full_file_path ) if errors is not None and len ( errors ) > 0 : failed_notebooks_count += 1 print ( "Errors in notebook " + file ) print ( errors ) if failed_notebooks_count == 0 : print ( "No errors found in notebooks under " + notebooks_directory )
Evaluates all notebooks in given directory and prints errors if any
174
13
222,194
def predict ( self , X , with_noise = True ) : m , v = self . _predict ( X , False , with_noise ) # We can take the square root because v is just a diagonal matrix of variances return m , np . sqrt ( v )
Predictions with the model . Returns posterior means and standard deviations at X . Note that this is different in GPy where the variances are given .
61
30
222,195
def predict_covariance ( self , X , with_noise = True ) : _ , v = self . _predict ( X , True , with_noise ) return v
Predicts the covariance matric for points in X .
41
12
222,196
def predict_withGradients ( self , X ) : if X . ndim == 1 : X = X [ None , : ] m , v = self . model . predict ( X ) v = np . clip ( v , 1e-10 , np . inf ) dmdx , dvdx = self . model . predictive_gradients ( X ) dmdx = dmdx [ : , : , 0 ] dsdx = dvdx / ( 2 * np . sqrt ( v ) ) return m , np . sqrt ( v ) , dmdx , dsdx
Returns the mean standard deviation mean gradient and standard deviation gradient at X .
127
14
222,197
def predict ( self , X ) : if X . ndim == 1 : X = X [ None , : ] ps = self . model . param_array . copy ( ) means = [ ] stds = [ ] for s in self . hmc_samples : if self . model . _fixes_ is None : self . model [ : ] = s else : self . model [ self . model . _fixes_ ] = s self . model . _trigger_params_changed ( ) m , v = self . model . predict ( X ) means . append ( m ) stds . append ( np . sqrt ( np . clip ( v , 1e-10 , np . inf ) ) ) self . model . param_array [ : ] = ps self . model . _trigger_params_changed ( ) return means , stds
Predictions with the model for all the MCMC samples . Returns posterior means and standard deviations at X . Note that this is different in GPy where the variances are given .
178
36
222,198
def get_fmin ( self ) : ps = self . model . param_array . copy ( ) fmins = [ ] for s in self . hmc_samples : if self . model . _fixes_ is None : self . model [ : ] = s else : self . model [ self . model . _fixes_ ] = s self . model . _trigger_params_changed ( ) fmins . append ( self . model . predict ( self . model . X ) [ 0 ] . min ( ) ) self . model . param_array [ : ] = ps self . model . _trigger_params_changed ( ) return fmins
Returns the location where the posterior mean is takes its minimal value .
137
13
222,199
def predict_withGradients ( self , X ) : if X . ndim == 1 : X = X [ None , : ] ps = self . model . param_array . copy ( ) means = [ ] stds = [ ] dmdxs = [ ] dsdxs = [ ] for s in self . hmc_samples : if self . model . _fixes_ is None : self . model [ : ] = s else : self . model [ self . model . _fixes_ ] = s self . model . _trigger_params_changed ( ) m , v = self . model . predict ( X ) std = np . sqrt ( np . clip ( v , 1e-10 , np . inf ) ) dmdx , dvdx = self . model . predictive_gradients ( X ) dmdx = dmdx [ : , : , 0 ] dsdx = dvdx / ( 2 * std ) means . append ( m ) stds . append ( std ) dmdxs . append ( dmdx ) dsdxs . append ( dsdx ) self . model . param_array [ : ] = ps self . model . _trigger_params_changed ( ) return means , stds , dmdxs , dsdxs
Returns the mean standard deviation mean gradient and standard deviation gradient at X for all the MCMC samples .
271
20