idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
44,800
def go_up(self):
    """Move the selection up one item, wrapping to the last item when
    already at the top, then redraw the menu."""
    if self.current_option > 0:
        self.current_option -= 1
    else:
        # Already at the first item: wrap around to the last one.
        self.current_option = len(self.items) - 1
    self.draw()
Go up one wrap to end if necessary
44,801
def get_selection(cls, strings, title="Select an option", subtitle=None, exit_option=True, _menu=None):
    """Single-method way of getting a selection out of a list of strings.

    Builds a menu over ``strings``, optionally registers it with ``_menu``,
    runs it to completion, and returns the option the user selected.
    """
    selection_menu = cls(strings, title, subtitle, exit_option)
    if _menu is not None:
        _menu.append(selection_menu)
    selection_menu.show()
    selection_menu.join()
    return selection_menu.selected_option
Single - method way of getting a selection out of a list of strings .
44,802
def ensure_object_is_ordered_dict(item, title):
    """Raise a TypeError unless `item` is an OrderedDict.

    `title` is the name used for `item` in the error message.
    """
    assert isinstance(title, str)
    if isinstance(item, OrderedDict):
        return None
    raise TypeError(
        "{} must be an OrderedDict. {} passed instead.".format(title, type(item)))
Checks that the item is an OrderedDict. If not, raises TypeError.
44,803
def ensure_object_is_string(item, title):
    """Raise a TypeError unless `item` is a string.

    `title` is the name used for `item` in the error message.
    """
    assert isinstance(title, str)
    if isinstance(item, str):
        return None
    raise TypeError(
        "{} must be a string. {} passed instead.".format(title, type(item)))
Checks that the item is a string. If not, raises TypeError.
44,804
def ensure_object_is_ndarray(item, title):
    """Raise a TypeError unless `item` is a dense numpy ndarray.

    `title` is the name used for `item` in the error message.
    """
    assert isinstance(title, str)
    if isinstance(item, np.ndarray):
        return None
    raise TypeError(
        "{} must be a np.ndarray. {} passed instead.".format(title, type(item)))
Ensures that a given mapping matrix is a dense numpy array . Raises a helpful TypeError if otherwise .
44,805
def ensure_columns_are_in_dataframe(columns, dataframe, col_title='', data_title='data'):
    """Raise a ValueError if any member of `columns` is missing from
    `dataframe.columns`.

    `col_title` and `data_title` customize the error message wording.
    """
    assert isinstance(columns, Iterable)
    assert isinstance(dataframe, pd.DataFrame)
    assert isinstance(col_title, str)
    assert isinstance(data_title, str)
    missing_cols = [col for col in columns if col not in dataframe.columns]
    if not missing_cols:
        return None
    if col_title == '':
        final_msg = "{} not in {}.columns".format(missing_cols, data_title)
    else:
        final_msg = ("The following columns in {} are not in {}.columns: {}"
                     .format(col_title, data_title, missing_cols))
    raise ValueError(final_msg)
Checks whether each column in columns is in dataframe . Raises ValueError if any of the columns are not in the dataframe .
44,806
def check_argument_type(long_form, specification_dict):
    """Raise a TypeError unless `long_form` is a pandas DataFrame and
    `specification_dict` is an OrderedDict."""
    if not isinstance(long_form, pd.DataFrame):
        raise TypeError(
            "long_form should be a pandas dataframe. It is a {}".format(type(long_form)))
    ensure_object_is_ordered_dict(specification_dict, "specification_dict")
    return None
Ensures that long_form is a pandas dataframe and that specification_dict is an OrderedDict, raising a TypeError otherwise.
44,807
def ensure_alt_id_in_long_form(alt_id_col, long_form):
    """Raise a ValueError unless `alt_id_col` is one of `long_form`'s columns."""
    if alt_id_col in long_form.columns:
        return None
    raise ValueError(
        "alt_id_col == {} is not a column in long_form.".format(alt_id_col))
Ensures alt_id_col is in long_form and raises a ValueError if not .
44,808
def ensure_specification_cols_are_in_dataframe(specification, dataframe):
    """Raise a ValueError if any key of `specification` is missing from
    `dataframe.columns`; raise a TypeError if `specification` is not an
    OrderedDict."""
    if not isinstance(specification, OrderedDict):
        raise TypeError("`specification` must be an OrderedDict.")
    assert isinstance(dataframe, pd.DataFrame)
    known_cols = dataframe.columns
    problem_cols = [key for key in specification if key not in known_cols]
    if problem_cols:
        raise ValueError(
            "The following keys in the specification are not in 'data':\n{}"
            .format(problem_cols))
    return None
Checks whether each column in specification is in dataframe . Raises ValueError if any of the columns are not in the dataframe .
44,809
def check_keys_and_values_of_name_dictionary(names, specification_dict, num_alts):
    """Check the validity of the keys and values in the names dictionary.

    The keys of `names` must exactly match those of `specification_dict`.
    For each key, the required shape of `names[key]` depends on the
    corresponding specification value:
    - a list: names must be a list of strings of equal length;
    - "all_same": names must be a single string;
    - anything else: names must be a list with `num_alts` elements.
    Raises ValueError (or TypeError for the "all_same" case) otherwise.
    """
    if names.keys() != specification_dict.keys():
        msg = "names.keys() does not equal specification_dict.keys()"
        raise ValueError(msg)
    for key in names:
        specification = specification_dict[key]
        name_object = names[key]
        if isinstance(specification, list):
            # Per-element names: one string per element of the spec list.
            try:
                assert isinstance(name_object, list)
                assert len(name_object) == len(specification)
                assert all([isinstance(x, str) for x in name_object])
            except AssertionError:
                msg = "names[{}] must be a list AND it must have the same"
                msg_2 = " number of strings as there are elements of the"
                msg_3 = " corresponding list in specification_dict"
                raise ValueError(msg.format(key) + msg_2 + msg_3)
        else:
            if specification == "all_same":
                # One coefficient shared across alternatives: a single name.
                if not isinstance(name_object, str):
                    msg = "names[{}] should be a string".format(key)
                    raise TypeError(msg)
            else:
                # Presumably the "all_diff" case: one name per alternative.
                try:
                    assert isinstance(name_object, list)
                    assert len(name_object) == num_alts
                except AssertionError:
                    msg_1 = "names[{}] should be a list with {} elements,"
                    msg_2 = " 1 element for each possible alternative"
                    msg = (msg_1.format(key, num_alts) + msg_2)
                    raise ValueError(msg)
    return None
Check the validity of the keys and values in the names dictionary .
44,810
def ensure_all_columns_are_used(num_vars_accounted_for, dataframe, data_title='long_data'):
    """Compare `num_vars_accounted_for` against the number of distinct columns
    in `dataframe` and issue a UserWarning when they disagree.

    Fix: the "too many variables" warning previously read "more variable
    specified"; corrected to "more variables specified".
    """
    dataframe_vars = set(dataframe.columns.tolist())
    num_dataframe_vars = len(dataframe_vars)
    if num_vars_accounted_for == num_dataframe_vars:
        pass
    elif num_vars_accounted_for < num_dataframe_vars:
        # Some dataframe columns were not mentioned by any input.
        msg = "Note, there are {:,} variables in {} but the inputs"
        msg_2 = " ind_vars, alt_specific_vars, and subset_specific_vars only"
        msg_3 = " account for {:,} variables."
        warnings.warn(msg.format(num_dataframe_vars, data_title) +
                      msg_2 + msg_3.format(num_vars_accounted_for))
    else:
        # More variables were specified than exist in the dataframe.
        msg = "There are more variables specified in ind_vars, "
        msg_2 = "alt_specific_vars, and subset_specific_vars ({:,}) than there"
        msg_3 = " are variables in {} ({:,})"
        warnings.warn(msg + msg_2.format(num_vars_accounted_for) +
                      msg_3.format(data_title, num_dataframe_vars))
    return None
Ensure that all of the columns from dataframe are in the list of used_cols. Will issue a helpful UserWarning otherwise.
44,811
def check_dataframe_for_duplicate_records(obs_id_col, alt_id_col, df):
    """Raise a ValueError if any (observation id, alternative id) pair
    appears more than once in `df`."""
    duplicate_mask = df.duplicated(subset=[obs_id_col, alt_id_col])
    if duplicate_mask.any():
        raise ValueError("One or more observation-alternative_id pairs is not unique.")
    return None
Checks a cross - sectional dataframe of long - format data for duplicate observations . Duplicate observations are defined as rows with the same observation id value and the same alternative id value .
44,812
def ensure_num_chosen_alts_equals_num_obs(obs_id_col, choice_col, df):
    """Raise a ValueError unless the total of the choice column equals the
    number of unique observation ids (i.e. exactly one choice per
    observation)."""
    num_obs = df[obs_id_col].unique().shape[0]
    num_choices = df[choice_col].sum()
    if num_choices < num_obs:
        raise ValueError("One or more observations have not chosen one "
                         "of the alternatives available to him/her")
    if num_choices > num_obs:
        raise ValueError("One or more observations has chosen multiple alternatives")
    return None
Checks that the total number of recorded choices equals the total number of observations . If this is not the case raise helpful ValueError messages .
44,813
def check_type_and_values_of_alt_name_dict(alt_name_dict, alt_id_col, df):
    """Raise a TypeError unless `alt_name_dict` is a dict, and a ValueError
    unless every one of its keys appears in `df[alt_id_col]`."""
    if not isinstance(alt_name_dict, dict):
        msg = "alt_name_dict should be a dictionary. Passed value was a {}"
        raise TypeError(msg.format(type(alt_name_dict)))
    observed_alt_ids = df[alt_id_col].values
    if not all([key in observed_alt_ids for key in alt_name_dict.keys()]):
        raise ValueError("One or more of alt_name_dict's keys are not "
                         "in long_data[alt_id_col]")
    return None
Ensures that alt_name_dict is a dictionary and that its keys are in the alternative id column of df . Raises helpful errors if either condition is not met .
44,814
def ensure_ridge_is_scalar_or_none(ridge):
    """Raise a TypeError unless `ridge` is None or a numeric scalar.

    Fix: the two message fragments were concatenated without a separating
    space, producing "...long.The passed value..."; a space now separates
    the sentences.
    """
    if (ridge is not None) and not isinstance(ridge, Number):
        msg_1 = "ridge should be None or an int, float, or long."
        msg_2 = " The passed value of ridge had type: {}".format(type(ridge))
        raise TypeError(msg_1 + msg_2)
    return None
Ensures that ridge is either None or a scalar value . Raises a helpful TypeError otherwise .
44,815
def get_original_order_unique_ids(id_array):
    """Return the unique values of 1D `id_array` in order of first appearance."""
    assert isinstance(id_array, np.ndarray)
    assert id_array.ndim == 1
    # np.unique sorts its output, but also reports the first index of each
    # unique value; sorting those indices recovers appearance order.
    _, first_positions = np.unique(id_array, return_index=True)
    return id_array[np.sort(first_positions)]
Get the unique ids of id_array in their original order of appearance.
44,816
def create_sparse_mapping(id_array, unique_ids=None):
    """Build a scipy CSR matrix of shape (len(id_array), len(unique_ids))
    with a 1 in row i, column j whenever id_array[i] equals unique_ids[j].

    When `unique_ids` is None, it defaults to the unique values of
    `id_array` in first-appearance order.
    """
    if unique_ids is None:
        unique_ids = get_original_order_unique_ids(id_array)
    assert isinstance(unique_ids, np.ndarray)
    assert isinstance(id_array, np.ndarray)
    assert unique_ids.ndim == 1
    assert id_array.ndim == 1
    # Rows whose id has no matching column simply stay all-zero.
    has_match = np.in1d(id_array, unique_ids)
    num_rows, num_cols = id_array.size, unique_ids.size
    id_to_col = dict(zip(unique_ids, np.arange(num_cols)))
    row_indices = np.arange(num_rows)[has_match]
    col_indices = np.array([id_to_col[value] for value in id_array[has_match]])
    cell_values = np.ones(has_match.sum(), dtype=int)
    return csr_matrix((cell_values, (row_indices, col_indices)),
                      shape=(num_rows, num_cols))
Will create a scipy . sparse compressed - sparse - row matrix that maps each row represented by an element in id_array to the corresponding value of the unique ids in id_array .
44,817
def check_wide_data_for_blank_choices(choice_col, wide_data):
    """Raise a ValueError when `wide_data[choice_col]` contains null values."""
    has_nulls = wide_data[choice_col].isnull().any()
    if not has_nulls:
        return None
    raise ValueError("One or more of the values in wide_data[choice_col] is null."
                     " Remove null values in the choice column or fill them in.")
Checks wide_data for null values in the choice column and raises a helpful ValueError if null values are found .
44,818
def ensure_unique_obs_ids_in_wide_data(obs_id_col, wide_data):
    """Raise a ValueError unless `wide_data` has exactly one row per
    observation id."""
    num_unique_ids = len(wide_data[obs_id_col].unique())
    if num_unique_ids != wide_data.shape[0]:
        raise ValueError("The values in wide_data[obs_id_col] are not unique, "
                         "but they need to be.")
    return None
Ensures that there is one observation per row in wide_data . Raises a helpful ValueError if otherwise .
44,819
def ensure_chosen_alternatives_are_in_user_alt_ids(choice_col, wide_data, availability_vars):
    """Raise a ValueError if any chosen alternative id in
    `wide_data[choice_col]` is absent from `availability_vars.keys()`."""
    all_known = wide_data[choice_col].isin(availability_vars.keys()).all()
    if not all_known:
        raise ValueError("One or more values in wide_data[choice_col] is not in the user "
                         "provided alternative ids in availability_vars.keys()")
    return None
Ensures that all chosen alternatives in wide_df are present in the availability_vars dict . Raises a helpful ValueError if not .
44,820
def ensure_each_wide_obs_chose_an_available_alternative(obs_id_col, choice_col, availability_vars, wide_data):
    """Check that each observation with a restricted choice set chose an
    alternative that was available to it; raise ValueError listing the
    offending observation ids otherwise.

    `availability_vars` maps alternative ids to the names of the 0/1
    availability columns in `wide_data`.
    """
    wide_availability_values = wide_data[list(availability_vars.values())].values
    # True for rows where at least one alternative is unavailable; only those
    # rows need a per-row check below.
    unavailable_condition = ((wide_availability_values == 0).sum(axis=1).astype(bool))
    problem_obs = []
    for idx, row in wide_data.loc[unavailable_condition].iterrows():
        # Look up the availability flag of the alternative this row chose.
        if row.at[availability_vars[row.at[choice_col]]] != 1:
            problem_obs.append(row.at[obs_id_col])
    if problem_obs != []:
        msg = "The following observations chose unavailable alternatives:\n{}"
        raise ValueError(msg.format(problem_obs))
    return None
Checks whether or not each observation with a restricted choice set chose an alternative that was personally available to him or her . Will raise a helpful ValueError if this is not the case .
44,821
def ensure_all_wide_alt_ids_are_chosen(choice_col, alt_specific_vars, availability_vars, wide_data):
    """Check that every alternative id referenced in `availability_vars` and
    `alt_specific_vars` is observed in `wide_data[choice_col]`; raise a
    ValueError naming the offending input otherwise.
    """
    sorted_alt_ids = np.sort(wide_data[choice_col].unique())
    # The try/assert pattern lets `problem_type` and `problem_ids` record
    # which input failed; the except clause formats them into the error.
    try:
        problem_ids = [x for x in availability_vars if x not in sorted_alt_ids]
        problem_type = "availability_vars"
        assert problem_ids == []
        problem_ids = []
        for new_column in alt_specific_vars:
            for alt_id in alt_specific_vars[new_column]:
                if alt_id not in sorted_alt_ids and alt_id not in problem_ids:
                    problem_ids.append(alt_id)
        problem_type = "alt_specific_vars"
        assert problem_ids == []
    except AssertionError:
        msg = "The following alternative ids from {} are not "
        msg_2 = "observed in wide_data[choice_col]:\n{}"
        raise ValueError(msg.format(problem_type) + msg_2.format(problem_ids))
    return None
Checks to make sure all user - specified alternative id s both in alt_specific_vars and availability_vars are observed in the choice column of wide_data .
44,822
def ensure_contiguity_in_observation_rows(obs_id_vector):
    """Raise a ValueError unless rows sharing an observation id are adjacent.

    Adjacent ids must be non-decreasing; any drop signals that an
    observation's rows are split apart. Needed because the hessian
    calculation requires contiguous rows per observation id.
    """
    successive_diffs_ok = (obs_id_vector[1:] - obs_id_vector[:-1]) >= 0
    if successive_diffs_ok.all():
        return None
    problem_ids = obs_id_vector[np.where(~successive_diffs_ok)]
    msg_1 = "All rows pertaining to a given choice situation must be "
    msg_2 = "contiguous. \nRows pertaining to the following observation "
    msg_3 = "id's are not contiguous: \n{}"
    raise ValueError(msg_1 + msg_2 + msg_3.format(problem_ids.tolist()))
Ensures that all rows pertaining to a given choice situation are located next to one another . Raises a helpful ValueError otherwise . This check is needed because the hessian calculation function requires the design matrix to have contiguity in rows with the same observation id .
44,823
def relate_obs_ids_to_chosen_alts(obs_id_array, alt_id_array, choice_array):
    """Return a dict mapping each unique alternative id to the sorted,
    de-duplicated array of observation ids that chose it."""
    mapping = {}
    for current_alt in np.sort(np.unique(alt_id_array)):
        selected_rows = np.where((alt_id_array == current_alt) & (choice_array == 1))
        mapping[current_alt] = np.sort(np.unique(obs_id_array[selected_rows]))
    return mapping
Creates a dictionary that relates each unique alternative id to the set of observations ids that chose the given alternative .
44,824
def create_cross_sectional_bootstrap_samples(obs_id_array, alt_id_array, choice_array, num_samples, seed=None):
    """Determine the unique observation ids present in each bootstrap sample.

    Resamples observation ids with replacement, stratified by the
    alternative each observation chose, producing a (num_samples,
    total_num_obs) array of ids. Does NOT build the new design matrices or
    long-format dataframes. Correct only for cross-sectional (not panel)
    datasets.
    """
    chosen_alts_to_obs_ids = relate_obs_ids_to_chosen_alts(obs_id_array, alt_id_array, choice_array)
    num_obs_per_group, tot_num_obs = get_num_obs_choosing_each_alternative(chosen_alts_to_obs_ids)
    # NOTE(review): dtype=float may lose precision for very large integer ids.
    ids_per_sample = np.empty((num_samples, tot_num_obs), dtype=float)
    if seed is not None:
        if not isinstance(seed, int):
            # NOTE(review): message says `boot_seed` though the kwarg is `seed`.
            msg = "`boot_seed` MUST be an int."
            raise ValueError(msg)
        np.random.seed(seed)
    # Fill columns group by group: each chosen-alternative group resamples
    # its own observation ids, preserving group sizes across samples.
    col_idx = 0
    for alt_id in num_obs_per_group:
        relevant_ids = chosen_alts_to_obs_ids[alt_id]
        resample_size = num_obs_per_group[alt_id]
        current_ids = (np.random.choice(relevant_ids,
                                        size=resample_size * num_samples,
                                        replace=True)
                         .reshape((num_samples, resample_size)))
        end_col = col_idx + resample_size
        ids_per_sample[:, col_idx:end_col] = current_ids
        col_idx += resample_size
    return ids_per_sample
Determines the unique observations that will be present in each bootstrap sample . This function DOES NOT create the new design matrices or a new long - format dataframe for each bootstrap sample . Note that these will be correct bootstrap samples for cross - sectional datasets . This function will not work correctly for panel datasets .
44,825
def create_bootstrap_id_array(obs_id_per_sample):
    """Return a 2D int array, same shape as `obs_id_per_sample`, where every
    row is the sequence 1..num_columns (the bootstrap id of each replicated
    observation)."""
    num_samples, num_obs = obs_id_per_sample.shape
    single_row = np.arange(1, num_obs + 1)
    return np.tile(single_row, num_samples).reshape((num_samples, num_obs))
Creates a 2D ndarray that contains the bootstrap ids for each replication of each unit of observation in the set of bootstrap samples.
44,826
def check_column_existence(col_name, df, presence=True):
    """Raise a ValueError unless `col_name` is in `df.columns` (when
    `presence` is True) or absent from them (when `presence` is False)."""
    col_found = col_name in df.columns
    if presence and not col_found:
        raise ValueError("Ensure that `{}` is in `df.columns`.".format(col_name))
    if not presence and col_found:
        raise ValueError("Ensure that `{}` is not in `df.columns`.".format(col_name))
    return None
Checks whether or not col_name is in df and raises a helpful error msg if the desired condition is not met .
44,827
def ensure_resampled_obs_ids_in_df(resampled_obs_ids, orig_obs_id_array):
    """Raise a ValueError unless every id in `resampled_obs_ids` appears in
    `orig_obs_id_array`."""
    all_present = np.in1d(resampled_obs_ids, orig_obs_id_array).all()
    if not all_present:
        raise ValueError("All values in `resampled_obs_ids` MUST be in `orig_obs_id_array`.")
    return None
Checks whether all ids in resampled_obs_ids are in orig_obs_id_array . Raises a helpful ValueError if not .
44,828
def create_bootstrap_dataframe(orig_df, obs_id_col, resampled_obs_ids_1d, groupby_dict, boot_id_col="bootstrap_id"):
    """Assemble the long-format dataframe for one bootstrap sample.

    Each resampled observation id pulls its rows (a copy) from
    `groupby_dict` and is tagged with a 1-based bootstrap id in
    `boot_id_col`. The pieces are concatenated with a fresh index.
    """
    check_column_existence(obs_id_col, orig_df, presence=True)
    check_column_existence(boot_id_col, orig_df, presence=False)
    ensure_resampled_obs_ids_in_df(resampled_obs_ids_1d, orig_df[obs_id_col].values)
    pieces = []
    for boot_id, obs_id in enumerate(resampled_obs_ids_1d):
        piece = groupby_dict[obs_id].copy()
        piece[boot_id_col] = boot_id + 1
        pieces.append(piece)
    return pd.concat(pieces, axis=0, ignore_index=True)
Will create the altered dataframe of data needed to estimate a choice model with the particular observations that belong to the current bootstrap sample .
44,829
def get_param_names(model_obj):
    """Return all parameter display names: nest, shape, and intercept names
    (when present) prepended, in that final order, before the index
    coefficient names."""
    all_names = deepcopy(model_obj.ind_var_names)
    # Each later group is prepended, so nests end up first overall.
    for extra_names in (model_obj.intercept_names,
                        model_obj.shape_names,
                        model_obj.nest_names):
        if extra_names is not None:
            all_names = extra_names + all_names
    return all_names
Extracts all the names to be displayed for the estimated parameters .
44,830
def get_param_list_for_prediction(model_obj, replicates):
    """Create the param_list argument for use with model_obj.predict.

    Splits each row of the 2D `replicates` array into the model's
    parameter groups. Columns are assumed ordered nests, shapes,
    intercepts, then index coefficients; each slice is transposed and
    inserted at the front so param_list ends up ordered
    [index coefs, intercepts, shapes, nests], with None for empty groups.
    """
    ensure_samples_is_ndim_ndarray(replicates, ndim=2, name='replicates')
    num_idx_coefs = len(model_obj.ind_var_names)
    intercept_names = model_obj.intercept_names
    num_outside_intercepts = 0 if intercept_names is None else len(intercept_names)
    shape_names = model_obj.shape_names
    num_shapes = 0 if shape_names is None else len(shape_names)
    nest_names = model_obj.nest_names
    num_nests = 0 if nest_names is None else len(nest_names)
    parameter_numbers = [num_nests, num_shapes, num_outside_intercepts, num_idx_coefs]
    current_idx = 0
    param_list = []
    for param_num in parameter_numbers:
        if param_num == 0:
            # Empty group: placeholder None, no columns consumed.
            param_list.insert(0, None)
            continue
        upper_idx = current_idx + param_num
        param_list.insert(0, replicates[:, current_idx:upper_idx].T)
        current_idx += param_num
    return param_list
Create the param_list argument for use with model_obj . predict .
44,831
def generate_bootstrap_replicates(self, num_samples, mnl_obj=None, mnl_init_vals=None, mnl_fit_kwargs=None, extract_init_vals=None, print_res=False, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, constrained_pos=None, boot_seed=None, weights=None):
    """Generate bootstrap replicates of the point estimates for one's model
    and dataset, storing them as `self.bootstrap_replicates` (a DataFrame
    with one row per bootstrap sample).

    NOTE(review): the `weights` parameter is accepted but never used in
    this body — confirm whether it should be forwarded to fit_kwargs.
    """
    print("Generating Bootstrap Replicates")
    print(time.strftime("%a %m-%d-%Y %I:%M%p"))
    sys.stdout.flush()
    obs_id_array = self.model_obj.data[self.model_obj.obs_id_col].values
    alt_id_array = self.model_obj.alt_IDs
    choice_array = self.model_obj.choices
    num_params = self.mle_params.shape[0]
    # Resample observation ids (stratified by chosen alternative) for each
    # bootstrap sample.
    obs_id_per_sample = bs.create_cross_sectional_bootstrap_samples(obs_id_array, alt_id_array, choice_array, num_samples, seed=boot_seed)
    dfs_by_obs_id = bs.create_deepcopied_groupby_dict(self.model_obj.data, self.model_obj.obs_id_col)
    boot_id_col = "bootstrap_id"
    point_estimates = np.empty((num_samples, num_params), dtype=float)
    fit_kwargs = {"print_res": print_res,
                  "method": method,
                  "loss_tol": loss_tol,
                  "gradient_tol": gradient_tol,
                  "maxiter": maxiter,
                  "ridge": ridge,
                  "constrained_pos": constrained_pos,
                  "just_point": True}
    mnl_spec = None if mnl_obj is None else mnl_obj.specification
    mnl_names = None if mnl_obj is None else mnl_obj.name_spec
    iterable_for_iteration = PROGRESS(xrange(num_samples), desc="Creating Bootstrap Replicates", total=num_samples)
    for row in iterable_for_iteration:
        # Build the long-format dataframe for this bootstrap sample and
        # re-estimate the model on it (point estimate only).
        bootstrap_df = bs.create_bootstrap_dataframe(self.model_obj.data, self.model_obj.obs_id_col, obs_id_per_sample[row, :], dfs_by_obs_id, boot_id_col=boot_id_col)
        current_results = retrieve_point_est(self.model_obj, bootstrap_df, boot_id_col, num_params, mnl_spec, mnl_names, mnl_init_vals, mnl_fit_kwargs, extract_init_vals=extract_init_vals, **fit_kwargs)
        point_estimates[row] = current_results["x"]
    self.bootstrap_replicates = pd.DataFrame(point_estimates, columns=self.mle_params.index)
    print("Finished Generating Bootstrap Replicates")
    print(time.strftime("%a %m-%d-%Y %I:%M%p"))
    return None
Generates the bootstrap replicates for one s given model and dataset .
44,832
def generate_jackknife_replicates(self, mnl_obj=None, mnl_init_vals=None, mnl_fit_kwargs=None, extract_init_vals=None, print_res=False, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, constrained_pos=None):
    """Generate leave-one-observation-out (jackknife) replicates of the
    point estimates, storing them as `self.jackknife_replicates` (a
    DataFrame with one row per unique observation id)."""
    print("Generating Jackknife Replicates")
    print(time.strftime("%a %m-%d-%Y %I:%M%p"))
    sys.stdout.flush()
    obs_id_col = self.model_obj.obs_id_col
    orig_obs_id_array = self.model_obj.data[obs_id_col].values
    unique_obs_ids = np.sort(np.unique(orig_obs_id_array))
    num_obs = unique_obs_ids.size
    num_params = self.mle_params.size
    fit_kwargs = {"print_res": print_res,
                  "method": method,
                  "loss_tol": loss_tol,
                  "gradient_tol": gradient_tol,
                  "maxiter": maxiter,
                  "ridge": ridge,
                  "constrained_pos": constrained_pos,
                  "just_point": True}
    mnl_spec = None if mnl_obj is None else mnl_obj.specification
    mnl_names = None if mnl_obj is None else mnl_obj.name_spec
    point_replicates = np.empty((num_obs, num_params), dtype=float)
    iterable_for_iteration = PROGRESS(enumerate(unique_obs_ids), desc="Creating Jackknife Replicates", total=unique_obs_ids.size)
    for pos, obs_id in iterable_for_iteration:
        # Drop all rows for this observation and re-estimate on the rest.
        new_df = self.model_obj.data.loc[orig_obs_id_array != obs_id]
        current_results = retrieve_point_est(self.model_obj, new_df, obs_id_col, num_params, mnl_spec, mnl_names, mnl_init_vals, mnl_fit_kwargs, extract_init_vals=extract_init_vals, **fit_kwargs)
        point_replicates[pos] = current_results['x']
    self.jackknife_replicates = pd.DataFrame(point_replicates, columns=self.mle_params.index)
    print("Finished Generating Jackknife Replicates")
    print(time.strftime("%a %m-%d-%Y %I:%M%p"))
    return None
Generates the jackknife replicates for one s given model and dataset .
44,833
def calc_log_likes_for_replicates(self, replicates='bootstrap', num_draws=None, seed=None):
    """Calculate the log-likelihood of each stored replicate on the model's
    dataset, store the result as `self.<replicates>_log_likelihoods`, and
    return the array of log-likelihoods.

    `replicates` selects 'bootstrap' or 'jackknife' replicates; `num_draws`
    and `seed` are forwarded to predict for simulation-based models.
    """
    ensure_replicates_kwarg_validity(replicates)
    replicate_vec = getattr(self, replicates + "_replicates").values
    choice_col = self.model_obj.choice_col
    current_model_type = self.model_obj.model_type
    # These model types cannot score all replicates in one 2D predict call.
    non_2d_predictions = [model_type_to_display_name["Nested Logit"], model_type_to_display_name["Mixed Logit"]]
    if current_model_type not in non_2d_predictions:
        # Vectorized path: all replicates predicted at once.
        param_list = get_param_list_for_prediction(self.model_obj, replicate_vec)
        chosen_probs = self.model_obj.predict(self.model_obj.data, param_list=param_list, return_long_probs=False, choice_col=choice_col)
    else:
        # One-replicate-at-a-time path.
        chosen_probs_list = []
        # NOTE(review): the progress description below says "Gradient Norms"
        # though this loop computes likelihoods — likely a copy-paste slip.
        iterable_for_iteration = PROGRESS(xrange(replicate_vec.shape[0]), desc="Calculate Gradient Norms", total=replicate_vec.shape[0])
        for idx in iterable_for_iteration:
            param_list = get_param_list_for_prediction(self.model_obj, replicate_vec[idx][None, :])
            param_list = [x.ravel() if x is not None else x for x in param_list]
            chosen_probs = self.model_obj.predict(self.model_obj.data, param_list=param_list, return_long_probs=False, choice_col=choice_col, num_draws=num_draws, seed=seed)
            chosen_probs_list.append(chosen_probs[:, None])
        chosen_probs = np.concatenate(chosen_probs_list, axis=1)
    # Sum log-probabilities over observations for each replicate.
    log_likelihoods = np.log(chosen_probs).sum(axis=0)
    attribute_name = replicates + "_log_likelihoods"
    log_like_series = pd.Series(log_likelihoods, name=attribute_name)
    setattr(self, attribute_name, log_like_series)
    return log_likelihoods
Calculate the log - likelihood value of one s replicates given one s dataset .
44,834
def calc_gradient_norm_for_replicates(self, replicates='bootstrap', ridge=None, constrained_pos=None, weights=None):
    """Calculate the Euclidean norm of the log-likelihood gradient at each
    stored replicate and return the array of norms.

    `replicates` selects 'bootstrap' or 'jackknife' replicates; `ridge`,
    `constrained_pos`, and `weights` configure the estimation object used
    to evaluate the gradients.
    """
    ensure_replicates_kwarg_validity(replicates)
    estimation_obj = create_estimation_obj(self.model_obj, self.mle_params.values, ridge=ridge, constrained_pos=constrained_pos, weights=weights)
    # Some estimation objects must precompute derivative helpers first.
    if hasattr(estimation_obj, "set_derivatives"):
        estimation_obj.set_derivatives()
    replicate_array = getattr(self, replicates + "_replicates").values
    num_reps = replicate_array.shape[0]
    gradient_norms = np.empty((num_reps,), dtype=float)
    iterable_for_iteration = PROGRESS(xrange(num_reps), desc="Calculating Gradient Norms", total=num_reps)
    for row in iterable_for_iteration:
        current_params = replicate_array[row]
        gradient = estimation_obj.convenience_calc_gradient(current_params)
        gradient_norms[row] = np.linalg.norm(gradient)
    return gradient_norms
Calculate the Euclidean - norm of the gradient of one s replicates given one s dataset .
44,835
def calc_percentile_interval(self, conf_percentage):
    """Compute percentile bootstrap confidence intervals from the stored
    bootstrap replicates and store them as `self.percentile_interval`."""
    alpha = bc.get_alpha_from_conf_percentage(conf_percentage)
    endpoint_names = ['{:.3g}%'.format(alpha / 2.0),
                      '{:.3g}%'.format(100 - alpha / 2.0)]
    interval_array = bc.calc_percentile_interval(self.bootstrap_replicates.values,
                                                 conf_percentage)
    self.percentile_interval = pd.DataFrame(interval_array.T,
                                            index=self.mle_params.index,
                                            columns=endpoint_names)
    return None
Calculates percentile bootstrap confidence intervals for one s model .
44,836
def calc_abc_interval(self, conf_percentage, init_vals, epsilon=0.001, **fit_kwargs):
    """Calculate Approximate Bootstrap Confidence (ABC) intervals for the
    model's parameters and store them as `self.abc_interval`.

    `init_vals` and `fit_kwargs` are forwarded to the ABC computation;
    `epsilon` is its finite-difference step.
    """
    print("Calculating Approximate Bootstrap Confidence (ABC) Intervals")
    print(time.strftime("%a %m-%d-%Y %I:%M%p"))
    sys.stdout.flush()
    alpha = bc.get_alpha_from_conf_percentage(conf_percentage)
    single_column_names = ['{:.3g}%'.format(alpha / 2.0),
                          '{:.3g}%'.format(100 - alpha / 2.0)]
    conf_intervals = abc.calc_abc_interval(self.model_obj, self.mle_params.values, init_vals, conf_percentage, epsilon=epsilon, **fit_kwargs)
    self.abc_interval = pd.DataFrame(conf_intervals.T, index=self.mle_params.index, columns=single_column_names)
    return None
Calculates Approximate Bootstrap Confidence Intervals for one s model .
44,837
def calc_conf_intervals(self, conf_percentage, interval_type='all', init_vals=None, epsilon=abc.EPSILON, **fit_kwargs):
    """Calculate percentile, BCa, and/or ABC confidence intervals.

    `interval_type` selects 'pi', 'bca', 'abc', or 'all'; with 'all', the
    three interval DataFrames are also combined into `self.all_intervals`
    with a (interval type, endpoint) MultiIndex on the columns. `init_vals`,
    `epsilon`, and `fit_kwargs` only matter for the ABC computation.
    Raises ValueError for any other `interval_type`.
    """
    if interval_type == 'pi':
        self.calc_percentile_interval(conf_percentage)
    elif interval_type == 'bca':
        self.calc_bca_interval(conf_percentage)
    elif interval_type == 'abc':
        self.calc_abc_interval(conf_percentage, init_vals, epsilon=epsilon, **fit_kwargs)
    elif interval_type == 'all':
        print("Calculating Percentile Confidence Intervals")
        sys.stdout.flush()
        self.calc_percentile_interval(conf_percentage)
        print("Calculating BCa Confidence Intervals")
        sys.stdout.flush()
        self.calc_bca_interval(conf_percentage)
        self.calc_abc_interval(conf_percentage, init_vals, epsilon=epsilon, **fit_kwargs)
        alpha = bc.get_alpha_from_conf_percentage(conf_percentage)
        interval_type_names = ['percentile_interval', 'BCa_interval', 'ABC_interval']
        endpoint_names = ['{:.3g}%'.format(alpha / 2.0),
                          '{:.3g}%'.format(100 - alpha / 2.0)]
        multi_index_names = list(itertools.product(interval_type_names, endpoint_names))
        df_column_index = pd.MultiIndex.from_tuples(multi_index_names)
        # Concatenate the three per-type interval DataFrames side by side,
        # then relabel columns with the MultiIndex built above.
        self.all_intervals = pd.concat([self.percentile_interval, self.bca_interval, self.abc_interval], axis=1, ignore_index=True)
        self.all_intervals.columns = df_column_index
        self.all_intervals.index = self.mle_params.index
    else:
        msg = "interval_type MUST be in `['pi', 'bca', 'abc', 'all']`"
        raise ValueError(msg)
    return None
Calculates percentile bias - corrected and accelerated and approximate bootstrap confidence intervals .
44,838
def create_calc_dh_d_alpha(estimator):
    """Return the function used in gradient/hessian calculations for the
    derivative of the transformation with respect to the outside intercept
    parameters.

    When an intercept reference position exists, the mapping matrix drops
    that alternative's column; otherwise the derivative array is None.

    Fix: `range(...)` objects have no `.remove()` method in Python 3, so the
    index sequence is materialized as a list before removing the reference
    position.
    """
    if estimator.intercept_ref_pos is not None:
        needed_idxs = list(range(estimator.rows_to_alts.shape[1]))
        needed_idxs.remove(estimator.intercept_ref_pos)
        dh_d_alpha = (estimator.rows_to_alts
                               .copy()
                               .transpose()[needed_idxs, :]
                               .transpose())
    else:
        dh_d_alpha = None
    return partial(_cloglog_transform_deriv_alpha, output_array=dh_d_alpha)
Return the function that can be used in the various gradient and hessian calculations to calculate the derivative of the transformation with respect to the outside intercept parameters .
44,839
def calc_individual_chi_squares(residuals, long_probabilities, rows_to_obs):
    """Return the per-choice-situation chi-squared values: squared residuals
    over predicted probabilities, aggregated by observation via
    `rows_to_obs`."""
    per_row_terms = residuals ** 2 / long_probabilities
    return rows_to_obs.T.dot(per_row_terms)
Calculates individual chi - squared values for each choice situation in the dataset .
44,840
def calc_rho_and_rho_bar_squared(final_log_likelihood, null_log_likelihood, num_est_parameters):
    """Return McFadden's rho-squared and rho-bar-squared for the model.

    rho-bar-squared penalizes the fitted log-likelihood by the number of
    estimated parameters.
    """
    likelihood_ratio = final_log_likelihood / null_log_likelihood
    rho_squared = 1.0 - likelihood_ratio
    penalized_ratio = (final_log_likelihood - num_est_parameters) / null_log_likelihood
    rho_bar_squared = 1.0 - penalized_ratio
    return rho_squared, rho_bar_squared
Calculates McFadden's rho-squared and rho-bar-squared for the given model.
44,841
def calc_and_store_post_estimation_results(results_dict, estimator):
    """Calculate and store post-estimation results (probabilities, residuals,
    fit measures, gradient/hessian) on `results_dict`.

    Only valid for logit-type models, since it relies on the estimator's
    systematic-utility transformation and derivative convenience methods.

    Parameters
    ----------
    results_dict : dict.
        Output of the optimizer; must contain 'fun', 'x', and
        'log_likelihood_null'.
    estimator : estimation object.
        Provides the convenience_* methods and data attributes used below.

    Returns
    -------
    results_dict, with the additional keys populated.
    """
    # The optimizer minimized the negative log-likelihood.
    final_log_likelihood = -1 * results_dict["fun"]
    results_dict["final_log_likelihood"] = final_log_likelihood
    final_params = results_dict["x"]
    # Split the flat parameter vector into its model-specific pieces:
    # (nest, shape, intercept, index) in that order.
    split_res = estimator.convenience_split_params(final_params,
                                                   return_all_types=True)
    results_dict["nest_params"] = split_res[0]
    results_dict["shape_params"] = split_res[1]
    results_dict["intercept_params"] = split_res[2]
    results_dict["utility_coefs"] = split_res[3]
    chosen_probs, long_probs = estimator.convenience_calc_probs(final_params)
    results_dict["chosen_probs"] = chosen_probs
    results_dict["long_probs"] = long_probs
    # long_probs may be 1D (point) or 2D (e.g. per-draw for mixed logit);
    # broadcast the choice vector accordingly.
    if len(long_probs.shape) == 1:
        residuals = estimator.choice_vector - long_probs
    else:
        residuals = estimator.choice_vector[:, None] - long_probs
    results_dict["residuals"] = residuals
    args = [residuals, long_probs, estimator.rows_to_obs]
    results_dict["ind_chi_squareds"] = calc_individual_chi_squares(*args)
    log_likelihood_null = results_dict["log_likelihood_null"]
    rho_results = calc_rho_and_rho_bar_squared(final_log_likelihood,
                                               log_likelihood_null,
                                               final_params.shape[0])
    results_dict["rho_squared"] = rho_results[0]
    results_dict["rho_bar_squared"] = rho_results[1]
    # Derivative-based quantities used later for standard errors.
    results_dict["final_gradient"] = \
        estimator.convenience_calc_gradient(final_params)
    results_dict["final_hessian"] = \
        estimator.convenience_calc_hessian(final_params)
    results_dict["fisher_info"] = \
        estimator.convenience_calc_fisher_approx(final_params)
    results_dict["constrained_pos"] = estimator.constrained_pos
    return results_dict
Calculates and stores post - estimation results that require the use of the systematic utility transformation functions or the various derivative functions . Note that this function is only valid for logit - type models .
44,842
def estimate(init_values,
             estimator,
             method,
             loss_tol,
             gradient_tol,
             maxiter,
             print_results,
             use_hessian=True,
             just_point=False,
             **kwargs):
    """Estimate the choice model defined by `estimator` via
    scipy.optimize.minimize.

    Parameters
    ----------
    init_values : 1D ndarray.
        Starting values for the optimizer.
    estimator : estimation object.
        Provides the objective, gradient, and (optionally) hessian callables.
    method : str.
        Optimization method passed to scipy.optimize.minimize.
    loss_tol, gradient_tol : floats.
        Convergence tolerances (`tol` and options['gtol'] respectively).
    maxiter : int.
        Maximum optimizer iterations.
    print_results : bool.
        Whether to print log-likelihood and timing information.
    use_hessian : bool, optional.
        If True, pass the analytic negative hessian to the optimizer.
    just_point : bool, optional.
        If True, skip all pre/post-estimation bookkeeping and return only the
        raw optimizer result.

    Returns
    -------
    dict of optimizer results, augmented with post-estimation values unless
    `just_point` is True.
    """
    if not just_point:
        # Reference log-likelihoods for fit statistics and user feedback.
        log_likelihood_at_zero = \
            estimator.convenience_calc_log_likelihood(estimator.zero_vector)
        initial_log_likelihood = \
            estimator.convenience_calc_log_likelihood(init_values)
        if print_results:
            null_msg = "Log-likelihood at zero: {:,.4f}"
            print(null_msg.format(log_likelihood_at_zero))
            init_msg = "Initial Log-likelihood: {:,.4f}"
            print(init_msg.format(initial_log_likelihood))
            sys.stdout.flush()
    hess_func = estimator.calc_neg_hessian if use_hessian else None
    start_time = time.time()
    # jac=True: the objective returns (neg log-likelihood, neg gradient).
    results = minimize(estimator.calc_neg_log_likelihood_and_neg_gradient,
                       init_values,
                       method=method,
                       jac=True,
                       hess=hess_func,
                       tol=loss_tol,
                       options={'gtol': gradient_tol, "maxiter": maxiter},
                       **kwargs)
    if not just_point:
        if print_results:
            end_time = time.time()
            elapsed_sec = (end_time - start_time)
            elapsed_min = elapsed_sec / 60.0
            if elapsed_min > 1.0:
                msg = "Estimation Time for Point Estimation: {:.2f} minutes."
                print(msg.format(elapsed_min))
            else:
                msg = "Estimation Time for Point Estimation: {:.2f} seconds."
                print(msg.format(elapsed_sec))
            # The optimizer minimized the negative log-likelihood.
            print("Final log-likelihood: {:,.4f}".format(-1 * results["fun"]))
            sys.stdout.flush()
        results["log_likelihood_null"] = log_likelihood_at_zero
        results = calc_and_store_post_estimation_results(results, estimator)
    return results
Estimate the given choice model that is defined by estimator .
44,843
def calc_neg_log_likelihood_and_neg_gradient(self, params):
    """Objective function for scipy.optimize.minimize.

    Returns the pair (negative log-likelihood, negative gradient), with
    gradient entries for constrained parameter positions forced to zero so
    the optimizer leaves them untouched.
    """
    negated_ll = -1 * self.convenience_calc_log_likelihood(params)
    negated_grad = -1 * self.convenience_calc_gradient(params)
    constrained = self.constrained_pos
    if constrained is not None:
        negated_grad[constrained] = 0
    return negated_ll, negated_grad
Calculates and returns the negative of the log - likelihood and the negative of the gradient . This function is used as the objective function in scipy . optimize . minimize .
44,844
def ensure_samples_is_ndim_ndarray(samples, name='bootstrap', ndim=2):
    """Raise a helpful ValueError unless `samples` is a numpy array with
    exactly `ndim` dimensions."""
    assert isinstance(ndim, int)
    assert isinstance(name, str)
    looks_valid = isinstance(samples, np.ndarray) and samples.ndim == ndim
    if not looks_valid:
        sample_name = name + "_samples"
        raise ValueError("`{}` MUST be a {}D ndarray.".format(sample_name,
                                                              ndim))
    return None
Ensures that samples is an ndim numpy array . Raises a helpful ValueError if otherwise .
44,845
def create_estimation_obj(model_obj,
                          init_vals,
                          mappings=None,
                          ridge=None,
                          constrained_pos=None,
                          weights=None):
    """Build and return the estimation object that matches `model_obj`'s
    model type, using the module-level model-type resource registries."""
    if mappings is None:
        mapping_matrices = model_obj.get_mappings_for_fit()
    else:
        mapping_matrices = mappings
    zero_vector = np.zeros(init_vals.shape[0])
    internal_model_name = display_name_to_model_type[model_obj.model_type]
    resources = model_type_to_resources[internal_model_name]
    estimator_class = resources['estimator']
    current_split_func = resources['split_func']
    return estimator_class(model_obj,
                           mapping_matrices,
                           ridge,
                           zero_vector,
                           current_split_func,
                           constrained_pos,
                           weights=weights)
Should return a model estimation object corresponding to the model type of the model_obj .
44,846
def ensure_wide_weights_is_1D_or_2D_ndarray(wide_weights):
    """Raise a helpful ValueError unless `wide_weights` is a 1D or 2D
    numpy array."""
    if not isinstance(wide_weights, np.ndarray):
        raise ValueError("wide_weights MUST be a ndarray.")
    if wide_weights.ndim not in (1, 2):
        raise ValueError("wide_weights MUST be a 1D or 2D ndarray.")
    return None
Ensures that wide_weights is a 1D or 2D ndarray . Raises a helpful ValueError if otherwise .
44,847
def check_validity_of_long_form_args(model_obj, wide_weights, rows_to_obs):
    """Validate the arguments of `create_long_form_weights`.

    Delegates each check to its dedicated validator, in order:
    the model object must expose a mapping constructor, `wide_weights` must
    be a 1D or 2D ndarray, and `rows_to_obs` must be a valid mapping.
    Each validator raises on failure; returns None when all pass.
    """
    ensure_model_obj_has_mapping_constructor(model_obj)
    ensure_wide_weights_is_1D_or_2D_ndarray(wide_weights)
    ensure_rows_to_obs_validity(rows_to_obs)
    return None
Ensures the args to create_long_form_weights have expected properties .
44,848
def calc_finite_diff_terms_for_abc(model_obj,
                                   mle_params,
                                   init_vals,
                                   epsilon,
                                   **fit_kwargs):
    """Calculate the finite-difference terms needed to approximate the
    empirical influence and second-order empirical influence functions
    for the ABC bootstrap interval.

    Parameters
    ----------
    model_obj : model object with `data`, `obs_id_col`, `get_mappings_for_fit`
        and `fit_mle`.
    mle_params : 1D ndarray of the MLE parameter estimates (unused here but
        kept for interface consistency with the other ABC helpers).
    init_vals : 1D ndarray of starting values for each refit.
    epsilon : positive float, finite-difference step size.
    fit_kwargs : extra keyword args forwarded to `fit_mle`.

    Returns
    -------
    (term_plus, term_minus) : 2D ndarrays of shape (num_obs, num_params),
        the refit point estimates with each observation's weight perturbed
        up and down by epsilon, respectively.
    """
    num_obs = model_obj.data[model_obj.obs_id_col].unique().size
    # Uniform observation weights, then globally shrunk/inflated so that the
    # single-observation perturbation below keeps the weights summing to 1.
    init_weights_wide = np.ones(num_obs, dtype=float) / num_obs
    init_wide_weights_plus = (1 - epsilon) * init_weights_wide
    init_wide_weights_minus = (1 + epsilon) * init_weights_wide
    term_plus = np.empty((num_obs, init_vals.shape[0]), dtype=float)
    term_minus = np.empty((num_obs, init_vals.shape[0]), dtype=float)
    rows_to_obs = model_obj.get_mappings_for_fit()['rows_to_obs']
    new_fit_kwargs = deepcopy(fit_kwargs)
    # `fit_kwargs` is a **kwargs dict and is therefore never None; the
    # original `fit_kwargs is not None` guard was redundant.
    if 'weights' in fit_kwargs:
        orig_weights = fit_kwargs['weights']
        del new_fit_kwargs['weights']
    else:
        orig_weights = 1
    new_fit_kwargs['just_point'] = True
    # BUG FIX: `xrange` does not exist in Python 3; use `range`.
    for obs in range(num_obs):
        current_wide_weights_plus = init_wide_weights_plus.copy()
        current_wide_weights_plus[obs] += epsilon
        current_wide_weights_minus = init_wide_weights_minus.copy()
        current_wide_weights_minus[obs] -= epsilon
        long_weights_plus = \
            (create_long_form_weights(model_obj,
                                      current_wide_weights_plus,
                                      rows_to_obs=rows_to_obs) * orig_weights)
        long_weights_minus = \
            (create_long_form_weights(model_obj,
                                      current_wide_weights_minus,
                                      rows_to_obs=rows_to_obs) * orig_weights)
        term_plus[obs] = model_obj.fit_mle(init_vals,
                                           weights=long_weights_plus,
                                           **new_fit_kwargs)['x']
        term_minus[obs] = model_obj.fit_mle(init_vals,
                                            weights=long_weights_minus,
                                            **new_fit_kwargs)['x']
    return term_plus, term_minus
Calculates the terms needed for the finite difference approximations of the empirical influence and second order empirical influence functions .
44,849
def calc_abc_interval(model_obj,
                      mle_params,
                      init_vals,
                      conf_percentage,
                      epsilon=0.001,
                      **fit_kwargs):
    """Calculate approximate bootstrap confidence (ABC) intervals.

    Parameters
    ----------
    model_obj : model object used for the refits in the influence
        calculations.
    mle_params : 1D ndarray of MLE point estimates.
    init_vals : 1D ndarray of starting values for each refit.
    conf_percentage : number, e.g. 95 for a 95% interval.
    epsilon : positive float, finite-difference step size.
    fit_kwargs : forwarded to the underlying model refits.

    Returns
    -------
    2D ndarray of lower and upper confidence endpoints, as produced by
    `combine_conf_endpoints`.
    """
    check_conf_percentage_validity(conf_percentage)
    # Empirical influence and second-order empirical influence arrays via
    # finite differences.
    empirical_influence, second_order_influence = \
        calc_influence_arrays_for_abc(model_obj,
                                      mle_params,
                                      init_vals,
                                      epsilon,
                                      **fit_kwargs)
    # Ingredients of the ABC formulas (Efron & Tibshirani notation):
    # acceleration a-hat, standard error, bias b-hat, quadratic coefficient
    # c-hat-q, total curvature, and the resulting bias correction.
    acceleration = calc_acceleration_abc(empirical_influence)
    std_error = calc_std_error_abc(empirical_influence)
    bias = calc_bias_abc(second_order_influence)
    quadratic_coef = efron_quadratic_coef_abc(model_obj,
                                              mle_params,
                                              init_vals,
                                              empirical_influence,
                                              std_error,
                                              epsilon,
                                              **fit_kwargs)
    total_curvature = calc_total_curvature_abc(bias, std_error,
                                               quadratic_coef)
    bias_correction = calc_bias_correction_abc(acceleration, total_curvature)
    lower_endpoint, upper_endpoint = \
        efron_endpoints_for_abc_confidence_interval(conf_percentage,
                                                    model_obj,
                                                    init_vals,
                                                    bias_correction,
                                                    acceleration,
                                                    std_error,
                                                    empirical_influence,
                                                    **fit_kwargs)
    conf_intervals = combine_conf_endpoints(lower_endpoint, upper_endpoint)
    return conf_intervals
Calculate approximate bootstrap confidence intervals .
44,850
def check_length_of_init_values(design_3d, init_values):
    """Raise a helpful ValueError when `init_values` does not have one entry
    per column of the 3D design matrix it will be dot-producted with."""
    expected_dim = design_3d.shape[2]
    if init_values.shape[0] != expected_dim:
        error_text = ("The initial values are of the wrong dimension. " +
                      "They should be of dimension {}".format(expected_dim))
        raise ValueError(error_text)
    return None
Ensures that the initial values are of the correct length given the design matrix that they will be dot - producted with . Raises a ValueError if that is not the case and provides a useful error message to users .
44,851
def add_mixl_specific_results_to_estimation_res(estimator, results_dict):
    """Attach mixed-logit-specific outputs to `results_dict`: the simulated
    per-decision-maker sequence probabilities and their expanded (per-draw)
    counterparts."""
    sequence_results = mlc.calc_choice_sequence_probs(
        results_dict["long_probs"],
        estimator.choice_vector,
        estimator.rows_to_mixers,
        return_type='all')
    results_dict["simulated_sequence_probs"] = sequence_results[0]
    results_dict["expanded_sequence_probs"] = sequence_results[1]
    return results_dict
Stores particular items in the results dictionary that are unique to mixed logit - type models . In particular this function calculates and adds sequence_probs and expanded_sequence_probs to the results dictionary . The constrained_pos object is also stored to the results_dict .
44,852
def identify_degenerate_nests(nest_spec):
    """Return the positions (iteration order) of nests in `nest_spec` that
    contain exactly one alternative, i.e. the degenerate nests."""
    return [position
            for position, nest_name in enumerate(nest_spec)
            if len(nest_spec[nest_name]) == 1]
Identify the nests within nest_spec that are degenerate i . e . those nests with only a single alternative within the nest .
44,853
def check_length_of_initial_values(self, init_values):
    """Raise a helpful ValueError unless `init_values` has one entry per
    index coefficient plus one per nest parameter."""
    expected_dim = self.design.shape[1] + self.rows_to_nests.shape[1]
    actual_dim = init_values.shape[0]
    if actual_dim != expected_dim:
        # Note: the original pieces concatenate without separators, and that
        # exact message is preserved here.
        error_text = ("The initial values are of the wrong dimension" +
                      "It should be of dimension {}".format(expected_dim) +
                      "But instead it has dimension {}".format(actual_dim))
        raise ValueError(error_text)
    return None
Ensures that the initial values are of the correct length .
44,854
def convenience_split_params(self, params, return_all_types=False):
    """Split the flat parameter vector into nest parameters and index
    coefficients, delegating to the module-level `split_param_vec` with this
    estimator's rows-to-nests mapping."""
    nest_mapping = self.rows_to_nests
    return split_param_vec(params,
                           nest_mapping,
                           return_all_types=return_all_types)
Splits parameter vector into nest parameters and index parameters .
44,855
def robust_outer_product(vec_1, vec_2):
    """Numerically robust outer product for vectors that may contain very
    small values.

    Decomposes each vector into mantissas and base-2 exponents, multiplies
    mantissas and adds exponents separately, then recombines; result[i, j]
    equals vec_2[i] * vec_1[j].
    """
    mantissas_1, exponents_1 = np.frexp(vec_1)
    mantissas_2, exponents_2 = np.frexp(vec_2)
    outer_mantissas = mantissas_2[:, None] * mantissas_1[None, :]
    outer_exponents = exponents_2[:, None] + exponents_1[None, :]
    return outer_mantissas * np.exp2(outer_exponents)
Calculates a robust outer product of two vectors that may or may not contain very small values .
44,856
def calc_percentile_interval(bootstrap_replicates, conf_percentage):
    """Percentile bootstrap confidence interval: take raw percentiles of the
    bootstrap distribution, column by column.

    Uses 'lower'/'higher' interpolation so the endpoints are actual
    bootstrap replicate values.
    """
    check_conf_percentage_validity(conf_percentage)
    ensure_samples_is_ndim_ndarray(bootstrap_replicates, ndim=2)
    alpha = get_alpha_from_conf_percentage(conf_percentage)
    lower_percent = alpha / 2.0
    upper_percent = 100.0 - lower_percent
    lower_endpoint = np.percentile(bootstrap_replicates,
                                   lower_percent,
                                   interpolation='lower',
                                   axis=0)
    upper_endpoint = np.percentile(bootstrap_replicates,
                                   upper_percent,
                                   interpolation='higher',
                                   axis=0)
    return combine_conf_endpoints(lower_endpoint, upper_endpoint)
Calculate bootstrap confidence intervals based on raw percentiles of the bootstrap distribution of samples .
44,857
def calc_bca_interval(bootstrap_replicates,
                      jackknife_replicates,
                      mle_params,
                      conf_percentage):
    """Calculate bias-corrected and accelerated (BCa) bootstrap confidence
    intervals.

    Parameters
    ----------
    bootstrap_replicates : 2D ndarray, one row per bootstrap sample.
    jackknife_replicates : 2D ndarray, one row per leave-one-out refit.
    mle_params : the MLE point estimates, used for the bias correction.
    conf_percentage : number, e.g. 95 for a 95% interval.

    Returns
    -------
    2D ndarray of lower and upper endpoints from `combine_conf_endpoints`.
    """
    check_conf_percentage_validity(conf_percentage)
    ensure_samples_is_ndim_ndarray(bootstrap_replicates, ndim=2)
    ensure_samples_is_ndim_ndarray(jackknife_replicates,
                                   name='jackknife',
                                   ndim=2)
    alpha_percent = get_alpha_from_conf_percentage(conf_percentage)
    # z0-hat from the bootstrap distribution, a-hat from the jackknife.
    bias_correction = calc_bias_correction_bca(bootstrap_replicates,
                                               mle_params)
    acceleration = calc_acceleration_bca(jackknife_replicates)
    # Adjusted (per-parameter) percentiles replacing alpha/2 and
    # 100 - alpha/2 of the plain percentile interval.
    lower_percents = calc_lower_bca_percentile(alpha_percent,
                                               bias_correction,
                                               acceleration)
    upper_percents = calc_upper_bca_percentile(alpha_percent,
                                               bias_correction,
                                               acceleration)
    # np.percentile with a vector of percentiles returns a
    # (num_percentiles, num_params) array; the diagonal pairs each
    # parameter with its own adjusted percentile.
    lower_endpoints = np.diag(np.percentile(bootstrap_replicates,
                                            lower_percents,
                                            interpolation='lower',
                                            axis=0))
    upper_endpoints = np.diag(np.percentile(bootstrap_replicates,
                                            upper_percents,
                                            interpolation='higher',
                                            axis=0))
    conf_intervals = combine_conf_endpoints(lower_endpoints, upper_endpoints)
    return conf_intervals
Calculate bias - corrected and accelerated bootstrap confidence intervals .
44,858
def extract_default_init_vals(orig_model_obj, mnl_point_series, num_params):
    """Build default initial values for the desired model type from the point
    estimates of the closest MNL model.

    Parameters
    ----------
    orig_model_obj : the model object whose estimation is being initialized.
    mnl_point_series : pandas Series of MNL point estimates, indexed by
        parameter name.
    num_params : total number of parameters in the desired model.

    Returns
    -------
    1D ndarray of length `num_params`; leading entries (e.g. shape/nest
    parameters not present in the MNL) remain zero.
    """
    init_vals = np.zeros(num_params, dtype=float)
    no_outside_intercepts = orig_model_obj.intercept_names is None
    if no_outside_intercepts:
        init_index_coefs = mnl_point_series.values
        init_intercepts = None
    else:
        # Separate the MNL estimates into intercepts vs. index coefficients.
        init_index_coefs = \
            mnl_point_series.loc[orig_model_obj.ind_var_names].values
        init_intercepts = \
            mnl_point_series.loc[orig_model_obj.intercept_names].values
    if orig_model_obj.mixing_vars is not None:
        # Mixed models add one (zero-initialized) std-dev parameter per
        # mixing variable.
        num_mixing_vars = len(orig_model_obj.mixing_vars)
        init_index_coefs = np.concatenate([init_index_coefs,
                                           np.zeros(num_mixing_vars)],
                                          axis=0)
    if orig_model_obj.model_type == model_type_to_display_name["Asym"]:
        # Asymmetric-logit coefficients live on a different scale; dividing
        # by log(J) maps the MNL estimates onto it.
        multiplier = np.log(len(np.unique(orig_model_obj.alt_IDs)))
        init_index_coefs = init_index_coefs.astype(float)
        init_index_coefs /= multiplier
    if init_intercepts is not None:
        init_index_coefs = np.concatenate([init_intercepts,
                                           init_index_coefs],
                                          axis=0)
    # Place intercepts + index coefficients at the END of the vector; any
    # leading shape/nest parameters stay at their zero defaults.
    num_index = init_index_coefs.shape[0]
    init_vals[-1 * num_index:] = init_index_coefs
    return init_vals
Get the default initial values for the desired model type based on the point estimate of the MNL model that is closest to the desired model .
44,859
def get_model_abbrev(model_obj):
    """Return the abbreviation used in pylogit.create_choice_model for this
    model object's model type, by reverse lookup in
    `model_type_to_display_name`."""
    display_type = model_obj.model_type
    for abbreviation, display_name in model_type_to_display_name.items():
        if display_name == display_type:
            return abbreviation
    raise ValueError("Model object has an unknown or incorrect model type.")
Extract the string used to specify the model type of this model object in pylogit . create_chohice_model .
44,860
def get_model_creation_kwargs(model_obj):
    """Collect the keyword arguments needed to recreate `model_obj` via
    pylogit.create_choice_model."""
    return dict(model_type=get_model_abbrev(model_obj),
                names=model_obj.name_spec,
                intercept_names=model_obj.intercept_names,
                intercept_ref_pos=model_obj.intercept_ref_position,
                shape_names=model_obj.shape_names,
                shape_ref_pos=model_obj.shape_ref_position,
                nest_spec=model_obj.nest_spec,
                mixing_vars=model_obj.mixing_vars,
                mixing_id_col=model_obj.mixing_id_col)
Get a dictionary of the keyword arguments needed to create the passed model object using pylogit . create_choice_model .
44,861
def ensure_valid_model_type(specified_type, model_type_list):
    """Raise a helpful ValueError unless `specified_type` is one of the
    model types in `model_type_list`."""
    if specified_type in model_type_list:
        return None
    message_lines = ["The specified model_type was not valid.",
                     "Valid model-types are {}".format(model_type_list),
                     "The passed model-type was: {}".format(specified_type)]
    raise ValueError("\n".join(message_lines))
Checks to make sure that specified_type is in model_type_list and raises a helpful error if this is not the case .
44,862
def ensure_valid_nums_in_specification_cols(specification, dataframe):
    """Raise a helpful ValueError if any specification column of `dataframe`
    is non-numeric or contains +/- infinity or NaN values."""
    problem_cols = []
    for col in specification:
        col_values = dataframe[col]
        # dtype kinds: f=float, i=signed int, u=unsigned int.
        if col_values.dtype.kind not in ('f', 'i', 'u'):
            problem_cols.append(col)
        elif np.isinf(col_values).any() or np.isnan(col_values).any():
            problem_cols.append(col)
    if problem_cols:
        # Original (typo'd) message text preserved verbatim.
        full_msg = ("The following columns contain either +/- inifinity "
                    "values, NaN values, or values that are not real "
                    "numbers (e.g. strings):\n{}")
        raise ValueError(full_msg.format(problem_cols))
    return None
Checks whether each column in specification contains numeric data excluding positive or negative infinity and excluding NaN . Raises ValueError if any of the columns do not meet these requirements .
44,863
def check_length_of_shape_or_intercept_names(name_list,
                                             num_alts,
                                             constrained_param,
                                             list_title):
    """Raise a helpful ValueError unless `name_list` has one name per
    estimated parameter, i.e. num_alts minus the constrained count."""
    correct_length = num_alts - constrained_param
    if len(name_list) == correct_length:
        return None
    message_lines = ["{} is of the wrong length:".format(list_title),
                     "len({}) == {}".format(list_title, len(name_list)),
                     "The correct length is: {}".format(correct_length)]
    raise ValueError("\n".join(message_lines))
Ensures that the length of the parameter names matches the number of parameters that will be estimated . Will raise a ValueError otherwise .
44,864
def check_type_of_nest_spec_keys_and_values(nest_spec):
    """Raise a helpful TypeError unless every key of `nest_spec` is a string
    and every value is a list."""
    keys_are_strings = all(isinstance(key, str) for key in nest_spec)
    values_are_lists = all(isinstance(value, list)
                           for value in nest_spec.values())
    if not (keys_are_strings and values_are_lists):
        raise TypeError("All nest_spec keys/values must be strings/lists.")
    return None
Ensures that the keys and values of nest_spec are strings and lists. Raises a helpful TypeError if they are not.
44,865
def check_for_empty_nests_in_nest_spec(nest_spec):
    """Raise a helpful ValueError if any nest in `nest_spec` has an empty
    list of alternatives."""
    empty_nests = [nest_name for nest_name in nest_spec
                   if len(nest_spec[nest_name]) == 0]
    if empty_nests:
        raise ValueError("The following nests are INCORRECTLY empty: "
                         "{}".format(empty_nests))
    return None
Ensures that the values of nest_spec are not empty lists . Raises a helpful ValueError if they are .
44,866
def ensure_alt_ids_in_nest_spec_are_ints(nest_spec, list_elements):
    """Raise a helpful ValueError unless every alternative id gathered from
    nest_spec's values is an integer. `nest_spec` itself is accepted only to
    mirror the sibling validators' signatures."""
    all_ints = all(isinstance(element, int) for element in list_elements)
    if not all_ints:
        raise ValueError("All elements of the nest_spec values should be "
                         "integers")
    return None
Ensures that the alternative id s in nest_spec are integers . Raises a helpful ValueError if they are not .
44,867
def ensure_alt_ids_are_only_in_one_nest(nest_spec, list_elements):
    """Raise a helpful ValueError if any alternative id appears in more than
    one nest (i.e. `list_elements` contains duplicates)."""
    has_duplicates = len(set(list_elements)) != len(list_elements)
    if has_duplicates:
        raise ValueError("Each alternative id should only be in a single "
                         "nest.")
    return None
Ensures that the alternative id s in nest_spec are only associated with a single nest . Raises a helpful ValueError if they are not .
44,868
def ensure_all_alt_ids_have_a_nest(nest_spec, list_elements, all_ids):
    """Raise a helpful ValueError if any id from `all_ids` is missing from
    the nest-spec alternative ids in `list_elements`."""
    unaccounted_alt_ids = [alt_id for alt_id in all_ids
                           if alt_id not in list_elements]
    if unaccounted_alt_ids:
        raise ValueError("Associate the following alternative ids with a "
                         "nest: {}".format(unaccounted_alt_ids))
    return None
Ensures that the alternative id s in nest_spec are all associated with a nest . Raises a helpful ValueError if they are not .
44,869
def ensure_nest_alts_are_valid_alts(nest_spec, list_elements, all_ids):
    """Raise a helpful ValueError if any alternative id used in nest_spec
    (`list_elements`) is not part of the universal choice set `all_ids`."""
    invalid_alt_ids = [element for element in list_elements
                       if element not in all_ids]
    if invalid_alt_ids:
        raise ValueError("The following elements are not in "
                         "df[alt_id_col]: {}".format(invalid_alt_ids))
    return None
Ensures that the alternative id s in nest_spec are all in the universal choice set for this dataset . Raises a helpful ValueError if they are not .
44,870
def check_type_and_size_of_param_list(param_list, expected_length):
    """Raise a helpful ValueError unless `param_list` is a list of exactly
    `expected_length` elements."""
    is_valid = (isinstance(param_list, list) and
                len(param_list) == expected_length)
    if not is_valid:
        raise ValueError("param_list must be a list containing {} "
                         "elements.".format(expected_length))
    return None
Ensure that param_list is a list with the expected length . Raises a helpful ValueError if this is not the case .
44,871
def check_type_of_param_list_elements(param_list):
    """Raise a helpful TypeError unless the first element of `param_list` is
    an ndarray and every other element is an ndarray or None."""
    first_is_array = isinstance(param_list[0], np.ndarray)
    others_valid = all(element is None or isinstance(element, np.ndarray)
                       for element in param_list)
    if not (first_is_array and others_valid):
        raise TypeError("param_list[0] must be a numpy array.\n"
                        "All other elements must be numpy arrays or None.")
    return None
Ensures that all elements of param_list are ndarrays or None. Raises a helpful TypeError otherwise.
44,872
def check_num_columns_in_param_list_arrays(param_list):
    """Raise a helpful ValueError unless every non-None array in
    `param_list` has the same number of columns as the first array."""
    try:
        expected_columns = param_list[0].shape[1]
        assert all(arr is None or arr.shape[1] == expected_columns
                   for arr in param_list)
    except AssertionError:
        raise ValueError("param_list arrays should have equal number of "
                         "columns.")
    return None
Ensure that each array in param_list that is not None has the same number of columns . Raises a helpful ValueError if otherwise .
44,873
def ensure_all_mixing_vars_are_in_the_name_dict(mixing_vars,
                                                name_dict,
                                                ind_var_names):
    """Raise a helpful ValueError if any mixing variable is not among the
    index variable names.

    Parameters
    ----------
    mixing_vars : list of strings or None.
        Names of the variables whose coefficients are randomly distributed.
    name_dict : dict or None.
        User-supplied name dictionary; only used to choose which error
        message to show.
    ind_var_names : list of strings.
        Names of the index variables (defaults or from `name_dict`).

    Returns
    -------
    None.
    """
    if mixing_vars is None:
        return None
    problem_names = [variable_name for variable_name in mixing_vars
                     if variable_name not in ind_var_names]
    if problem_names != []:
        if name_dict:
            msg = ("The following parameter names were not in the values of "
                   "the passed name dictionary: \n{}").format(problem_names)
        else:
            # BUG FIX: corrected the "paramter" typo in this error message.
            msg = ("The following parameter names did not match any of the "
                   "default names generated for the parameters to be "
                   "estimated: \n{}"
                   "The default names that were generated were: "
                   "\n{}").format(problem_names, ind_var_names)
        raise ValueError(msg)
    return None
Ensures that all of the variables listed in mixing_vars are present in ind_var_names . Raises a helpful ValueError if otherwise .
44,874
def compute_aic(model_object):
    """Akaike Information Criterion: -2 * log-likelihood + 2 * num params."""
    assert isinstance(model_object.params, pd.Series)
    assert isinstance(model_object.log_likelihood, Number)
    num_params = model_object.params.size
    return -2 * model_object.log_likelihood + 2 * num_params
Compute the Akaike Information Criteria for an estimated model .
44,875
def compute_bic(model_object):
    """Bayesian Information Criterion:
    -2 * log-likelihood + ln(num obs) * num params."""
    assert isinstance(model_object.params, pd.Series)
    assert isinstance(model_object.log_likelihood, Number)
    assert isinstance(model_object.nobs, Number)
    penalty = np.log(model_object.nobs) * model_object.params.size
    return -2 * model_object.log_likelihood + penalty
Compute the Bayesian Information Criteria for an estimated model .
44,876
def _create_results_summary ( self ) : needed_attributes = [ "params" , "standard_errors" , "tvalues" , "pvalues" , "robust_std_errs" , "robust_t_stats" , "robust_p_vals" ] try : assert all ( [ hasattr ( self , attr ) for attr in needed_attributes ] ) assert all ( [ isinstance ( getattr ( self , attr ) , pd . Series ) for attr in needed_attributes ] ) except AssertionError : msg = "Call this function only after setting/calculating all other" msg_2 = " estimation results attributes" raise NotImplementedError ( msg + msg_2 ) self . summary = pd . concat ( ( self . params , self . standard_errors , self . tvalues , self . pvalues , self . robust_std_errs , self . robust_t_stats , self . robust_p_vals ) , axis = 1 ) return None
Create the dataframe that displays the estimation results and store it on the model instance .
44,877
def _record_values_for_fit_summary_and_statsmodels(self):
    """Record fit measures and the statsmodels-style attributes (nobs,
    df_model, df_resid, llf, bse, aic, bic) on this results object.

    Raises NotImplementedError when the prerequisite attributes are missing
    or unset.
    """
    required = ["fitted_probs",
                "params",
                "log_likelihood",
                "standard_errors"]
    ready = all(getattr(self, attr, None) is not None for attr in required)
    if not ready:
        raise NotImplementedError("Call this function only after "
                                  "setting/calculating all other estimation "
                                  "results attributes")
    self.nobs = self.fitted_probs.shape[0]
    self.df_model = self.params.shape[0]
    self.df_resid = self.nobs - self.df_model
    self.llf = self.log_likelihood
    self.bse = self.standard_errors
    self.aic = compute_aic(self)
    self.bic = compute_bic(self)
    return None
Store the various estimation results that are used to describe how well the estimated model fits the given dataset and record the values that are needed for the statsmodels estimation results table . All values are stored on the model instance .
44,878
def _store_inferential_results ( self , value_array , index_names , attribute_name , series_name = None , column_names = None ) : if len ( value_array . shape ) == 1 : assert series_name is not None new_attribute_value = pd . Series ( value_array , index = index_names , name = series_name ) elif len ( value_array . shape ) == 2 : assert column_names is not None new_attribute_value = pd . DataFrame ( value_array , index = index_names , columns = column_names ) setattr ( self , attribute_name , new_attribute_value ) return None
Store the estimation results that relate to statistical inference such as parameter estimates standard errors p - values etc .
44,879
def _store_generic_inference_results(self,
                                     results_dict,
                                     all_params,
                                     all_names):
    """Store the inference values common to all choice models: coefficients,
    gradient, hessian, covariance, parameters, standard errors, t-stats,
    p-values, Fisher information, and their robust counterparts.

    Parameters
    ----------
    results_dict : dict with 'utility_coefs', 'final_gradient',
        'final_hessian', and 'fisher_info' entries.
    all_params : list of 1D ndarrays; concatenated to form the full
        parameter vector.
    all_names : list of strings naming every estimated parameter, in order.

    Returns
    -------
    None. All values are stored as attributes on `self`.
    """
    self._store_inferential_results(results_dict["utility_coefs"],
                                    index_names=self.ind_var_names,
                                    attribute_name="coefs",
                                    series_name="coefficients")
    self._store_inferential_results(results_dict["final_gradient"],
                                    index_names=all_names,
                                    attribute_name="gradient",
                                    series_name="gradient")
    self._store_inferential_results(results_dict["final_hessian"],
                                    index_names=all_names,
                                    attribute_name="hessian",
                                    column_names=all_names)
    # Classic asymptotic covariance: negative inverse hessian.
    self._store_inferential_results(-1 * scipy.linalg.inv(self.hessian),
                                    index_names=all_names,
                                    attribute_name="cov",
                                    column_names=all_names)
    self._store_inferential_results(np.concatenate(all_params, axis=0),
                                    index_names=all_names,
                                    attribute_name="params",
                                    series_name="parameters")
    self._store_inferential_results(np.sqrt(np.diag(self.cov)),
                                    index_names=all_names,
                                    attribute_name="standard_errors",
                                    series_name="std_err")
    self.tvalues = self.params / self.standard_errors
    self.tvalues.name = "t_stats"
    # Two-sided p-values from the standard normal survival function.
    p_vals = 2 * scipy.stats.norm.sf(np.abs(self.tvalues))
    self._store_inferential_results(p_vals,
                                    index_names=all_names,
                                    attribute_name="pvalues",
                                    series_name="p_values")
    self._store_inferential_results(results_dict["fisher_info"],
                                    index_names=all_names,
                                    attribute_name="fisher_information",
                                    column_names=all_names)
    # Sandwich (robust) covariance from hessian + Fisher information.
    robust_covariance = calc_asymptotic_covariance(self.hessian,
                                                   self.fisher_information)
    self._store_inferential_results(robust_covariance,
                                    index_names=all_names,
                                    attribute_name="robust_cov",
                                    column_names=all_names)
    self._store_inferential_results(np.sqrt(np.diag(self.robust_cov)),
                                    index_names=all_names,
                                    attribute_name="robust_std_errs",
                                    series_name="robust_std_err")
    self.robust_t_stats = self.params / self.robust_std_errs
    self.robust_t_stats.name = "robust_t_stats"
    one_sided_p_vals = scipy.stats.norm.sf(np.abs(self.robust_t_stats))
    self._store_inferential_results(2 * one_sided_p_vals,
                                    index_names=all_names,
                                    attribute_name="robust_p_vals",
                                    series_name="robust_p_values")
    return None
Store the model inference values that are common to all choice models . This includes things like index coefficients gradients hessians asymptotic covariance matrices t - values p - values and robust versions of these values .
44,880
def _store_optional_parameters ( self , optional_params , name_list_attr , default_name_str , all_names , all_params , param_attr_name , series_name ) : num_elements = optional_params . shape [ 0 ] parameter_names = getattr ( self , name_list_attr ) if parameter_names is None : parameter_names = [ default_name_str . format ( x ) for x in range ( 1 , num_elements + 1 ) ] all_names = list ( parameter_names ) + list ( all_names ) all_params . insert ( 0 , optional_params ) self . _store_inferential_results ( optional_params , index_names = parameter_names , attribute_name = param_attr_name , series_name = series_name ) return all_names , all_params
Extract the optional parameters from the results_dict save them to the model object and update the list of all parameters and all parameter names .
44,881
def _adjust_inferential_results_for_parameter_constraints ( self , constraints ) : if constraints is not None : inferential_attributes = [ "standard_errors" , "tvalues" , "pvalues" , "robust_std_errs" , "robust_t_stats" , "robust_p_vals" ] assert all ( [ hasattr ( self , x ) for x in inferential_attributes ] ) assert hasattr ( self , "params" ) all_names = self . params . index . tolist ( ) for series in [ getattr ( self , x ) for x in inferential_attributes ] : for pos in constraints : series . loc [ all_names [ pos ] ] = np . nan return None
Ensure that parameters that were constrained during estimation do not have any values shown for inferential results. After all, no inference was performed.
44,882
def _check_result_dict_for_needed_keys(self, results_dict):
    """Raise a helpful ValueError if `results_dict` lacks any of the keys
    listed in the module-level `needed_result_keys`."""
    missing = [key for key in needed_result_keys if key not in results_dict]
    if missing:
        raise ValueError("The following keys are missing from "
                         "results_dict\n{}".format(missing))
    return None
Ensure that results_dict has the needed keys to store all the estimation results . Raise a helpful ValueError otherwise .
44,883
def _add_mixing_variable_names_to_individual_vars ( self ) : assert isinstance ( self . ind_var_names , list ) already_included = any ( [ "Sigma " in x for x in self . ind_var_names ] ) if self . mixing_vars is not None and not already_included : new_ind_var_names = [ "Sigma " + x for x in self . mixing_vars ] self . ind_var_names += new_ind_var_names return None
Ensure that the model objects mixing variables are added to its list of individual variables .
44,884
def print_summaries(self):
    """Print the measures of fit and the estimation results, or raise
    NotImplementedError if the model has not been estimated yet."""
    estimated = hasattr(self, "fit_summary") and hasattr(self, "summary")
    if not estimated:
        msg = ("This {} object has not yet been estimated so there "
               "are no estimation summaries to print.")
        raise NotImplementedError(msg.format(self.model_type))
    print("\n")
    print(self.fit_summary)
    print("=" * 30)
    print(self.summary)
    return None
Returns None . Will print the measures of fit and the estimation results for the model .
44,885
def prefix(*kinds):
    """Decorator marking a parser method as the handler for prefix tokens of
    the given kinds; repeated application accumulates kinds."""
    def wrap(fn):
        if hasattr(fn, 'prefix_kinds'):
            fn.prefix_kinds.extend(kinds)
        else:
            fn.prefix_kinds = list(kinds)
        return fn
    return wrap
Decorate a method as handling prefix tokens of the given kinds
44,886
def infix(*kinds):
    """Decorate a method as handling infix tokens of the given kinds."""
    def wrap(fn):
        # Accumulate kinds across repeated decoration of the same function.
        if hasattr(fn, 'infix_kinds'):
            fn.infix_kinds.extend(kinds)
        else:
            fn.infix_kinds = list(kinds)
        return fn
    return wrap
Decorate a method as handling infix tokens of the given kinds
44,887
def attempt(self, *kinds):
    """
    Try to consume the next token. If one exists and (when kinds are given)
    its kind is one of `kinds`, advance past it and return it; otherwise
    return None. Any pending parse error is raised first.
    """
    if self._error:
        raise self._error

    token = self.next_token
    # No more input, or the token's kind is not acceptable.
    if not token or (kinds and token.kind not in kinds):
        return None

    self._advance()
    return token
Try to get the next token, returning it if it matches one of the given kinds and None otherwise. If no kinds are given, any kind is accepted.
44,888
def require(self, *kinds):
    """
    Consume and return the next token, raising a SyntaxError if the input
    has ended or (when kinds are given) the token's kind is not one of
    `kinds`. With no kinds, any next token is returned.
    """
    token = self.attempt()
    if not token:
        raise SyntaxError('Unexpected end of input')
    if kinds and token.kind not in kinds:
        # SyntaxError here is the parser's error type, which supplies an
        # `unexpected` constructor describing the mismatch.
        raise SyntaxError.unexpected(token, kinds)
    return token
Get the next token, raising an exception if it doesn't match one of the given kinds or the input ends. If no kinds are given, returns the next token of any kind.
44,889
def local_symbol_table(imports=None, symbols=()):
    """
    Construct a local symbol table.

    Args:
        imports: Optional iterable of shared symbol tables to import into
            the local table.
        symbols: Iterable of initial local symbol texts.

    Returns:
        SymbolTable: The newly constructed local table.
    """
    return SymbolTable(
        table_type=LOCAL_TABLE_TYPE,
        symbols=symbols,
        imports=imports
    )
Constructs a local symbol table .
44,890
def shared_symbol_table(name, version, symbols, imports=None):
    """
    Construct a shared symbol table.

    Args:
        name: The name of the shared symbol table.
        version: The version of the shared symbol table.
        symbols: Iterable of symbol texts for the table.
        imports: Optional iterable of shared symbol tables to import.

    Returns:
        SymbolTable: The newly constructed shared table.
    """
    return SymbolTable(
        table_type=SHARED_TABLE_TYPE,
        symbols=symbols,
        name=name,
        version=version,
        imports=imports
    )
Constructs a shared symbol table .
44,891
def placeholder_symbol_table(name, version, max_id):
    """
    Construct a shared symbol table whose symbols all have unknown text.

    This is useful when an import cannot be resolved: symbol IDs must still
    be allocated even though their text is unknown.

    Args:
        name: The name of the shared symbol table.
        version (int): The version of the table; must be >= 1.
        max_id (int): The number of (unknown-text) symbols; must be >= 0.

    Returns:
        SymbolTable: A substitute shared table with `max_id` symbols whose
        text is None.

    Raises:
        ValueError: If `version` < 1 or `max_id` < 0.
    """
    if version <= 0:
        # BUG FIX: corrected typo 'grater' -> 'greater' in the error message.
        raise ValueError('Version must be greater than or equal to 1: %s' % version)
    if max_id < 0:
        raise ValueError('Max ID must be zero or positive: %s' % max_id)

    return SymbolTable(
        table_type=SHARED_TABLE_TYPE,
        symbols=repeat(None, max_id),
        name=name,
        version=version,
        is_substitute=True
    )
Constructs a shared symbol table that consists of symbols that all have no known text.
44,892
def substitute_symbol_table(table, version, max_id):
    """
    Substitute a given shared symbol table for another version of it.

    * If `table` has more symbols than `max_id`, the substitute is truncated
      to the first `max_id` symbols.
    * If it has fewer, the substitute is padded with unknown-text (None)
      symbols up to `max_id`.

    Args:
        table (SymbolTable): The shared table to substitute from.
        version (int): The version of the substitute; must be >= 1.
        max_id (int): The maximum symbol ID of the substitute; must be >= 0.

    Returns:
        SymbolTable: The substitute shared table.

    Raises:
        ValueError: If `table` is not shared, `version` < 1, or `max_id` < 0.
    """
    if not table.table_type.is_shared:
        raise ValueError('Symbol table to substitute from must be a shared table')
    if version <= 0:
        # BUG FIX: corrected typo 'grater' -> 'greater' in the error message.
        raise ValueError('Version must be greater than or equal to 1: %s' % version)
    if max_id < 0:
        raise ValueError('Max ID must be zero or positive: %s' % max_id)

    if max_id <= table.max_id:
        # Truncate: keep only the first `max_id` symbol texts.
        symbols = (token.text for token in islice(table, max_id))
    else:
        # Pad: append unknown-text symbols beyond the source table's max ID.
        symbols = chain(
            (token.text for token in table),
            repeat(None, max_id - table.max_id)
        )
    return SymbolTable(
        table_type=SHARED_TABLE_TYPE,
        symbols=symbols,
        name=table.name,
        version=version,
        is_substitute=True
    )
Substitutes a given shared symbol table for another version .
44,893
def __add(self, token):
    """
    Unconditionally append `token` to the table.

    The text-to-token mapping only records the first token seen for any
    given text; later duplicates and tokens with unknown text (None) are
    not mapped.
    """
    self.__symbols.append(token)
    text = token.text
    if text is None or text in self.__mapping:
        return
    self.__mapping[text] = token
Unconditionally adds a token to the table .
44,894
def __add_shared(self, original_token):
    """
    Add a token to this table, normalizing both its SID and its import
    reference to be relative to this table.

    Returns the normalized token.
    """
    sid = self.__new_sid()
    normalized = SymbolToken(original_token.text,
                             sid,
                             self.__import_location(sid))
    self.__add(normalized)
    return normalized
Adds a token normalizing the SID and import reference to this table .
44,895
def __add_import(self, original_token):
    """
    Add a token to this table, normalizing only its SID while preserving
    the token's original import location.

    Returns the normalized token.
    """
    sid = self.__new_sid()
    normalized = SymbolToken(original_token.text,
                             sid,
                             original_token.location)
    self.__add(normalized)
    return normalized
Adds a token normalizing only the SID
44,896
def __add_text(self, text):
    """
    Add the given Unicode text as a locally defined symbol.

    Raises:
        TypeError: If `text` is neither None nor a Unicode sequence.
    """
    if text is not None and not isinstance(text, six.text_type):
        raise TypeError(
            'Local symbol definition must be a Unicode sequence or None: %r' % text)

    sid = self.__new_sid()
    # Only shared tables carry an import location for their symbols.
    location = self.__import_location(sid) if self.table_type.is_shared else None

    token = SymbolToken(text, sid, location)
    self.__add(token)
    return token
Adds the given Unicode text as a locally defined symbol .
44,897
def intern(self, text):
    """
    Intern the given Unicode sequence into this (local) symbol table,
    returning an existing token for `text` when present, or a newly added
    one otherwise.

    Raises:
        TypeError: If this is a shared table, or `text` is not Unicode.
    """
    if self.table_type.is_shared:
        raise TypeError('Cannot intern on shared symbol table')
    if not isinstance(text, six.text_type):
        raise TypeError('Cannot intern non-Unicode sequence into symbol table: %r' % text)

    # Reuse an existing token for this text when possible.
    existing = self.get(text)
    return existing if existing is not None else self.__add_text(text)
Interns the given Unicode sequence into the symbol table .
44,898
def get(self, key, default=None):
    """
    Return a token by symbol text or by local symbol ID, with a default.

    Args:
        key: Symbol text (Unicode) or a local symbol ID (int).
        default: Returned when an int `key` is out of range. Text lookups
            ignore `default` and return None when absent.

    Raises:
        TypeError: If `key` is neither an int nor a Unicode sequence.
    """
    if isinstance(key, six.text_type):
        return self.__mapping.get(key, None)
    if not isinstance(key, int):
        raise TypeError('Key must be int or Unicode sequence.')

    # SID 0 is special: it always maps to the symbol-zero token.
    if key == 0:
        return SYMBOL_ZERO_TOKEN

    pos = key - 1
    if pos < 0 or key > len(self):
        return default
    return self.__symbols[pos]
Returns a token by text or local ID with a default .
44,899
def register(self, table):
    """
    Add a shared symbol table to the catalog, indexed by name and version.

    Raises:
        ValueError: If `table` is a system table, a local table, or a
            substitute table.
    """
    table_type = table.table_type
    if table_type.is_system:
        raise ValueError('Cannot add system table to catalog')
    if not table_type.is_shared:
        raise ValueError('Cannot add local table to catalog')
    if table.is_substitute:
        raise ValueError('Cannot add substitute table to catalog')

    # Group registered tables by name, then by version.
    versions = self.__tables.setdefault(table.name, {})
    versions[table.version] = table
Adds a shared table to the catalog .