idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
37,100
def data(cls, cube, weighted, prune):
    """Return ndarray representing table index by margin."""
    instance = cls()
    return instance._data(cube, weighted, prune)
Return ndarray representing table index by margin .
37,101
def _data(self, cube, weighted, prune):
    """Return ndarray representing table index by margin.

    For each slice, divides the row proportions by the column-margin
    share of the table total.
    """
    result = []
    for slice_ in cube.slices:
        # MR cubes get dedicated handling; delegate and return early.
        if cube.has_mr:
            return self._mr_index(cube, weighted, prune)
        # Each column's share of the table total.
        num = slice_.margin(axis=0, weighted=weighted, prune=prune)
        den = slice_.margin(weighted=weighted, prune=prune)
        margin = num / den
        proportions = slice_.proportions(axis=1, weighted=weighted, prune=prune)
        result.append(proportions / margin)
    if len(result) == 1 and cube.ndim < 3:
        # Single-slice (2D or lower) cube: unwrap the one-element list.
        result = result[0]
    else:
        if prune:
            # Preserve each slice's pruning mask in the stacked result.
            mask = np.array([slice_.mask for slice_ in result])
            result = np.ma.masked_array(result, mask)
        else:
            result = np.array(result)
    return result
ndarray representing table index by margin .
37,102
def kakwani(values, ineq_axis, weights=None):
    """Compute the Kakwani index (area between pseudo-Lorenz and Lorenz curves)."""
    from scipy.integrate import simps
    effective_weights = ones(len(values)) if weights is None else weights
    _, pseudo_y = pseudo_lorenz(values, ineq_axis, effective_weights)
    curve_x, curve_y = lorenz(ineq_axis, effective_weights)
    return simps(curve_y - pseudo_y, curve_x)
Computes the Kakwani index
37,103
def lorenz(values, weights=None):
    """Compute Lorenz curve coordinates.

    Parameters
    ----------
    values : sequence of numeric values (e.g. incomes).
    weights : optional sequence of weights; defaults to uniform weights.

    Returns
    -------
    (x, y) : pair of pandas Series with cumulative population share ``x``
        and cumulative weighted-value share ``y``, each normalized to 1.
    """
    if weights is None:
        weights = ones(len(values))
    df = pd.DataFrame({'v': values, 'w': weights})
    df = df.sort_values(by='v')
    x = cumsum(df['w'])
    # Positional access to the last element; float() on a one-element
    # Series slice is deprecated in modern pandas.
    x = x / float(x.iloc[-1])
    y = cumsum(df['v'] * df['w'])
    y = y / float(y.iloc[-1])
    return x, y
Computes Lorenz Curve coordinates
37,104
def pvals(cls, slice_, axis=0, weighted=True):
    """Return Wishart CDF values for slice columns as a square ndarray."""
    significance = cls._factory(slice_, axis, weighted)
    return significance.pvals
Wishart CDF values for slice columns as square ndarray .
37,105
def _chi_squared ( self , proportions , margin , observed ) : n = self . _element_count chi_squared = np . zeros ( [ n , n ] ) for i in xrange ( 1 , n ) : for j in xrange ( 0 , n - 1 ) : denominator = 1 / margin [ i ] + 1 / margin [ j ] chi_squared [ i , j ] = chi_squared [ j , i ] = ( np . sum ( np . square ( proportions [ : , i ] - proportions [ : , j ] ) / observed ) / denominator ) return chi_squared
return ndarray of chi - squared measures for proportions columns .
37,106
def _pvals_from_chi_squared(self, pairwise_chisq):
    """Return statistical significance for proportions columns."""
    cdf_values = WishartCDF(pairwise_chisq, self._n_min, self._n_max).values
    return self._intersperse_insertion_rows_and_columns(1.0 - cdf_values)
return statistical significance for props columns .
37,107
def _factory(slice_, axis, weighted):
    """Return the PairwiseSignificance subclass matching the slice's dim types."""
    is_mr = slice_.dim_types[0] == DT.MR_SUBVAR
    cls = _MrXCatPairwiseSignificance if is_mr else _CatXCatPairwiseSignificance
    return cls(slice_, axis, weighted)
return subclass for PairwiseSignificance based on slice dimension types .
37,108
def _intersperse_insertion_rows_and_columns ( self , pairwise_pvals ) : for i in self . _insertion_indices : pairwise_pvals = np . insert ( pairwise_pvals , i , np . nan , axis = 0 ) pairwise_pvals = np . insert ( pairwise_pvals , i , np . nan , axis = 1 ) return pairwise_pvals
Return pvals matrix with inserted NaN rows and columns as numpy . ndarray .
37,109
def _opposite_axis_margin ( self ) : off_axis = 1 - self . _axis return self . _slice . margin ( axis = off_axis , include_mr_cat = self . _include_mr_cat )
ndarray representing margin along the axis opposite of self . _axis
37,110
def _proportions ( self ) : return self . _slice . proportions ( axis = self . _axis , include_mr_cat = self . _include_mr_cat )
ndarray representing slice proportions along correct axis .
37,111
def build_dummies_dict(data):
    """Return dict mapping each unique value to its boolean indicator vector."""
    return {value: (data == value) for value in unique(data)}
Return a dict with unique values as keys and vectors as values
37,112
def ca_main_axis(self):
    """Return the categorical axis index for a univariate CA slice, else None."""
    try:
        subvar_position = self.dim_types.index(DT.CA_SUBVAR)
    except ValueError:
        return None
    return 1 - subvar_position
For univariate CA the main axis is the categorical axis
37,113
def can_compare_pairwise(self):
    """Return True if this slice can compute pairwise comparisons."""
    if self.ndim != 2:
        return False
    return all(dt in DT.ALLOWED_PAIRWISE_TYPES for dt in self.dim_types)
Return bool indicating if slice can compute pairwise comparisons .
37,114
def get_shape(self, prune=False, hs_dims=None):
    """Return tuple of array dimension lengths, optionally pruned."""
    if not prune:
        return self.as_array(include_transforms_for_dims=hs_dims).shape
    pruned = compress_pruned(
        self.as_array(prune=True, include_transforms_for_dims=hs_dims)
    )
    # Singleton dimensions are dropped from the pruned shape.
    return tuple(length for length in pruned.shape if length > 1)
Tuple of array dimensions lengths .
37,115
def index_table(self, axis=None, baseline=None, prune=False):
    """Return index percentages for a given axis and baseline.

    The index is ``proportions / baseline * 100``; when no baseline is
    supplied one is derived from the slice itself.
    """
    proportions = self.proportions(axis=axis)
    baseline = (
        baseline if baseline is not None else self._prepare_index_baseline(axis)
    )
    # Reshape a 1D baseline to a column vector so it broadcasts across
    # the proportions matrix for column-wise (axis=0) indexing.
    if (
        axis == 0
        and len(baseline.shape) <= 1
        and self.ndim == len(self.get_shape())
    ):
        baseline = baseline[:, None]
    indexes = proportions / baseline * 100
    return self._apply_pruning_mask(indexes) if prune else indexes
Return index percentages for a given axis and baseline .
37,116
def labels(self, hs_dims=None, prune=False):
    """Return labels for the cube slice, optionally pruned per dimension."""
    if self.ca_as_0th:
        # CA-as-0th slices drop the first (array) dimension's labels.
        labels = self._cube.labels(include_transforms_for_dims=hs_dims)[1:]
    else:
        # Keep only the trailing two dimensions' labels for this slice.
        labels = self._cube.labels(include_transforms_for_dims=hs_dims)[-2:]
    if not prune:
        return labels

    def prune_dimension_labels(labels, prune_indices):
        # Drop each label whose corresponding prune flag is set.
        labels = [
            label for label, prune in zip(labels, prune_indices) if not prune
        ]
        return labels

    labels = [
        prune_dimension_labels(dim_labels, dim_prune_inds)
        for dim_labels, dim_prune_inds in zip(labels, self._prune_indices(hs_dims))
    ]
    return labels
Get labels for the cube slice and perform pruning by slice .
37,117
def margin(
    self,
    axis=None,
    weighted=True,
    include_missing=False,
    include_transforms_for_dims=None,
    prune=False,
    include_mr_cat=False,
):
    """Return ndarray representing slice margin across the selected axis."""
    adjusted_axis = self._calculate_correct_axis_for_cube(axis)
    hs_dims = self._hs_dims_for_cube(include_transforms_for_dims)
    cube_margin = self._cube.margin(
        axis=adjusted_axis,
        weighted=weighted,
        include_missing=include_missing,
        include_transforms_for_dims=hs_dims,
        prune=prune,
        include_mr_cat=include_mr_cat,
    )
    return self._extract_slice_result_from_cube(cube_margin)
Return ndarray representing slice margin across selected axis .
37,118
def min_base_size_mask(self, size, hs_dims=None, prune=False):
    """Return MinBaseSizeMask with row, column and table masks for this slice."""
    mask = MinBaseSizeMask(self, size, hs_dims=hs_dims, prune=prune)
    return mask
Returns MinBaseSizeMask object with correct row col and table masks .
37,119
def mr_dim_ind(self):
    """Return MR dimension index (or tuple of indices) local to this slice."""
    cube_mr_ind = self._cube.mr_dim_ind
    if self._cube.ndim != 3:
        return cube_mr_ind
    if isinstance(cube_mr_ind, int):
        # The 0th cube dimension is not part of the slice.
        return None if cube_mr_ind == 0 else cube_mr_ind - 1
    if isinstance(cube_mr_ind, tuple):
        shifted = tuple(i - 1 for i in cube_mr_ind if i)
        return shifted if len(shifted) > 1 else shifted[0]
    return cube_mr_ind
Get the correct index of the MR dimension in the cube slice .
37,120
def scale_means(self, hs_dims=None, prune=False):
    """Return list of column and row scaled means for this slice."""
    all_scale_means = self._cube.scale_means(hs_dims, prune)
    if not self.ca_as_0th:
        return all_scale_means[self._index]
    # For CA-as-0th, the row means live in the first slice's last entry.
    row_means = all_scale_means[0][-1]
    if row_means is None:
        return [None]
    return [row_means[self._index]]
Return list of column and row scaled means for this slice .
37,121
def table_name(self):
    """Return display name for this slice; None for ordinary 2D cubes."""
    if self._cube.ndim < 3 and not self.ca_as_0th:
        return None
    slice_label = self._cube.labels()[0][self._index]
    return "%s: %s" % (self._cube.name, slice_label)
Get slice name .
37,122
def wishart_pairwise_pvals(self, axis=0):
    """Return square symmetric matrix of pairwise column-comparison p-values.

    Raises
    ------
    NotImplementedError
        If *axis* is not 0; only column comparisons are supported.
    """
    if axis != 0:
        # Fixed typo in the message ("colums" -> "columns").
        raise NotImplementedError(
            "Pairwise comparison only implemented for columns"
        )
    return WishartPairwiseSignificance.pvals(self, axis=axis)
Return square symmetric matrix of pairwise column - comparison p - values .
37,123
def pvals(self, weighted=True, prune=False, hs_dims=None):
    """Return 2D ndarray of two-tailed p-values from z-scores."""
    zscores = self.zscore(weighted=weighted, prune=prune, hs_dims=hs_dims)
    # Two-tailed p-value under the standard normal distribution.
    p_values = 2 * (1 - norm.cdf(np.abs(zscores)))
    if prune:
        return self._apply_pruning_mask(p_values, hs_dims)
    return p_values
Return 2D ndarray with calculated P values
37,124
def pairwise_indices(self, alpha=0.05, only_larger=True, hs_dims=None):
    """Return indices of columns where p < alpha for pairwise t-tests."""
    significance = PairwiseSignificance(
        self, alpha=alpha, only_larger=only_larger, hs_dims=hs_dims
    )
    return significance.pairwise_indices
Indices of columns where p < alpha for column - comparison t - tests
37,125
def _array_type_std_res ( self , counts , total , colsum , rowsum ) : if self . mr_dim_ind == 0 : total = total [ : , np . newaxis ] rowsum = rowsum [ : , np . newaxis ] expected_counts = rowsum * colsum / total variance = rowsum * colsum * ( total - rowsum ) * ( total - colsum ) / total ** 3 return ( counts - expected_counts ) / np . sqrt ( variance )
Return ndarray containing standard residuals for array values .
37,126
def _calculate_std_res(self, counts, total, colsum, rowsum):
    """Return ndarray of standardized residuals, dispatching on dim types."""
    has_array_dim = bool(set(self.dim_types) & DT.ARRAY_TYPES)
    if has_array_dim:
        return self._array_type_std_res(counts, total, colsum, rowsum)
    return self._scalar_type_std_res(counts, total, colsum, rowsum)
Return ndarray containing standard residuals .
37,127
def _calculate_correct_axis_for_cube ( self , axis ) : if self . _cube . ndim < 3 : if self . ca_as_0th and axis is None : return 1 return axis if isinstance ( axis , int ) : axis += 1 return axis
Return correct axis for cube based on ndim .
37,128
def _scalar_type_std_res ( self , counts , total , colsum , rowsum ) : expected_counts = expected_freq ( counts ) residuals = counts - expected_counts variance = ( np . outer ( rowsum , colsum ) * np . outer ( total - rowsum , total - colsum ) / total ** 3 ) return residuals / np . sqrt ( variance )
Return ndarray containing standard residuals for category values .
37,129
def data(self):
    """Return list of mean numeric values of categorical responses, per axis.

    Entries are None for axes with no numeric values defined.
    """
    means = []
    table = self._slice.as_array()
    products = self._inner_prods(table, self.values)
    for axis, product in enumerate(products):
        if product is None:
            # No numeric values on this axis; propagate the None.
            means.append(product)
            continue
        valid_indices = self._valid_indices(axis)
        # Weighted average of numeric values over valid cells.
        num = np.sum(product[valid_indices], axis)
        den = np.sum(table[valid_indices], axis)
        mean = num / den
        if not isinstance(mean, np.ndarray):
            # Keep a consistent ndarray shape for scalar results.
            mean = np.array([mean])
        means.append(mean)
    return means
list of mean numeric values of categorical responses .
37,130
def margin(self, axis):
    """Return marginal value of the current slice scale means.

    Raises
    ------
    ValueError
        For 1D slices, where scale means are already scalar.
    """
    if self._slice.ndim < 2:
        msg = (
            "Scale Means marginal cannot be calculated on 1D cubes, as"
            "the scale means already get reduced to a scalar value."
        )
        raise ValueError(msg)
    dimension_index = 1 - axis
    margin = self._slice.margin(axis=axis)
    if len(margin.shape) > 1:
        # For MR dimensions keep only the selected (0th) part of the margin.
        index = [
            0 if d.dimension_type == DT.MR else slice(None)
            for d in self._slice.dimensions
        ]
        margin = margin[index]
    total = np.sum(margin)
    values = self.values[dimension_index]
    if values is None:
        # No numeric values defined on the opposite dimension.
        return None
    return np.sum(values * margin) / total
Return marginal value of the current slice scaled means .
37,131
def values(self):
    """Return list of numeric-value ndarrays per dimension (None if absent)."""
    result = []
    for dim in self._slice.dimensions:
        numeric_values = dim.numeric_values
        has_real_values = bool(numeric_values) and any(~np.isnan(numeric_values))
        result.append(np.array(numeric_values) if has_real_values else None)
    return result
list of ndarray value - ids for each dimension in slice .
37,132
def compress_pruned(table):
    """Return table with fully-masked rows and columns removed.

    Non-masked inputs pass through unchanged; 0D and 1D masked arrays
    get their own compression treatment.
    """
    if not isinstance(table, np.ma.core.MaskedArray):
        return table
    if table.ndim == 0:
        return table.data
    if table.ndim == 1:
        return np.ma.compressed(table)
    keep_rows = ~table.mask.all(axis=1)
    keep_cols = ~table.mask.all(axis=0)
    table = table[keep_rows, :][:, keep_cols]
    if table.dtype == float and table.mask.any():
        # Remaining partially-masked cells become NaN.
        table[table.mask] = np.nan
    return table
Compress table based on pruning mask .
37,133
def intersperse_hs_in_std_res(slice_, hs_dims, res):
    """Insert NaN placeholder rows/cols for subtotal insertions into res."""
    for dim, inds in enumerate(slice_.inserted_hs_indices()):
        if dim not in hs_dims:
            continue
        # Negative axis addresses the trailing (slice-local) dimensions.
        axis = dim - slice_.ndim
        for idx in inds:
            res = np.insert(res, idx, np.nan, axis=axis)
    return res
Perform the insertions of place - holding rows and cols for insertions .
37,134
def inflate_parameter_leaf(sub_parameter, base_year, inflator, unit_type='unit'):
    """Inflate a Parameter leaf according to its unit type.

    Scales with threshold units recurse into their bracket thresholds;
    other parameters have their base-year value carried forward to
    ``base_year + 1`` multiplied by ``1 + inflator``.
    """
    if isinstance(sub_parameter, Scale):
        if unit_type == 'threshold_unit':
            # Inflate each bracket threshold of the scale.
            for bracket in sub_parameter.brackets:
                threshold = bracket.children['threshold']
                inflate_parameter_leaf(threshold, base_year, inflator)
            return
    else:
        # Instants dated up to and including the base year.
        kept_instants_str = [
            parameter_at_instant.instant_str
            for parameter_at_instant in sub_parameter.values_list
            if periods.instant(parameter_at_instant.instant_str).year <= base_year
        ]
        if not kept_instants_str:
            return
        last_admissible_instant_str = max(kept_instants_str)
        # Re-assert the last admissible value so it anchors the series.
        sub_parameter.update(
            start=last_admissible_instant_str,
            value=sub_parameter(last_admissible_instant_str)
        )
        restricted_to_base_year_value_list = [
            parameter_at_instant
            for parameter_at_instant in sub_parameter.values_list
            if periods.instant(parameter_at_instant.instant_str).year == base_year
        ]
        if restricted_to_base_year_value_list:
            # Shift every base-year dated value to the next year, inflated.
            for parameter_at_instant in reversed(restricted_to_base_year_value_list):
                if parameter_at_instant.instant_str.startswith(str(base_year)):
                    value = (
                        parameter_at_instant.value * (1 + inflator)
                        if parameter_at_instant.value is not None
                        else None
                    )
                    sub_parameter.update(
                        start=parameter_at_instant.instant_str.replace(
                            str(base_year), str(base_year + 1)
                        ),
                        value=value,
                    )
        else:
            # No base-year dated value: inflate the value in force at year end.
            value = (
                sub_parameter("{}-12-31".format(base_year)) * (1 + inflator)
                if sub_parameter("{}-12-31".format(base_year)) is not None
                else None
            )
            sub_parameter.update(
                start="{}-01-01".format(base_year + 1),
                value=value
            )
Inflate a Parameter leaf according to unit type
37,135
def calculate_variable(self, variable=None, period=None, use_baseline=False):
    """Compute variable values for period on baseline or reform system.

    Chooses between ``calculate`` and ``calculate_add`` based on the
    variable's definition period and period-size independence.
    """
    if use_baseline:
        assert self.baseline_simulation is not None, "self.baseline_simulation is None"
        simulation = self.baseline_simulation
    else:
        assert self.simulation is not None
        simulation = self.simulation
    tax_benefit_system = simulation.tax_benefit_system
    assert period is not None
    if not isinstance(period, periods.Period):
        period = periods.period(period)
    assert simulation is not None
    assert tax_benefit_system is not None
    assert variable in tax_benefit_system.variables, "{} is not a valid variable".format(variable)
    period_size_independent = tax_benefit_system.get_variable(variable).is_period_size_independent
    definition_period = tax_benefit_system.get_variable(variable).definition_period
    if period_size_independent is False and definition_period != u'eternity':
        # Additive variables: sum sub-period values over the period.
        values = simulation.calculate_add(variable, period=period)
    elif period_size_independent is True and definition_period == u'month' and period.size_in_months > 1:
        # Size-independent monthly variable: the first month stands for all.
        values = simulation.calculate(variable, period=period.first_month)
    elif period_size_independent is True and definition_period == u'month' and period.size_in_months == 1:
        values = simulation.calculate(variable, period=period)
    elif period_size_independent is True and definition_period == u'year' and period.size_in_months > 12:
        # Multi-year period: use the first full calendar year.
        values = simulation.calculate(variable, period=period.start.offset('first-of', 'year').period('year'))
    elif period_size_independent is True and definition_period == u'year' and period.size_in_months == 12:
        values = simulation.calculate(variable, period=period)
    elif period_size_independent is True and definition_period == u'year':
        values = simulation.calculate(variable, period=period.this_year)
    elif definition_period == u'eternity':
        values = simulation.calculate(variable, period=period)
    else:
        values = None
    assert values is not None, 'Unspecified calculation period for variable {}'.format(variable)
    return values
Compute and return the variable values for period and baseline or reform tax_benefit_system
37,136
def filter_input_variables(self, input_data_frame=None, simulation=None):
    """Drop input-frame columns that won't be used or will be computed.

    Id/role columns are kept; unknown columns are removed; columns with
    formulas are removed unless explicitly listed as input variables.
    Mutates *input_data_frame* in place and returns it.
    """
    assert input_data_frame is not None
    assert simulation is not None
    id_variable_by_entity_key = self.id_variable_by_entity_key
    role_variable_by_entity_key = self.role_variable_by_entity_key
    used_as_input_variables = self.used_as_input_variables
    tax_benefit_system = simulation.tax_benefit_system
    variables = tax_benefit_system.variables
    id_variables = [
        id_variable_by_entity_key[_entity.key]
        for _entity in simulation.entities.values()
        if not _entity.is_person
    ]
    role_variables = [
        role_variable_by_entity_key[_entity.key]
        for _entity in simulation.entities.values()
        if not _entity.is_person
    ]
    log.debug('Variable used_as_input_variables in filter: \n {}'.format(used_as_input_variables))
    unknown_columns = []
    # First pass: drop columns the tax-benefit system does not know about.
    for column_name in input_data_frame:
        if column_name in id_variables + role_variables:
            continue
        if column_name not in variables:
            unknown_columns.append(column_name)
            input_data_frame.drop(column_name, axis=1, inplace=True)
    if unknown_columns:
        log.debug('The following unknown columns {}, are dropped from input table'.format(sorted(unknown_columns)))
    used_columns = []
    dropped_columns = []
    # Second pass: drop computed columns not flagged as input variables.
    for column_name in input_data_frame:
        if column_name in id_variables + role_variables:
            continue
        variable = variables[column_name]
        if variable.formulas:
            if column_name in used_as_input_variables:
                used_columns.append(column_name)
                continue
            dropped_columns.append(column_name)
            input_data_frame.drop(column_name, axis=1, inplace=True)
    if used_columns:
        log.debug('These columns are not dropped because present in used_as_input_variables:\n {}'.format(sorted(used_columns)))
    if dropped_columns:
        log.debug('These columns in survey are set to be calculated, we drop them from the input table:\n {}'.format(sorted(dropped_columns)))
    log.info('Keeping the following variables in the input_data_frame:\n {}'.format(sorted(list(input_data_frame.columns))))
    return input_data_frame
Filter the input data frame from variables that won't be used or are set to be computed
37,137
def init_from_data(self, calibration_kwargs=None, inflation_kwargs=None,
                   rebuild_input_data=False, rebuild_kwargs=None, data=None,
                   memory_config=None):
    """Initialise a survey scenario from data.

    Optionally rebuilds input data, creates the baseline and reform
    simulations, then applies calibration and inflation if requested.
    """
    # Bug fix: data_year was previously only bound when `data` was not
    # None, raising NameError when rebuild_input_data=True with data=None.
    data_year = self.year
    if data is not None:
        data_year = data.get("data_year", self.year)
    if calibration_kwargs is not None:
        assert set(calibration_kwargs.keys()).issubset(
            set(['target_margins_by_variable', 'parameters', 'total_population'])
        )
    if inflation_kwargs is not None:
        assert set(inflation_kwargs.keys()).issubset(
            set(['inflator_by_variable', 'target_by_variable'])
        )
    self._set_id_variable_by_entity_key()
    self._set_role_variable_by_entity_key()
    self._set_used_as_input_variables_by_entity()
    if rebuild_input_data:
        if rebuild_kwargs is not None:
            self.build_input_data(year=data_year, **rebuild_kwargs)
        else:
            self.build_input_data(year=data_year)
    debug = self.debug
    trace = self.trace
    # Build the baseline simulation first, when one is configured.
    if self.baseline_tax_benefit_system is not None:
        self.new_simulation(debug=debug, data=data, trace=trace,
                            memory_config=memory_config, use_baseline=True)
    self.new_simulation(debug=debug, data=data, trace=trace,
                        memory_config=memory_config)
    if calibration_kwargs:
        self.calibrate(**calibration_kwargs)
    if inflation_kwargs:
        self.inflate(**inflation_kwargs)
Initialises a survey scenario from data .
37,138
def neutralize_variables(self, tax_benefit_system):
    """Neutralize formula-less variables except inputs and protected ones."""
    for variable_name, variable in tax_benefit_system.variables.items():
        if variable.formulas:
            # Computed variables are left alone.
            continue
        inputs = self.used_as_input_variables
        if inputs and variable_name in inputs:
            continue
        protected = self.non_neutralizable_variables
        if protected and variable_name in protected:
            continue
        weights = self.weight_column_name_by_entity
        if weights and variable_name in weights.values():
            continue
        tax_benefit_system.neutralize_variable(variable_name)
Neutralizing input variables not in input dataframe and keep some crucial variables
37,139
def set_tax_benefit_systems(self, tax_benefit_system=None, baseline_tax_benefit_system=None):
    """Set the tax-benefit system and, optionally, its baseline counterpart."""
    assert tax_benefit_system is not None
    self.tax_benefit_system = tax_benefit_system
    if self.cache_blacklist is not None:
        self.tax_benefit_system.cache_blacklist = self.cache_blacklist
    if baseline_tax_benefit_system is None:
        return
    self.baseline_tax_benefit_system = baseline_tax_benefit_system
    if self.cache_blacklist is not None:
        self.baseline_tax_benefit_system.cache_blacklist = self.cache_blacklist
Set the tax and benefit system and eventually the baseline tax and benefit system
37,140
def _set_id_variable_by_entity_key ( self ) -> Dict [ str , str ] : if self . id_variable_by_entity_key is None : self . id_variable_by_entity_key = dict ( ( entity . key , entity . key + '_id' ) for entity in self . tax_benefit_system . entities ) log . debug ( "Use default id_variable names:\n {}" . format ( self . id_variable_by_entity_key ) ) return self . id_variable_by_entity_key
Identify and set the good ids for the different entities
37,141
def _set_role_variable_by_entity_key ( self ) -> Dict [ str , str ] : if self . role_variable_by_entity_key is None : self . role_variable_by_entity_key = dict ( ( entity . key , entity . key + '_legacy_role' ) for entity in self . tax_benefit_system . entities ) return self . role_variable_by_entity_key
Identify and set the good roles for the different entities
37,142
def _set_used_as_input_variables_by_entity(self) -> Dict[str, List[str]]:
    """Identify and set the input variables used by each entity.

    Returns None when the mapping is already set (early exit).
    """
    if self.used_as_input_variables_by_entity is not None:
        return
    tax_benefit_system = self.tax_benefit_system
    # All declared input variables must exist in the tax-benefit system.
    assert set(self.used_as_input_variables) <= set(tax_benefit_system.variables.keys()), \
        "Some variables used as input variables are not part of the tax benefit system:\n {}".format(
            set(self.used_as_input_variables).difference(set(tax_benefit_system.variables.keys()))
        )
    self.used_as_input_variables_by_entity = dict()
    for entity in tax_benefit_system.entities:
        self.used_as_input_variables_by_entity[entity.key] = [
            variable
            for variable in self.used_as_input_variables
            if tax_benefit_system.get_variable(variable).entity == entity
        ]
    return self.used_as_input_variables_by_entity
Identify and set the good input variables for the different entities
37,143
def _dimensions(self):
    """Return tuple of non-selection dimension objects in this collection."""
    return tuple(
        dim for dim in self._all_dimensions if dim.dimension_type != DT.MR_CAT
    )
tuple of dimension objects in this collection .
37,144
def _iter_dimensions(self):
    """Generate a Dimension object for each raw dimension."""
    for raw_dimension in self._raw_dimensions:
        yield Dimension(raw_dimension.dimension_dict, raw_dimension.dimension_type)
Generate Dimension object for each dimension dict .
37,145
def _raw_dimensions(self):
    """Return tuple of _RawDimension objects wrapping each dimension dict."""
    dimension_dicts = self._dimension_dicts
    return tuple(
        _RawDimension(dimension_dict, dimension_dicts)
        for dimension_dict in dimension_dicts
    )
Sequence of _RawDimension objects wrapping each dimension dict .
37,146
def dimension_type(self):
    """Return the DIMENSION_TYPE member appropriate to this dimension."""
    base_type = self._base_type
    if base_type == "categorical":
        return self._resolve_categorical()
    if base_type == "enum.variable":
        return self._resolve_array_type()
    simple_types = {
        "enum.datetime": DT.DATETIME,
        "enum.numeric": DT.BINNED_NUMERIC,
        "enum.text": DT.TEXT,
    }
    if base_type in simple_types:
        return simple_types[base_type]
    raise NotImplementedError("unrecognized dimension type %s" % base_type)
Return member of DIMENSION_TYPE appropriate to dimension_dict .
37,147
def _base_type ( self ) : type_class = self . _dimension_dict [ "type" ] [ "class" ] if type_class == "categorical" : return "categorical" if type_class == "enum" : subclass = self . _dimension_dict [ "type" ] [ "subtype" ] [ "class" ] return "enum.%s" % subclass raise NotImplementedError ( "unexpected dimension type class '%s'" % type_class )
Return str like enum . numeric representing dimension type .
37,148
def _resolve_array_type(self):
    """Return DT.MR or DT.CA for this enum.variable dimension."""
    following = self._next_raw_dimension
    if following is None:
        return DT.CA
    # An MR dimension is followed by its categorical selections dimension
    # carrying the same alias.
    looks_like_mr = (
        following._base_type == "categorical"
        and following._has_selected_category
        and following._alias == self._alias
    )
    return DT.MR if looks_like_mr else DT.CA
Return one of the ARRAY_TYPES members of DIMENSION_TYPE .
37,149
def _resolve_categorical(self):
    """Return the categorical DIMENSION_TYPE member for this dimension."""
    if self._is_array_cat:
        return DT.MR_CAT if self._has_selected_category else DT.CA_CAT
    if self._has_selected_category:
        return DT.LOGICAL
    return DT.CAT
Return one of the categorical members of DIMENSION_TYPE .
37,150
def inserted_hs_indices(self):
    """Return list of int index of each inserted subtotal for the dimension."""
    if self.dimension_type in DT.ARRAY_TYPES:
        # Array dimensions take no subtotal insertions.
        return []
    interleaved = self._iter_interleaved_items(self.valid_elements)
    return [idx for idx, item in enumerate(interleaved) if item.is_insertion]
list of int index of each inserted subtotal for the dimension .
37,151
def is_marginable(self):
    """Return True if summing counts across this dimension is meaningful."""
    non_marginable_types = {DT.CA, DT.MR, DT.MR_CAT, DT.LOGICAL}
    return self.dimension_type not in non_marginable_types
True if adding counts across this dimension axis is meaningful .
37,152
def labels(self, include_missing=False, include_transforms=False, include_cat_ids=False):
    """Return list of str labels for the elements of this dimension.

    Subtotal labels are interleaved when *include_transforms* is True
    (never for CA-subvariable dimensions).  With *include_cat_ids*,
    entries are (label, element_id) pairs, id None for insertions.
    """
    elements = self.all_elements if include_missing else self.valid_elements
    # CA subvariable dimensions never take subtotal transforms.
    include_subtotals = include_transforms and self.dimension_type != DT.CA_SUBVAR
    interleaved_items = tuple(self._iter_interleaved_items(elements))
    labels = list(
        item.label
        for item in interleaved_items
        if include_subtotals or not item.is_insertion
    )
    if include_cat_ids:
        element_ids = tuple(
            None if item.is_insertion else item.element_id
            for item in interleaved_items
            if include_subtotals or not item.is_insertion
        )
        return list(zip(labels, element_ids))
    return labels
Return list of str labels for the elements of this dimension .
37,153
def _iter_interleaved_items ( self , elements ) : subtotals = self . _subtotals for subtotal in subtotals . iter_for_anchor ( "top" ) : yield subtotal for element in elements : yield element for subtotal in subtotals . iter_for_anchor ( element . element_id ) : yield subtotal for subtotal in subtotals . iter_for_anchor ( "bottom" ) : yield subtotal
Generate element or subtotal items in interleaved order .
37,154
def _subtotals(self):
    """Return _Subtotals sequence object for this dimension."""
    view = self._dimension_dict.get("references", {}).get("view", {})
    if view is None:
        # Distinct from {}; an explicit null view has no insertions.
        insertion_dicts = []
    else:
        insertion_dicts = view.get("transform", {}).get("insertions", [])
    return _Subtotals(insertion_dicts, self.valid_elements)
_Subtotals sequence object for this dimension .
37,155
def _elements ( self ) : ElementCls , element_dicts = self . _element_makings return tuple ( ElementCls ( element_dict , idx , element_dicts ) for idx , element_dict in enumerate ( element_dicts ) )
Composed tuple storing actual sequence of element objects .
37,156
def numeric_value(self):
    """Return numeric value assigned to element by user, np.nan if absent."""
    value = self._element_dict.get("numeric_value")
    return np.nan if value is None else value
Numeric value assigned to element by user np . nan if absent .
37,157
def label(self):
    """Return str display-name for this element; '' when no value present."""
    value = self._element_dict.get("value")
    value_type = type(value).__name__
    if value_type == "NoneType":
        return ""
    if value_type == "list":
        # e.g. a [lower, upper] bin boundary pair.
        return "-".join([str(item) for item in value])
    if value_type in ("float", "int"):
        return str(value)
    if value_type in ("str", "unicode"):
        return value
    # Otherwise assume a dict-like subvariable reference.
    name = value.get("references", {}).get("name")
    return name if name else ""
str display - name for this element when absent from cube response .
37,158
def _iter_valid_subtotal_dicts ( self ) : for insertion_dict in self . _insertion_dicts : if not isinstance ( insertion_dict , dict ) : continue if insertion_dict . get ( "function" ) != "subtotal" : continue if not { "anchor" , "args" , "name" } . issubset ( insertion_dict . keys ( ) ) : continue if not self . _element_ids . intersection ( insertion_dict [ "args" ] ) : continue yield insertion_dict
Generate each insertion dict that represents a valid subtotal .
37,159
def _subtotals(self):
    """Return composed tuple of _Subtotal objects."""
    valid = self.valid_elements
    return tuple(
        _Subtotal(subtotal_dict, valid)
        for subtotal_dict in self._iter_valid_subtotal_dicts()
    )
Composed tuple storing actual sequence of _Subtotal objects .
37,160
def anchor(self):
    """Return int element id or str position for this subtotal's anchor."""
    raw_anchor = self._subtotal_dict["anchor"]
    try:
        element_id = int(raw_anchor)
    except (TypeError, ValueError):
        # Not numeric: a positional anchor like "Top"/"Bottom".
        return raw_anchor.lower()
    if element_id in self.valid_elements.element_ids:
        return element_id
    # Unknown element id: place the subtotal at the bottom.
    return "bottom"
int or str indicating element under which to insert this subtotal .
37,161
def anchor_idx(self):
    """Return index of the anchor element, or 'top'/'bottom' sentinel."""
    anchor = self.anchor
    is_positional = anchor in ("top", "bottom")
    if is_positional:
        return anchor
    element = self.valid_elements.get_by_id(anchor)
    return element.index_in_valids
int or str representing index of anchor element in dimension .
37,162
def addend_ids(self):
    """Return tuple of int ids of elements contributing to this subtotal."""
    known_ids = self.valid_elements.element_ids
    args = self._subtotal_dict.get("args", [])
    return tuple(arg for arg in args if arg in known_ids)
tuple of int ids of elements contributing to this subtotal .
37,163
def addend_idxs(self):
    """Return tuple of int index of each addend element for this subtotal."""
    get_by_id = self.valid_elements.get_by_id
    return tuple(
        get_by_id(addend_id).index_in_valids for addend_id in self.addend_ids
    )
tuple of int index of each addend element for this subtotal .
37,164
def create_data_file_by_format(directory_path=None):
    """Walk subdirectories collecting stata (.dta) and sas (.sas7bdat) files."""
    data_files = {'stata': [], 'sas': []}
    for root, subdirs, files in os.walk(directory_path):
        for file_name in files:
            file_path = os.path.join(root, file_name)
            base_name = os.path.basename(file_name)
            if base_name.endswith(".dta"):
                log.info("Found stata file {}".format(file_path))
                data_files['stata'].append(file_path)
            if base_name.endswith(".sas7bdat"):
                log.info("Found sas file {}".format(file_path))
                data_files['sas'].append(file_path)
    return data_files
Browse subdirectories to extract stata and sas files
37,165
def as_array(
    self,
    include_missing=False,
    weighted=True,
    include_transforms_for_dims=None,
    prune=False,
):
    """Return ndarray representing cube values, optionally pruned."""
    raw = self._as_array(
        include_missing=include_missing,
        weighted=weighted,
        include_transforms_for_dims=include_transforms_for_dims,
    )
    if prune:
        raw = self._prune_body(raw, transforms=include_transforms_for_dims)
    return self._drop_mr_cat_dims(raw)
Return ndarray representing cube values .
37,166
def count(self, weighted=True):
    """Return numeric count of rows considered for the cube response."""
    measures = self._measures
    return measures.weighted_n if weighted else measures.unweighted_n
Return numeric count of rows considered for cube response .
37,167
def index(self, weighted=True, prune=False):
    """Return cube index measurement (deprecated; use CubeSlice.index_table)."""
    warnings.warn(
        "CrunchCube.index() is deprecated. Use CubeSlice.index_table().",
        DeprecationWarning,
    )
    result = Index.data(self, weighted, prune)
    return result
Return cube index measurement .
37,168
def is_univariate_ca(self):
    """Return True if cube is exactly a 2D CA dimension pair (either order)."""
    if self.ndim != 2:
        return False
    return set(self.dim_types) == {DT.CA_SUBVAR, DT.CA_CAT}
True if cube only contains a CA dimension - pair in either order .
37,169
def labels(self, include_missing=False, include_transforms_for_dims=False):
    """Return list of label lists, one per cube dimension."""
    return [
        dimension.labels(include_missing, include_transforms_for_dims)
        for dimension in self.dimensions
    ]
Gets labels for each cube s dimension .
37,170
# Return the margin (denominator counts) of the cube along *axis*.
# Pipeline: raw counts -> apply missings and H&S subtotals -> optionally
# inherit the pruning mask from the pruned cube body -> sum over the
# adjusted axis -> drop MR-selection/CA bookkeeping dims -> squeeze a
# leading singleton dimension for <3D cubes.
# NOTE(review): this source has been flattened onto one line, so the exact
# nesting of the `if prune:` / `if isinstance(...)` blocks cannot be
# confirmed from here — verify against the original formatted file.
def margin ( self , axis = None , weighted = True , include_missing = False , include_transforms_for_dims = None , prune = False , include_mr_cat = False , ) : table = self . _counts ( weighted ) . raw_cube_array new_axis = self . _adjust_axis ( axis ) index = tuple ( None if i in new_axis else slice ( None ) for i , _ in enumerate ( table . shape ) ) hs_dims = self . _hs_dims_for_den ( include_transforms_for_dims , axis ) den = self . _apply_subtotals ( self . _apply_missings ( table , include_missing = include_missing ) , hs_dims ) arr = self . _as_array ( include_transforms_for_dims = hs_dims , include_missing = include_missing ) if prune : arr = self . _prune_body ( arr , transforms = hs_dims ) arr = self . _drop_mr_cat_dims ( arr , fix_valids = include_missing ) if isinstance ( arr , np . ma . core . MaskedArray ) : inflate_ind = tuple ( ( None if ( d . dimension_type == DT . MR_CAT or i != 0 and ( n <= 1 or len ( d . valid_elements ) <= 1 ) ) else slice ( None ) ) for i , ( d , n ) in enumerate ( zip ( self . _all_dimensions , table . shape ) ) ) mask = np . logical_or ( np . zeros ( den . shape , dtype = bool ) , arr . mask [ inflate_ind ] ) den = np . ma . masked_array ( den , mask ) if ( self . ndim != 1 or axis is None or axis == 0 and len ( self . _all_dimensions ) == 1 ) : den = np . sum ( den , axis = new_axis ) [ index ] den = self . _drop_mr_cat_dims ( den , fix_valids = ( include_missing or include_mr_cat ) ) if den . shape [ 0 ] == 1 and len ( den . shape ) > 1 and self . ndim < 3 : den = den . reshape ( den . shape [ 1 : ] ) return den
Get margin for the selected axis .
37,171
def mr_dim_ind(self):
    """Return index (int), indices (tuple of int), or None of MR dimensions."""
    mr_positions = tuple(
        position
        for position, dimension in enumerate(self.dimensions)
        if dimension.dimension_type == DT.MR_SUBVAR
    )
    if not mr_positions:
        return None
    return mr_positions[0] if len(mr_positions) == 1 else mr_positions
Return int tuple of int or None representing MR indices .
37,172
def population_counts(
    self,
    population_size,
    weighted=True,
    include_missing=False,
    include_transforms_for_dims=None,
    prune=False,
):
    """Return counts scaled in proportion to the overall population.

    Each slice is scaled independently; for multi-slice (3D) cubes the
    per-slice results are stacked into a single ndarray.
    """
    counts_per_slice = [
        slice_.population_counts(
            population_size,
            weighted=weighted,
            include_missing=include_missing,
            include_transforms_for_dims=include_transforms_for_dims,
            prune=prune,
        )
        for slice_ in self.slices
    ]
    if len(counts_per_slice) == 1:
        return counts_per_slice[0]
    return np.array(counts_per_slice)
Return counts scaled in proportion to overall population .
37,173
def proportions(
    self,
    axis=None,
    weighted=True,
    include_transforms_for_dims=None,
    include_mr_cat=False,
    prune=False,
):
    """Return percentage values for the cube as a numpy ndarray.

    The numerator (missings and subtotals applied) is divided by the
    denominator summed across *axis*; MR selection dims are dropped
    unless requested, and the (possibly pruned) cube body's mask is
    carried over when it is a masked array.
    """
    raw_table = self._measure(weighted).raw_cube_array
    numerator = self._apply_subtotals(
        self._apply_missings(raw_table), include_transforms_for_dims
    )
    result = numerator / self._denominator(
        weighted, include_transforms_for_dims, axis
    )
    if not include_mr_cat:
        result = self._drop_mr_cat_dims(result)
    # Inherit the mask of the cube body when pruning produced one.
    body = self.as_array(
        prune=prune, include_transforms_for_dims=include_transforms_for_dims
    )
    if isinstance(body, np.ma.core.MaskedArray):
        result = np.ma.masked_array(result, body.mask)
    return result
Return percentage values for cube as numpy . ndarray .
37,174
def _denominator(self, weighted, include_transforms_for_dims, axis):
    """Calculate the denominator used for percentage tables.

    The full cube table (missings and subtotals applied) is summed over
    the adjusted axis, and the summed-out axes are re-inserted as
    length-1 dimensions so the result broadcasts against the numerator.
    """
    table = self._measure(weighted).raw_cube_array
    summed_axes = self._adjust_axis(axis)
    # None (np.newaxis) re-expands each collapsed axis for broadcasting.
    broadcast_index = tuple(
        None if dim_idx in summed_axes else slice(None)
        for dim_idx, _ in enumerate(table.shape)
    )
    hs_dims = self._hs_dims_for_den(include_transforms_for_dims, axis)
    denominator = self._apply_subtotals(self._apply_missings(table), hs_dims)
    return np.sum(denominator, axis=summed_axes)[broadcast_index]
Calculate denominator for percentages .
37,175
# Return per-slice scale-means, as a list of [row_means, col_means] pairs.
# When H&S transforms are requested, np.nan placeholders are spliced in at
# the inserted-subtotal positions; when pruning is requested and the cube
# body is masked, fully-masked rows/columns are removed from each slice's
# means using the body's mask.
# NOTE(review): flattened source — the nesting of the hs_dims / prune
# branches and their inner loops cannot be confirmed from this one-line
# form; verify against the original formatted file before restructuring.
def scale_means ( self , hs_dims = None , prune = False ) : slices_means = [ ScaleMeans ( slice_ ) . data for slice_ in self . slices ] if hs_dims and self . ndim > 1 : inserted_indices = self . inserted_hs_indices ( ) [ - 2 : ] for scale_means in slices_means : if scale_means [ 0 ] is not None and 1 in hs_dims and inserted_indices [ 1 ] : for i in inserted_indices [ 1 ] : scale_means [ 0 ] = np . insert ( scale_means [ 0 ] , i , np . nan ) if scale_means [ 1 ] is not None and 0 in hs_dims and inserted_indices [ 0 ] : for i in inserted_indices [ 0 ] : scale_means [ 1 ] = np . insert ( scale_means [ 1 ] , i , np . nan ) if prune : arr = self . as_array ( include_transforms_for_dims = hs_dims , prune = True ) if isinstance ( arr , np . ma . core . MaskedArray ) : mask = arr . mask for i , scale_means in enumerate ( slices_means ) : if scale_means [ 0 ] is not None : row_mask = ( mask . all ( axis = 0 ) if self . ndim < 3 else mask . all ( axis = 1 ) [ i ] ) scale_means [ 0 ] = scale_means [ 0 ] [ ~ row_mask ] if self . ndim > 1 and scale_means [ 1 ] is not None : col_mask = ( mask . all ( axis = 1 ) if self . ndim < 3 else mask . all ( axis = 2 ) [ i ] ) scale_means [ 1 ] = scale_means [ 1 ] [ ~ col_mask ] return slices_means
Get cube means .
37,176
def zscore(self, weighted=True, prune=False, hs_dims=None):
    """Return the cube's z-score measurements.

    For a 3D cube the per-slice z-scores are stacked into a single
    ndarray; otherwise the lone slice's result is returned directly.
    """
    slice_scores = [
        slice_.zscore(weighted, prune, hs_dims) for slice_ in self.slices
    ]
    if self.ndim == 3:
        return np.array(slice_scores)
    return slice_scores[0]
Return ndarray with the cube's zscore measurements .
37,177
def wishart_pairwise_pvals(self, axis=0):
    """Return column-comparison p-value matrices, one ndarray per slice."""
    results = []
    for slice_ in self.slices:
        results.append(slice_.wishart_pairwise_pvals(axis=axis))
    return results
Return matrices of column - comparison p - values as list of numpy . ndarrays .
37,178
def _adjust_inserted_indices(inserted_indices_list, prune_indices_list):
    """Shift H&S inserted indices left to account for pruned elements.

    For each dimension, pruned positions that coincide with inserted
    (subtotal) positions are ignored; each inserted index is then
    decremented by the number of pruned positions preceding it. The
    input lists are copied, not mutated.
    """
    adjusted = [list(dim_inds) for dim_inds in inserted_indices_list]
    for prune_inds, inserted_inds in zip(prune_indices_list, adjusted):
        # Pruned positions that are themselves insertions shift nothing.
        real_pruned = prune_inds[~np.in1d(prune_inds, inserted_inds)]
        for position, inserted_ind in enumerate(inserted_inds):
            inserted_inds[position] = inserted_ind - np.sum(
                real_pruned < inserted_ind
            )
    return adjusted
Adjust inserted indices if there are pruned elements .
37,179
def _apply_missings(self, res, include_missing=False):
    """Return *res* restricted to the requested elements of each dimension.

    Selects all elements when *include_missing* is set, otherwise only
    the valid (non-missing) ones, via an outer-product index across all
    dimensions. A dimensionless cube is returned unchanged.
    """
    per_dim_idxs = []
    for dimension in self._all_dimensions:
        elements = (
            dimension.all_elements if include_missing else dimension.valid_elements
        )
        per_dim_idxs.append(elements.element_idxs)
    if not per_dim_idxs:
        return res
    return res[np.ix_(*per_dim_idxs)]
Return ndarray with missing and insertions as specified .
37,180
def _as_array(
    self,
    include_missing=False,
    get_non_selected=False,
    weighted=True,
    include_transforms_for_dims=False,
):
    """Return the crunch cube measure as an ndarray.

    Missing elements are filtered first, then H&S subtotals are applied.
    ``get_non_selected`` is accepted for interface compatibility but is
    not used in this code path.
    """
    raw = self._measure(weighted).raw_cube_array
    without_missings = self._apply_missings(raw, include_missing=include_missing)
    return self._apply_subtotals(without_missings, include_transforms_for_dims)
Get crunch cube as ndarray .
37,181
def _cube_dict(self):
    """Return the raw cube response parsed into a dict.

    Accepts either an already-parsed dict or a JSON string; unwraps a
    top-level "value" envelope when present.

    Raises:
        TypeError: when the response is neither a dict nor JSON text.
    """
    response = self._cube_response_arg
    if not isinstance(response, dict):
        try:
            response = json.loads(response)
        except TypeError:
            raise TypeError(
                "Unsupported type <%s> provided. Cube response must be JSON "
                "(str) or dict." % type(self._cube_response_arg).__name__
            )
    return response.get("value", response)
dict containing raw cube response parsed from JSON payload .
37,182
def _fix_valid_indices(cls, valid_indices, insertion_index, dim):
    """Splice an H&S insertion into the valid indices of dimension *dim*.

    Indices greater than *insertion_index* are shifted right by one and
    the new index (insertion_index + 1) is inserted in sorted position.
    Mutates and returns *valid_indices*.
    """
    ordered = np.array(sorted(valid_indices[dim]))
    # Position just after the last index <= the insertion point.
    insert_at = np.sum(ordered <= insertion_index)
    ordered[insert_at:] += 1
    with_insertion = np.insert(ordered, insert_at, insertion_index + 1)
    valid_indices[dim] = with_insertion.tolist()
    return valid_indices
Add indices for H&S inserted elements .
37,183
def _is_axis_allowed(self, axis):
    """Check whether margin/proportions may be computed across *axis*.

    Directions that run across a CA-items (subvariables) dimension are
    not summable and are therefore disallowed.
    """
    if axis is None:
        # Total margin: disallowed if either of the last two dims is CA items.
        return DT.CA_SUBVAR not in self.dim_types[-2:]
    if isinstance(axis, int):
        if self.ndim == 1 and axis == 1:
            # Transposed 1D case is always allowed.
            return True
        axis = [axis]
    return all(self.dim_types[dim_idx] != DT.CA_SUBVAR for dim_idx in axis)
Check if axis are allowed .
37,184
def _measure(self, weighted):
    """Return the primary measure object for this cube.

    Means take precedence when present; otherwise the weighted or
    unweighted counts are selected according to *weighted*.
    """
    if self._measures.means is not None:
        return self._measures.means
    if weighted:
        return self._measures.weighted_counts
    return self._measures.unweighted_counts
_BaseMeasure subclass representing primary measure for this cube .
37,185
# Return *res* as a masked array whose mask marks pruned vectors of a 3D
# cube. For each slice, the pruned-row and pruned-column indicators are
# broadcast into a 2D mask, which is then re-expanded with np.newaxis into
# the positions dictated by where the MR (selection) dimensions sit, so it
# aligns with the raw (selections-included) cube shape.
# NOTE(review): flattened source — the if/elif placement of the newaxis
# expansion branches cannot be confirmed from this one-line form; do not
# restructure without the original formatted file.
def _prune_3d_body ( self , res , transforms ) : mask = np . zeros ( res . shape ) mr_dim_idxs = self . mr_dim_ind for i , prune_inds in enumerate ( self . prune_indices ( transforms ) ) : rows_pruned = prune_inds [ 0 ] cols_pruned = prune_inds [ 1 ] rows_pruned = np . repeat ( rows_pruned [ : , None ] , len ( cols_pruned ) , axis = 1 ) cols_pruned = np . repeat ( cols_pruned [ None , : ] , len ( rows_pruned ) , axis = 0 ) slice_mask = np . logical_or ( rows_pruned , cols_pruned ) if mr_dim_idxs == ( 1 , 2 ) : slice_mask = slice_mask [ : , np . newaxis , : , np . newaxis ] elif mr_dim_idxs == ( 0 , 1 ) : slice_mask = slice_mask [ np . newaxis , : , np . newaxis , : ] elif mr_dim_idxs == ( 0 , 2 ) : slice_mask = slice_mask [ np . newaxis , : , : , np . newaxis ] elif mr_dim_idxs == 1 and self . ndim == 3 : slice_mask = slice_mask [ : , np . newaxis , : ] elif mr_dim_idxs == 2 and self . ndim == 3 : slice_mask = slice_mask [ : , : , np . newaxis ] mask [ i ] = slice_mask res = np . ma . masked_array ( res , mask = mask ) return res
Return masked array where mask indicates pruned vectors .
37,186
# Return, per dimension, boolean indicators of which rows/columns should
# be pruned (have an all-zero unweighted base). 3D cubes delegate to
# _prune_3d_indices; otherwise a nested helper computes row indicators
# (and, for 2D cubes, column indicators) from the pruning base margins,
# collapsing multidimensional indicator arrays with .all() along the
# opposite axis.
# NOTE(review): flattened source — the boundaries of the nested
# prune_non_3d_indices helper are inferred, not confirmed; verify against
# the original formatted file before restructuring.
def prune_indices ( self , transforms = None ) : if self . ndim >= 3 : return self . _prune_3d_indices ( transforms ) def prune_non_3d_indices ( transforms ) : row_margin = self . _pruning_base ( hs_dims = transforms , axis = self . row_direction_axis ) row_indices = self . _margin_pruned_indices ( row_margin , self . _inserted_dim_inds ( transforms , 0 ) , 0 ) if row_indices . ndim > 1 : row_indices = row_indices . all ( axis = 1 ) if self . ndim == 1 : return [ row_indices ] col_margin = self . _pruning_base ( hs_dims = transforms , axis = self . _col_direction_axis ) col_indices = self . _margin_pruned_indices ( col_margin , self . _inserted_dim_inds ( transforms , 1 ) , 1 ) if col_indices . ndim > 1 : col_indices = col_indices . all ( axis = 0 ) return [ row_indices , col_indices ] return prune_non_3d_indices ( transforms )
Return indices of pruned rows and columns as list .
37,187
def _pruning_base(self, axis=None, hs_dims=None):
    """Return unweighted values used to decide pruning.

    Uses the margin when summing across *axis* is allowed (CAT dims);
    falls back to plain counts when the axis runs across items.
    """
    if self._is_axis_allowed(axis):
        return self.margin(
            axis=axis, weighted=False, include_transforms_for_dims=hs_dims
        )
    return self.as_array(weighted=False, include_transforms_for_dims=hs_dims)
Gets margin if across CAT dimension . Gets counts if across items .
37,188
def _update_result(self, result, insertions, dimension_index):
    """Insert subtotal values into *result* along *dimension_index*.

    Each insertion is an (anchor_index, value) pair; a running offset
    accounts for rows/columns added by earlier insertions.
    """
    offset = 0
    for anchor_idx, subtotal_value in insertions:
        result = np.insert(
            result, anchor_idx + offset + 1, subtotal_value, axis=dimension_index
        )
        offset += 1
    return result
Insert subtotals into resulting ndarray .
37,189
def means(self):
    """Return a _MeanMeasure for this cube, or None when no means present."""
    result = self._cube_dict.get("result", {})
    if result.get("measures", {}).get("mean") is None:
        return None
    return _MeanMeasure(self._cube_dict, self._all_dimensions)
_MeanMeasure object providing access to means values .
37,190
def missing_count(self):
    """Return the count of missing rows in the cube response."""
    mean_measure = self.means
    if mean_measure:
        return mean_measure.missing_count
    return self._cube_dict["result"].get("missing", 0)
numeric representing count of missing rows in cube response .
37,191
def weighted_counts(self):
    """Return the _WeightedCountMeasure for this cube.

    Falls back to the unweighted counts when the cube carries no weight.
    """
    if self.is_weighted:
        return _WeightedCountMeasure(self._cube_dict, self._all_dimensions)
    return self.unweighted_counts
_WeightedCountMeasure object for this cube .
37,192
def weighted_n(self):
    """Return float count of returned rows, adjusted for weighting."""
    if self.is_weighted:
        weights = self._cube_dict["result"]["measures"]["count"]["data"]
        return float(sum(weights))
    return float(self.unweighted_n)
float count of returned rows adjusted for weighting .
37,193
def raw_cube_array(self):
    """Return a read-only ndarray of measure values from the cube response."""
    shaped = np.array(self._flat_values).reshape(self._all_dimensions.shape)
    # Freeze the array so callers can't mutate shared cube data.
    shaped.flags.writeable = False
    return shaped
Return read - only ndarray of measure values from cube - response .
37,194
def _flat_values(self):
    """Return a tuple of the mean values found in the cube response.

    Missing entries appear in the payload as dicts and are converted to
    np.nan (exact ``dict`` type check, matching the payload format).
    """
    raw_means = self._cube_dict["result"]["measures"]["mean"]["data"]
    return tuple(
        np.nan if type(value) is dict else value for value in raw_means
    )
Return tuple of mean values as found in cube response .
37,195
def make_input_dataframe_by_entity(tax_benefit_system, nb_persons, nb_groups):
    """Generate a dict of entity-keyed DataFrames with *nb_persons* persons
    spread across *nb_groups* groups.

    Person 0 plus nb_groups - 1 randomly sampled persons are the group
    heads (legacy role 0); remaining members get increasing legacy roles
    (2, 3, ...). Group-membership and role columns are added to the
    person DataFrame, and one DataFrame indexed by group id is created
    per group entity.
    """
    person_entity = [
        entity for entity in tax_benefit_system.entities if entity.is_person
    ][0]
    person_id = np.arange(nb_persons)
    input_dataframe_by_entity = dict()  # fixed: was assigned twice
    input_dataframe_by_entity[person_entity.key] = pd.DataFrame({
        person_entity.key + '_id': person_id,
    })
    # NOTE(review): the original called .set_index('person_id') here and
    # discarded the result (no inplace=True), so no index was ever set
    # — and the hard-coded 'person_id' would KeyError for any other
    # person-entity key. The no-op call is removed to keep behavior
    # identical; add an inplace set_index here if indexing was intended.
    # Group heads: person 0 plus nb_groups - 1 randomly sampled persons.
    adults = [0] + sorted(random.sample(range(1, nb_persons), nb_groups - 1))
    members_entity_id = np.empty(nb_persons, dtype=int)
    members_legacy_role = np.empty(nb_persons, dtype=int)
    id_group = -1
    for id_person in range(nb_persons):
        if id_person in adults:
            id_group += 1
            legacy_role = 0
        else:
            # First dependent after a head gets role 2, then 3, 4, ...
            legacy_role = 2 if legacy_role == 0 else legacy_role + 1
        members_legacy_role[id_person] = legacy_role
        members_entity_id[id_person] = id_group
    for entity in tax_benefit_system.entities:
        if entity.is_person:
            continue
        key = entity.key
        person_dataframe = input_dataframe_by_entity[person_entity.key]
        person_dataframe[key + '_id'] = members_entity_id
        person_dataframe[key + '_legacy_role'] = members_legacy_role
        person_dataframe[key + '_role'] = np.where(
            members_legacy_role == 0,
            entity.flattened_roles[0].key,
            entity.flattened_roles[-1].key,
        )
        input_dataframe_by_entity[key] = pd.DataFrame({
            key + '_id': range(nb_groups)
        })
        input_dataframe_by_entity[key].set_index(key + '_id', inplace=True)
    return input_dataframe_by_entity
Generate a dictionary of dataframes containing nb_persons persons spread across nb_groups groups .
37,196
def insert_table(self, label=None, name=None, **kwargs):
    """Insert a table into the Survey object.

    A DataFrame may be passed as ``data_frame`` (or the legacy spelling
    ``dataframe``); when present it is saved into the survey's HDF5 file
    under *name*. Remaining keyword arguments are recorded as the
    table's metadata.
    """
    data_frame = kwargs.pop('data_frame', None)
    if data_frame is None:
        data_frame = kwargs.pop('dataframe', None)  # legacy spelling
    to_hdf_kwargs = kwargs.pop('to_hdf_kwargs', dict())
    # fixed: the two separate `if data_frame is not None` blocks are
    # merged into one.
    if data_frame is not None:
        assert isinstance(data_frame, pandas.DataFrame)
        if label is None:
            label = name
        table = Table(label=label, name=name, survey=self)
        assert table.survey.hdf5_file_path is not None
        log.debug(
            "Saving table {} in {}".format(name, table.survey.hdf5_file_path)
        )
        table.save_data_frame(data_frame, **to_hdf_kwargs)
    if name not in self.tables:
        self.tables[name] = dict()
    for key, val in kwargs.items():
        self.tables[name][key] = val
Insert a table in the Survey object
37,197
def quantile(q, variable, weight_variable=None, filter_variable=None):
    """Return a formula computing the *q*-quantile bucket of *variable*.

    Weights come from *weight_variable* when given (uniform weights of 1
    otherwise), optionally restricted by *filter_variable*; filtered-out
    observations are assigned quantile -1.
    """
    def formula(entity, period):
        value = entity(variable, period)
        # BUG FIX(review): the flattened original unconditionally reset
        # weight to 1s right after reading weight_variable, discarding
        # it; the uniform weight is now only the fallback.
        if weight_variable is not None:
            weight = entity(weight_variable, period)
        else:
            weight = entity.filled_array(1)
        if filter_variable is not None:
            filter_value = entity(filter_variable, period)
            weight = filter_value * weight
        labels = arange(1, q + 1)
        quantile, _ = weightedcalcs_quantiles(
            value,
            labels,
            weight,
            return_quantiles=True,
        )
        if filter_variable is not None:
            quantile = where(weight > 0, quantile, -1)
        return quantile
    return formula
Return quantile of a variable , weighted by a specific weight variable and potentially filtered .
37,198
def _get_version():
    """Read the package version straight out of waliki/__init__.py.

    Returns None implicitly when no __version__ assignment is found.
    """
    version_prefix = "__version__ = "
    with open("../waliki/__init__.py") as source:
        for line in source:
            if not line.startswith(version_prefix):
                continue
            raw = line.split("=")[-1]
            return raw.strip().strip("'").strip('"')
Get the version from package itself .
37,199
def clean_meta(rst_content):
    """Remove MoinMoin metadata lines ('#...') from the top of *rst_content*.

    Leading lines starting with '#' are dropped; the remainder is
    returned unchanged. Fixes the original's edge case where content
    consisting only of metadata lines kept its final '#' line.
    """
    lines = rst_content.split('\n')
    first_content = 0
    while first_content < len(lines) and lines[first_content].startswith('#'):
        first_content += 1
    return '\n'.join(lines[first_content:])
remove MoinMoin metadata from the top of the file