idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
15,600
def from_sym_2_tri(symm):
    """Flatten a 2-D symmetric matrix into its upper triangle as a 1-D array."""
    upper_inds = np.triu_indices_from(symm)
    return symm[upper_inds]
convert a 2D symmetric matrix to an upper triangular matrix in 1D format
15,601
def sumexp_stable(data):
    """Numerically stable sum of exponentials along axis 0.

    Returns (column sums of the shifted exponentials, column maxima,
    the shifted exponentials themselves).
    """
    col_max = data.max(axis=0)
    # Subtract the per-column max before exponentiating to avoid overflow.
    shifted_exp = np.exp(data - col_max)
    col_sum = np.sum(shifted_exp, axis=0)
    return col_sum, col_max, shifted_exp
Compute the sum of exponents for a list of samples
15,602
def concatenate_not_none(l, axis=0):
    """Concatenate the non-None arrays in ``l`` along ``axis``.

    The original built an explicit index mask with a manual loop; a
    filtering comprehension is the idiomatic equivalent.
    """
    return np.concatenate([arr for arr in l if arr is not None], axis=axis)
Construct a numpy array by stacking not - None arrays in a list
15,603
def cov2corr(cov):
    """Convert a covariance matrix into the corresponding correlation matrix."""
    assert cov.ndim == 2, 'covariance matrix should be 2D array'
    # Scale each row and column by the reciprocal standard deviations.
    inv_sd = 1 / np.sqrt(np.diag(cov))
    return cov * inv_sd[None, :] * inv_sd[:, None]
Calculate the correlation matrix based on a covariance matrix
15,604
def usable_cpu_count():
    """Return the number of CPUs usable by the current process.

    Prefers the process affinity mask where the platform exposes it,
    falling back to psutil and finally to the raw CPU count.
    """
    try:
        return len(os.sched_getaffinity(0))
    except AttributeError:
        pass
    try:
        return len(psutil.Process().cpu_affinity())
    except AttributeError:
        return os.cpu_count()
Get number of CPUs usable by the current process .
15,605
def phase_randomize(data, voxelwise=False, random_state=None):
    """Randomize the phase of time series across subjects.

    Applies random phase shifts (shared across voxels, or per voxel when
    ``voxelwise``) with conjugate symmetry so the power spectrum is kept
    while temporal structure is scrambled.
    """
    data_ndim = data.ndim
    data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)

    if isinstance(random_state, np.random.RandomState):
        prng = random_state
    else:
        prng = np.random.RandomState(random_state)

    # Positive/negative frequency indices, excluding DC (and the Nyquist
    # bin for even-length series).
    if n_TRs % 2 == 0:
        pos_freq = np.arange(1, data.shape[0] // 2)
        neg_freq = np.arange(data.shape[0] - 1, data.shape[0] // 2, -1)
    else:
        pos_freq = np.arange(1, (data.shape[0] - 1) // 2 + 1)
        neg_freq = np.arange(data.shape[0] - 1, (data.shape[0] - 1) // 2, -1)

    # FIX: np.math was deprecated and removed in modern NumPy; use np.pi.
    if not voxelwise:
        phase_shifts = (prng.rand(len(pos_freq), 1, n_subjects)
                        * 2 * np.pi)
    else:
        phase_shifts = (prng.rand(len(pos_freq), n_voxels, n_subjects)
                        * 2 * np.pi)

    fft_data = fft(data, axis=0)
    # Conjugate-symmetric shifts keep the inverse transform real-valued.
    fft_data[pos_freq, :, :] *= np.exp(1j * phase_shifts)
    fft_data[neg_freq, :, :] *= np.exp(-1j * phase_shifts)
    shifted_data = np.real(ifft(fft_data, axis=0))

    if data_ndim == 2:
        shifted_data = shifted_data[:, 0, :]
    return shifted_data
Randomize phase of time series across subjects
15,606
def p_from_null(observed, distribution, side='two-sided', exact=False,
                axis=None):
    """Compute a p-value for ``observed`` against a null ``distribution``."""
    if side not in ('two-sided', 'left', 'right'):
        raise ValueError("The value for 'side' must be either "
                         "'two-sided', 'left', or 'right', got {0}".format(
                             side))
    n_samples = len(distribution)
    logger.info("Assuming {0} resampling iterations".format(n_samples))
    if side == 'two-sided':
        count = np.sum(np.abs(distribution) >= np.abs(observed), axis=axis)
    elif side == 'left':
        count = np.sum(distribution <= observed, axis=axis)
    elif side == 'right':
        count = np.sum(distribution >= observed, axis=axis)
    if exact:
        p = count / n_samples
    else:
        # Add-one correction avoids p == 0 in resampling tests.
        p = (count + 1) / (n_samples + 1)
    return p
Compute p - value from null distribution
15,607
def array_correlation(x, y, axis=0):
    """Column- (axis=0) or row-wise (axis=1) Pearson correlation of x and y."""
    x = np.asarray(x)
    y = np.asarray(y)
    if x.shape != y.shape:
        raise ValueError("Input arrays must be the same shape")
    # Transpose so the correlated dimension is always axis 0.
    if axis == 1:
        x, y = x.T, y.T
    xc = x - np.mean(x, axis=0)
    yc = y - np.mean(y, axis=0)
    covariance = np.sum(xc * yc, axis=0)
    normalizer = np.sqrt(np.sum(xc ** 2, axis=0) * np.sum(yc ** 2, axis=0))
    return covariance / normalizer
Column - or row - wise Pearson correlation between two arrays
15,608
def _prepare_corerelation_data(self, X1, X2, start_voxel=0,
                               num_processed_voxels=None):
    """Compute per-sample correlation data between X1 and X2 via BLAS."""
    num_samples = len(X1)
    assert num_samples > 0, \
        'at least one sample is needed for correlation computation'
    num_voxels1 = X1[0].shape[1]
    num_voxels2 = X2[0].shape[1]
    assert num_voxels1 * num_voxels2 == self.num_features_, \
        'the number of features provided by the input data ' \
        'does not match the number of features defined in the model'
    assert X1[0].shape[0] == X2[0].shape[0], \
        'the numbers of TRs of X1 and X2 are not identical'
    if num_processed_voxels is None:
        num_processed_voxels = num_voxels1
    corr_data = np.zeros((num_samples, num_processed_voxels, num_voxels2),
                         np.float32, order='C')
    for idx, data in enumerate(X1):
        data2 = X2[idx]
        num_TRs = data.shape[0]
        blas.compute_corr_vectors('N', 'T',
                                  num_voxels2, num_processed_voxels, num_TRs,
                                  1.0, data2, num_voxels2,
                                  data, num_voxels1,
                                  0.0, corr_data, num_voxels2,
                                  start_voxel, idx)
    logger.debug('correlation computation done')
    return corr_data
Compute auto - correlation for the input data X1 and X2 .
15,609
def _normalize_correlation_data(self, corr_data, norm_unit):
    """Normalize correlation data in epoch groups when norm_unit > 1."""
    if norm_unit <= 1:
        # Nothing to normalize; hand the data back unchanged.
        return corr_data
    num_samples = len(corr_data)
    _, d2, d3 = corr_data.shape
    # Flatten to the layout fcma_extension.normalization expects;
    # the extension normalizes in place.
    flat_view = corr_data.reshape(1, num_samples, d2 * d3)
    fcma_extension.normalization(flat_view, norm_unit)
    normalized = flat_view.reshape(num_samples, d2, d3)
    logger.debug('normalization done')
    return normalized
Normalize the correlation data if necessary .
15,610
def _compute_kernel_matrix_in_portion(self, X1, X2):
    """Accumulate the precomputed-kernel matrix over voxel portions."""
    kernel_matrix = np.zeros((self.num_samples_, self.num_samples_),
                             np.float32, order='C')
    sr = 0
    row_length = self.num_processed_voxels
    num_voxels2 = X2[0].shape[1]
    normalized_corr_data = None
    while sr < self.num_voxels_:
        # The last portion may be smaller than the configured row length.
        if row_length >= self.num_voxels_ - sr:
            row_length = self.num_voxels_ - sr
        corr_data = self._prepare_corerelation_data(X1, X2, sr, row_length)
        normalized_corr_data = self._normalize_correlation_data(
            corr_data, self.epochs_per_subj)
        normalized_corr_data = normalized_corr_data.reshape(
            1, self.num_samples_, row_length * num_voxels2)
        blas.compute_kernel_matrix(
            'L', 'T', self.num_samples_, row_length * num_voxels2,
            1.0, normalized_corr_data, 0, row_length * num_voxels2,
            1.0, kernel_matrix, self.num_samples_)
        sr += row_length
    num_digits = len(str(int(kernel_matrix[0, 0])))
    self.num_digits_ = num_digits
    if num_digits > 2:
        # Rescale large kernel values into a numerically friendly range.
        kernel_matrix *= 10 ** (2 - num_digits)
    return kernel_matrix, normalized_corr_data
Compute kernel matrix for sklearn . svm . SVC with precomputed kernel .
15,611
def _generate_training_data(self, X1, X2, num_training_samples):
    """Build the training matrix: raw features, or a precomputed kernel."""
    precomputed = (isinstance(self.clf, sklearn.svm.SVC)
                   and self.clf.kernel == 'precomputed')
    if not precomputed:
        corr_data = self._prepare_corerelation_data(X1, X2)
        normalized_corr_data = self._normalize_correlation_data(
            corr_data, self.epochs_per_subj)
        data = normalized_corr_data.reshape(self.num_samples_,
                                            self.num_features_)
        self.training_data_ = None
    else:
        if self.num_processed_voxels < self.num_voxels_:
            # Portion-wise kernel computation requires a predefined split.
            if num_training_samples is None:
                raise RuntimeError('the kernel matrix will be '
                                   'computed portion by portion, '
                                   'the test samples must be predefined '
                                   'by specifying '
                                   'num_training_samples')
            if num_training_samples >= self.num_samples_:
                raise ValueError('the number of training samples '
                                 'must be smaller than '
                                 'the number of total samples')
        data, normalized_corr_data = \
            self._compute_kernel_matrix_in_portion(X1, X2)
        if self.num_processed_voxels >= self.num_voxels_:
            # All voxels fit in one portion; keep the flattened features.
            self.training_data_ = normalized_corr_data.reshape(
                self.num_samples_, self.num_features_)
        else:
            self.training_data_ = None
        logger.debug('kernel computation done')
    return data
Generate training data for the classifier .
15,612
def fit(self, X, y, num_training_samples=None):
    """Train the model on correlation data.

    X is a list of (X1, X2) array pairs; y holds the labels.
    num_training_samples is only meaningful for SVC with a precomputed
    kernel, where it defines the train/test split of the kernel matrix.
    """
    time1 = time.time()
    assert len(X) == len(y), \
        'the number of samples must be equal to the number of labels'
    for x in X:
        assert len(x) == 2, \
            'there must be two parts for each correlation computation'
    X1, X2 = zip(*X)
    if not (isinstance(self.clf, sklearn.svm.SVC)
            and self.clf.kernel == 'precomputed'):
        if num_training_samples is not None:
            num_training_samples = None
            # FIX: Logger.warn is deprecated; Logger.warning is canonical.
            logger.warning(
                'num_training_samples should not be set for classifiers '
                'other than SVM with precomputed kernels')
    num_samples = len(X1)
    num_voxels1 = X1[0].shape[1]
    num_voxels2 = X2[0].shape[1]
    # Ensure the first matrix is always the wider one.
    if num_voxels1 < num_voxels2:
        X1, X2 = X2, X1
        num_voxels1, num_voxels2 = num_voxels2, num_voxels1
    self.num_voxels_ = num_voxels1
    self.num_features_ = num_voxels1 * num_voxels2
    self.num_samples_ = num_samples
    data = self._generate_training_data(X1, X2, num_training_samples)
    if num_training_samples is not None:
        self.test_raw_data_ = None
        # Hold out the trailing rows of the kernel matrix as test data.
        self.test_data_ = data[num_training_samples:,
                               0:num_training_samples]
        data = data[0:num_training_samples, 0:num_training_samples]
    self.clf = self.clf.fit(data, y[0:num_training_samples])
    if num_training_samples is None:
        self.test_raw_data_ = None
        self.test_data_ = None
    time2 = time.time()
    logger.info('training done, takes %.2f s' % (time2 - time1))
    return self
Use correlation data to train a model .
15,613
def predict(self, X=None):
    """Predict labels for correlation data (or reuse cached test data)."""
    time1 = time.time()
    if X is not None:
        for x in X:
            assert len(x) == 2, \
                'there must be two parts for each correlation computation'
        X1, X2 = zip(*X)
        num_voxels1 = X1[0].shape[1]
        num_voxels2 = X2[0].shape[1]
        # Keep the wider matrix first, mirroring fit().
        if num_voxels1 < num_voxels2:
            X1, X2 = X2, X1
            num_voxels1, num_voxels2 = num_voxels2, num_voxels1
        assert self.num_features_ == num_voxels1 * num_voxels2, \
            'the number of features does not match the model'
        num_test_samples = len(X1)
        self.test_raw_data_ = X
        corr_data = self._prepare_corerelation_data(X1, X2)
        normalized_corr_data = self._normalize_correlation_data(
            corr_data, num_test_samples)
        self.test_data_ = self._prepare_test_data(normalized_corr_data)
    y_pred = self.clf.predict(self.test_data_)
    time2 = time.time()
    logger.info('prediction done, takes %.2f s' % (time2 - time1))
    return y_pred
Use a trained model to predict correlation data .
15,614
def decision_function(self, X=None):
    """Return classifier decision values, recomputing test data if X is new."""
    if X is not None and not self._is_equal_to_test_raw_data(X):
        for x in X:
            assert len(x) == 2, \
                'there must be two parts for each correlation computation'
        X1, X2 = zip(*X)
        num_voxels1 = X1[0].shape[1]
        num_voxels2 = X2[0].shape[1]
        assert len(X1) == len(X2), 'the list lengths do not match'
        # Keep the wider matrix first, mirroring fit().
        if num_voxels1 < num_voxels2:
            X1, X2 = X2, X1
            num_voxels1, num_voxels2 = num_voxels2, num_voxels1
        assert self.num_features_ == num_voxels1 * num_voxels2, \
            'the number of features does not match the model'
        num_test_samples = len(X1)
        self.test_raw_data_ = X
        corr_data = self._prepare_corerelation_data(X1, X2)
        normalized_corr_data = self._normalize_correlation_data(
            corr_data, num_test_samples)
        self.test_data_ = self._prepare_test_data(normalized_corr_data)
    return self.clf.decision_function(self.test_data_)
Output the decision value of the prediction .
15,615
def _check_isc_input(iscs, pairwise=False):
    """Validate and reshape ISC input; infer subject and voxel counts.

    Accepts a list or an ndarray; 1-D input is promoted to a column.
    For pairwise input, verifies the rows form a valid condensed
    (vectorized-triangle) distance-style matrix.
    """
    # FIX: isinstance is the idiomatic type check (was: type(iscs) == list).
    if isinstance(iscs, list):
        iscs = np.array(iscs)[:, np.newaxis]
    elif isinstance(iscs, np.ndarray):
        if iscs.ndim == 1:
            iscs = iscs[:, np.newaxis]
    if pairwise:
        try:
            test_square = squareform(iscs[:, 0])
            n_subjects = test_square.shape[0]
        except ValueError:
            raise ValueError("For pairwise input, ISCs must be the "
                             "vectorized triangle of a square matrix.")
    elif not pairwise:
        n_subjects = iscs.shape[0]
    n_voxels = iscs.shape[1]
    logger.info("Assuming {0} subjects with and {1} "
                "voxel(s) or ROI(s) in bootstrap ISC test.".format(
                    n_subjects, n_voxels))
    return iscs, n_subjects, n_voxels
Checks ISC inputs for statistical tests
15,616
def _check_targets_input ( targets , data ) : if isinstance ( targets , np . ndarray ) or isinstance ( targets , list ) : targets , n_TRs , n_voxels , n_subjects = ( _check_timeseries_input ( targets ) ) if data . shape [ 0 ] != n_TRs : raise ValueError ( "Targets array must have same number of " "TRs as input data" ) if data . shape [ 2 ] != n_subjects : raise ValueError ( "Targets array must have same number of " "subjects as input data" ) symmetric = False else : targets = data n_TRs , n_voxels , n_subjects = data . shape symmetric = True return targets , n_TRs , n_voxels , n_subjects , symmetric
Checks ISFC targets input array
15,617
def compute_summary_statistic(iscs, summary_statistic='mean', axis=None):
    """Summarize ISC values with a Fisher-z mean or a plain median."""
    if summary_statistic not in ('mean', 'median'):
        raise ValueError("Summary statistic must be 'mean' or 'median'")
    if summary_statistic == 'mean':
        # Average in Fisher-z space, then transform back to correlations.
        statistic = np.tanh(np.nanmean(np.arctanh(iscs), axis=axis))
    elif summary_statistic == 'median':
        statistic = np.nanmedian(iscs, axis=axis)
    return statistic
Computes summary statistics for ISCs
15,618
def _threshold_nans(data, tolerate_nans):
    """Mask out voxels whose NaN rate across subjects exceeds tolerance.

    ``tolerate_nans`` may be True (keep any voxel with at least one
    non-NaN subject), a float in [0, 1] (minimum fraction of non-NaN
    subjects), or anything else (drop voxels with any NaN is NOT done
    here; the mask below only drops all-NaN voxels in that case).
    """
    # Voxels that are NaN for every subject are always dropped.
    nans = np.all(np.any(np.isnan(data), axis=0), axis=1)
    if tolerate_nans is True:
        logger.info("ISC computation will tolerate all NaNs when averaging")
    # FIX: isinstance instead of `type(...) is float` so np.float64
    # thresholds are honored rather than silently ignored.
    elif isinstance(tolerate_nans, float):
        if not 0.0 <= tolerate_nans <= 1.0:
            raise ValueError("If threshold to tolerate NaNs is a float, "
                             "it must be between 0.0 and 1.0; got {0}".format(
                                 tolerate_nans))
        # Also drop voxels below the requested non-NaN subject fraction.
        # (|= replaces the original bool-array +=, which is logical OR.)
        nans |= ~(np.sum(~np.any(np.isnan(data), axis=0), axis=1)
                  >= data.shape[-1] * tolerate_nans)
        logger.info("ISC computation will tolerate voxels with at least "
                    "{0} non-NaN values: {1} voxels do not meet "
                    "threshold".format(tolerate_nans, np.sum(nans)))
    else:
        logger.info("ISC computation will not tolerate NaNs when averaging")
    mask = ~nans
    data = data[:, mask, :]
    return data, mask
Thresholds data based on proportion of subjects with NaNs
15,619
def bootstrap_isc(iscs, pairwise=False, summary_statistic='median',
                  n_bootstraps=1000, ci_percentile=95, random_state=None):
    """One-sample group-level bootstrap hypothesis test for ISCs.

    Resamples subjects with replacement, recomputes the summary
    statistic per bootstrap, and returns the observed statistic, the
    confidence interval, a two-sided p-value, and the distribution.
    """
    iscs, n_subjects, n_voxels = _check_isc_input(iscs, pairwise=pairwise)
    if summary_statistic not in ('mean', 'median'):
        raise ValueError("Summary statistic must be 'mean' or 'median'")
    observed = compute_summary_statistic(
        iscs, summary_statistic=summary_statistic, axis=0)
    distribution = []
    for i in np.arange(n_bootstraps):
        if isinstance(random_state, np.random.RandomState):
            prng = random_state
        else:
            prng = np.random.RandomState(random_state)
        subject_sample = sorted(prng.choice(np.arange(n_subjects),
                                            size=n_subjects))
        if pairwise:
            isc_sample = []
            for voxel_iscs in iscs.T:
                # Expand to a square matrix so subjects can be resampled.
                voxel_iscs = squareform(voxel_iscs)
                np.fill_diagonal(voxel_iscs, 1)
                assert voxel_iscs.shape[0] == voxel_iscs.shape[1]
                assert np.allclose(voxel_iscs, voxel_iscs.T)
                voxel_sample = voxel_iscs[subject_sample, :][:,
                                                             subject_sample]
                voxel_sample = squareform(voxel_sample, checks=False)
                # Duplicate-subject pairs yield exact 1s; mark as missing.
                # FIX: np.NaN alias was removed in NumPy 2.0; use np.nan.
                voxel_sample[voxel_sample == 1.] = np.nan
                isc_sample.append(voxel_sample)
            isc_sample = np.column_stack(isc_sample)
        elif not pairwise:
            isc_sample = iscs[subject_sample, :]
        distribution.append(compute_summary_statistic(
            isc_sample, summary_statistic=summary_statistic, axis=0))
        # Reseed so successive iterations are independent but reproducible.
        random_state = np.random.RandomState(
            prng.randint(0, MAX_RANDOM_SEED))
    distribution = np.array(distribution)
    ci = (np.percentile(distribution, (100 - ci_percentile) / 2, axis=0),
          np.percentile(distribution,
                        ci_percentile + (100 - ci_percentile) / 2, axis=0))
    # Center the distribution on zero for the two-sided test.
    shifted = distribution - observed
    p = p_from_null(observed, shifted, side='two-sided', exact=False, axis=0)
    return observed, ci, p, distribution
One - sample group - level bootstrap hypothesis test for ISCs
15,620
def _permute_one_sample_iscs(iscs, group_parameters, i, pairwise=False,
                             summary_statistic='median', group_matrix=None,
                             exact_permutations=None, prng=None):
    """Apply one sign-flipping permutation to ISC data and summarize it."""
    if exact_permutations:
        sign_flipper = np.array(exact_permutations[i])
    else:
        sign_flipper = prng.choice([-1, 1],
                                   size=group_parameters['n_subjects'],
                                   replace=True)
    if pairwise:
        # Flip the square matrix along both axes, then re-vectorize.
        matrix_flipped = (group_parameters['group_matrix'] * sign_flipper
                          * sign_flipper[:, np.newaxis])
        sign_flipper = squareform(matrix_flipped, checks=False)
    isc_flipped = iscs * sign_flipper[:, np.newaxis]
    return compute_summary_statistic(isc_flipped,
                                     summary_statistic=summary_statistic,
                                     axis=0)
Applies one - sample permutations to ISC data
15,621
def _permute_two_sample_iscs(iscs, group_parameters, i, pairwise=False,
                             summary_statistic='median',
                             exact_permutations=None, prng=None):
    """Apply one group-label permutation to ISC data; return the group diff."""
    if exact_permutations:
        group_shuffler = np.array(exact_permutations[i])
    elif not exact_permutations and pairwise:
        group_shuffler = prng.permutation(np.arange(
            len(np.array(group_parameters['group_assignment'])[
                group_parameters['sorter']])))
    elif not exact_permutations and not pairwise:
        group_shuffler = prng.permutation(np.arange(
            len(group_parameters['group_assignment'])))
    if pairwise:
        # Shuffle the square matrix on both axes, unsort, and vectorize.
        group_shuffled = group_parameters['group_matrix'][
            group_shuffler, :][:, group_shuffler]
        group_selector = squareform(
            group_shuffled[group_parameters['unsorter'], :][
                :, group_parameters['unsorter']],
            checks=False)
    elif not pairwise:
        group_selector = np.array(
            group_parameters['group_assignment'])[group_shuffler]
    # Difference in summary statistic between the two shuffled groups.
    isc_sample = (compute_summary_statistic(
        iscs[group_selector == group_parameters['group_labels'][0], :],
        summary_statistic=summary_statistic, axis=0)
        - compute_summary_statistic(
            iscs[group_selector == group_parameters['group_labels'][1], :],
            summary_statistic=summary_statistic, axis=0))
    return isc_sample
Applies two - sample permutations to ISC data
15,622
def timeshift_isc(data, pairwise=False, summary_statistic='median',
                  n_shifts=1000, tolerate_nans=True, random_state=None):
    """Circular time-shift randomization test for one-sample ISC."""
    data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
    observed = isc(data, pairwise=pairwise,
                   summary_statistic=summary_statistic,
                   tolerate_nans=tolerate_nans)
    if pairwise:
        # Subjects become the leading axis for per-subject shifting.
        data = np.rollaxis(data, 2, 0)
    distribution = []
    for i in np.arange(n_shifts):
        if isinstance(random_state, np.random.RandomState):
            prng = random_state
        else:
            prng = np.random.RandomState(random_state)
        shifts = prng.choice(np.arange(n_TRs), size=n_subjects, replace=True)
        if pairwise:
            shifted_data = []
            for subject, shift in zip(data, shifts):
                # Circularly rotate each subject's time series.
                shifted_data.append(np.concatenate(
                    (subject[-shift:, :], subject[:-shift, :])))
            shifted_data = np.dstack(shifted_data)
            shifted_isc = isc(shifted_data, pairwise=pairwise,
                              summary_statistic=summary_statistic,
                              tolerate_nans=tolerate_nans)
        elif not pairwise:
            shifted_isc = []
            for s, shift in enumerate(shifts):
                shifted_subject = np.concatenate(
                    (data[-shift:, :, s], data[:-shift, :, s]))
                nonshifted_mean = np.mean(np.delete(data, s, 2), axis=2)
                loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
                              pairwise=False, summary_statistic=None,
                              tolerate_nans=tolerate_nans)
                shifted_isc.append(loo_isc)
            shifted_isc = compute_summary_statistic(
                np.dstack(shifted_isc),
                summary_statistic=summary_statistic, axis=2)
        distribution.append(shifted_isc)
        # Reseed so successive iterations are independent but reproducible.
        random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
    distribution = np.vstack(distribution)
    p = p_from_null(observed, distribution, side='two-sided',
                    exact=False, axis=0)
    return observed, p, distribution
Circular time - shift randomization for one - sample ISC test
15,623
def phaseshift_isc(data, pairwise=False, summary_statistic='median',
                   n_shifts=1000, tolerate_nans=True, random_state=None):
    """Phase-randomization test for one-sample ISC."""
    data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data)
    observed = isc(data, pairwise=pairwise,
                   summary_statistic=summary_statistic,
                   tolerate_nans=tolerate_nans)
    distribution = []
    for i in np.arange(n_shifts):
        if isinstance(random_state, np.random.RandomState):
            prng = random_state
        else:
            prng = np.random.RandomState(random_state)
        shifted_data = phase_randomize(data, random_state=prng)
        if pairwise:
            shifted_isc = isc(shifted_data, pairwise=True,
                              summary_statistic=summary_statistic,
                              tolerate_nans=tolerate_nans)
        elif not pairwise:
            # Leave-one-out: shifted subject vs. mean of the others.
            shifted_data = np.rollaxis(shifted_data, 2, 0)
            shifted_isc = []
            for s, shifted_subject in enumerate(shifted_data):
                nonshifted_mean = np.mean(np.delete(data, s, axis=2),
                                          axis=2)
                loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)),
                              pairwise=False, summary_statistic=None,
                              tolerate_nans=tolerate_nans)
                shifted_isc.append(loo_isc)
            shifted_isc = compute_summary_statistic(
                np.dstack(shifted_isc),
                summary_statistic=summary_statistic, axis=2)
        distribution.append(shifted_isc)
        # Reseed so successive iterations are independent but reproducible.
        random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED))
    distribution = np.vstack(distribution)
    p = p_from_null(observed, distribution, side='two-sided',
                    exact=False, axis=0)
    return observed, p, distribution
Phase randomization for one - sample ISC test
15,624
def init_prior(self, R):
    """Initialize the subject prior from fitted centers and widths."""
    centers, widths = self.init_centers_widths(R)
    # One flat vector holding K centers (n_dim each) plus K widths.
    prior = np.zeros(self.K * (self.n_dim + 1))
    self.set_centers(prior, centers)
    self.set_widths(prior, widths)
    self.set_prior(prior)
    return self
initialize prior for the subject
15,625
def _assign_posterior(self):
    """Reorder posterior factors to match the prior (Hungarian matching)."""
    prior_centers = self.get_centers(self.local_prior)
    posterior_centers = self.get_centers(self.local_posterior_)
    posterior_widths = self.get_widths(self.local_posterior_)
    # Match each prior center to its nearest posterior center.
    cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
    _, col_ind = linear_sum_assignment(cost)
    self.set_centers(self.local_posterior_, posterior_centers[col_ind])
    self.set_widths(self.local_posterior_, posterior_widths[col_ind])
    return self
assign posterior to prior based on Hungarian algorithm
15,626
def _mse_converged(self):
    """Check convergence via MSE between local prior and posterior.

    Returns (converged, mse).
    """
    mse = mean_squared_error(self.local_prior, self.local_posterior_,
                             multioutput='uniform_average')
    if mse > self.threshold:
        return False, mse
    return True, mse
Check convergence based on mean squared error
15,627
def init_centers_widths(self, R):
    """Initialize factor centers via K-means; widths via the max sigma."""
    kmeans = KMeans(init='k-means++', n_clusters=self.K, n_init=10,
                    random_state=100)
    kmeans.fit(R)
    # Every factor starts with the same (maximal) width.
    widths = self._get_max_sigma(R) * np.ones((self.K, 1))
    return kmeans.cluster_centers_, widths
Initialize prior of centers and widths
15,628
def get_template(self, R):
    """Compute a template prior on latent factors from coordinates R."""
    centers, widths = self.init_centers_widths(R)
    template_prior = np.zeros(
        self.K * (self.n_dim + 2 + self.cov_vec_size))
    # Shrink the coordinate covariance as the number of factors grows.
    template_centers_cov = np.cov(R.T) * math.pow(self.K, -2 / 3.0)
    template_widths_var = self._get_max_sigma(R)
    centers_cov_all = np.tile(from_sym_2_tri(template_centers_cov), self.K)
    widths_var_all = np.tile(template_widths_var, self.K)
    self.set_centers(template_prior, centers)
    self.set_widths(template_prior, widths)
    self.set_centers_mean_cov(template_prior, centers_cov_all)
    self.set_widths_mean_var(template_prior, widths_var_all)
    return template_prior, template_centers_cov, template_widths_var
Compute a template on latent factors
15,629
def set_widths(self, estimation, widths):
    """Write the widths block into the estimation vector in place."""
    start, stop = self.map_offset[1], self.map_offset[2]
    estimation[start:stop] = widths.ravel()
Set estimation on widths
15,630
def set_centers_mean_cov(self, estimation, centers_mean_cov):
    """Write the centers-mean covariance block into the estimation vector."""
    start, stop = self.map_offset[2], self.map_offset[3]
    estimation[start:stop] = centers_mean_cov.ravel()
Set estimation on centers
15,631
def get_centers(self, estimation):
    """Extract the (K, n_dim) centers block from the estimation vector."""
    return estimation[0:self.map_offset[1]].reshape(self.K, self.n_dim)
Get estimation on centers
15,632
def get_widths(self, estimation):
    """Extract the (K, 1) widths block from the estimation vector."""
    start, stop = self.map_offset[1], self.map_offset[2]
    return estimation[start:stop].reshape(self.K, 1)
Get estimation on widths
15,633
def get_centers_mean_cov(self, estimation):
    """Extract the (K, cov_vec_size) centers-mean covariance block."""
    start, stop = self.map_offset[2], self.map_offset[3]
    return estimation[start:stop].reshape(self.K, self.cov_vec_size)
Get estimation on the covariance of centers mean
15,634
def get_widths_mean_var(self, estimation):
    """Extract the (K, 1) widths-mean variance block (tail of the vector)."""
    return estimation[self.map_offset[3]:].reshape(self.K, 1)
Get estimation on the variance of widths mean
15,635
def get_factors(self, unique_R, inds, centers, widths):
    """Evaluate the factor matrix at coordinates via the tfa_extension."""
    # One row per coordinate index, one column per factor; filled in place.
    F = np.zeros((len(inds[0]), self.K))
    tfa_extension.factor(F, centers, widths,
                         unique_R[0], unique_R[1], unique_R[2],
                         inds[0], inds[1], inds[2])
    return F
Calculate factors based on centers and widths
15,636
def get_weights(self, data, F):
    """Solve for the weight matrix mapping factors F to data.

    Uses ridge regression (regularization strength = variance of the
    data) when ``self.weight_method == 'rr'``; plain least squares
    otherwise. The original pre-allocated W with np.zeros and then
    overwrote it in both branches; that dead store is removed.
    """
    beta = np.var(data)
    trans_F = F.T.copy()
    if self.weight_method == 'rr':
        W = np.linalg.solve(trans_F.dot(F) + beta * np.identity(self.K),
                            trans_F.dot(data))
    else:
        W = np.linalg.solve(trans_F.dot(F), trans_F.dot(data))
    return W
Calculate weight matrix based on fMRI data and factors
15,637
def _get_max_sigma ( self , R ) : max_sigma = 2.0 * math . pow ( np . nanmax ( np . std ( R , axis = 0 ) ) , 2 ) return max_sigma
Calculate maximum sigma of scanner RAS coordinates
15,638
def get_bounds(self, R):
    """Lower/upper bounds for center coordinates and factor widths."""
    max_sigma = self._get_max_sigma(R)
    n_center = self.K * self.n_dim
    lower = np.zeros(self.K * (self.n_dim + 1))
    upper = np.zeros(self.K * (self.n_dim + 1))
    # Centers are bounded by the coordinate extremes, repeated per factor.
    lower[0:n_center] = np.tile(np.nanmin(R, axis=0), self.K)
    upper[0:n_center] = np.tile(np.nanmax(R, axis=0), self.K)
    # Widths are bounded by ratios of the maximum sigma.
    lower[n_center:] = np.repeat(self.lower_ratio * max_sigma, self.K)
    upper[n_center:] = np.repeat(self.upper_ratio * max_sigma, self.K)
    return (lower, upper)
Calculate lower and upper bounds for centers and widths
15,639
def _residual_multivariate(self, estimate, unique_R, inds, X, W,
                           template_centers, template_centers_mean_cov,
                           template_widths, template_widths_mean_var_reci,
                           data_sigma):
    """Residual vector for nonlinear least squares over centers/widths.

    Concatenates the reconstruction error with optional template
    penalties on the centers and widths.
    """
    centers = self.get_centers(estimate)
    widths = self.get_widths(estimate)
    recon = X.size
    # 2*K extra residuals when a template prior is supplied.
    other_err = 0 if template_centers is None else (2 * self.K)
    final_err = np.zeros(recon + other_err)
    F = self.get_factors(unique_R, inds, centers, widths)
    sigma = np.zeros((1,))
    sigma[0] = data_sigma
    tfa_extension.recon(final_err[0:recon], X, F, W, sigma)
    if other_err > 0:
        # Mahalanobis-style distance of each center from the template.
        for k in np.arange(self.K):
            diff = centers[k] - template_centers[k]
            cov = from_tri_2_sym(template_centers_mean_cov[k], self.n_dim)
            final_err[recon + k] = math.sqrt(
                self.sample_scaling
                * diff.dot(np.linalg.solve(cov, diff.T)))
        base = recon + self.K
        dist = template_widths_mean_var_reci * \
            (widths - template_widths) ** 2
        final_err[base:] = np.sqrt(self.sample_scaling * dist).ravel()
    return final_err
Residual function for estimating centers and widths
15,640
def _estimate_centers_widths(self, unique_R, inds, X, W,
                             init_centers, init_widths,
                             template_centers, template_widths,
                             template_centers_mean_cov,
                             template_widths_mean_var_reci):
    """Estimate centers and widths by nonlinear least squares.

    NOTE(review): the positional order of the template arguments here
    does not match the order in which they are forwarded to
    _residual_multivariate, and the caller appears to pass them swapped
    as well — the two swaps cancel out, so do not "fix" one site
    without the other.
    """
    init_estimate = np.hstack((init_centers.ravel(), init_widths.ravel()))
    data_sigma = 1.0 / math.sqrt(2.0) * np.std(X)
    final_estimate = least_squares(
        self._residual_multivariate, init_estimate,
        args=(unique_R, inds, X, W, template_centers, template_widths,
              template_centers_mean_cov, template_widths_mean_var_reci,
              data_sigma),
        method=self.nlss_method, loss=self.nlss_loss, bounds=self.bounds,
        verbose=0, x_scale=self.x_scale, tr_solver=self.tr_solver)
    return final_estimate.x, final_estimate.cost
Estimate centers and widths
15,641
def _fit_tfa(self, data, R, template_prior=None):
    """Main TFA loop: repeat inner fits until convergence or miter."""
    if template_prior is None:
        template_centers = None
        template_widths = None
        template_centers_mean_cov = None
        template_widths_mean_var_reci = None
    else:
        template_centers = self.get_centers(template_prior)
        template_widths = self.get_widths(template_prior)
        template_centers_mean_cov = \
            self.get_centers_mean_cov(template_prior)
        template_widths_mean_var_reci = \
            1.0 / self.get_widths_mean_var(template_prior)
    inner_converged = False
    np.random.seed(self.seed)
    n = 0
    while n < self.miter and not inner_converged:
        self._fit_tfa_inner(data, R, template_centers, template_widths,
                            template_centers_mean_cov,
                            template_widths_mean_var_reci)
        self._assign_posterior()
        inner_converged, _ = self._converged()
        if not inner_converged:
            # Carry the posterior forward as the next iteration's prior.
            self.local_prior = self.local_posterior_
        else:
            logger.info("TFA converged at %d iteration." % (n))
        n += 1
        gc.collect()
    return self
TFA main algorithm
15,642
def get_unique_R(self, R):
    """Per-dimension unique coordinate values and their inverse indices."""
    pairs = [np.unique(R[:, d], return_inverse=True)
             for d in range(self.n_dim)]
    unique_R = [uniq for uniq, _ in pairs]
    inds = [inv for _, inv in pairs]
    return unique_R, inds
Get unique values from coordinate matrix
15,643
def _fit_tfa_inner(self, data, R, template_centers, template_widths,
                   template_centers_mean_cov,
                   template_widths_mean_var_reci):
    """One inner TFA iteration on a random voxel/TR subsample.

    Dead stores from the original are removed: a zero pre-allocation of
    ``curr_data`` that was immediately overwritten, and a boolean
    ``sample_features`` mask that was built but never read. Neither
    consumed random numbers, so the sampling sequence is unchanged.
    """
    nfeature = data.shape[0]
    nsample = data.shape[1]
    feature_indices = np.random.choice(nfeature, self.max_num_voxel,
                                       replace=False)
    samples_indices = np.random.choice(nsample, self.max_num_tr,
                                       replace=False)
    curr_data = data[feature_indices]
    curr_data = curr_data[:, samples_indices].copy()
    curr_R = R[feature_indices].copy()
    centers = self.get_centers(self.local_prior)
    widths = self.get_widths(self.local_prior)
    unique_R, inds = self.get_unique_R(curr_R)
    F = self.get_factors(unique_R, inds, centers, widths)
    W = self.get_weights(curr_data, F)
    # NOTE(review): the template arguments below are passed in an order
    # that differs from _estimate_centers_widths' parameter names; the
    # downstream forwarding swaps them back, so keep both sites in sync.
    self.local_posterior_, self.total_cost = self._estimate_centers_widths(
        unique_R, inds, curr_data, W, centers, widths,
        template_centers, template_centers_mean_cov,
        template_widths, template_widths_mean_var_reci)
    return self
Fit TFA model the inner loop part
15,644
def recon_err(data, F, W):
    """Root-mean-squared reconstruction error of data against F @ W."""
    reconstruction = F.dot(W).ravel()
    err = mean_squared_error(data.ravel(), reconstruction,
                             multioutput='uniform_average')
    return math.sqrt(err)
Calculate reconstruction error
15,645
def get_train_err(htfa, data, F):
    """Training reconstruction error for fitted factors F on data."""
    weights = htfa.get_weights(data, F)
    return recon_err(data, F, weights)
Calculate training error
15,646
def _sfn(l, mask, myrad, bcast_var):
    """Score a classifier on searchlight data using cross-validation.

    Parameters
    ----------
    l : list of 4D arrays
        Searchlight block data; only the first subject is used.
    mask : 3D bool array
        Voxels to include within the searchlight.
    myrad : int
        Searchlight radius (unused here; required by the API).
    bcast_var : tuple
        (labels, num_folds, classifier) broadcast to all ranks.

    Returns
    -------
    float
        Mean cross-validated accuracy.
    """
    labels, num_folds, clf = bcast_var[0], bcast_var[1], bcast_var[2]
    samples = l[0][mask, :].T
    folds = model_selection.StratifiedKFold(n_splits=num_folds,
                                            shuffle=False)
    scores = model_selection.cross_val_score(clf, samples, y=labels,
                                             cv=folds, n_jobs=1)
    return np.mean(scores)
Score classifier on searchlight data using cross - validation .
15,647
def run(self, clf):
    """Run activity-based voxel selection via Searchlight.

    Distributes the data and mask across MPI ranks, broadcasts the
    labels/folds/classifier, and scores each searchlight with
    cross-validation.

    Parameters
    ----------
    clf : classifier
        Scikit-learn style classifier evaluated per searchlight.

    Returns
    -------
    result_volume : 3D array
        Accuracy per voxel (None outside the mask); complete only on
        rank 0.
    results : list of (int, float)
        (voxel index, accuracy) pairs sorted by accuracy descending;
        empty on non-zero ranks.
    """
    rank = MPI.COMM_WORLD.Get_rank()
    if rank == 0:
        logger.info('running activity-based voxel selection via Searchlight')
    self.sl.distribute([self.data], self.mask)
    self.sl.broadcast((self.labels, self.num_folds, clf))
    if rank == 0:
        logger.info('data preparation done')
    result_volume = self.sl.run_searchlight(_sfn)
    result_list = result_volume[self.mask]
    results = []
    if rank == 0:
        for idx, value in enumerate(result_list):
            if value is None:
                # voxels skipped by the searchlight get accuracy 0
                value = 0
            results.append((idx, value))
        # best-scoring voxels first
        results.sort(key=lambda tup: tup[1], reverse=True)
        logger.info('activity-based voxel selection via Searchlight is done')
    return result_volume, results
run activity - based voxel selection
15,648
def _cross_validation_for_one_voxel(clf, vid, num_folds, subject_data, labels):
    """Score a classifier for a single voxel via cross-validation.

    Parameters
    ----------
    clf : classifier
        Scikit-learn style classifier.
    vid : int
        Voxel id, returned alongside the score.
    num_folds : int
        Number of stratified folds.
    subject_data : 2D array
        Per-epoch feature vectors for this voxel.
    labels : array
        Class label of each epoch.

    Returns
    -------
    tuple of (int, float)
        The voxel id and its mean cross-validation accuracy.
    """
    folds = model_selection.StratifiedKFold(n_splits=num_folds,
                                            shuffle=False)
    scores = model_selection.cross_val_score(clf, subject_data,
                                             y=labels, cv=folds,
                                             n_jobs=1)
    logger.debug('cross validation for voxel %d is done' % vid)
    return (vid, scores.mean())
Score classifier on data using cross validation .
15,649
def run(self, clf):
    """Run correlation-based voxel selection in master-worker model.

    The master rank distributes tasks and aggregates results; every
    other rank scores voxels with the given classifier.

    Parameters
    ----------
    clf : classifier
        Scikit-learn style classifier used by the workers.

    Returns
    -------
    list of (int, float)
        (voxel id, accuracy) pairs sorted by accuracy descending on
        the master rank; an empty list on worker ranks.
    """
    if MPI.COMM_WORLD.Get_rank() == self.master_rank:
        results = self._master()
        # best-scoring voxels first
        results.sort(key=lambda tup: tup[1], reverse=True)
        return results
    self._worker(clf)
    return []
Run correlation - based voxel selection in master - worker model .
15,650
def _master(self):
    """Master node's operation.

    Assigns tasks (start voxel, count) to workers in a first-come
    first-served manner, collects their results, and finally sends a
    terminate tag to every worker.

    Returns
    -------
    list of (int, float)
        Unsorted (voxel id, accuracy) tuples gathered from workers.
    """
    logger.info(
        'Master at rank %d starts to allocate tasks',
        MPI.COMM_WORLD.Get_rank())
    results = []
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    sending_voxels = self.voxel_unit \
        if self.voxel_unit < self.num_voxels else self.num_voxels
    current_task = (0, sending_voxels)
    status = MPI.Status()
    # using_size is the number of ranks actually employed; it shrinks
    # when there are fewer tasks than workers
    using_size = size
    # seed every worker with an initial task
    for i in range(0, size):
        if i == self.master_rank:
            continue
        if current_task[1] == 0:
            using_size = i
            break
        logger.debug('master starts to send a task to worker %d' % i)
        comm.send(current_task,
                  dest=i,
                  tag=self._WORKTAG)
        next_start = current_task[0] + current_task[1]
        sending_voxels = self.voxel_unit \
            if self.voxel_unit < self.num_voxels - next_start \
            else self.num_voxels - next_start
        current_task = (next_start, sending_voxels)
    # hand the next task to whichever worker finishes first
    while using_size == size:
        if current_task[1] == 0:
            break
        result = comm.recv(source=MPI.ANY_SOURCE,
                           tag=MPI.ANY_TAG,
                           status=status)
        results += result
        comm.send(current_task,
                  dest=status.Get_source(),
                  tag=self._WORKTAG)
        next_start = current_task[0] + current_task[1]
        sending_voxels = self.voxel_unit \
            if self.voxel_unit < self.num_voxels - next_start \
            else self.num_voxels - next_start
        current_task = (next_start, sending_voxels)
    # drain the final result from every active worker
    for i in range(0, using_size):
        if i == self.master_rank:
            continue
        result = comm.recv(source=MPI.ANY_SOURCE,
                           tag=MPI.ANY_TAG)
        results += result
    # tell all workers to shut down
    for i in range(0, size):
        if i == self.master_rank:
            continue
        comm.send(None,
                  dest=i,
                  tag=self._TERMINATETAG)
    return results
Master node's operation.
15,651
def _worker(self, clf):
    """Worker node's operation: receive and score tasks until told to stop.

    Blocks on messages from the master; a nonzero tag signals
    termination, otherwise the received task is scored and the result
    sent back.

    Parameters
    ----------
    clf : classifier
        Classifier used for voxel scoring.

    Returns
    -------
    None
    """
    logger.debug('worker %d is running, waiting for tasks from master at rank %d' % (MPI.COMM_WORLD.Get_rank(), self.master_rank))
    comm = MPI.COMM_WORLD
    status = MPI.Status()
    while True:
        task = comm.recv(source=self.master_rank,
                         tag=MPI.ANY_TAG,
                         status=status)
        if status.Get_tag():
            # terminate tag received from the master
            return
        comm.send(self._voxel_scoring(task, clf),
                  dest=self.master_rank)
Worker node's operation.
15,652
def _correlation_normalization(self, corr):
    """Do within-subject normalization of correlation values.

    Applies the Fisher z-transform to every correlation value, then
    z-scores across each subject's block of epochs, voxel by voxel.
    NaNs produced by degenerate values are replaced with zeros.

    Parameters
    ----------
    corr : 3D array, in shape [num_selected_voxels, num_epochs, num_voxels]
        Correlation values; modified in place and also returned.

    Returns
    -------
    3D array
        The normalized correlation values.
    """
    time1 = time.time()
    (sv, e, av) = corr.shape
    for i in range(sv):
        # normalize each subject's block of epochs separately
        for start in range(0, e, self.epochs_per_subj):
            stop = start + self.epochs_per_subj
            fisher = .5 * np.log((corr[i, start:stop, :] + 1)
                                 / (1 - corr[i, start:stop, :]))
            corr[i, start:stop, :] = zscore(fisher, axis=0, ddof=0)
    # degenerate correlations (e.g. +/-1) yield NaN; zero them out
    corr = np.nan_to_num(corr)
    time2 = time.time()
    logger.debug(
        'within-subject normalization for %d voxels '
        'using numpy zscore function, takes %.2f s' %
        (sv, (time2 - time1)))
    return corr
Do within - subject normalization .
15,653
def _prepare_for_cross_validation(self, corr, clf):
    """Prepare data for voxelwise cross validation.

    If the classifier is a precomputed-kernel SVM, compute one kernel
    (Gram) matrix per voxel from the correlation vectors; otherwise
    the correlation data is passed through unchanged.

    Parameters
    ----------
    corr : 3D array, in shape [num_processed_voxels, num_epochs, num_voxels]
        Normalized correlation values.
    clf : classifier
        The classifier that will consume the data.

    Returns
    -------
    3D array
        Either per-voxel kernel matrices of shape
        [num_processed_voxels, num_epochs, num_epochs], or corr
        itself.
    """
    time1 = time.time()
    (num_processed_voxels, num_epochs, _) = corr.shape
    if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed':
        kernel_matrices = np.zeros((num_processed_voxels,
                                    num_epochs, num_epochs),
                                   np.float32, order='C')
        for i in range(num_processed_voxels):
            blas.compute_kernel_matrix('L', 'T',
                                       num_epochs, self.num_voxels2,
                                       1.0, corr,
                                       i, self.num_voxels2,
                                       0.0, kernel_matrices[i, :, :],
                                       num_epochs)
            # scale down kernel values with large magnitudes
            # (more than 2 digits) to keep SVM training stable
            num_digits = len(str(int(kernel_matrices[i, 0, 0])))
            if num_digits > 2:
                proportion = 10 ** (2 - num_digits)
                kernel_matrices[i, :, :] *= proportion
        data = kernel_matrices
    else:
        data = corr
    time2 = time.time()
    logger.debug(
        'cross validation data preparation takes %.2f s' %
        (time2 - time1))
    return data
Prepare data for voxelwise cross validation .
15,654
def _do_cross_validation(self, clf, data, task):
    """Run voxelwise cross validation based on correlation vectors.

    For a precomputed-kernel SVM with multiprocessing enabled, voxels
    are scored in parallel across processes; otherwise sequentially.

    Parameters
    ----------
    clf : classifier
        Scikit-learn style classifier.
    data : 3D array
        Per-voxel data (kernel matrices or correlation vectors).
    task : tuple of (int, int)
        (start voxel id, number of voxels) assigned to this worker.

    Returns
    -------
    list of (int, float)
        (voxel id, mean cross-validation accuracy) per voxel.
    """
    time1 = time.time()
    if isinstance(clf, sklearn.svm.SVC) \
            and clf.kernel == 'precomputed' \
            and self.use_multiprocessing:
        inlist = [(clf, i + task[0], self.num_folds,
                   data[i, :, :], self.labels)
                  for i in range(task[1])]
        with multiprocessing.Pool(self.process_num) as pool:
            results = list(
                pool.starmap(_cross_validation_for_one_voxel, inlist))
    else:
        results = []
        for i in range(task[1]):
            result = _cross_validation_for_one_voxel(
                clf, i + task[0], self.num_folds,
                data[i, :, :], self.labels)
            results.append(result)
    time2 = time.time()
    logger.debug(
        'cross validation for %d voxels, takes %.2f s' %
        (task[1], (time2 - time1)))
    return results
Run voxelwise cross validation based on correlation vectors .
15,655
def _voxel_scoring(self, task, clf):
    """The voxel selection process done in the worker node.

    Computes correlations for the assigned voxel range, normalizes
    them (in C++), prepares the data for the classifier, and runs
    voxelwise cross-validation.

    Parameters
    ----------
    task : tuple of (int, int)
        (start voxel id, number of voxels) assigned to this worker.
    clf : classifier
        Scikit-learn style classifier.

    Returns
    -------
    list of (int, float)
        (voxel id, accuracy) results for this task.
    """
    time1 = time.time()
    # correlation computation
    corr = self._correlation_computation(task)
    # normalization
    time3 = time.time()
    fcma_extension.normalization(corr, self.epochs_per_subj)
    time4 = time.time()
    logger.debug(
        'within-subject normalization for %d voxels '
        'using C++, takes %.2f s' %
        (task[1], (time4 - time3)))
    # cross validation
    data = self._prepare_for_cross_validation(corr, clf)
    if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed':
        # kernel matrices have replaced corr; free the large buffer
        del corr
    results = self._do_cross_validation(clf, data, task)
    time2 = time.time()
    logger.info(
        'in rank %d, task %d takes %.2f s' %
        (MPI.COMM_WORLD.Get_rank(),
         (int(task[0] / self.voxel_unit)), (time2 - time1)))
    return results
The voxel selection process done in the worker node .
15,656
def fit(self, X, y, Z):
    """Compute the Semi-Supervised Shared Response Model.

    Parameters
    ----------
    X : list of 2D arrays, element i has shape=[voxels_i, n_align]
        Alignment data per subject.
    y : list of arrays, element i has shape=[samples_i]
        Labels for the classification data per subject.
    Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
        Classification (labeled) data per subject.

    Returns
    -------
    self

    Raises
    ------
    ValueError
        If hyperparameters are out of range or the inputs are
        inconsistent in subjects, samples or voxels.
    """
    logger.info('Starting SS-SRM')
    # hyperparameter sanity checks
    if 0.0 >= self.alpha or self.alpha >= 1.0:
        raise ValueError("Alpha parameter should be in range (0.0, 1.0)")
    if 0.0 >= self.gamma:
        raise ValueError("Gamma parameter should be positive.")
    # need more than one subject, consistent across the three inputs
    if len(X) <= 1 or len(y) <= 1 or len(Z) <= 1:
        raise ValueError("There are not enough subjects in the input "
                         "data to train the model.")
    if not (len(X) == len(y)) or not (len(X) == len(Z)):
        raise ValueError("Different number of subjects in data.")
    if X[0].shape[1] < self.features:
        raise ValueError(
            "There are not enough samples to train the model with "
            "{0:d} features.".format(self.features))
    # per-subject consistency checks
    number_trs = X[0].shape[1]
    number_subjects = len(X)
    for subject in range(number_subjects):
        assert_all_finite(X[subject])
        assert_all_finite(Z[subject])
        if X[subject].shape[1] != number_trs:
            raise ValueError("Different number of alignment samples "
                             "between subjects.")
        if X[subject].shape[0] != Z[subject].shape[0]:
            raise ValueError("Different number of voxels between alignment"
                             " and classification data (subject {0:d})"
                             ".".format(subject))
        if Z[subject].shape[1] != y[subject].size:
            raise ValueError("Different number of samples and labels in "
                             "subject {0:d}.".format(subject))
    # map labels into [0, n_classes-1]
    new_y = self._init_classes(y)
    # run the block-coordinate descent algorithm
    self.w_, self.s_, self.theta_, self.bias_ = \
        self._sssrm(X, Z, new_y)
    return self
Compute the Semi - Supervised Shared Response Model
15,657
def predict(self, X):
    """Classify the output for given data.

    Parameters
    ----------
    X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
        fMRI data of each subject to classify.

    Returns
    -------
    p : list of arrays, element i has shape=[samples_i]
        Predicted class labels per subject.

    Raises
    ------
    NotFittedError
        If the model has not been fitted yet.
    ValueError
        If the number of subjects differs from the fitted model.
    """
    if not hasattr(self, 'w_'):
        raise NotFittedError("The model fit has not been run yet.")
    if len(X) != len(self.w_):
        raise ValueError("The number of subjects does not match the one"
                         " in the model.")
    X_shared = self.transform(X)
    p = []
    for data in X_shared:
        # numerically stable softmax over classes for every sample
        sumexp, _, exponents = utils.sumexp_stable(
            self.theta_.T.dot(data) + self.bias_)
        posterior = exponents / sumexp[np.newaxis, :]
        p.append(self.classes_[posterior.argmax(axis=0)])
    return p
Classify the output for given data
15,658
def _sssrm(self, data_align, data_sup, labels):
    """Block-Coordinate Descent algorithm for fitting SS-SRM.

    Alternates updates of the subject mappings W, the shared response
    S, and the MLR classifier (theta, bias).

    Parameters
    ----------
    data_align : list of 2D arrays
        Alignment data per subject.
    data_sup : list of 2D arrays
        Labeled data per subject.
    labels : list of arrays
        Labels mapped into [0, n_classes-1].

    Returns
    -------
    tuple (w, s, theta, bias)
        The fitted mappings, shared response and classifier.
    """
    classes = self.classes_.size
    # Initialization: one RNG per subject so the initial orthogonal
    # transforms are reproducible
    self.random_state_ = np.random.RandomState(self.rand_seed)
    random_states = [
        np.random.RandomState(self.random_state_.randint(2 ** 32))
        for i in range(len(data_align))]
    w, _ = srm._init_w_transforms(data_align, self.features,
                                  random_states)
    s = SSSRM._compute_shared_response(data_align, w)
    theta, bias = self._update_classifier(data_sup, labels, w, classes)
    # log the initial objective value
    if logger.isEnabledFor(logging.INFO):
        objective = self._objective_function(data_align, data_sup,
                                             labels, w, s, theta, bias)
        logger.info('Objective function %f' % objective)
    # Main loop: update Wi, then S, then the classifier
    for iteration in range(self.n_iter):
        logger.info('Iteration %d' % (iteration + 1))
        w = self._update_w(data_align, data_sup, labels, w, s,
                           theta, bias)
        if logger.isEnabledFor(logging.INFO):
            objective = self._objective_function(data_align, data_sup,
                                                 labels, w, s,
                                                 theta, bias)
            logger.info('Objective function after updating Wi %f'
                        % objective)
        s = SSSRM._compute_shared_response(data_align, w)
        if logger.isEnabledFor(logging.INFO):
            objective = self._objective_function(data_align, data_sup,
                                                 labels, w, s,
                                                 theta, bias)
            logger.info('Objective function after updating S %f'
                        % objective)
        theta, bias = self._update_classifier(data_sup, labels, w,
                                              classes)
        if logger.isEnabledFor(logging.INFO):
            objective = self._objective_function(data_align, data_sup,
                                                 labels, w, s,
                                                 theta, bias)
            logger.info('Objective function after updating MLR %f'
                        % objective)
    return w, s, theta, bias
Block - Coordinate Descent algorithm for fitting SS - SRM .
15,659
def _update_classifier(self, data, labels, w, classes):
    """Update the classifier parameters theta and bias.

    Projects the labeled data into the shared space, then minimizes a
    regularized multinomial logistic-regression loss with Riemannian
    conjugate gradient (pymanopt) over a product of Euclidean
    manifolds.

    Parameters
    ----------
    data : list of 2D arrays
        Labeled data per subject.
    labels : list of arrays
        Labels per subject.
    w : list of 2D arrays
        Current subject mappings to the shared space.
    classes : int
        Number of classes.

    Returns
    -------
    theta : 2D array, shape=[features, classes]
        Classifier weights.
    bias : array, shape=[classes, 1]
        Classifier biases.
    """
    # stack all subjects' labeled data in the shared space
    data_stacked, labels_stacked, weights = \
        SSSRM._stack_list(data, labels, w)
    features = w[0].shape[1]
    total_samples = weights.size
    data_th = S.shared(data_stacked.astype(theano.config.floatX))
    val_ = S.shared(labels_stacked)
    total_samples_S = S.shared(total_samples)
    theta_th = T.matrix(name='theta', dtype=theano.config.floatX)
    bias_th = T.col(name='bias', dtype=theano.config.floatX)
    constf2 = S.shared(self.alpha / self.gamma, allow_downcast=True)
    weights_th = S.shared(weights)
    # weighted negative log-likelihood of the MLR plus an L2
    # penalty on theta
    log_p_y_given_x = T.log(
        T.nnet.softmax((theta_th.T.dot(data_th.T)).T + bias_th.T))
    f = -constf2 * T.sum(
        (log_p_y_given_x[T.arange(total_samples_S), val_])
        / weights_th) + 0.5 * T.sum(theta_th ** 2)
    manifold = Product((Euclidean(features, classes),
                        Euclidean(classes, 1)))
    problem = Problem(manifold=manifold, cost=f,
                      arg=[theta_th, bias_th], verbosity=0)
    solver = ConjugateGradient(mingradnorm=1e-6)
    solution = solver.solve(problem)
    theta = solution[0]
    bias = solution[1]
    # release the theano shared variables and solver state
    del constf2
    del theta_th
    del bias_th
    del data_th
    del val_
    del solver
    del solution
    return theta, bias
Update the classifier parameters theta and bias
15,660
def _compute_shared_response ( data , w ) : s = np . zeros ( ( w [ 0 ] . shape [ 1 ] , data [ 0 ] . shape [ 1 ] ) ) for m in range ( len ( w ) ) : s = s + w [ m ] . T . dot ( data [ m ] ) s /= len ( w ) return s
Compute the shared response S
15,661
def _objective_function ( self , data_align , data_sup , labels , w , s , theta , bias ) : subjects = len ( data_align ) f_val = 0.0 for subject in range ( subjects ) : samples = data_align [ subject ] . shape [ 1 ] f_val += ( 1 - self . alpha ) * ( 0.5 / samples ) * np . linalg . norm ( data_align [ subject ] - w [ subject ] . dot ( s ) , 'fro' ) ** 2 f_val += self . _loss_lr ( data_sup , labels , w , theta , bias ) return f_val
Compute the objective function of the Semi - Supervised SRM
15,662
def _objective_function_subject ( self , data_align , data_sup , labels , w , s , theta , bias ) : f_val = 0.0 samples = data_align . shape [ 1 ] f_val += ( 1 - self . alpha ) * ( 0.5 / samples ) * np . linalg . norm ( data_align - w . dot ( s ) , 'fro' ) ** 2 f_val += self . _loss_lr_subject ( data_sup , labels , w , theta , bias ) return f_val
Compute the objective function for one subject .
15,663
def _stack_list(data, data_labels, w):
    """Construct stacked arrays from per-subject lists, skipping Nones.

    Parameters
    ----------
    data : list of 2D arrays (or None)
        Labeled data per subject, shape=[voxels_i, samples_i].
    data_labels : list of arrays (or None)
        Labels per subject.
    w : list of 2D arrays
        Per-subject mappings to the shared space.

    Returns
    -------
    data_stacked : 2D array
        All subjects' labeled data projected to the shared space and
        stacked, shape=[total_samples, features].
    labels_stacked : array
        All labels concatenated, shape=[total_samples].
    weights : array
        For each stacked sample, the number of samples contributed by
        its subject (used to weight the loss per subject).
    """
    labels_stacked = utils.concatenate_not_none(data_labels)
    weights = np.empty((labels_stacked.size,))
    data_shared = [None] * len(data)
    offset = 0
    for idx, subject_data in enumerate(data):
        if subject_data is None:
            continue
        n_samples = subject_data.shape[1]
        weights[offset:offset + n_samples] = n_samples
        data_shared[idx] = w[idx].T.dot(subject_data)
        offset += n_samples
    data_stacked = utils.concatenate_not_none(data_shared, axis=1).T
    return data_stacked, labels_stacked, weights
Construct a numpy array by stacking arrays in a list
15,664
def _singlenode_searchlight(l, msk, mysl_rad, bcast_var, extra_params):
    """Run the searchlight function on one block of data.

    Iterates over every interior voxel of the block; when the voxel
    is active in the mask (and enough voxels inside the searchlight
    shape are active), calls voxel_fn on the cube of data around it.

    Parameters
    ----------
    l : list of 4D arrays
        Block data per subject.
    msk : 3D bool array
        Block mask (includes the sl_rad halo).
    mysl_rad : int
        Searchlight radius.
    bcast_var : object
        User data broadcast to all ranks.
    extra_params : tuple
        (voxel_fn, shape_mask, min_active_voxels_proportion).

    Returns
    -------
    3D object array
        voxel_fn outputs for active voxels, None elsewhere; the halo
        is excluded.
    """
    voxel_fn = extra_params[0]
    shape_mask = extra_params[1]
    min_active_voxels_proportion = extra_params[2]
    # trim the halo so outmat covers only the block interior
    outmat = np.empty(msk.shape, dtype=np.object)[mysl_rad:-mysl_rad,
                                                  mysl_rad:-mysl_rad,
                                                  mysl_rad:-mysl_rad]
    for i in range(0, outmat.shape[0]):
        for j in range(0, outmat.shape[1]):
            for k in range(0, outmat.shape[2]):
                if msk[i + mysl_rad, j + mysl_rad, k + mysl_rad]:
                    searchlight_slice = np.s_[
                        i:i + 2 * mysl_rad + 1,
                        j:j + 2 * mysl_rad + 1,
                        k:k + 2 * mysl_rad + 1]
                    # restrict the searchlight to the user shape
                    voxel_fn_mask = msk[searchlight_slice] * shape_mask
                    # skip mostly-empty searchlights if a minimum
                    # active-voxel proportion was requested
                    if (min_active_voxels_proportion == 0
                        or np.count_nonzero(voxel_fn_mask)
                            / voxel_fn_mask.size
                            > min_active_voxels_proportion):
                        outmat[i, j, k] = voxel_fn(
                            [ll[searchlight_slice] for ll in l],
                            msk[searchlight_slice] * shape_mask,
                            mysl_rad,
                            bcast_var)
    return outmat
Run searchlight function on block data in parallel .
15,665
def _get_ownership ( self , data ) : rank = self . comm . rank B = [ ( rank , idx ) for ( idx , c ) in enumerate ( data ) if c is not None ] C = self . comm . allreduce ( B ) ownership = [ None ] * len ( data ) for c in C : ownership [ c [ 1 ] ] = c [ 0 ] return ownership
Determine on which rank each subject currently resides
15,666
def _get_blocks ( self , mask ) : blocks = [ ] outerblk = self . max_blk_edge + 2 * self . sl_rad for i in range ( 0 , mask . shape [ 0 ] , self . max_blk_edge ) : for j in range ( 0 , mask . shape [ 1 ] , self . max_blk_edge ) : for k in range ( 0 , mask . shape [ 2 ] , self . max_blk_edge ) : block_shape = mask [ i : i + outerblk , j : j + outerblk , k : k + outerblk ] . shape if np . any ( mask [ i + self . sl_rad : i + block_shape [ 0 ] - self . sl_rad , j + self . sl_rad : j + block_shape [ 1 ] - self . sl_rad , k + self . sl_rad : k + block_shape [ 2 ] - self . sl_rad ] ) : blocks . append ( ( ( i , j , k ) , block_shape ) ) return blocks
Divide the volume into a set of blocks
15,667
def _get_block_data ( self , mat , block ) : ( pt , sz ) = block if len ( mat . shape ) == 3 : return mat [ pt [ 0 ] : pt [ 0 ] + sz [ 0 ] , pt [ 1 ] : pt [ 1 ] + sz [ 1 ] , pt [ 2 ] : pt [ 2 ] + sz [ 2 ] ] . copy ( ) elif len ( mat . shape ) == 4 : return mat [ pt [ 0 ] : pt [ 0 ] + sz [ 0 ] , pt [ 1 ] : pt [ 1 ] + sz [ 1 ] , pt [ 2 ] : pt [ 2 ] + sz [ 2 ] , : ] . copy ( )
Retrieve a block from a 3D or 4D volume
15,668
def _split_volume ( self , mat , blocks ) : return [ self . _get_block_data ( mat , block ) for block in blocks ]
Convert a volume into a list of block data
15,669
def _scatter_list(self, data, owner):
    """Distribute a list from the owner rank to all ranks cyclically.

    The owner scatters successive windows of ``size`` elements; after
    all windows are sent, rank r holds elements r, r+size, r+2*size,
    ... of the original list.

    Parameters
    ----------
    data : list or None
        The list to distribute (only meaningful on the owner rank).
    owner : int
        Rank that owns the data.

    Returns
    -------
    list
        This rank's share of the elements.
    """
    rank = self.comm.rank
    size = self.comm.size
    subject_submatrices = []
    # every rank learns how many elements will be scattered
    nblocks = self.comm.bcast(
        len(data) if rank == owner else None, root=owner)
    for idx in range(0, nblocks, size):
        padded = None
        extra = max(0, idx + size - nblocks)
        if data is not None:
            padded = data[idx:idx + size]
            if extra > 0:
                # pad the last window so scatter always receives
                # exactly `size` items
                padded = padded + [None] * extra
        mytrans = self.comm.scatter(padded, root=owner)
        if mytrans is not None:
            subject_submatrices += [mytrans]
    return subject_submatrices
Distribute a list from one rank to other ranks in a cyclic manner
15,670
def distribute(self, subjects, mask):
    """Distribute data to MPI ranks.

    Splits the volume into blocks, then scatters blocks of the mask
    from rank 0 and blocks of each subject's data from whichever rank
    owns that subject.

    Parameters
    ----------
    subjects : list of 4D arrays or None
        Per-subject data; None when the subject lives on another
        rank.
    mask : 3D bool array
        Mask of active voxels.

    Raises
    ------
    ValueError
        If mask is not 3D or a subject's data is not 4D.

    Returns
    -------
    None
    """
    if mask.ndim != 3:
        raise ValueError('mask should be a 3D array')
    for (idx, subj) in enumerate(subjects):
        if subj is not None:
            if subj.ndim != 4:
                raise ValueError('subjects[{}] must be 4D'.format(idx))
    self.mask = mask
    rank = self.comm.rank
    # which rank currently holds each subject
    ownership = self._get_ownership(subjects)
    # block layout is computed once on rank 0 and broadcast
    all_blocks = self._get_blocks(mask) if rank == 0 else None
    all_blocks = self.comm.bcast(all_blocks)
    # split data and mask into blocks
    splitsubj = [self._split_volume(s, all_blocks)
                 if s is not None else None
                 for s in subjects]
    submasks = self._split_volume(mask, all_blocks)
    # scatter blocks to ranks cyclically
    self.blocks = self._scatter_list(all_blocks, 0)
    self.submasks = self._scatter_list(submasks, 0)
    self.subproblems = [self._scatter_list(s, ownership[s_idx])
                        for (s_idx, s) in enumerate(splitsubj)]
Distribute data to MPI ranks
15,671
def run_block_function(self, block_fn, extra_block_fn_params=None,
                       pool_size=None):
    """Perform a function for each block in a volume.

    Runs block_fn on every local block (in a process pool when more
    than one CPU is usable), gathers all ranks' outputs on rank 0,
    and stitches them back into a full volume.

    Parameters
    ----------
    block_fn : callable
        Function applied to each block; receives the block data,
        mask, radius, broadcast variable and extra parameters.
    extra_block_fn_params : tuple, optional
        Additional parameters forwarded to block_fn.
    pool_size : int, optional
        Cap on the number of worker processes; defaults to the
        number of usable CPUs.

    Returns
    -------
    3D object array
        Assembled block outputs (complete only on rank 0).
    """
    rank = self.comm.rank
    results = []
    usable_cpus = usable_cpu_count()
    if pool_size is None:
        processes = usable_cpus
    else:
        processes = min(pool_size, usable_cpus)
    if processes > 1:
        with Pool(processes) as pool:
            for idx, block in enumerate(self.blocks):
                result = pool.apply_async(
                    block_fn,
                    ([subproblem[idx]
                      for subproblem in self.subproblems],
                     self.submasks[idx],
                     self.sl_rad,
                     self.bcast_var,
                     extra_block_fn_params))
                results.append((block[0], result))
            local_outputs = [(result[0], result[1].get())
                             for result in results]
    else:
        # only one usable CPU: run the blocks in this process
        for idx, block in enumerate(self.blocks):
            subprob_list = [subproblem[idx]
                            for subproblem in self.subproblems]
            result = block_fn(
                subprob_list,
                self.submasks[idx],
                self.sl_rad,
                self.bcast_var,
                extra_block_fn_params)
            results.append((block[0], result))
        local_outputs = [(result[0], result[1]) for result in results]
    # collect every rank's results on rank 0
    global_outputs = self.comm.gather(local_outputs)
    # rank 0 stitches block outputs back into the full volume,
    # offsetting by the sl_rad halo on every side
    outmat = np.empty(self.mask.shape, dtype=np.object)
    if rank == 0:
        for go_rank in global_outputs:
            for (pt, mat) in go_rank:
                coords = np.s_[
                    pt[0] + self.sl_rad:
                    pt[0] + self.sl_rad + mat.shape[0],
                    pt[1] + self.sl_rad:
                    pt[1] + self.sl_rad + mat.shape[1],
                    pt[2] + self.sl_rad:
                    pt[2] + self.sl_rad + mat.shape[2]]
                outmat[coords] = mat
    return outmat
Perform a function for each block in a volume .
15,672
def run_searchlight(self, voxel_fn, pool_size=None):
    """Apply voxel_fn at every active mask voxel via the block engine.

    The mask passed to voxel_fn is further restricted by the
    user-provided searchlight shape.

    Parameters
    ----------
    voxel_fn : callable
        Function run on the data surrounding each active voxel.
    pool_size : int, optional
        Cap on the number of worker processes per rank.

    Returns
    -------
    3D object array
        voxel_fn outputs per voxel (complete only on rank 0).
    """
    extra = (voxel_fn, self.shape, self.min_active_voxels_proportion)
    return self.run_block_function(_singlenode_searchlight,
                                   extra,
                                   pool_size)
Perform a function at each voxel which is set to True in the user - provided mask . The mask passed to the searchlight function will be further masked by the user - provided searchlight shape .
15,673
def _normalize_for_correlation ( data , axis , return_nans = False ) : shape = data . shape data = zscore ( data , axis = axis , ddof = 0 ) if not return_nans : data = np . nan_to_num ( data ) data = data / math . sqrt ( shape [ axis ] ) return data
normalize the data before computing correlation
15,674
def compute_correlation(matrix1, matrix2, return_nans=False):
    """Compute correlation between two sets of variables.

    Correlates the rows of matrix1 with the rows of matrix2: both are
    z-scored and scaled so that a single matrix multiplication (done
    via the BLAS sgemm wrapper) yields Pearson correlations.

    Parameters
    ----------
    matrix1 : 2D array, shape=[r1, d]
        First set of variables (one per row).
    matrix2 : 2D array, shape=[r2, d]
        Second set; must have the same number of columns as matrix1.
    return_nans : bool, default False
        If False, rows with zero variance produce 0 instead of NaN.

    Returns
    -------
    corr_data : 2D array, shape=[r1, r2], float32
        corr_data[i, j] is the correlation of matrix1[i] with
        matrix2[j].

    Raises
    ------
    ValueError
        If the column counts of the two matrices differ.
    """
    matrix1 = matrix1.astype(np.float32)
    matrix2 = matrix2.astype(np.float32)
    [r1, d1] = matrix1.shape
    [r2, d2] = matrix2.shape
    if d1 != d2:
        raise ValueError('Dimension discrepancy')
    # normalize so the multiplication below yields correlations
    matrix1 = _normalize_for_correlation(matrix1, 1,
                                         return_nans=return_nans)
    matrix2 = _normalize_for_correlation(matrix2, 1,
                                         return_nans=return_nans)
    corr_data = np.empty((r1, r2), dtype=np.float32, order='C')
    # computes matrix1 . matrix2^T through the column-major BLAS
    # wrapper, writing into corr_data
    blas.compute_single_matrix_multiplication('T', 'N',
                                              r2, r1, d1,
                                              1.0,
                                              matrix2, d2,
                                              matrix1, d1,
                                              0.0,
                                              corr_data,
                                              r2)
    return corr_data
compute correlation between two sets of variables
15,675
def _zscore ( a ) : assert a . ndim > 1 , 'a must have more than one dimensions' zscore = scipy . stats . zscore ( a , axis = 0 ) zscore [ : , np . logical_not ( np . all ( np . isfinite ( zscore ) , axis = 0 ) ) ] = 0 return zscore
Calculating z - score of data on the first axis . If the numbers in any column are all equal scipy . stats . zscore will return NaN for this column . We shall correct them all to be zeros .
15,676
def score(self, X, design, scan_onsets=None):
    """Log likelihood of new data under the fitted model.

    Evaluates both the fitted model and a null model (no task
    response) on new data of the same participant, marginalizing the
    unknown nuisance time series that share the fitted spatial
    pattern beta0_.

    Parameters
    ----------
    X : 2D array, shape=[time_points, voxels]
        New fMRI data of the same subject (same voxels as training).
    design : 2D array
        Design matrix with the same conditions (columns) as used in
        fitting.
    scan_onsets : 1D array, optional
        Onset index of each scan; if given it must include 0.

    Returns
    -------
    ll : float
        Log likelihood under the fitted model.
    ll_null : float
        Log likelihood under the null model.
    """
    assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \
        'The shape of X is not consistent with the shape of data ' \
        'used in the fitting step. They should have the same number ' \
        'of voxels'
    assert scan_onsets is None or (scan_onsets.ndim == 1
                                   and 0 in scan_onsets), \
        'scan_onsets should either be None or an array of indices ' \
        'If it is given, it should include at least 0'
    if scan_onsets is None:
        # treat all of X as a single scan
        scan_onsets = np.array([0], dtype=int)
    else:
        scan_onsets = np.int32(scan_onsets)
    ll = self._score(Y=X, design=design, beta=self.beta_,
                     scan_onsets=scan_onsets, beta0=self.beta0_,
                     rho_e=self.rho_, sigma_e=self.sigma_,
                     rho_X0=self._rho_X0_,
                     sigma2_X0=self._sigma2_X0_)
    # null model: design=None, so no task response is subtracted
    # before the marginalization
    ll_null = self._score(Y=X, design=None, beta=None,
                          scan_onsets=scan_onsets, beta0=self.beta0_,
                          rho_e=self.rho_, sigma_e=self.sigma_,
                          rho_X0=self._rho_X0_,
                          sigma2_X0=self._sigma2_X0_)
    return ll, ll_null
Use the model and parameters estimated by fit function from some data of a participant to evaluate the log likelihood of some new data of the same participant . Design matrix of the same set of experimental conditions in the testing data should be provided with each column corresponding to the same condition as that column in the design matrix of the training data . Unknown nuisance time series will be marginalized assuming they follow the same spatial pattern as in the training data . The hypothetical response captured by the design matrix will be subtracted from data before the marginalization when evaluating the log likelihood . For null model nothing will be subtracted before marginalization .
15,677
def _prepare_data_XY ( self , X , Y , D , F ) : XTY , XTDY , XTFY = self . _make_templates ( D , F , X , Y ) YTY_diag = np . sum ( Y * Y , axis = 0 ) YTDY_diag = np . sum ( Y * np . dot ( D , Y ) , axis = 0 ) YTFY_diag = np . sum ( Y * np . dot ( F , Y ) , axis = 0 ) XTX , XTDX , XTFX = self . _make_templates ( D , F , X , X ) return XTY , XTDY , XTFY , YTY_diag , YTDY_diag , YTFY_diag , XTX , XTDX , XTFX
Prepares different forms of products of design matrix X and data Y or between themselves . These products are re - used a lot during fitting . So we pre - calculate them . Because these are reused it is in principle possible to update the fitting as new data come in by just incrementally adding the products of new data and their corresponding parts of design matrix to these pre - calculated terms .
15,678
def _prepare_data_XYX0(self, X, Y, X_base, X_res, D, F, run_TRs,
                       no_DC=False):
    """Prepare products involving the nuisance regressors X0.

    Builds per-run DC regressors, merges them into the baseline
    regressors, appends any residual regressors, and pre-computes
    the cross products re-used during fitting. no_DC suppresses
    inserting DC regressors into the nuisance set; it only takes
    effect when X_base is not None.

    Parameters
    ----------
    X : 2D array
        Task design matrix.
    Y : 2D array
        Data.
    X_base : 2D array or None
        User-provided nuisance regressors.
    X_res : 2D array or None
        Residual nuisance regressors re-estimated during fitting.
    D, F : 2D arrays
        Temporal lag-1 structure matrices.
    run_TRs : array
        Number of TRs per run (used to build DC regressors).
    no_DC : bool, default False
        Do not insert DC regressors into the nuisance regressors.

    Returns
    -------
    tuple
        Cross products, the assembled X0 and X_base, the number of
        nuisance regressors n_X0, and the DC column indices idx_DC.

    Raises
    ------
    ValueError
        If the task design matrix already contains baseline
        (DC-spanned) time series.
    """
    X_DC = self._gen_X_DC(run_TRs)
    # A design column perfectly explained by the DC regressors
    # (zero lstsq residual) means the design includes a baseline.
    # NOTE(review): np.linalg.lstsq is called without rcond, which
    # warns on newer numpy — confirm the intended rcond behavior
    # before changing it.
    reg_sol = np.linalg.lstsq(X_DC, X)
    if np.any(np.isclose(reg_sol[1], 0)):
        raise ValueError('Your design matrix appears to have '
                         'included baseline time series.'
                         'Either remove them, or move them to'
                         ' nuisance regressors.')
    X_DC, X_base, idx_DC = self._merge_DC_to_base(X_DC, X_base,
                                                  no_DC)
    if X_res is None:
        X0 = X_base
    else:
        X0 = np.concatenate((X_base, X_res), axis=1)
    n_X0 = X0.shape[1]
    X0TX0, X0TDX0, X0TFX0 = self._make_templates(D, F, X0, X0)
    XTX0, XTDX0, XTFX0 = self._make_templates(D, F, X, X0)
    X0TY, X0TDY, X0TFY = self._make_templates(D, F, X0, Y)
    return X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
        X0TY, X0TDY, X0TFY, X0, X_base, n_X0, idx_DC
Prepares different forms of products between design matrix X or data Y or nuisance regressors X0 . These products are re - used a lot during fitting . So we pre - calculate them . no_DC means not inserting regressors for DC components into nuisance regressor . It will only take effect if X_base is not None .
15,679
def _merge_DC_to_base(self, X_DC, X_base, no_DC):
    """Merge DC-component regressors into the baseline regressors.

    "Baseline" here means fixed nuisance regressors not updated
    during fitting (DC components plus any user-provided nuisance
    regressors). When inserted, the DC columns occupy the first few
    columns of X_base.

    Parameters
    ----------
    X_DC : 2D array
        One DC (constant) regressor per run.
    X_base : 2D array or None
        User-provided nuisance regressors.
    no_DC : bool
        If True, do not insert DC regressors.

    Returns
    -------
    X_DC : 2D array
        Unchanged DC regressors.
    X_base : 2D array
        Baseline regressors, possibly with DC columns prepended.
    idx_DC : 1D array
        Column indices of the DC components within X_base.
    """
    if X_base is not None:
        # check whether X_base already spans the DC components
        reg_sol = np.linalg.lstsq(X_DC, X_base)
        if not no_DC:
            if not np.any(np.isclose(reg_sol[1], 0)):
                # no column of X_base is spanned by DC: prepend them
                X_base = np.concatenate((X_DC, X_base), axis=1)
                idx_DC = np.arange(0, X_DC.shape[1])
            else:
                logger.warning('Provided regressors for uninteresting '
                               'time series already include baseline. '
                               'No additional baseline is inserted.')
                idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
        else:
            idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
    else:
        # no user-provided nuisance regressors: use DC only
        X_base = X_DC
        idx_DC = np.arange(0, X_base.shape[1])
        logger.info('You did not provide time series of no interest '
                    'such as DC component. Trivial regressors of'
                    ' DC component are included for further modeling.'
                    ' The final covariance matrix won' 't '
                    'reflect these components.')
    return X_DC, X_base, idx_DC
Merge DC components X_DC to the baseline time series X_base ( By baseline this means any fixed nuisance regressors not updated during fitting including DC components and any nuisance regressors provided by the user . X_DC is always in the first few columns of X_base .
15,680
def _build_index_param ( self , n_l , n_V , n_smooth ) : idx_param_sing = { 'Cholesky' : np . arange ( n_l ) , 'a1' : n_l } idx_param_fitU = { 'Cholesky' : np . arange ( n_l ) , 'a1' : np . arange ( n_l , n_l + n_V ) } idx_param_fitV = { 'log_SNR2' : np . arange ( n_V - 1 ) , 'c_space' : n_V - 1 , 'c_inten' : n_V , 'c_both' : np . arange ( n_V - 1 , n_V - 1 + n_smooth ) } return idx_param_sing , idx_param_fitU , idx_param_fitV
Build dictionaries to retrieve each parameter from the combined parameters .
15,681
def _score(self, Y, design, beta, scan_onsets, beta0, rho_e, sigma_e,
           rho_X0, sigma2_X0):
    """Compute the log likelihood of data Y given fitted parameters.

    The hypothesized task response (design.dot(beta)) is subtracted
    when a design is provided; nuisance activity entering through
    beta0 is marginalized with a forward pass per scan, treating X0
    as an AR(1) latent process. No additional fitting is performed.

    Parameters
    ----------
    Y : 2D array, shape=[time_points, voxels]
        Data to evaluate.
    design : 2D array or None
        Design matrix; None evaluates the null model.
    beta : 2D array or None
        Posterior mean response amplitudes from fitting.
    scan_onsets : 1D int array
        Onset index of each scan.
    beta0 : 2D array
        Spatial pattern of nuisance time series from fitting.
    rho_e, sigma_e : arrays
        AR(1) coefficient and noise std of the residuals.
    rho_X0, sigma2_X0 : arrays
        AR(1) coefficient and innovation variance of the latent
        nuisance time series.

    Returns
    -------
    float
        Total log likelihood summed over scans.
    """
    logger.info('Estimating cross-validated score for new data.')
    n_T = Y.shape[0]
    if design is not None:
        # subtract the hypothesized task-evoked response
        Y = Y - np.dot(design, beta)
    # AR(1) parameters of the latent nuisance process
    T_X = np.diag(rho_X0)
    Var_X = sigma2_X0 / (1 - rho_X0 ** 2)  # stationary variance
    Var_dX = sigma2_X0                     # innovation variance
    sigma2_e = sigma_e ** 2
    # drop a trailing onset equal to n_T, if present
    scan_onsets = np.setdiff1d(scan_onsets, n_T).astype(int)
    n_scan = scan_onsets.size
    total_log_p = 0
    for scan, onset in enumerate(scan_onsets):
        # this scan runs until the next onset (or the end of Y)
        if scan == n_scan - 1:
            offset = n_T
        else:
            offset = scan_onsets[scan + 1]
        _, _, _, log_p_data, _, _, _, _, _ = \
            self._forward_step(Y[onset:offset, :],
                               T_X, Var_X, Var_dX, rho_e, sigma2_e,
                               beta0)
        total_log_p += log_p_data
    return total_log_p
Given the data Y and the spatial pattern beta0 of nuisance time series return the cross - validated score of the data Y given all parameters of the subject estimated during the first step . It is assumed that the user has design matrix built for the data Y . Both beta and beta0 are posterior expectation estimated from training data with the estimated covariance matrix U and SNR serving as prior . We marginalize X0 instead of fitting it in this function because this function is for the purpose of evaluating model no new data . We should avoid doing any additional fitting when performing cross - validation . The hypothetic response to the task will be subtracted and the unknown nuisance activity which contributes to the data through beta0 will be marginalized .
15,682
def _backward_step(self, deltaY, deltaY_sigma2inv_rho_weightT,
                   sigma2_e, weight, mu, mu_Gamma_inv, Gamma_inv,
                   Lambda_0, Lambda_1, H):
    """Backward (smoothing) step of the HMM-style inference.

    Assumes both the hidden state and the noise have one-step
    dependence on their previous values. Refines the forward-pass
    posteriors by incorporating future observations, iterating from
    the last time point backwards.

    NOTE(review): parameter roles below are inferred from usage —
    confirm against the matching forward step.

    Parameters
    ----------
    deltaY : 2D array
        Innovations of the data per time point.
    deltaY_sigma2inv_rho_weightT : 2D array
        Precomputed deltaY-dependent correction terms.
    sigma2_e : array
        Noise variance per voxel.
    weight : 2D array
        Spatial weights mapping latent states to voxels.
    mu, mu_Gamma_inv, Gamma_inv : lists of arrays
        Forward-pass posterior means, precision-weighted means and
        precisions, one entry per time point.
    Lambda_0, Lambda_1, H : 2D arrays
        State-transition precision blocks.

    Returns
    -------
    mu_hat, mu_Gamma_inv_hat, Gamma_inv_hat : lists of arrays
        Smoothed posterior means, precision-weighted means and
        precisions per time point.
    """
    n_T = len(Gamma_inv)
    Gamma_inv_hat = [None] * n_T
    mu_Gamma_inv_hat = [None] * n_T
    mu_hat = [None] * n_T
    # the last time point's forward posterior is already smoothed
    mu_hat[-1] = mu[-1].copy()
    mu_Gamma_inv_hat[-1] = mu_Gamma_inv[-1].copy()
    Gamma_inv_hat[-1] = Gamma_inv[-1].copy()
    for t in np.arange(n_T - 2, -1, -1):
        tmp = np.linalg.solve(
            Gamma_inv_hat[t + 1] - Gamma_inv[t + 1] + Lambda_1, H)
        Gamma_inv_hat[t] = Gamma_inv[t] + Lambda_0 \
            - np.dot(H.T, tmp)
        mu_Gamma_inv_hat[t] = mu_Gamma_inv[t] \
            - deltaY_sigma2inv_rho_weightT[t, :] + np.dot(
                mu_Gamma_inv_hat[t + 1] - mu_Gamma_inv[t + 1]
                + np.dot(deltaY[t, :] / sigma2_e, weight.T),
                tmp)
        mu_hat[t] = np.linalg.solve(Gamma_inv_hat[t],
                                    mu_Gamma_inv_hat[t])
    return mu_hat, mu_Gamma_inv_hat, Gamma_inv_hat
backward step for HMM assuming both the hidden state and noise have 1 - step dependence on the previous value .
15,683
def _set_SNR_grids(self):
    """Set grids and weights for numerical integration over SNR.

    Depending on self.SNR_prior, returns equally spaced grids with
    trapezoid weights ('unif'), bin-averaged grids of a log-normal
    prior ('lognorm'), grids from an exponential prior ('exp'), or a
    single grid point (any other value, i.e. fixed SNR).

    Returns
    -------
    SNR_grids : 1D array
        Grid locations of SNR for numerical integration.
    SNR_weights : 1D array
        Weight of each grid point, normalized to sum to 1.
    """
    if self.SNR_prior == 'unif':
        SNR_grids = np.linspace(0, 1, self.SNR_bins)
        # trapezoid rule: half weight on the two end points
        SNR_weights = np.ones(self.SNR_bins) / (self.SNR_bins - 1)
        SNR_weights[0] = SNR_weights[0] / 2.0
        SNR_weights[-1] = SNR_weights[-1] / 2.0
    elif self.SNR_prior == 'lognorm':
        dist = scipy.stats.lognorm
        # nested central intervals of the log-normal prior; their
        # unique boundaries split the support into SNR_bins bins of
        # equal probability mass
        alphas = np.arange(np.mod(self.SNR_bins, 2),
                           self.SNR_bins + 2, 2) / self.SNR_bins
        bounds = dist.interval(alphas, (self.logS_range,))
        bounds = np.unique(bounds)
        # each grid point is the conditional expectation of SNR
        # within its bin
        SNR_grids = np.zeros(self.SNR_bins)
        for i in np.arange(self.SNR_bins):
            SNR_grids[i] = dist.expect(
                lambda x: x, args=(self.logS_range,),
                lb=bounds[i], ub=bounds[i + 1]) * self.SNR_bins
        SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
    elif self.SNR_prior == 'exp':
        SNR_grids = self._bin_exp(self.SNR_bins)
        SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
    else:
        # fixed-SNR model: a single grid point
        SNR_grids = np.ones(1)
        SNR_weights = np.ones(1)
    SNR_weights = SNR_weights / np.sum(SNR_weights)
    return SNR_grids, SNR_weights
Set the grids and weights for SNR used in numerical integration of SNR parameters .
15,684
def _matrix_flattened_grid(self, X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX,
                           YTAcorrY_diag, XTAcorrY, X0TAY, XTAX0,
                           n_C, n_V, n_X0, n_grid):
    """Flatten the (SNR, rho) grid pair into a single leading dimension.

    Parameters SNR and rho are integrated numerically on 2-D discrete
    grids. This function generates matrices with only one dimension for
    those two parameters: each slice along that dimension corresponds to
    one combination of a discrete SNR value and a discrete rho value.
    Inputs indexed only by rho are tiled ``self.SNR_bins`` times; inputs
    that depend on SNR are scaled by the appropriate power of the SNR
    grid before flattening. ``n_grid`` must equal
    ``self.SNR_bins * (number of rho grid points)``.

    Returns
    -------
    Tuple of arrays, each with leading dimension ``n_grid``:
    half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, YTAcorrY_diag,
    sXTAcorrY, X0TAY, XTAX0.
    """
    # rho-only quantity: repeat across the SNR grid, then flatten.
    half_log_det_X0TAX0 = np.reshape(
        np.repeat(self._half_log_det(X0TAX0)[None, :],
                  self.SNR_bins, axis=0), n_grid)
    X0TAX0 = np.reshape(
        np.repeat(X0TAX0[None, :, :, :], self.SNR_bins, axis=0),
        (n_grid, n_X0, n_X0))
    X0TAX0_i = np.reshape(
        np.repeat(X0TAX0_i[None, :, :, :], self.SNR_bins, axis=0),
        (n_grid, n_X0, n_X0))
    # SNR-dependent quantity: scale by SNR^2 via broadcasting, flatten.
    s2XTAcorrX = np.reshape(
        SNR_grids[:, None, None, None] ** 2 * XTAcorrX,
        (n_grid, n_C, n_C))
    YTAcorrY_diag = np.reshape(
        np.repeat(YTAcorrY_diag[None, :, :], self.SNR_bins, axis=0),
        (n_grid, n_V))
    # Scaled by SNR to the first power.
    sXTAcorrY = np.reshape(
        SNR_grids[:, None, None, None] * XTAcorrY,
        (n_grid, n_C, n_V))
    X0TAY = np.reshape(
        np.repeat(X0TAY[None, :, :, :], self.SNR_bins, axis=0),
        (n_grid, n_X0, n_V))
    XTAX0 = np.reshape(
        np.repeat(XTAX0[None, :, :, :], self.SNR_bins, axis=0),
        (n_grid, n_C, n_X0))
    return half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, \
        YTAcorrY_diag, sXTAcorrY, X0TAY, XTAX0
We need to integrate parameters SNR and rho on 2 - d discrete grids . This function generates matrices which have only one dimension for these two parameters with each slice in that dimension corresponding to each combination of the discrete grids of SNR and discrete grids of rho .
15,685
def fit(self, X):
    """Compute the Robust Shared Response Model for a group of subjects.

    Parameters
    ----------
    X : list of 2D arrays, element i has shape [voxels_i, timepoints]
        fMRI data of one subject per element. Every subject must have
        the same number of timepoints.

    Returns
    -------
    self : the fitted estimator, with ``w_``, ``r_`` and ``s_`` set.

    Raises
    ------
    ValueError
        If the regularization is non-positive, there are fewer than two
        subjects, there are fewer timepoints than features, or subjects
        disagree on the number of timepoints.
    """
    logger.info('Starting RSRM')

    # --- Validate hyper-parameters and overall input shape -----------
    if self.lam <= 0.0:
        raise ValueError("Gamma parameter should be positive.")
    if len(X) <= 1:
        raise ValueError("There are not enough subjects in the input "
                         "data to train the model.")
    if X[0].shape[1] < self.features:
        raise ValueError(
            "There are not enough timepoints to train the model with "
            "{0:d} features.".format(self.features))

    # --- Per-subject checks: finite values and matching timepoints ---
    n_trs = X[0].shape[1]
    for data in X:
        assert_all_finite(data)
        if data.shape[1] != n_trs:
            raise ValueError("Different number of alignment timepoints "
                             "between subjects.")

    # Seeded RNG so repeated fits with the same seed are reproducible.
    self.random_state_ = np.random.RandomState(self.rand_seed)

    # Run block-coordinate descent and store the learned factors.
    self.w_, self.r_, self.s_ = self._rsrm(X)

    return self
Compute the Robust Shared Response Model
15,686
def transform(self, X):
    """Use the fitted model to map new data into the shared space.

    Parameters
    ----------
    X : list of 2D arrays (or None entries), element i has shape
        [voxels_i, timepoints]
        New data for each subject; a None entry skips that subject.

    Returns
    -------
    r : list of 2D arrays (None where input was None)
        Shared-space representation per subject.
    s : list of 2D arrays (None where input was None)
        Individual (sparse) component per subject.

    Raises
    ------
    NotFittedError
        If the model has not been fit yet.
    ValueError
        If the number of subjects differs from the fitted model.
    """
    if not hasattr(self, 'w_'):
        raise NotFittedError("The model fit has not been run yet.")
    if len(X) != len(self.w_):
        raise ValueError("The number of subjects does not match the one"
                         " in the model.")

    shared = [None] * len(X)
    individual = [None] * len(X)
    for idx, data in enumerate(X):
        # Skip subjects with no new data; their slots stay None.
        if data is not None:
            shared[idx], individual[idx] = \
                self._transform_new_data(data, idx)

    return shared, individual
Use the model to transform new data to Shared Response space
15,687
def _transform_new_data(self, X, subject):
    """Transform new data for one subject into the shared subspace.

    Alternates between projecting the (de-sparsified) data onto the
    subject's learned basis and re-estimating the sparse individual
    component by soft-thresholding, for ``self.n_iter`` iterations.

    Parameters
    ----------
    X : 2D array, shape [voxels, timepoints]
        New data for this subject.
    subject : int
        Index of the subject in the fitted model.

    Returns
    -------
    R : 2D array — shared-space representation of the data.
    S : 2D array — sparse individual component of the data.
    """
    individual = np.zeros_like(X)
    shared = None
    for _ in range(self.n_iter):
        # Project what is left after removing the individual term.
        shared = self.w_[subject].T.dot(X - individual)
        # Sparse residual of the data not explained by the projection.
        individual = self._shrink(X - self.w_[subject].dot(shared),
                                  self.lam)
    return shared, individual
Transform new data for a single subject by projecting to the shared subspace and computing the individual information .
15,688
def transform_subject(self, X):
    """Fit a mapping and individual term for a new subject using the
    shared response already learned by the model.

    Parameters
    ----------
    X : 2D array, shape [voxels, timepoints]
        fMRI data of the new subject; must have the same number of
        timepoints as the fitted shared response ``self.r_``.

    Returns
    -------
    w : 2D array
        Mapping of the new subject onto the shared space.
    s : 2D array
        Sparse individual component of the new subject.

    Raises
    ------
    NotFittedError
        If the model has not been fit yet.
    ValueError
        If the number of timepoints differs from the fitted model.
    """
    if not hasattr(self, 'w_'):
        raise NotFittedError("The model fit has not been run yet.")
    if X.shape[1] != self.r_.shape[1]:
        # BUG FIX: the original adjacent-string concatenation produced
        # "...does not match theone in the model." (missing space).
        raise ValueError("The number of timepoints(TRs) does not match "
                         "the one in the model.")

    # Alternate between updating the mapping and the sparse term.
    s = np.zeros_like(X)
    for _ in range(self.n_iter):
        w = self._update_transform_subject(X, s, self.r_)
        s = self._shrink(X - w.dot(self.r_), self.lam)

    return w, s
Transform a new subject using the existing model
15,689
def _rsrm(self, X):
    """Block-coordinate descent loop that fits the RSRM factors.

    Parameters
    ----------
    X : list of 2D arrays, element i has shape [voxels_i, timepoints]
        Training data, one array per subject.

    Returns
    -------
    W : list of 2D arrays — per-subject mappings to the shared space.
    R : 2D array — shared response across subjects.
    S : list of 2D arrays — per-subject sparse individual components.
    """
    n_subjects = len(X)
    voxel_counts = [x.shape[0] for x in X]
    n_trs = X[0].shape[1]
    k = self.features

    # Initialization: random-ish transforms, empty individual terms,
    # and the shared response implied by both.
    W = self._init_transforms(n_subjects, voxel_counts, k,
                              self.random_state_)
    S = self._init_individual(n_subjects, voxel_counts, n_trs)
    R = self._update_shared_response(X, S, W, k)

    if logger.isEnabledFor(logging.INFO):
        # Objective evaluation is not free, so guard it behind the
        # logging level.
        logger.info('Objective function %f'
                    % self._objective_function(X, W, R, S, self.lam))

    # Cyclically update W, S and R for a fixed number of iterations.
    for _ in range(self.n_iter):
        W = self._update_transforms(X, S, R)
        S = self._update_individual(X, W, R, self.lam)
        R = self._update_shared_response(X, S, W, k)
        if logger.isEnabledFor(logging.INFO):
            logger.info('Objective function %f'
                        % self._objective_function(X, W, R, S, self.lam))

    return W, R, S
Block - Coordinate Descent algorithm for fitting RSRM .
15,690
def _objective_function(X, W, R, S, gamma):
    """Evaluate the RSRM objective.

    Computes
    ``sum_i 0.5 * ||X_i - W_i R - S_i||_F^2 + gamma * ||S_i||_1``
    over all subjects.

    Parameters
    ----------
    X, W, S : lists over subjects of 2D arrays.
    R : 2D array — shared response.
    gamma : float — weight of the L1 penalty on the individual terms.

    Returns
    -------
    float — value of the objective.
    """
    total = 0.0
    for x, w, s in zip(X, W, S):
        residual = x - w.dot(R) - s
        total += 0.5 * np.sum(residual ** 2) + gamma * np.sum(np.abs(s))
    return total
Evaluate the objective function .
15,691
def _update_individual(X, W, R, gamma):
    """Update the individual components S_i.

    Each subject's S_i is the soft-thresholded residual of the data
    after removing the shared part W_i R.

    Parameters
    ----------
    X, W : lists over subjects of 2D arrays.
    R : 2D array — shared response.
    gamma : float — shrinkage threshold.

    Returns
    -------
    list of 2D arrays — updated S_i per subject.
    """
    return [RSRM._shrink(x - w.dot(R), gamma) for x, w in zip(X, W)]
Update the individual components S_i .
15,692
def _update_shared_response(X, S, W, features):
    """Update the shared response R.

    Averages, over subjects, each subject's data (minus its individual
    term) back-projected through its transform.

    Parameters
    ----------
    X, S, W : lists over subjects of 2D arrays.
    features : int — number of shared features (rows of R).

    Returns
    -------
    2D array, shape [features, timepoints] — updated shared response.
    """
    n_trs = X[0].shape[1]
    accumulator = np.zeros((features, n_trs))
    for x, s, w in zip(X, S, W):
        accumulator += w.T.dot(x - s)
    return accumulator / len(X)
Update the shared response R .
15,693
def _update_transforms(X, S, R):
    """Update the mappings W_i for every subject.

    Delegates the per-subject update to ``RSRM._update_transform_subject``
    using the current individual terms and shared response.

    Parameters
    ----------
    X, S : lists over subjects of 2D arrays.
    R : 2D array — shared response.

    Returns
    -------
    list of 2D arrays — updated W_i per subject.
    """
    return [RSRM._update_transform_subject(x, s, R)
            for x, s in zip(X, S)]
Updates the mappings W_i for each subject .
15,694
def _shrink(v, gamma):
    """Soft-shrinkage (soft-thresholding) of an array, in place.

    Entries with |v| <= gamma become 0; entries above gamma are reduced
    by gamma and entries below -gamma are increased by gamma.

    Parameters
    ----------
    v : ndarray — values to shrink. NOTE: modified in place.
    gamma : float — shrinkage threshold.

    Returns
    -------
    ndarray — the same (mutated) array ``v``.
    """
    above = v > gamma
    below = v < -gamma
    v[above] -= gamma
    v[below] += gamma
    # Everything inside the [-gamma, gamma] band collapses to zero.
    v[~(above | below)] = 0.0
    return v
Soft - shrinkage of an array with parameter gamma .
15,695
def plot_confusion_matrix(cm, title="Confusion Matrix"):
    """Plot a confusion matrix for each subject in one shared figure.

    Parameters
    ----------
    cm : list of 2D square arrays
        One confusion matrix per subject; values are displayed with
        color limits fixed to [0, 1] (i.e. assumed normalized — TODO
        confirm with callers).
    title : str
        Title of the whole figure.
    """
    import matplotlib.pyplot as plt
    import math

    plt.figure()
    subjects = len(cm)
    # Lay the subjects out in a near-square grid of subplots.
    cols = math.ceil(math.sqrt(subjects))
    rows = math.ceil(subjects / cols)
    classes = cm[0].shape[0]
    for subject in range(subjects):
        plt.subplot(rows, cols, subject + 1)
        # BUG FIX: Colorbar.set_clim was removed in matplotlib >= 3.3;
        # fix the color limits on the image itself via vmin/vmax, which
        # the colorbar then inherits.
        plt.imshow(cm[subject], interpolation='nearest', cmap=plt.cm.bone,
                   vmin=0.0, vmax=1.0)
        plt.xticks(np.arange(classes), range(1, classes + 1))
        plt.yticks(np.arange(classes), range(1, classes + 1))
        plt.colorbar(ticks=[0.0, 1.0], shrink=0.6)
        plt.xlabel("Predicted")
        plt.ylabel("True label")
        plt.title("{0:d}".format(subject + 1))
    plt.suptitle(title)
    plt.tight_layout()
    plt.show()
Plots a confusion matrix for each subject
15,696
def mask_image(image: SpatialImage, mask: np.ndarray,
               data_type: type = None) -> np.ndarray:
    """Apply a boolean mask to an image, optionally casting its type.

    Parameters
    ----------
    image
        Image whose first three dimensions must match the mask shape.
    mask
        Boolean array selecting which voxels to keep.
    data_type
        If given, cast the image data to this dtype before masking.

    Returns
    -------
    np.ndarray
        Array of the masked (and possibly cast) values.

    Raises
    ------
    ValueError
        If the spatial shape of the image data does not match the mask.
    """
    data = image.get_data()
    if data.shape[:3] != mask.shape:
        raise ValueError("Image data and mask have different shapes.")
    if data_type is None:
        selected = data
    else:
        selected = data.astype(data_type)
    return selected[mask]
Mask image after optionally casting its type .
15,697
def multimask_images(images: Iterable[SpatialImage],
                     masks: Sequence[np.ndarray],
                     image_type: type = None
                     ) -> Iterable[Sequence[np.ndarray]]:
    """Lazily apply several masks to each image.

    Parameters
    ----------
    images
        Iterable of images to mask.
    masks
        Masks applied, in order, to every image.
    image_type
        Optional dtype to cast each image to before masking.

    Yields
    ------
    For every input image, a list with one masked array per mask.
    """
    for img in images:
        yield [mask_image(img, single_mask, image_type)
               for single_mask in masks]
Mask images with multiple masks .
15,698
def mask_images(images: Iterable[SpatialImage], mask: np.ndarray,
                image_type: type = None) -> Iterable[np.ndarray]:
    """Lazily apply a single mask to each image.

    Thin wrapper around ``multimask_images`` with a one-element mask
    sequence; yields one masked array per input image.
    """
    # Renamed the loop variable so it no longer shadows the parameter.
    for masked_list in multimask_images(images, (mask,), image_type):
        yield masked_list[0]
Mask images .
15,699
def from_masked_images(cls: Type[T], masked_images: Iterable[np.ndarray],
                       n_subjects: int) -> T:
    """Create a new instance from an iterable of masked images.

    Parameters
    ----------
    cls
        Target ndarray subclass to view the stacked result as.
    masked_images
        One 2D masked array per subject; all must share one shape.
    n_subjects
        Expected number of images.

    Returns
    -------
    Array of shape (timepoints, voxels, n_subjects), viewed as ``cls``.

    Raises
    ------
    ValueError
        If an image's shape differs from the first one, or the number
        of images does not equal ``n_subjects``.
    """
    image_iter = iter(masked_images)
    first = next(image_iter)
    template_shape = first.T.shape
    stacked = np.empty((template_shape[0], template_shape[1], n_subjects))
    # Re-chain the first image so the loop sees every image exactly once.
    count = 0
    for count, img in enumerate(itertools.chain([first], image_iter)):
        transposed = img.T
        if transposed.shape != template_shape:
            raise ValueError("Image {} has different shape from first "
                             "image: {} != {}".format(
                                 count, transposed.shape, template_shape))
        stacked[:, :, count] = transposed
    count += 1
    if count != n_subjects:
        raise ValueError("n_subjects != number of images: {} != {}"
                         .format(n_subjects, count))
    return stacked.view(cls)
Create a new instance of MaskedMultiSubjectData from masked images .