repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.get_factors | def get_factors(self, unique_R, inds, centers, widths):
"""Calculate factors based on centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
scanner coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
centers : 2D array, with shape [K, n_dim]
The centers of factors.
widths : 1D array, with shape [K, 1]
The widths of factors.
Returns
-------
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
"""
F = np.zeros((len(inds[0]), self.K))
tfa_extension.factor(
F,
centers,
widths,
unique_R[0],
unique_R[1],
unique_R[2],
inds[0],
inds[1],
inds[2])
return F | python | def get_factors(self, unique_R, inds, centers, widths):
"""Calculate factors based on centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
scanner coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
centers : 2D array, with shape [K, n_dim]
The centers of factors.
widths : 1D array, with shape [K, 1]
The widths of factors.
Returns
-------
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
"""
F = np.zeros((len(inds[0]), self.K))
tfa_extension.factor(
F,
centers,
widths,
unique_R[0],
unique_R[1],
unique_R[2],
inds[0],
inds[1],
inds[2])
return F | [
"def",
"get_factors",
"(",
"self",
",",
"unique_R",
",",
"inds",
",",
"centers",
",",
"widths",
")",
":",
"F",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"inds",
"[",
"0",
"]",
")",
",",
"self",
".",
"K",
")",
")",
"tfa_extension",
".",
"fa... | Calculate factors based on centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
scanner coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
centers : 2D array, with shape [K, n_dim]
The centers of factors.
widths : 1D array, with shape [K, 1]
The widths of factors.
Returns
-------
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data. | [
"Calculate",
"factors",
"based",
"on",
"centers",
"and",
"widths"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L525-L567 | train | 204,500 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.get_weights | def get_weights(self, data, F):
"""Calculate weight matrix based on fMRI data and factors
Parameters
----------
data : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
Returns
-------
W : 2D array, with shape [K, n_tr]
The weight matrix from fMRI data.
"""
beta = np.var(data)
trans_F = F.T.copy()
W = np.zeros((self.K, data.shape[1]))
if self.weight_method == 'rr':
W = np.linalg.solve(trans_F.dot(F) + beta * np.identity(self.K),
trans_F.dot(data))
else:
W = np.linalg.solve(trans_F.dot(F), trans_F.dot(data))
return W | python | def get_weights(self, data, F):
"""Calculate weight matrix based on fMRI data and factors
Parameters
----------
data : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
Returns
-------
W : 2D array, with shape [K, n_tr]
The weight matrix from fMRI data.
"""
beta = np.var(data)
trans_F = F.T.copy()
W = np.zeros((self.K, data.shape[1]))
if self.weight_method == 'rr':
W = np.linalg.solve(trans_F.dot(F) + beta * np.identity(self.K),
trans_F.dot(data))
else:
W = np.linalg.solve(trans_F.dot(F), trans_F.dot(data))
return W | [
"def",
"get_weights",
"(",
"self",
",",
"data",
",",
"F",
")",
":",
"beta",
"=",
"np",
".",
"var",
"(",
"data",
")",
"trans_F",
"=",
"F",
".",
"T",
".",
"copy",
"(",
")",
"W",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"K",
",",
"data"... | Calculate weight matrix based on fMRI data and factors
Parameters
----------
data : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject
F : 2D array, with shape [n_voxel,self.K]
The latent factors from fMRI data.
Returns
-------
W : 2D array, with shape [K, n_tr]
The weight matrix from fMRI data. | [
"Calculate",
"weight",
"matrix",
"based",
"on",
"fMRI",
"data",
"and",
"factors"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L569-L598 | train | 204,501 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA._get_max_sigma | def _get_max_sigma(self, R):
"""Calculate maximum sigma of scanner RAS coordinates
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
max_sigma : float
The maximum sigma of scanner coordinates.
"""
max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)
return max_sigma | python | def _get_max_sigma(self, R):
"""Calculate maximum sigma of scanner RAS coordinates
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
max_sigma : float
The maximum sigma of scanner coordinates.
"""
max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)
return max_sigma | [
"def",
"_get_max_sigma",
"(",
"self",
",",
"R",
")",
":",
"max_sigma",
"=",
"2.0",
"*",
"math",
".",
"pow",
"(",
"np",
".",
"nanmax",
"(",
"np",
".",
"std",
"(",
"R",
",",
"axis",
"=",
"0",
")",
")",
",",
"2",
")",
"return",
"max_sigma"
] | Calculate maximum sigma of scanner RAS coordinates
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
max_sigma : float
The maximum sigma of scanner coordinates. | [
"Calculate",
"maximum",
"sigma",
"of",
"scanner",
"RAS",
"coordinates"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L600-L618 | train | 204,502 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.get_bounds | def get_bounds(self, R):
"""Calculate lower and upper bounds for centers and widths
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
bounds : 2-tuple of array_like, default: None
The lower and upper bounds on factor's centers and widths.
"""
max_sigma = self._get_max_sigma(R)
final_lower = np.zeros(self.K * (self.n_dim + 1))
final_lower[0:self.K * self.n_dim] =\
np.tile(np.nanmin(R, axis=0), self.K)
final_lower[self.K * self.n_dim:] =\
np.repeat(self.lower_ratio * max_sigma, self.K)
final_upper = np.zeros(self.K * (self.n_dim + 1))
final_upper[0:self.K * self.n_dim] =\
np.tile(np.nanmax(R, axis=0), self.K)
final_upper[self.K * self.n_dim:] =\
np.repeat(self.upper_ratio * max_sigma, self.K)
bounds = (final_lower, final_upper)
return bounds | python | def get_bounds(self, R):
"""Calculate lower and upper bounds for centers and widths
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
bounds : 2-tuple of array_like, default: None
The lower and upper bounds on factor's centers and widths.
"""
max_sigma = self._get_max_sigma(R)
final_lower = np.zeros(self.K * (self.n_dim + 1))
final_lower[0:self.K * self.n_dim] =\
np.tile(np.nanmin(R, axis=0), self.K)
final_lower[self.K * self.n_dim:] =\
np.repeat(self.lower_ratio * max_sigma, self.K)
final_upper = np.zeros(self.K * (self.n_dim + 1))
final_upper[0:self.K * self.n_dim] =\
np.tile(np.nanmax(R, axis=0), self.K)
final_upper[self.K * self.n_dim:] =\
np.repeat(self.upper_ratio * max_sigma, self.K)
bounds = (final_lower, final_upper)
return bounds | [
"def",
"get_bounds",
"(",
"self",
",",
"R",
")",
":",
"max_sigma",
"=",
"self",
".",
"_get_max_sigma",
"(",
"R",
")",
"final_lower",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"K",
"*",
"(",
"self",
".",
"n_dim",
"+",
"1",
")",
")",
"final_lower",
... | Calculate lower and upper bounds for centers and widths
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
bounds : 2-tuple of array_like, default: None
The lower and upper bounds on factor's centers and widths. | [
"Calculate",
"lower",
"and",
"upper",
"bounds",
"for",
"centers",
"and",
"widths"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L620-L650 | train | 204,503 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA._residual_multivariate | def _residual_multivariate(
self,
estimate,
unique_R,
inds,
X,
W,
template_centers,
template_centers_mean_cov,
template_widths,
template_widths_mean_var_reci,
data_sigma):
"""Residual function for estimating centers and widths
Parameters
----------
estimate : 1D array
Initial estimation on centers
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
template_centers: 2D array, with shape [K, n_dim]
The template prior on centers
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths: 1D array
The template prior on widths
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
data_sigma: float
The variance of X.
Returns
-------
final_err : 1D array
The residual function for estimating centers.
"""
centers = self.get_centers(estimate)
widths = self.get_widths(estimate)
recon = X.size
other_err = 0 if template_centers is None else (2 * self.K)
final_err = np.zeros(recon + other_err)
F = self.get_factors(unique_R, inds, centers, widths)
sigma = np.zeros((1,))
sigma[0] = data_sigma
tfa_extension.recon(final_err[0:recon], X, F, W, sigma)
if other_err > 0:
# center error
for k in np.arange(self.K):
diff = (centers[k] - template_centers[k])
cov = from_tri_2_sym(template_centers_mean_cov[k], self.n_dim)
final_err[recon + k] = math.sqrt(
self.sample_scaling *
diff.dot(np.linalg.solve(cov, diff.T)))
# width error
base = recon + self.K
dist = template_widths_mean_var_reci *\
(widths - template_widths) ** 2
final_err[base:] = np.sqrt(self.sample_scaling * dist).ravel()
return final_err | python | def _residual_multivariate(
self,
estimate,
unique_R,
inds,
X,
W,
template_centers,
template_centers_mean_cov,
template_widths,
template_widths_mean_var_reci,
data_sigma):
"""Residual function for estimating centers and widths
Parameters
----------
estimate : 1D array
Initial estimation on centers
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
template_centers: 2D array, with shape [K, n_dim]
The template prior on centers
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths: 1D array
The template prior on widths
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
data_sigma: float
The variance of X.
Returns
-------
final_err : 1D array
The residual function for estimating centers.
"""
centers = self.get_centers(estimate)
widths = self.get_widths(estimate)
recon = X.size
other_err = 0 if template_centers is None else (2 * self.K)
final_err = np.zeros(recon + other_err)
F = self.get_factors(unique_R, inds, centers, widths)
sigma = np.zeros((1,))
sigma[0] = data_sigma
tfa_extension.recon(final_err[0:recon], X, F, W, sigma)
if other_err > 0:
# center error
for k in np.arange(self.K):
diff = (centers[k] - template_centers[k])
cov = from_tri_2_sym(template_centers_mean_cov[k], self.n_dim)
final_err[recon + k] = math.sqrt(
self.sample_scaling *
diff.dot(np.linalg.solve(cov, diff.T)))
# width error
base = recon + self.K
dist = template_widths_mean_var_reci *\
(widths - template_widths) ** 2
final_err[base:] = np.sqrt(self.sample_scaling * dist).ravel()
return final_err | [
"def",
"_residual_multivariate",
"(",
"self",
",",
"estimate",
",",
"unique_R",
",",
"inds",
",",
"X",
",",
"W",
",",
"template_centers",
",",
"template_centers_mean_cov",
",",
"template_widths",
",",
"template_widths_mean_var_reci",
",",
"data_sigma",
")",
":",
"... | Residual function for estimating centers and widths
Parameters
----------
estimate : 1D array
Initial estimation on centers
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
template_centers: 2D array, with shape [K, n_dim]
The template prior on centers
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths: 1D array
The template prior on widths
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
data_sigma: float
The variance of X.
Returns
-------
final_err : 1D array
The residual function for estimating centers. | [
"Residual",
"function",
"for",
"estimating",
"centers",
"and",
"widths"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L652-L736 | train | 204,504 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA._estimate_centers_widths | def _estimate_centers_widths(
self,
unique_R,
inds,
X,
W,
init_centers,
init_widths,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci):
"""Estimate centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
init_centers : 2D array, with shape [K, n_dim]
The initial values of centers.
init_widths : 1D array
The initial values of widths.
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
final_estimate.x: 1D array
The newly estimated centers and widths.
final_estimate.cost: float
The cost value.
"""
# least_squares only accept x in 1D format
init_estimate = np.hstack(
(init_centers.ravel(), init_widths.ravel())) # .copy()
data_sigma = 1.0 / math.sqrt(2.0) * np.std(X)
final_estimate = least_squares(
self._residual_multivariate,
init_estimate,
args=(
unique_R,
inds,
X,
W,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci,
data_sigma),
method=self.nlss_method,
loss=self.nlss_loss,
bounds=self.bounds,
verbose=0,
x_scale=self.x_scale,
tr_solver=self.tr_solver)
return final_estimate.x, final_estimate.cost | python | def _estimate_centers_widths(
self,
unique_R,
inds,
X,
W,
init_centers,
init_widths,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci):
"""Estimate centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
init_centers : 2D array, with shape [K, n_dim]
The initial values of centers.
init_widths : 1D array
The initial values of widths.
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
final_estimate.x: 1D array
The newly estimated centers and widths.
final_estimate.cost: float
The cost value.
"""
# least_squares only accept x in 1D format
init_estimate = np.hstack(
(init_centers.ravel(), init_widths.ravel())) # .copy()
data_sigma = 1.0 / math.sqrt(2.0) * np.std(X)
final_estimate = least_squares(
self._residual_multivariate,
init_estimate,
args=(
unique_R,
inds,
X,
W,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci,
data_sigma),
method=self.nlss_method,
loss=self.nlss_loss,
bounds=self.bounds,
verbose=0,
x_scale=self.x_scale,
tr_solver=self.tr_solver)
return final_estimate.x, final_estimate.cost | [
"def",
"_estimate_centers_widths",
"(",
"self",
",",
"unique_R",
",",
"inds",
",",
"X",
",",
"W",
",",
"init_centers",
",",
"init_widths",
",",
"template_centers",
",",
"template_widths",
",",
"template_centers_mean_cov",
",",
"template_widths_mean_var_reci",
")",
"... | Estimate centers and widths
Parameters
----------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
X : 2D array, with shape [n_voxel, n_tr]
fMRI data from one subject.
W : 2D array, with shape [K, n_tr]
The weight matrix.
init_centers : 2D array, with shape [K, n_dim]
The initial values of centers.
init_widths : 1D array
The initial values of widths.
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
final_estimate.x: 1D array
The newly estimated centers and widths.
final_estimate.cost: float
The cost value. | [
"Estimate",
"centers",
"and",
"widths"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L738-L822 | train | 204,505 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA._fit_tfa | def _fit_tfa(self, data, R, template_prior=None):
"""TFA main algorithm
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data from one subject.
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_prior : 1D array,
The template prior on centers and widths.
Returns
-------
TFA
Returns the instance itself.
"""
if template_prior is None:
template_centers = None
template_widths = None
template_centers_mean_cov = None
template_widths_mean_var_reci = None
else:
template_centers = self.get_centers(template_prior)
template_widths = self.get_widths(template_prior)
template_centers_mean_cov =\
self.get_centers_mean_cov(template_prior)
template_widths_mean_var_reci = 1.0 /\
self.get_widths_mean_var(template_prior)
inner_converged = False
np.random.seed(self.seed)
n = 0
while n < self.miter and not inner_converged:
self._fit_tfa_inner(
data,
R,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci)
self._assign_posterior()
inner_converged, _ = self._converged()
if not inner_converged:
self.local_prior = self.local_posterior_
else:
logger.info("TFA converged at %d iteration." % (n))
n += 1
gc.collect()
return self | python | def _fit_tfa(self, data, R, template_prior=None):
"""TFA main algorithm
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data from one subject.
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_prior : 1D array,
The template prior on centers and widths.
Returns
-------
TFA
Returns the instance itself.
"""
if template_prior is None:
template_centers = None
template_widths = None
template_centers_mean_cov = None
template_widths_mean_var_reci = None
else:
template_centers = self.get_centers(template_prior)
template_widths = self.get_widths(template_prior)
template_centers_mean_cov =\
self.get_centers_mean_cov(template_prior)
template_widths_mean_var_reci = 1.0 /\
self.get_widths_mean_var(template_prior)
inner_converged = False
np.random.seed(self.seed)
n = 0
while n < self.miter and not inner_converged:
self._fit_tfa_inner(
data,
R,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci)
self._assign_posterior()
inner_converged, _ = self._converged()
if not inner_converged:
self.local_prior = self.local_posterior_
else:
logger.info("TFA converged at %d iteration." % (n))
n += 1
gc.collect()
return self | [
"def",
"_fit_tfa",
"(",
"self",
",",
"data",
",",
"R",
",",
"template_prior",
"=",
"None",
")",
":",
"if",
"template_prior",
"is",
"None",
":",
"template_centers",
"=",
"None",
"template_widths",
"=",
"None",
"template_centers_mean_cov",
"=",
"None",
"template... | TFA main algorithm
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data from one subject.
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_prior : 1D array,
The template prior on centers and widths.
Returns
-------
TFA
Returns the instance itself. | [
"TFA",
"main",
"algorithm"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L824-L877 | train | 204,506 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA.get_unique_R | def get_unique_R(self, R):
"""Get unique vlaues from coordinate matrix
Parameters
----------
R : 2D array
The coordinate matrix of a subject's fMRI data
Return
------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
"""
unique_R = []
inds = []
for d in np.arange(self.n_dim):
tmp_unique, tmp_inds = np.unique(R[:, d], return_inverse=True)
unique_R.append(tmp_unique)
inds.append(tmp_inds)
return unique_R, inds | python | def get_unique_R(self, R):
"""Get unique vlaues from coordinate matrix
Parameters
----------
R : 2D array
The coordinate matrix of a subject's fMRI data
Return
------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array.
"""
unique_R = []
inds = []
for d in np.arange(self.n_dim):
tmp_unique, tmp_inds = np.unique(R[:, d], return_inverse=True)
unique_R.append(tmp_unique)
inds.append(tmp_inds)
return unique_R, inds | [
"def",
"get_unique_R",
"(",
"self",
",",
"R",
")",
":",
"unique_R",
"=",
"[",
"]",
"inds",
"=",
"[",
"]",
"for",
"d",
"in",
"np",
".",
"arange",
"(",
"self",
".",
"n_dim",
")",
":",
"tmp_unique",
",",
"tmp_inds",
"=",
"np",
".",
"unique",
"(",
... | Get unique vlaues from coordinate matrix
Parameters
----------
R : 2D array
The coordinate matrix of a subject's fMRI data
Return
------
unique_R : a list of array,
Each element contains unique value in one dimension of
coordinate matrix R.
inds : a list of array,
Each element contains the indices to reconstruct one
dimension of original cooridnate matrix from the unique
array. | [
"Get",
"unique",
"vlaues",
"from",
"coordinate",
"matrix"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L879-L906 | train | 204,507 |
brainiak/brainiak | brainiak/factoranalysis/tfa.py | TFA._fit_tfa_inner | def _fit_tfa_inner(
self,
data,
R,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci):
"""Fit TFA model, the inner loop part
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data of a subject
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
TFA
Returns the instance itself.
"""
nfeature = data.shape[0]
nsample = data.shape[1]
feature_indices =\
np.random.choice(nfeature, self.max_num_voxel, replace=False)
sample_features = np.zeros(nfeature).astype(bool)
sample_features[feature_indices] = True
samples_indices =\
np.random.choice(nsample, self.max_num_tr, replace=False)
curr_data = np.zeros((self.max_num_voxel, self.max_num_tr))\
.astype(float)
curr_data = data[feature_indices]
curr_data = curr_data[:, samples_indices].copy()
curr_R = R[feature_indices].copy()
centers = self.get_centers(self.local_prior)
widths = self.get_widths(self.local_prior)
unique_R, inds = self.get_unique_R(curr_R)
F = self.get_factors(unique_R, inds, centers, widths)
W = self.get_weights(curr_data, F)
self.local_posterior_, self.total_cost = self._estimate_centers_widths(
unique_R, inds, curr_data, W, centers, widths,
template_centers, template_centers_mean_cov,
template_widths, template_widths_mean_var_reci)
return self | python | def _fit_tfa_inner(
self,
data,
R,
template_centers,
template_widths,
template_centers_mean_cov,
template_widths_mean_var_reci):
"""Fit TFA model, the inner loop part
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data of a subject
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
TFA
Returns the instance itself.
"""
nfeature = data.shape[0]
nsample = data.shape[1]
feature_indices =\
np.random.choice(nfeature, self.max_num_voxel, replace=False)
sample_features = np.zeros(nfeature).astype(bool)
sample_features[feature_indices] = True
samples_indices =\
np.random.choice(nsample, self.max_num_tr, replace=False)
curr_data = np.zeros((self.max_num_voxel, self.max_num_tr))\
.astype(float)
curr_data = data[feature_indices]
curr_data = curr_data[:, samples_indices].copy()
curr_R = R[feature_indices].copy()
centers = self.get_centers(self.local_prior)
widths = self.get_widths(self.local_prior)
unique_R, inds = self.get_unique_R(curr_R)
F = self.get_factors(unique_R, inds, centers, widths)
W = self.get_weights(curr_data, F)
self.local_posterior_, self.total_cost = self._estimate_centers_widths(
unique_R, inds, curr_data, W, centers, widths,
template_centers, template_centers_mean_cov,
template_widths, template_widths_mean_var_reci)
return self | [
"def",
"_fit_tfa_inner",
"(",
"self",
",",
"data",
",",
"R",
",",
"template_centers",
",",
"template_widths",
",",
"template_centers_mean_cov",
",",
"template_widths_mean_var_reci",
")",
":",
"nfeature",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"nsample",
"=",
... | Fit TFA model, the inner loop part
Parameters
----------
data: 2D array, in shape [n_voxel, n_tr]
The fMRI data of a subject
R : 2D array, in shape [n_voxel, n_dim]
The voxel coordinate matrix of fMRI data
template_centers: 1D array
The template prior on centers
template_widths: 1D array
The template prior on widths
template_centers_mean_cov: 2D array, with shape [K, cov_size]
The template prior on covariance of centers' mean
template_widths_mean_var_reci: 1D array
The reciprocal of template prior on variance of widths' mean
Returns
-------
TFA
Returns the instance itself. | [
"Fit",
"TFA",
"model",
"the",
"inner",
"loop",
"part"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/factoranalysis/tfa.py#L908-L969 | train | 204,508 |
def recon_err(data, F, W):
    """Calculate the root mean squared reconstruction error.

    Parameters
    ----------
    data : 2D array
        True data to recover.
    F : 2D array
        HTFA factor matrix.
    W : 2D array
        HTFA weight matrix.

    Returns
    -------
    float
        Root mean squared error between ``data`` and ``F.dot(W)``.
    """
    # For flattened 1-D inputs, sklearn's mean_squared_error with
    # multioutput='uniform_average' reduces to the plain mean of squared
    # residuals, so compute it directly and skip the library round-trip.
    diff = data.ravel() - F.dot(W).ravel()
    return math.sqrt(diff.dot(diff) / diff.size)
"def",
"recon_err",
"(",
"data",
",",
"F",
",",
"W",
")",
":",
"recon",
"=",
"F",
".",
"dot",
"(",
"W",
")",
".",
"ravel",
"(",
")",
"err",
"=",
"mean_squared_error",
"(",
"data",
".",
"ravel",
"(",
")",
",",
"recon",
",",
"multioutput",
"=",
"... | Calcuate reconstruction error
Parameters
----------
data : 2D array
True data to recover.
F : 2D array
HTFA factor matrix.
W : 2D array
HTFA weight matrix.
Returns
-------
float
Returns root mean squared reconstruction error. | [
"Calcuate",
"reconstruction",
"error"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/examples/factoranalysis/htfa_cv_example.py#L26-L54 | train | 204,509 |
def get_train_err(htfa, data, F):
    """Compute the root mean squared error on training data.

    Parameters
    ----------
    htfa : HTFA
        An instance of HTFA, the factor analysis class in BrainIAK.
    data : 2D array
        Input data to HTFA.
    F : 2D array
        HTFA factor matrix.

    Returns
    -------
    float
        Root mean squared error on training.
    """
    # Derive the weights for this factor matrix, then score the
    # reconstruction against the original data.
    return recon_err(data, F, htfa.get_weights(data, F))
"def",
"get_train_err",
"(",
"htfa",
",",
"data",
",",
"F",
")",
":",
"W",
"=",
"htfa",
".",
"get_weights",
"(",
"data",
",",
"F",
")",
"return",
"recon_err",
"(",
"data",
",",
"F",
",",
"W",
")"
] | Calcuate training error
Parameters
----------
htfa : HTFA
An instance of HTFA, factor anaysis class in BrainIAK.
data : 2D array
Input data to HTFA.
F : 2D array
HTFA factor matrix.
Returns
-------
float
Returns root mean squared error on training. | [
"Calcuate",
"training",
"error"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/examples/factoranalysis/htfa_cv_example.py#L56-L79 | train | 204,510 |
def _sfn(l, mask, myrad, bcast_var):
    """Score a classifier on searchlight data using cross-validation.

    The labels are in ``bcast_var[0]``, the number of cross-validation
    folds in ``bcast_var[1]``, and the classifier in ``bcast_var[2]``.
    """
    labels, n_folds, clf = bcast_var[0], bcast_var[1], bcast_var[2]
    # voxels within the searchlight, transposed to samples-by-features
    samples = l[0][mask, :].T
    folds = model_selection.StratifiedKFold(n_splits=n_folds,
                                            shuffle=False)
    scores = model_selection.cross_val_score(clf, samples,
                                             y=labels,
                                             cv=folds,
                                             n_jobs=1)
    return np.mean(scores)
"def",
"_sfn",
"(",
"l",
",",
"mask",
",",
"myrad",
",",
"bcast_var",
")",
":",
"clf",
"=",
"bcast_var",
"[",
"2",
"]",
"data",
"=",
"l",
"[",
"0",
"]",
"[",
"mask",
",",
":",
"]",
".",
"T",
"# print(l[0].shape, mask.shape, data.shape)",
"skf",
"=",
... | Score classifier on searchlight data using cross-validation.
The classifier is in `bcast_var[2]`. The labels are in `bast_var[0]`. The
number of cross-validation folds is in `bast_var[1]. | [
"Score",
"classifier",
"on",
"searchlight",
"data",
"using",
"cross",
"-",
"validation",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/mvpa_voxelselector.py#L34-L49 | train | 204,511 |
def run(self, clf):
    """Run activity-based voxel selection.

    Voxels are ranked by the cross-validation accuracy of their activity
    vectors within the searchlight.

    Parameters
    ----------
    clf: classification function
        the classifier to be used in cross validation

    Returns
    -------
    result_volume: 3D array of accuracy numbers
        contains the voxelwise accuracy numbers obtained via Searchlight
    results: list of tuple (voxel_id, accuracy)
        the accuracy numbers of all voxels, in accuracy descending order
        the length of array equals the number of voxels
    """
    is_master = MPI.COMM_WORLD.Get_rank() == 0
    if is_master:
        logger.info(
            'running activity-based voxel selection via Searchlight'
        )
    self.sl.distribute([self.data], self.mask)
    self.sl.broadcast((self.labels, self.num_folds, clf))
    if is_master:
        logger.info(
            'data preparation done'
        )
    # searchlight yields a 3D volume holding one accuracy value per voxel
    result_volume = self.sl.run_searchlight(_sfn)
    in_mask_values = result_volume[self.mask]
    results = []
    if is_master:
        # missing entries (None) count as zero accuracy
        results = [(vid, 0 if acc is None else acc)
                   for vid, acc in enumerate(in_mask_values)]
        # rank voxels best-first
        results.sort(key=lambda pair: pair[1], reverse=True)
        logger.info(
            'activity-based voxel selection via Searchlight is done'
        )
    return result_volume, results
"def",
"run",
"(",
"self",
",",
"clf",
")",
":",
"rank",
"=",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
"if",
"rank",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"'running activity-based voxel selection via Searchlight'",
")",
"self",
".",
"sl",... | run activity-based voxel selection
Sort the voxels based on the cross-validation accuracy
of their activity vectors within the searchlight
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
result_volume: 3D array of accuracy numbers
contains the voxelwise accuracy numbers obtained via Searchlight
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels | [
"run",
"activity",
"-",
"based",
"voxel",
"selection"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/mvpa_voxelselector.py#L90-L136 | train | 204,512 |
def _cross_validation_for_one_voxel(clf, vid, num_folds, subject_data, labels):
    """Score a classifier on one voxel's data using cross validation.

    Returns a tuple of the voxel id and its mean cross-validation score.
    """
    # deterministic folds: no shuffling before splitting
    folds = model_selection.StratifiedKFold(n_splits=num_folds,
                                            shuffle=False)
    scores = model_selection.cross_val_score(clf, subject_data,
                                             y=labels,
                                             cv=folds, n_jobs=1)
    logger.debug(
        'cross validation for voxel %d is done' %
        vid
    )
    return (vid, scores.mean())
"def",
"_cross_validation_for_one_voxel",
"(",
"clf",
",",
"vid",
",",
"num_folds",
",",
"subject_data",
",",
"labels",
")",
":",
"# no shuffling in cv",
"skf",
"=",
"model_selection",
".",
"StratifiedKFold",
"(",
"n_splits",
"=",
"num_folds",
",",
"shuffle",
"=",... | Score classifier on data using cross validation. | [
"Score",
"classifier",
"on",
"data",
"using",
"cross",
"validation",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L41-L53 | train | 204,513 |
def run(self, clf):
    """Run correlation-based voxel selection in master-worker model.

    Voxels are sorted by the cross-validation accuracy of their
    correlation vectors.

    Parameters
    ----------
    clf: classification function
        the classifier to be used in cross validation

    Returns
    -------
    results: list of tuple (voxel_id, accuracy)
        the accuracy numbers of all voxels, in accuracy descending order
        the length of array equals the number of voxels
        (empty on worker ranks)
    """
    my_rank = MPI.COMM_WORLD.Get_rank()
    if my_rank != self.master_rank:
        # worker ranks process tasks until terminated; they return nothing
        self._worker(clf)
        return []
    ranked = self._master()
    # rank voxels best-first
    ranked.sort(key=lambda pair: pair[1], reverse=True)
    return ranked
"def",
"run",
"(",
"self",
",",
"clf",
")",
":",
"rank",
"=",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
"if",
"rank",
"==",
"self",
".",
"master_rank",
":",
"results",
"=",
"self",
".",
"_master",
"(",
")",
"# Sort the voxels",
"results",
... | Run correlation-based voxel selection in master-worker model.
Sort the voxels based on the cross-validation accuracy
of their correlation vectors
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels | [
"Run",
"correlation",
"-",
"based",
"voxel",
"selection",
"in",
"master",
"-",
"worker",
"model",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L149-L174 | train | 204,514 |
def _master(self):
    """Master node's operation.

    Assigns voxel-range tasks to workers and collects their results.

    Parameters
    ----------
    None

    Returns
    -------
    results: list of tuple (voxel_id, accuracy)
        the accuracy numbers of all voxels, in task-completion order
        (sorting happens in ``run``); the length equals the number
        of voxels
    """
    logger.info(
        'Master at rank %d starts to allocate tasks',
        MPI.COMM_WORLD.Get_rank()
    )
    comm = MPI.COMM_WORLD
    world_size = comm.Get_size()
    status = MPI.Status()
    collected = []

    def _make_task(start):
        # a task is (start_voxel, count); count is clipped at the end
        return (start, min(self.voxel_unit, self.num_voxels - start))

    current_task = _make_task(0)
    # active_size < world_size when there are fewer tasks than workers;
    # it marks how many ranks actually received work
    active_size = world_size
    for worker in range(0, world_size):
        if worker == self.master_rank:
            continue
        if current_task[1] == 0:
            active_size = worker
            break
        logger.debug(
            'master starts to send a task to worker %d' %
            worker
        )
        comm.send(current_task,
                  dest=worker,
                  tag=self._WORKTAG)
        current_task = _make_task(current_task[0] + current_task[1])

    # steady state: hand the next task to whichever worker finishes first
    while active_size == world_size and current_task[1] != 0:
        partial = comm.recv(source=MPI.ANY_SOURCE,
                            tag=MPI.ANY_TAG,
                            status=status)
        collected += partial
        comm.send(current_task,
                  dest=status.Get_source(),
                  tag=self._WORKTAG)
        current_task = _make_task(current_task[0] + current_task[1])

    # drain the one outstanding result per active worker
    for worker in range(0, active_size):
        if worker == self.master_rank:
            continue
        collected += comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG)

    # tell every worker (active or not) to shut down
    for worker in range(0, world_size):
        if worker == self.master_rank:
            continue
        comm.send(None,
                  dest=worker,
                  tag=self._TERMINATETAG)
    return collected
"def",
"_master",
"(",
"self",
")",
":",
"logger",
".",
"info",
"(",
"'Master at rank %d starts to allocate tasks'",
",",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
")",
"results",
"=",
"[",
"]",
"comm",
"=",
"MPI",
".",
"COMM_WORLD",
"size",
"="... | Master node's operation.
Assigning tasks to workers and collecting results from them
Parameters
----------
None
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of voxels | [
"Master",
"node",
"s",
"operation",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L176-L253 | train | 204,515 |
def _worker(self, clf):
    """Worker node's operation.

    Receives tasks from the master, processes them, and sends each
    result back, until the master sends the termination tag.

    Parameters
    ----------
    clf: classification function
        the classifier to be used in cross validation

    Returns
    -------
    None
    """
    logger.debug(
        'worker %d is running, waiting for tasks from master at rank %d' %
        (MPI.COMM_WORLD.Get_rank(), self.master_rank)
    )
    comm = MPI.COMM_WORLD
    status = MPI.Status()
    while True:
        task = comm.recv(source=self.master_rank,
                         tag=MPI.ANY_TAG,
                         status=status)
        # any non-zero tag is the termination signal
        if status.Get_tag():
            return
        scores = self._voxel_scoring(task, clf)
        comm.send(scores, dest=self.master_rank)
"def",
"_worker",
"(",
"self",
",",
"clf",
")",
":",
"logger",
".",
"debug",
"(",
"'worker %d is running, waiting for tasks from master at rank %d'",
"%",
"(",
"MPI",
".",
"COMM_WORLD",
".",
"Get_rank",
"(",
")",
",",
"self",
".",
"master_rank",
")",
")",
"com... | Worker node's operation.
Receiving tasks from the master to process and sending the result back
Parameters
----------
clf: classification function
the classifier to be used in cross validation
Returns
-------
None | [
"Worker",
"node",
"s",
"operation",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L255-L282 | train | 204,516 |
def _correlation_normalization(self, corr):
    """Do within-subject normalization.

    Applies a Fisher z-transform followed by a per-subject z-score.
    This method uses scipy.zscore to normalize the data, but is much
    slower than its C++ counterpart. It modifies ``corr`` in place.

    Parameters
    ----------
    corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
        the correlation values of all subjects in all epochs
        for the assigned values, in row-major

    Returns
    -------
    corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
        the normalized correlation values of all subjects in all epochs
        for the assigned values, in row-major
    """
    start_time = time.time()
    n_voxels, n_epochs, _ = corr.shape
    for v in range(n_voxels):
        # one block of epochs_per_subj rows per subject
        for first in range(0, n_epochs, self.epochs_per_subj):
            block = corr[v, first: first + self.epochs_per_subj, :]
            # Fisher z-transform before z-scoring within the subject
            fisher = .5 * np.log((block + 1) / (1 - block))
            corr[v, first: first + self.epochs_per_subj, :] = \
                zscore(fisher, axis=0, ddof=0)
    # if zscore fails (standard deviation is zero),
    # set all values to be zero
    corr = np.nan_to_num(corr)
    end_time = time.time()
    logger.debug(
        'within-subject normalization for %d voxels '
        'using numpy zscore function, takes %.2f s' %
        (n_voxels, (end_time - start_time))
    )
    return corr
"def",
"_correlation_normalization",
"(",
"self",
",",
"corr",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"(",
"sv",
",",
"e",
",",
"av",
")",
"=",
"corr",
".",
"shape",
"for",
"i",
"in",
"range",
"(",
"sv",
")",
":",
"start",
"=",
... | Do within-subject normalization.
This method uses scipy.zscore to normalize the data,
but is much slower than its C++ counterpart.
It is doing in-place z-score.
Parameters
----------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the correlation values of all subjects in all epochs
for the assigned values, in row-major
Returns
-------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the normalized correlation values of all subjects in all epochs
for the assigned values, in row-major | [
"Do",
"within",
"-",
"subject",
"normalization",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L331-L369 | train | 204,517 |
def _prepare_for_cross_validation(self, corr, clf):
    """Prepare data for voxelwise cross validation.

    If the classifier is sklearn.svm.SVC with a precomputed kernel,
    the kernel matrix of each voxel is computed; otherwise the input
    is passed through unchanged.

    Parameters
    ----------
    corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
        the normalized correlation values of all subjects in all epochs
        for the assigned values, in row-major

    clf: classification function
        the classifier to be used in cross validation

    Returns
    -------
    data: 3D numpy array
        If using sklearn.svm.SVC with precomputed kernel,
        it is in shape [num_processed_voxels, num_epochs, num_epochs];
        otherwise it is the input argument corr,
        in shape [num_processed_voxels, num_epochs, num_voxels]
    """
    start_time = time.time()
    n_voxels, n_epochs, _ = corr.shape
    needs_kernel = (isinstance(clf, sklearn.svm.SVC)
                    and clf.kernel == 'precomputed')
    if not needs_kernel:
        data = corr
    else:
        # one Gram (kernel) matrix per processed voxel
        kernels = np.zeros((n_voxels, n_epochs, n_epochs),
                           np.float32, order='C')
        for v in range(n_voxels):
            blas.compute_kernel_matrix('L', 'T',
                                       n_epochs, self.num_voxels2,
                                       1.0, corr,
                                       v, self.num_voxels2,
                                       0.0, kernels[v, :, :],
                                       n_epochs)
            # shrink the values for getting more stable alpha values
            # in SVM training iteration
            magnitude = len(str(int(kernels[v, 0, 0])))
            if magnitude > 2:
                kernels[v, :, :] *= 10 ** (2 - magnitude)
        data = kernels
    end_time = time.time()
    logger.debug(
        'cross validation data preparation takes %.2f s' %
        (end_time - start_time)
    )
    return data
"def",
"_prepare_for_cross_validation",
"(",
"self",
",",
"corr",
",",
"clf",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"(",
"num_processed_voxels",
",",
"num_epochs",
",",
"_",
")",
"=",
"corr",
".",
"shape",
"if",
"isinstance",
"(",
"clf",... | Prepare data for voxelwise cross validation.
If the classifier is sklearn.svm.SVC with precomputed kernel,
the kernel matrix of each voxel is computed, otherwise do nothing.
Parameters
----------
corr: 3D array in shape [num_processed_voxels, num_epochs, num_voxels]
the normalized correlation values of all subjects in all epochs
for the assigned values, in row-major
clf: classification function
the classifier to be used in cross validation
Returns
-------
data: 3D numpy array
If using sklearn.svm.SVC with precomputed kernel,
it is in shape [num_processed_voxels, num_epochs, num_epochs];
otherwise it is the input argument corr,
in shape [num_processed_voxels, num_epochs, num_voxels] | [
"Prepare",
"data",
"for",
"voxelwise",
"cross",
"validation",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L371-L421 | train | 204,518 |
def _do_cross_validation(self, clf, data, task):
    """Run voxelwise cross validation based on correlation vectors.

    clf: classification function
        the classifier to be used in cross validation

    data: 3D numpy array
        If using sklearn.svm.SVC with precomputed kernel,
        it is in shape [num_processed_voxels, num_epochs, num_epochs];
        otherwise it is the input argument corr,
        in shape [num_processed_voxels, num_epochs, num_voxels]

    task: tuple (start_voxel_id, num_processed_voxels)
        depicting the voxels assigned to compute

    Returns
    -------
    results: list of tuple (voxel_id, accuracy)
        the accuracy numbers of the assigned voxels;
        the length of the list equals the number of assigned voxels
    """
    start_time = time.time()
    first_voxel, n_assigned = task
    # parallel path only applies to precomputed-kernel SVC
    parallel = (isinstance(clf, sklearn.svm.SVC)
                and clf.kernel == 'precomputed'
                and self.use_multiprocessing)
    if parallel:
        args = [(clf, first_voxel + i, self.num_folds, data[i, :, :],
                 self.labels) for i in range(n_assigned)]
        with multiprocessing.Pool(self.process_num) as pool:
            results = list(pool.starmap(_cross_validation_for_one_voxel,
                                        args))
    else:
        results = [_cross_validation_for_one_voxel(clf, first_voxel + i,
                                                   self.num_folds,
                                                   data[i, :, :],
                                                   self.labels)
                   for i in range(n_assigned)]
    end_time = time.time()
    logger.debug(
        'cross validation for %d voxels, takes %.2f s' %
        (n_assigned, (end_time - start_time))
    )
    return results
"def",
"_do_cross_validation",
"(",
"self",
",",
"clf",
",",
"data",
",",
"task",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"if",
"isinstance",
"(",
"clf",
",",
"sklearn",
".",
"svm",
".",
"SVC",
")",
"and",
"clf",
".",
"kernel",
"==",... | Run voxelwise cross validation based on correlation vectors.
clf: classification function
the classifier to be used in cross validation
data: 3D numpy array
If using sklearn.svm.SVC with precomputed kernel,
it is in shape [num_processed_voxels, num_epochs, num_epochs];
otherwise it is the input argument corr,
in shape [num_processed_voxels, num_epochs, num_voxels]
task: tuple (start_voxel_id, num_processed_voxels)
depicting the voxels assigned to compute
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of assigned voxels | [
"Run",
"voxelwise",
"cross",
"validation",
"based",
"on",
"correlation",
"vectors",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L423-L465 | train | 204,519 |
def _voxel_scoring(self, task, clf):
    """The voxel selection process done in the worker node.

    Takes a task in and analyzes the voxels it specifies
    (voxel id, num_voxels). A three-stage pipeline:
    1. correlation computation
    2. within-subject normalization
    3. voxelwise cross validation

    Parameters
    ----------
    task: tuple (start_voxel_id, num_processed_voxels),
        depicting the voxels assigned to compute

    clf: classification function
        the classifier to be used in cross validation

    Returns
    -------
    results: list of tuple (voxel_id, accuracy)
        the accuracy numbers of the assigned voxels;
        the length of the list equals the number of assigned voxels
    """
    task_start = time.time()
    # stage 1: correlation computation
    corr = self._correlation_computation(task)
    # stage 2: within-subject normalization (C++ implementation)
    norm_start = time.time()
    fcma_extension.normalization(corr, self.epochs_per_subj)
    norm_end = time.time()
    logger.debug(
        'within-subject normalization for %d voxels '
        'using C++, takes %.2f s' %
        (task[1], (norm_end - norm_start))
    )
    # stage 3: voxelwise cross validation
    data = self._prepare_for_cross_validation(corr, clf)
    if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed':
        # free the correlation buffer so worker processes can be forked
        del corr
    results = self._do_cross_validation(clf, data, task)
    task_end = time.time()
    logger.info(
        'in rank %d, task %d takes %.2f s' %
        (MPI.COMM_WORLD.Get_rank(),
         (int(task[0] / self.voxel_unit)), (task_end - task_start))
    )
    return results
"""The voxel selection process done in the worker node.
Take the task in,
do analysis on voxels specified by the task (voxel id, num_voxels)
It is a three-stage pipeline consisting of:
1. correlation computation
2. within-subject normalization
3. voxelwise cross validation
Parameters
----------
task: tuple (start_voxel_id, num_processed_voxels),
depicting the voxels assigned to compute
clf: classification function
the classifier to be used in cross validation
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of assigned voxels
"""
time1 = time.time()
# correlation computation
corr = self._correlation_computation(task)
# normalization
# corr = self._correlation_normalization(corr)
time3 = time.time()
fcma_extension.normalization(corr, self.epochs_per_subj)
time4 = time.time()
logger.debug(
'within-subject normalization for %d voxels '
'using C++, takes %.2f s' %
(task[1], (time4 - time3))
)
# cross validation
data = self._prepare_for_cross_validation(corr, clf)
if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed':
# to save memory so that the process can be forked
del corr
results = self._do_cross_validation(clf, data, task)
time2 = time.time()
logger.info(
'in rank %d, task %d takes %.2f s' %
(MPI.COMM_WORLD.Get_rank(),
(int(task[0] / self.voxel_unit)), (time2 - time1))
)
return results | [
"def",
"_voxel_scoring",
"(",
"self",
",",
"task",
",",
"clf",
")",
":",
"time1",
"=",
"time",
".",
"time",
"(",
")",
"# correlation computation",
"corr",
"=",
"self",
".",
"_correlation_computation",
"(",
"task",
")",
"# normalization",
"# corr = self._correlat... | The voxel selection process done in the worker node.
Take the task in,
do analysis on voxels specified by the task (voxel id, num_voxels)
It is a three-stage pipeline consisting of:
1. correlation computation
2. within-subject normalization
3. voxelwise cross validation
Parameters
----------
task: tuple (start_voxel_id, num_processed_voxels),
depicting the voxels assigned to compute
clf: classification function
the classifier to be used in cross validation
Returns
-------
results: list of tuple (voxel_id, accuracy)
the accuracy numbers of all voxels, in accuracy descending order
the length of array equals the number of assigned voxels | [
"The",
"voxel",
"selection",
"process",
"done",
"in",
"the",
"worker",
"node",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L467-L516 | train | 204,520 |
brainiak/brainiak | brainiak/funcalign/sssrm.py | SSSRM.fit | def fit(self, X, y, Z):
"""Compute the Semi-Supervised Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
y : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in Z.
Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
for training the MLR classifier.
"""
logger.info('Starting SS-SRM')
# Check that the alpha value is in range (0.0,1.0)
if 0.0 >= self.alpha or self.alpha >= 1.0:
raise ValueError("Alpha parameter should be in range (0.0, 1.0)")
# Check that the regularizer value is positive
if 0.0 >= self.gamma:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1 or len(y) <= 1 or len(Z) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
if not (len(X) == len(y)) or not (len(X) == len(Z)):
raise ValueError("Different number of subjects in data.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
# and if alignment and classification data have the same number of
# voxels per subject. Also check that there labels for all the classif.
# sample
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
assert_all_finite(Z[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment samples "
"between subjects.")
if X[subject].shape[0] != Z[subject].shape[0]:
raise ValueError("Different number of voxels between alignment"
" and classification data (subject {0:d})"
".".format(subject))
if Z[subject].shape[1] != y[subject].size:
raise ValueError("Different number of samples and labels in "
"subject {0:d}.".format(subject))
# Map the classes to [0..C-1]
new_y = self._init_classes(y)
# Run SS-SRM
self.w_, self.s_, self.theta_, self.bias_ = self._sssrm(X, Z, new_y)
return self | python | def fit(self, X, y, Z):
"""Compute the Semi-Supervised Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
y : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in Z.
Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
for training the MLR classifier.
"""
logger.info('Starting SS-SRM')
# Check that the alpha value is in range (0.0,1.0)
if 0.0 >= self.alpha or self.alpha >= 1.0:
raise ValueError("Alpha parameter should be in range (0.0, 1.0)")
# Check that the regularizer value is positive
if 0.0 >= self.gamma:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1 or len(y) <= 1 or len(Z) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
if not (len(X) == len(y)) or not (len(X) == len(Z)):
raise ValueError("Different number of subjects in data.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
# and if alignment and classification data have the same number of
# voxels per subject. Also check that there labels for all the classif.
# sample
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
assert_all_finite(Z[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment samples "
"between subjects.")
if X[subject].shape[0] != Z[subject].shape[0]:
raise ValueError("Different number of voxels between alignment"
" and classification data (subject {0:d})"
".".format(subject))
if Z[subject].shape[1] != y[subject].size:
raise ValueError("Different number of samples and labels in "
"subject {0:d}.".format(subject))
# Map the classes to [0..C-1]
new_y = self._init_classes(y)
# Run SS-SRM
self.w_, self.s_, self.theta_, self.bias_ = self._sssrm(X, Z, new_y)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
",",
"Z",
")",
":",
"logger",
".",
"info",
"(",
"'Starting SS-SRM'",
")",
"# Check that the alpha value is in range (0.0,1.0)",
"if",
"0.0",
">=",
"self",
".",
"alpha",
"or",
"self",
".",
"alpha",
">=",
"1.0",... | Compute the Semi-Supervised Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
y : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in Z.
Z : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
for training the MLR classifier. | [
"Compute",
"the",
"Semi",
"-",
"Supervised",
"Shared",
"Response",
"Model"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L133-L202 | train | 204,521 |
brainiak/brainiak | brainiak/funcalign/sssrm.py | SSSRM.predict | def predict(self, X):
"""Classify the output for given data
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
The number of voxels should be according to each subject at
the moment of training the model.
Returns
-------
p: list of arrays, element i has shape=[samples_i]
Predictions for each data sample.
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
X_shared = self.transform(X)
p = [None] * len(X_shared)
for subject in range(len(X_shared)):
sumexp, _, exponents = utils.sumexp_stable(
self.theta_.T.dot(X_shared[subject]) + self.bias_)
p[subject] = self.classes_[
(exponents / sumexp[np.newaxis, :]).argmax(axis=0)]
return p | python | def predict(self, X):
"""Classify the output for given data
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
The number of voxels should be according to each subject at
the moment of training the model.
Returns
-------
p: list of arrays, element i has shape=[samples_i]
Predictions for each data sample.
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
X_shared = self.transform(X)
p = [None] * len(X_shared)
for subject in range(len(X_shared)):
sumexp, _, exponents = utils.sumexp_stable(
self.theta_.T.dot(X_shared[subject]) + self.bias_)
p[subject] = self.classes_[
(exponents / sumexp[np.newaxis, :]).argmax(axis=0)]
return p | [
"def",
"predict",
"(",
"self",
",",
"X",
")",
":",
"# Check if the model exist",
"if",
"hasattr",
"(",
"self",
",",
"'w_'",
")",
"is",
"False",
":",
"raise",
"NotFittedError",
"(",
"\"The model fit has not been run yet.\"",
")",
"# Check the number of subjects",
"if... | Classify the output for given data
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
The number of voxels should be according to each subject at
the moment of training the model.
Returns
-------
p: list of arrays, element i has shape=[samples_i]
Predictions for each data sample. | [
"Classify",
"the",
"output",
"for",
"given",
"data"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L264-L297 | train | 204,522 |
brainiak/brainiak | brainiak/funcalign/sssrm.py | SSSRM._sssrm | def _sssrm(self, data_align, data_sup, labels):
"""Block-Coordinate Descent algorithm for fitting SS-SRM.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
classes = self.classes_.size
# Initialization:
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2**32))
for i in range(len(data_align))]
# Set Wi's to a random orthogonal voxels by TRs
w, _ = srm._init_w_transforms(data_align, self.features, random_states)
# Initialize the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Initialize theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# calculate and print the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup, labels,
w, s, theta, bias)
logger.info('Objective function %f' % objective)
# Main loop:
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update the mappings Wi
w = self._update_w(data_align, data_sup, labels, w, s, theta, bias)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating Wi %f'
% objective)
# Update the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating S %f'
% objective)
# Update the MLR classifier, theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating MLR %f'
% objective)
return w, s, theta, bias | python | def _sssrm(self, data_align, data_sup, labels):
"""Block-Coordinate Descent algorithm for fitting SS-SRM.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
"""
classes = self.classes_.size
# Initialization:
self.random_state_ = np.random.RandomState(self.rand_seed)
random_states = [
np.random.RandomState(self.random_state_.randint(2**32))
for i in range(len(data_align))]
# Set Wi's to a random orthogonal voxels by TRs
w, _ = srm._init_w_transforms(data_align, self.features, random_states)
# Initialize the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Initialize theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# calculate and print the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup, labels,
w, s, theta, bias)
logger.info('Objective function %f' % objective)
# Main loop:
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# Update the mappings Wi
w = self._update_w(data_align, data_sup, labels, w, s, theta, bias)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating Wi %f'
% objective)
# Update the shared response S
s = SSSRM._compute_shared_response(data_align, w)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating S %f'
% objective)
# Update the MLR classifier, theta and bias
theta, bias = self._update_classifier(data_sup, labels, w, classes)
# Output the objective function
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(data_align, data_sup,
labels, w, s, theta, bias)
logger.info('Objective function after updating MLR %f'
% objective)
return w, s, theta, bias | [
"def",
"_sssrm",
"(",
"self",
",",
"data_align",
",",
"data_sup",
",",
"labels",
")",
":",
"classes",
"=",
"self",
".",
"classes_",
".",
"size",
"# Initialization:",
"self",
".",
"random_state_",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
"self",
... | Block-Coordinate Descent algorithm for fitting SS-SRM.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
Returns
-------
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response. | [
"Block",
"-",
"Coordinate",
"Descent",
"algorithm",
"for",
"fitting",
"SS",
"-",
"SRM",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L299-L383 | train | 204,523 |
brainiak/brainiak | brainiak/funcalign/sssrm.py | SSSRM._update_classifier | def _update_classifier(self, data, labels, w, classes):
"""Update the classifier parameters theta and bias
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of 2D array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
classes : int
The number of classes in the classifier.
Returns
-------
theta : array, shape=[features, classes]
The MLR parameter for the class planes.
bias : array shape=[classes,]
The MLR parameter for class biases.
"""
# Stack the data and labels for training the classifier
data_stacked, labels_stacked, weights = \
SSSRM._stack_list(data, labels, w)
features = w[0].shape[1]
total_samples = weights.size
data_th = S.shared(data_stacked.astype(theano.config.floatX))
val_ = S.shared(labels_stacked)
total_samples_S = S.shared(total_samples)
theta_th = T.matrix(name='theta', dtype=theano.config.floatX)
bias_th = T.col(name='bias', dtype=theano.config.floatX)
constf2 = S.shared(self.alpha / self.gamma, allow_downcast=True)
weights_th = S.shared(weights)
log_p_y_given_x = \
T.log(T.nnet.softmax((theta_th.T.dot(data_th.T)).T + bias_th.T))
f = -constf2 * T.sum((log_p_y_given_x[T.arange(total_samples_S), val_])
/ weights_th) + 0.5 * T.sum(theta_th ** 2)
manifold = Product((Euclidean(features, classes),
Euclidean(classes, 1)))
problem = Problem(manifold=manifold, cost=f, arg=[theta_th, bias_th],
verbosity=0)
solver = ConjugateGradient(mingradnorm=1e-6)
solution = solver.solve(problem)
theta = solution[0]
bias = solution[1]
del constf2
del theta_th
del bias_th
del data_th
del val_
del solver
del solution
return theta, bias | python | def _update_classifier(self, data, labels, w, classes):
"""Update the classifier parameters theta and bias
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of 2D array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
classes : int
The number of classes in the classifier.
Returns
-------
theta : array, shape=[features, classes]
The MLR parameter for the class planes.
bias : array shape=[classes,]
The MLR parameter for class biases.
"""
# Stack the data and labels for training the classifier
data_stacked, labels_stacked, weights = \
SSSRM._stack_list(data, labels, w)
features = w[0].shape[1]
total_samples = weights.size
data_th = S.shared(data_stacked.astype(theano.config.floatX))
val_ = S.shared(labels_stacked)
total_samples_S = S.shared(total_samples)
theta_th = T.matrix(name='theta', dtype=theano.config.floatX)
bias_th = T.col(name='bias', dtype=theano.config.floatX)
constf2 = S.shared(self.alpha / self.gamma, allow_downcast=True)
weights_th = S.shared(weights)
log_p_y_given_x = \
T.log(T.nnet.softmax((theta_th.T.dot(data_th.T)).T + bias_th.T))
f = -constf2 * T.sum((log_p_y_given_x[T.arange(total_samples_S), val_])
/ weights_th) + 0.5 * T.sum(theta_th ** 2)
manifold = Product((Euclidean(features, classes),
Euclidean(classes, 1)))
problem = Problem(manifold=manifold, cost=f, arg=[theta_th, bias_th],
verbosity=0)
solver = ConjugateGradient(mingradnorm=1e-6)
solution = solver.solve(problem)
theta = solution[0]
bias = solution[1]
del constf2
del theta_th
del bias_th
del data_th
del val_
del solver
del solution
return theta, bias | [
"def",
"_update_classifier",
"(",
"self",
",",
"data",
",",
"labels",
",",
"w",
",",
"classes",
")",
":",
"# Stack the data and labels for training the classifier",
"data_stacked",
",",
"labels_stacked",
",",
"weights",
"=",
"SSSRM",
".",
"_stack_list",
"(",
"data",... | Update the classifier parameters theta and bias
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of 2D array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
classes : int
The number of classes in the classifier.
Returns
-------
theta : array, shape=[features, classes]
The MLR parameter for the class planes.
bias : array shape=[classes,]
The MLR parameter for class biases. | [
"Update",
"the",
"classifier",
"parameters",
"theta",
"and",
"bias"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L385-L453 | train | 204,524 |
brainiak/brainiak | brainiak/funcalign/sssrm.py | SSSRM._compute_shared_response | def _compute_shared_response(data, w):
""" Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w.
"""
s = np.zeros((w[0].shape[1], data[0].shape[1]))
for m in range(len(w)):
s = s + w[m].T.dot(data[m])
s /= len(w)
return s | python | def _compute_shared_response(data, w):
""" Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w.
"""
s = np.zeros((w[0].shape[1], data[0].shape[1]))
for m in range(len(w)):
s = s + w[m].T.dot(data[m])
s /= len(w)
return s | [
"def",
"_compute_shared_response",
"(",
"data",
",",
"w",
")",
":",
"s",
"=",
"np",
".",
"zeros",
"(",
"(",
"w",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
",",
"data",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
")",
")",
"for",
"m",
"in"... | Compute the shared response S
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
w : list of 2D arrays, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
s : array, shape=[features, samples]
The shared response for the subjects data with the mappings in w. | [
"Compute",
"the",
"shared",
"response",
"S"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L559-L582 | train | 204,525 |
brainiak/brainiak | brainiak/funcalign/sssrm.py | SSSRM._objective_function | def _objective_function(self, data_align, data_sup, labels, w, s, theta,
bias):
"""Compute the objective function of the Semi-Supervised SRM
See :eq:`sssrm-eq`.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function evaluated based on the parameters to
this function.
"""
subjects = len(data_align)
# Compute the SRM loss
f_val = 0.0
for subject in range(subjects):
samples = data_align[subject].shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align[subject] - w[subject].dot(s),
'fro')**2
# Compute the MLR loss
f_val += self._loss_lr(data_sup, labels, w, theta, bias)
return f_val | python | def _objective_function(self, data_align, data_sup, labels, w, s, theta,
bias):
"""Compute the objective function of the Semi-Supervised SRM
See :eq:`sssrm-eq`.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function evaluated based on the parameters to
this function.
"""
subjects = len(data_align)
# Compute the SRM loss
f_val = 0.0
for subject in range(subjects):
samples = data_align[subject].shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align[subject] - w[subject].dot(s),
'fro')**2
# Compute the MLR loss
f_val += self._loss_lr(data_sup, labels, w, theta, bias)
return f_val | [
"def",
"_objective_function",
"(",
"self",
",",
"data_align",
",",
"data_sup",
",",
"labels",
",",
"w",
",",
"s",
",",
"theta",
",",
"bias",
")",
":",
"subjects",
"=",
"len",
"(",
"data_align",
")",
"# Compute the SRM loss",
"f_val",
"=",
"0.0",
"for",
... | Compute the objective function of the Semi-Supervised SRM
See :eq:`sssrm-eq`.
Parameters
----------
data_align : list of 2D arrays, element i has shape=[voxels_i, n_align]
Each element in the list contains the fMRI data for alignment of
one subject. There are n_align samples for each subject.
data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the data samples
in data_sup.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function evaluated based on the parameters to
this function. | [
"Compute",
"the",
"objective",
"function",
"of",
"the",
"Semi",
"-",
"Supervised",
"SRM"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L584-L638 | train | 204,526 |
brainiak/brainiak | brainiak/funcalign/sssrm.py | SSSRM._objective_function_subject | def _objective_function_subject(self, data_align, data_sup, labels, w, s,
theta, bias):
"""Compute the objective function for one subject.
.. math:: (1-C)*Loss_{SRM}_i(W_i,S;X_i)
.. math:: + C/\\gamma * Loss_{MLR_i}(\\theta, bias; {(W_i^T*Z_i, y_i})
.. math:: + R(\\theta)
Parameters
----------
data_align : 2D array, shape=[voxels_i, samples_align]
Contains the fMRI data for alignment of subject i.
data_sup : 2D array, shape=[voxels_i, samples_i]
Contains the fMRI data of one subject for the classification task.
labels : array of int, shape=[samples_i]
The labels for the data samples in data_sup.
w : array, shape=[voxels_i, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function for subject i evaluated on the
parameters to this function.
"""
# Compute the SRM loss
f_val = 0.0
samples = data_align.shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align - w.dot(s), 'fro')**2
# Compute the MLR loss
f_val += self._loss_lr_subject(data_sup, labels, w, theta, bias)
return f_val | python | def _objective_function_subject(self, data_align, data_sup, labels, w, s,
theta, bias):
"""Compute the objective function for one subject.
.. math:: (1-C)*Loss_{SRM}_i(W_i,S;X_i)
.. math:: + C/\\gamma * Loss_{MLR_i}(\\theta, bias; {(W_i^T*Z_i, y_i})
.. math:: + R(\\theta)
Parameters
----------
data_align : 2D array, shape=[voxels_i, samples_align]
Contains the fMRI data for alignment of subject i.
data_sup : 2D array, shape=[voxels_i, samples_i]
Contains the fMRI data of one subject for the classification task.
labels : array of int, shape=[samples_i]
The labels for the data samples in data_sup.
w : array, shape=[voxels_i, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function for subject i evaluated on the
parameters to this function.
"""
# Compute the SRM loss
f_val = 0.0
samples = data_align.shape[1]
f_val += (1 - self.alpha) * (0.5 / samples) \
* np.linalg.norm(data_align - w.dot(s), 'fro')**2
# Compute the MLR loss
f_val += self._loss_lr_subject(data_sup, labels, w, theta, bias)
return f_val | [
"def",
"_objective_function_subject",
"(",
"self",
",",
"data_align",
",",
"data_sup",
",",
"labels",
",",
"w",
",",
"s",
",",
"theta",
",",
"bias",
")",
":",
"# Compute the SRM loss",
"f_val",
"=",
"0.0",
"samples",
"=",
"data_align",
".",
"shape",
"[",
... | Compute the objective function for one subject.
.. math:: (1-C)*Loss_{SRM}_i(W_i,S;X_i)
.. math:: + C/\\gamma * Loss_{MLR_i}(\\theta, bias; {(W_i^T*Z_i, y_i})
.. math:: + R(\\theta)
Parameters
----------
data_align : 2D array, shape=[voxels_i, samples_align]
Contains the fMRI data for alignment of subject i.
data_sup : 2D array, shape=[voxels_i, samples_i]
Contains the fMRI data of one subject for the classification task.
labels : array of int, shape=[samples_i]
The labels for the data samples in data_sup.
w : array, shape=[voxels_i, features]
The orthogonal transform (mapping) :math:`W_i` for subject i.
s : array, shape=[features, samples]
The shared response.
theta : array, shape=[classes, features]
The MLR class plane parameters.
bias : array, shape=[classes]
The MLR class biases.
Returns
-------
f_val : float
The SS-SRM objective function for subject i evaluated on the
parameters to this function. | [
"Compute",
"the",
"objective",
"function",
"for",
"one",
"subject",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L640-L689 | train | 204,527 |
brainiak/brainiak | brainiak/funcalign/sssrm.py | SSSRM._stack_list | def _stack_list(data, data_labels, w):
"""Construct a numpy array by stacking arrays in a list
Parameter
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
data_labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the samples in
data.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
data_stacked : 2D array, shape=[samples, features]
The data samples from all subjects are stacked into a single
2D array, where "samples" is the sum of samples_i.
labels_stacked : array, shape=[samples,]
The labels from all subjects are stacked into a single
array, where "samples" is the sum of samples_i.
weights : array, shape=[samples,]
The number of samples of the subject that are related to that
sample. They become a weight per sample in the MLR loss.
"""
labels_stacked = utils.concatenate_not_none(data_labels)
weights = np.empty((labels_stacked.size,))
data_shared = [None] * len(data)
curr_samples = 0
for s in range(len(data)):
if data[s] is not None:
subject_samples = data[s].shape[1]
curr_samples_end = curr_samples + subject_samples
weights[curr_samples:curr_samples_end] = subject_samples
data_shared[s] = w[s].T.dot(data[s])
curr_samples += data[s].shape[1]
data_stacked = utils.concatenate_not_none(data_shared, axis=1).T
return data_stacked, labels_stacked, weights | python | def _stack_list(data, data_labels, w):
"""Construct a numpy array by stacking arrays in a list
Parameter
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
data_labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the samples in
data.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
data_stacked : 2D array, shape=[samples, features]
The data samples from all subjects are stacked into a single
2D array, where "samples" is the sum of samples_i.
labels_stacked : array, shape=[samples,]
The labels from all subjects are stacked into a single
array, where "samples" is the sum of samples_i.
weights : array, shape=[samples,]
The number of samples of the subject that are related to that
sample. They become a weight per sample in the MLR loss.
"""
labels_stacked = utils.concatenate_not_none(data_labels)
weights = np.empty((labels_stacked.size,))
data_shared = [None] * len(data)
curr_samples = 0
for s in range(len(data)):
if data[s] is not None:
subject_samples = data[s].shape[1]
curr_samples_end = curr_samples + subject_samples
weights[curr_samples:curr_samples_end] = subject_samples
data_shared[s] = w[s].T.dot(data[s])
curr_samples += data[s].shape[1]
data_stacked = utils.concatenate_not_none(data_shared, axis=1).T
return data_stacked, labels_stacked, weights | [
"def",
"_stack_list",
"(",
"data",
",",
"data_labels",
",",
"w",
")",
":",
"labels_stacked",
"=",
"utils",
".",
"concatenate_not_none",
"(",
"data_labels",
")",
"weights",
"=",
"np",
".",
"empty",
"(",
"(",
"labels_stacked",
".",
"size",
",",
")",
")",
"... | Construct a numpy array by stacking arrays in a list
Parameter
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject for
the classification task.
data_labels : list of arrays of int, element i has shape=[samples_i]
Each element in the list contains the labels for the samples in
data.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
Returns
-------
data_stacked : 2D array, shape=[samples, features]
The data samples from all subjects are stacked into a single
2D array, where "samples" is the sum of samples_i.
labels_stacked : array, shape=[samples,]
The labels from all subjects are stacked into a single
array, where "samples" is the sum of samples_i.
weights : array, shape=[samples,]
The number of samples of the subject that are related to that
sample. They become a weight per sample in the MLR loss. | [
"Construct",
"a",
"numpy",
"array",
"by",
"stacking",
"arrays",
"in",
"a",
"list"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L773-L820 | train | 204,528 |
brainiak/brainiak | brainiak/searchlight/searchlight.py | _singlenode_searchlight | def _singlenode_searchlight(l, msk, mysl_rad, bcast_var, extra_params):
"""Run searchlight function on block data in parallel.
`extra_params` contains:
- Searchlight function.
- `Shape` mask.
- Minimum active voxels proportion required to run the searchlight
function.
"""
voxel_fn = extra_params[0]
shape_mask = extra_params[1]
min_active_voxels_proportion = extra_params[2]
outmat = np.empty(msk.shape, dtype=np.object)[mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad]
for i in range(0, outmat.shape[0]):
for j in range(0, outmat.shape[1]):
for k in range(0, outmat.shape[2]):
if msk[i+mysl_rad, j+mysl_rad, k+mysl_rad]:
searchlight_slice = np.s_[
i:i+2*mysl_rad+1,
j:j+2*mysl_rad+1,
k:k+2*mysl_rad+1]
voxel_fn_mask = msk[searchlight_slice] * shape_mask
if (min_active_voxels_proportion == 0
or np.count_nonzero(voxel_fn_mask) / voxel_fn_mask.size
> min_active_voxels_proportion):
outmat[i, j, k] = voxel_fn(
[ll[searchlight_slice] for ll in l],
msk[searchlight_slice] * shape_mask,
mysl_rad,
bcast_var)
return outmat | python | def _singlenode_searchlight(l, msk, mysl_rad, bcast_var, extra_params):
"""Run searchlight function on block data in parallel.
`extra_params` contains:
- Searchlight function.
- `Shape` mask.
- Minimum active voxels proportion required to run the searchlight
function.
"""
voxel_fn = extra_params[0]
shape_mask = extra_params[1]
min_active_voxels_proportion = extra_params[2]
outmat = np.empty(msk.shape, dtype=np.object)[mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad,
mysl_rad:-mysl_rad]
for i in range(0, outmat.shape[0]):
for j in range(0, outmat.shape[1]):
for k in range(0, outmat.shape[2]):
if msk[i+mysl_rad, j+mysl_rad, k+mysl_rad]:
searchlight_slice = np.s_[
i:i+2*mysl_rad+1,
j:j+2*mysl_rad+1,
k:k+2*mysl_rad+1]
voxel_fn_mask = msk[searchlight_slice] * shape_mask
if (min_active_voxels_proportion == 0
or np.count_nonzero(voxel_fn_mask) / voxel_fn_mask.size
> min_active_voxels_proportion):
outmat[i, j, k] = voxel_fn(
[ll[searchlight_slice] for ll in l],
msk[searchlight_slice] * shape_mask,
mysl_rad,
bcast_var)
return outmat | [
"def",
"_singlenode_searchlight",
"(",
"l",
",",
"msk",
",",
"mysl_rad",
",",
"bcast_var",
",",
"extra_params",
")",
":",
"voxel_fn",
"=",
"extra_params",
"[",
"0",
"]",
"shape_mask",
"=",
"extra_params",
"[",
"1",
"]",
"min_active_voxels_proportion",
"=",
"ex... | Run searchlight function on block data in parallel.
`extra_params` contains:
- Searchlight function.
- `Shape` mask.
- Minimum active voxels proportion required to run the searchlight
function. | [
"Run",
"searchlight",
"function",
"on",
"block",
"data",
"in",
"parallel",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L523-L557 | train | 204,529 |
brainiak/brainiak | brainiak/searchlight/searchlight.py | Searchlight._get_ownership | def _get_ownership(self, data):
"""Determine on which rank each subject currently resides
Parameters
----------
data: list of 4D arrays with subject data
Returns
-------
list of ranks indicating the owner of each subject
"""
rank = self.comm.rank
B = [(rank, idx) for (idx, c) in enumerate(data) if c is not None]
C = self.comm.allreduce(B)
ownership = [None] * len(data)
for c in C:
ownership[c[1]] = c[0]
return ownership | python | def _get_ownership(self, data):
"""Determine on which rank each subject currently resides
Parameters
----------
data: list of 4D arrays with subject data
Returns
-------
list of ranks indicating the owner of each subject
"""
rank = self.comm.rank
B = [(rank, idx) for (idx, c) in enumerate(data) if c is not None]
C = self.comm.allreduce(B)
ownership = [None] * len(data)
for c in C:
ownership[c[1]] = c[0]
return ownership | [
"def",
"_get_ownership",
"(",
"self",
",",
"data",
")",
":",
"rank",
"=",
"self",
".",
"comm",
".",
"rank",
"B",
"=",
"[",
"(",
"rank",
",",
"idx",
")",
"for",
"(",
"idx",
",",
"c",
")",
"in",
"enumerate",
"(",
"data",
")",
"if",
"c",
"is",
"... | Determine on which rank each subject currently resides
Parameters
----------
data: list of 4D arrays with subject data
Returns
-------
list of ranks indicating the owner of each subject | [
"Determine",
"on",
"which",
"rank",
"each",
"subject",
"currently",
"resides"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L165-L185 | train | 204,530 |
brainiak/brainiak | brainiak/searchlight/searchlight.py | Searchlight._get_blocks | def _get_blocks(self, mask):
"""Divide the volume into a set of blocks
Ignore blocks that have no active voxels in the mask
Parameters
----------
mask: a boolean 3D array which is true at every active voxel
Returns
-------
list of tuples containing block information:
- a triple containing top left point of the block and
- a triple containing the size in voxels of the block
"""
blocks = []
outerblk = self.max_blk_edge + 2*self.sl_rad
for i in range(0, mask.shape[0], self.max_blk_edge):
for j in range(0, mask.shape[1], self.max_blk_edge):
for k in range(0, mask.shape[2], self.max_blk_edge):
block_shape = mask[i:i+outerblk,
j:j+outerblk,
k:k+outerblk
].shape
if np.any(
mask[i+self.sl_rad:i+block_shape[0]-self.sl_rad,
j+self.sl_rad:j+block_shape[1]-self.sl_rad,
k+self.sl_rad:k+block_shape[2]-self.sl_rad]):
blocks.append(((i, j, k), block_shape))
return blocks | python | def _get_blocks(self, mask):
"""Divide the volume into a set of blocks
Ignore blocks that have no active voxels in the mask
Parameters
----------
mask: a boolean 3D array which is true at every active voxel
Returns
-------
list of tuples containing block information:
- a triple containing top left point of the block and
- a triple containing the size in voxels of the block
"""
blocks = []
outerblk = self.max_blk_edge + 2*self.sl_rad
for i in range(0, mask.shape[0], self.max_blk_edge):
for j in range(0, mask.shape[1], self.max_blk_edge):
for k in range(0, mask.shape[2], self.max_blk_edge):
block_shape = mask[i:i+outerblk,
j:j+outerblk,
k:k+outerblk
].shape
if np.any(
mask[i+self.sl_rad:i+block_shape[0]-self.sl_rad,
j+self.sl_rad:j+block_shape[1]-self.sl_rad,
k+self.sl_rad:k+block_shape[2]-self.sl_rad]):
blocks.append(((i, j, k), block_shape))
return blocks | [
"def",
"_get_blocks",
"(",
"self",
",",
"mask",
")",
":",
"blocks",
"=",
"[",
"]",
"outerblk",
"=",
"self",
".",
"max_blk_edge",
"+",
"2",
"*",
"self",
".",
"sl_rad",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"mask",
".",
"shape",
"[",
"0",
"]",
... | Divide the volume into a set of blocks
Ignore blocks that have no active voxels in the mask
Parameters
----------
mask: a boolean 3D array which is true at every active voxel
Returns
-------
list of tuples containing block information:
- a triple containing top left point of the block and
- a triple containing the size in voxels of the block | [
"Divide",
"the",
"volume",
"into",
"a",
"set",
"of",
"blocks"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L187-L219 | train | 204,531 |
brainiak/brainiak | brainiak/searchlight/searchlight.py | Searchlight._get_block_data | def _get_block_data(self, mat, block):
"""Retrieve a block from a 3D or 4D volume
Parameters
----------
mat: a 3D or 4D volume
block: a tuple containing block information:
- a triple containing the lowest-coordinate voxel in the block
- a triple containing the size in voxels of the block
Returns
-------
In the case of a 3D array, a 3D subarray at the block location
In the case of a 4D array, a 4D subarray at the block location,
including the entire fourth dimension.
"""
(pt, sz) = block
if len(mat.shape) == 3:
return mat[pt[0]:pt[0]+sz[0],
pt[1]:pt[1]+sz[1],
pt[2]:pt[2]+sz[2]].copy()
elif len(mat.shape) == 4:
return mat[pt[0]:pt[0]+sz[0],
pt[1]:pt[1]+sz[1],
pt[2]:pt[2]+sz[2],
:].copy() | python | def _get_block_data(self, mat, block):
"""Retrieve a block from a 3D or 4D volume
Parameters
----------
mat: a 3D or 4D volume
block: a tuple containing block information:
- a triple containing the lowest-coordinate voxel in the block
- a triple containing the size in voxels of the block
Returns
-------
In the case of a 3D array, a 3D subarray at the block location
In the case of a 4D array, a 4D subarray at the block location,
including the entire fourth dimension.
"""
(pt, sz) = block
if len(mat.shape) == 3:
return mat[pt[0]:pt[0]+sz[0],
pt[1]:pt[1]+sz[1],
pt[2]:pt[2]+sz[2]].copy()
elif len(mat.shape) == 4:
return mat[pt[0]:pt[0]+sz[0],
pt[1]:pt[1]+sz[1],
pt[2]:pt[2]+sz[2],
:].copy() | [
"def",
"_get_block_data",
"(",
"self",
",",
"mat",
",",
"block",
")",
":",
"(",
"pt",
",",
"sz",
")",
"=",
"block",
"if",
"len",
"(",
"mat",
".",
"shape",
")",
"==",
"3",
":",
"return",
"mat",
"[",
"pt",
"[",
"0",
"]",
":",
"pt",
"[",
"0",
... | Retrieve a block from a 3D or 4D volume
Parameters
----------
mat: a 3D or 4D volume
block: a tuple containing block information:
- a triple containing the lowest-coordinate voxel in the block
- a triple containing the size in voxels of the block
Returns
-------
In the case of a 3D array, a 3D subarray at the block location
In the case of a 4D array, a 4D subarray at the block location,
including the entire fourth dimension. | [
"Retrieve",
"a",
"block",
"from",
"a",
"3D",
"or",
"4D",
"volume"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L221-L250 | train | 204,532 |
brainiak/brainiak | brainiak/searchlight/searchlight.py | Searchlight._split_volume | def _split_volume(self, mat, blocks):
"""Convert a volume into a list of block data
Parameters
----------
mat: A 3D or 4D array to be split
blocks: a list of tuples containing block information:
- a triple containing the top left point of the block and
- a triple containing the size in voxels of the block
Returns
-------
A list of the subarrays corresponding to each block
"""
return [self._get_block_data(mat, block) for block in blocks] | python | def _split_volume(self, mat, blocks):
"""Convert a volume into a list of block data
Parameters
----------
mat: A 3D or 4D array to be split
blocks: a list of tuples containing block information:
- a triple containing the top left point of the block and
- a triple containing the size in voxels of the block
Returns
-------
A list of the subarrays corresponding to each block
"""
return [self._get_block_data(mat, block) for block in blocks] | [
"def",
"_split_volume",
"(",
"self",
",",
"mat",
",",
"blocks",
")",
":",
"return",
"[",
"self",
".",
"_get_block_data",
"(",
"mat",
",",
"block",
")",
"for",
"block",
"in",
"blocks",
"]"
] | Convert a volume into a list of block data
Parameters
----------
mat: A 3D or 4D array to be split
blocks: a list of tuples containing block information:
- a triple containing the top left point of the block and
- a triple containing the size in voxels of the block
Returns
-------
A list of the subarrays corresponding to each block | [
"Convert",
"a",
"volume",
"into",
"a",
"list",
"of",
"block",
"data"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L252-L271 | train | 204,533 |
brainiak/brainiak | brainiak/searchlight/searchlight.py | Searchlight._scatter_list | def _scatter_list(self, data, owner):
"""Distribute a list from one rank to other ranks in a cyclic manner
Parameters
----------
data: list of pickle-able data
owner: rank that owns the data
Returns
-------
A list containing the data in a cyclic layout across ranks
"""
rank = self.comm.rank
size = self.comm.size
subject_submatrices = []
nblocks = self.comm.bcast(len(data)
if rank == owner else None, root=owner)
# For each submatrix
for idx in range(0, nblocks, size):
padded = None
extra = max(0, idx+size - nblocks)
# Pad with "None" so scatter can go to all processes
if data is not None:
padded = data[idx:idx+size]
if extra > 0:
padded = padded + [None]*extra
# Scatter submatrices to all processes
mytrans = self.comm.scatter(padded, root=owner)
# Contribute submatrix to subject list
if mytrans is not None:
subject_submatrices += [mytrans]
return subject_submatrices | python | def _scatter_list(self, data, owner):
"""Distribute a list from one rank to other ranks in a cyclic manner
Parameters
----------
data: list of pickle-able data
owner: rank that owns the data
Returns
-------
A list containing the data in a cyclic layout across ranks
"""
rank = self.comm.rank
size = self.comm.size
subject_submatrices = []
nblocks = self.comm.bcast(len(data)
if rank == owner else None, root=owner)
# For each submatrix
for idx in range(0, nblocks, size):
padded = None
extra = max(0, idx+size - nblocks)
# Pad with "None" so scatter can go to all processes
if data is not None:
padded = data[idx:idx+size]
if extra > 0:
padded = padded + [None]*extra
# Scatter submatrices to all processes
mytrans = self.comm.scatter(padded, root=owner)
# Contribute submatrix to subject list
if mytrans is not None:
subject_submatrices += [mytrans]
return subject_submatrices | [
"def",
"_scatter_list",
"(",
"self",
",",
"data",
",",
"owner",
")",
":",
"rank",
"=",
"self",
".",
"comm",
".",
"rank",
"size",
"=",
"self",
".",
"comm",
".",
"size",
"subject_submatrices",
"=",
"[",
"]",
"nblocks",
"=",
"self",
".",
"comm",
".",
... | Distribute a list from one rank to other ranks in a cyclic manner
Parameters
----------
data: list of pickle-able data
owner: rank that owns the data
Returns
-------
A list containing the data in a cyclic layout across ranks | [
"Distribute",
"a",
"list",
"from",
"one",
"rank",
"to",
"other",
"ranks",
"in",
"a",
"cyclic",
"manner"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L273-L314 | train | 204,534 |
brainiak/brainiak | brainiak/searchlight/searchlight.py | Searchlight.distribute | def distribute(self, subjects, mask):
"""Distribute data to MPI ranks
Parameters
----------
subjects : list of 4D arrays containing data for one or more subjects.
Each entry of the list must be present on at most one rank,
and the other ranks contain a "None" at this list location.
For example, for 3 ranks you may lay out the data in the
following manner:
Rank 0: [Subj0, None, None]
Rank 1: [None, Subj1, None]
Rank 2: [None, None, Subj2]
Or alternatively, you may lay out the data in this manner:
Rank 0: [Subj0, Subj1, Subj2]
Rank 1: [None, None, None]
Rank 2: [None, None, None]
mask: 3D array with "True" entries at active vertices
"""
if mask.ndim != 3:
raise ValueError('mask should be a 3D array')
for (idx, subj) in enumerate(subjects):
if subj is not None:
if subj.ndim != 4:
raise ValueError('subjects[{}] must be 4D'.format(idx))
self.mask = mask
rank = self.comm.rank
# Get/set ownership
ownership = self._get_ownership(subjects)
all_blocks = self._get_blocks(mask) if rank == 0 else None
all_blocks = self.comm.bcast(all_blocks)
# Divide data and mask
splitsubj = [self._split_volume(s, all_blocks)
if s is not None else None
for s in subjects]
submasks = self._split_volume(mask, all_blocks)
# Scatter points, data, and mask
self.blocks = self._scatter_list(all_blocks, 0)
self.submasks = self._scatter_list(submasks, 0)
self.subproblems = [self._scatter_list(s, ownership[s_idx])
for (s_idx, s) in enumerate(splitsubj)] | python | def distribute(self, subjects, mask):
"""Distribute data to MPI ranks
Parameters
----------
subjects : list of 4D arrays containing data for one or more subjects.
Each entry of the list must be present on at most one rank,
and the other ranks contain a "None" at this list location.
For example, for 3 ranks you may lay out the data in the
following manner:
Rank 0: [Subj0, None, None]
Rank 1: [None, Subj1, None]
Rank 2: [None, None, Subj2]
Or alternatively, you may lay out the data in this manner:
Rank 0: [Subj0, Subj1, Subj2]
Rank 1: [None, None, None]
Rank 2: [None, None, None]
mask: 3D array with "True" entries at active vertices
"""
if mask.ndim != 3:
raise ValueError('mask should be a 3D array')
for (idx, subj) in enumerate(subjects):
if subj is not None:
if subj.ndim != 4:
raise ValueError('subjects[{}] must be 4D'.format(idx))
self.mask = mask
rank = self.comm.rank
# Get/set ownership
ownership = self._get_ownership(subjects)
all_blocks = self._get_blocks(mask) if rank == 0 else None
all_blocks = self.comm.bcast(all_blocks)
# Divide data and mask
splitsubj = [self._split_volume(s, all_blocks)
if s is not None else None
for s in subjects]
submasks = self._split_volume(mask, all_blocks)
# Scatter points, data, and mask
self.blocks = self._scatter_list(all_blocks, 0)
self.submasks = self._scatter_list(submasks, 0)
self.subproblems = [self._scatter_list(s, ownership[s_idx])
for (s_idx, s) in enumerate(splitsubj)] | [
"def",
"distribute",
"(",
"self",
",",
"subjects",
",",
"mask",
")",
":",
"if",
"mask",
".",
"ndim",
"!=",
"3",
":",
"raise",
"ValueError",
"(",
"'mask should be a 3D array'",
")",
"for",
"(",
"idx",
",",
"subj",
")",
"in",
"enumerate",
"(",
"subjects",
... | Distribute data to MPI ranks
Parameters
----------
subjects : list of 4D arrays containing data for one or more subjects.
Each entry of the list must be present on at most one rank,
and the other ranks contain a "None" at this list location.
For example, for 3 ranks you may lay out the data in the
following manner:
Rank 0: [Subj0, None, None]
Rank 1: [None, Subj1, None]
Rank 2: [None, None, Subj2]
Or alternatively, you may lay out the data in this manner:
Rank 0: [Subj0, Subj1, Subj2]
Rank 1: [None, None, None]
Rank 2: [None, None, None]
mask: 3D array with "True" entries at active vertices | [
"Distribute",
"data",
"to",
"MPI",
"ranks"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L316-L368 | train | 204,535 |
brainiak/brainiak | brainiak/searchlight/searchlight.py | Searchlight.run_block_function | def run_block_function(self, block_fn, extra_block_fn_params=None,
pool_size=None):
"""Perform a function for each block in a volume.
Parameters
----------
block_fn: function to apply to each block:
Parameters
data: list of 4D arrays containing subset of subject data,
which is padded with sl_rad voxels.
mask: 3D array containing subset of mask data
sl_rad: radius, in voxels, of the sphere inscribed in the
cube
bcast_var: shared data which is broadcast to all processes
extra_params: extra parameters
Returns
3D array which is the same size as the mask
input with padding removed
extra_block_fn_params: tuple
Extra parameters to pass to the block function
pool_size: int
Maximum number of processes running the block function in parallel.
If None, number of available hardware threads, considering cpusets
restrictions.
"""
rank = self.comm.rank
results = []
usable_cpus = usable_cpu_count()
if pool_size is None:
processes = usable_cpus
else:
processes = min(pool_size, usable_cpus)
if processes > 1:
with Pool(processes) as pool:
for idx, block in enumerate(self.blocks):
result = pool.apply_async(
block_fn,
([subproblem[idx] for subproblem in self.subproblems],
self.submasks[idx],
self.sl_rad,
self.bcast_var,
extra_block_fn_params))
results.append((block[0], result))
local_outputs = [(result[0], result[1].get())
for result in results]
else:
# If we only are using one CPU core, no need to create a Pool,
# cause an underlying fork(), and send the data to that process.
# Just do it here in serial. This will save copying the memory
# and will stop a fork() which can cause problems in some MPI
# implementations.
for idx, block in enumerate(self.blocks):
subprob_list = [subproblem[idx]
for subproblem in self.subproblems]
result = block_fn(
subprob_list,
self.submasks[idx],
self.sl_rad,
self.bcast_var,
extra_block_fn_params)
results.append((block[0], result))
local_outputs = [(result[0], result[1]) for result in results]
# Collect results
global_outputs = self.comm.gather(local_outputs)
# Coalesce results
outmat = np.empty(self.mask.shape, dtype=np.object)
if rank == 0:
for go_rank in global_outputs:
for (pt, mat) in go_rank:
coords = np.s_[
pt[0]+self.sl_rad:pt[0]+self.sl_rad+mat.shape[0],
pt[1]+self.sl_rad:pt[1]+self.sl_rad+mat.shape[1],
pt[2]+self.sl_rad:pt[2]+self.sl_rad+mat.shape[2]
]
outmat[coords] = mat
return outmat | python | def run_block_function(self, block_fn, extra_block_fn_params=None,
pool_size=None):
"""Perform a function for each block in a volume.
Parameters
----------
block_fn: function to apply to each block:
Parameters
data: list of 4D arrays containing subset of subject data,
which is padded with sl_rad voxels.
mask: 3D array containing subset of mask data
sl_rad: radius, in voxels, of the sphere inscribed in the
cube
bcast_var: shared data which is broadcast to all processes
extra_params: extra parameters
Returns
3D array which is the same size as the mask
input with padding removed
extra_block_fn_params: tuple
Extra parameters to pass to the block function
pool_size: int
Maximum number of processes running the block function in parallel.
If None, number of available hardware threads, considering cpusets
restrictions.
"""
rank = self.comm.rank
results = []
usable_cpus = usable_cpu_count()
if pool_size is None:
processes = usable_cpus
else:
processes = min(pool_size, usable_cpus)
if processes > 1:
with Pool(processes) as pool:
for idx, block in enumerate(self.blocks):
result = pool.apply_async(
block_fn,
([subproblem[idx] for subproblem in self.subproblems],
self.submasks[idx],
self.sl_rad,
self.bcast_var,
extra_block_fn_params))
results.append((block[0], result))
local_outputs = [(result[0], result[1].get())
for result in results]
else:
# If we only are using one CPU core, no need to create a Pool,
# cause an underlying fork(), and send the data to that process.
# Just do it here in serial. This will save copying the memory
# and will stop a fork() which can cause problems in some MPI
# implementations.
for idx, block in enumerate(self.blocks):
subprob_list = [subproblem[idx]
for subproblem in self.subproblems]
result = block_fn(
subprob_list,
self.submasks[idx],
self.sl_rad,
self.bcast_var,
extra_block_fn_params)
results.append((block[0], result))
local_outputs = [(result[0], result[1]) for result in results]
# Collect results
global_outputs = self.comm.gather(local_outputs)
# Coalesce results
outmat = np.empty(self.mask.shape, dtype=np.object)
if rank == 0:
for go_rank in global_outputs:
for (pt, mat) in go_rank:
coords = np.s_[
pt[0]+self.sl_rad:pt[0]+self.sl_rad+mat.shape[0],
pt[1]+self.sl_rad:pt[1]+self.sl_rad+mat.shape[1],
pt[2]+self.sl_rad:pt[2]+self.sl_rad+mat.shape[2]
]
outmat[coords] = mat
return outmat | [
"def",
"run_block_function",
"(",
"self",
",",
"block_fn",
",",
"extra_block_fn_params",
"=",
"None",
",",
"pool_size",
"=",
"None",
")",
":",
"rank",
"=",
"self",
".",
"comm",
".",
"rank",
"results",
"=",
"[",
"]",
"usable_cpus",
"=",
"usable_cpu_count",
... | Perform a function for each block in a volume.
Parameters
----------
block_fn: function to apply to each block:
Parameters
data: list of 4D arrays containing subset of subject data,
which is padded with sl_rad voxels.
mask: 3D array containing subset of mask data
sl_rad: radius, in voxels, of the sphere inscribed in the
cube
bcast_var: shared data which is broadcast to all processes
extra_params: extra parameters
Returns
3D array which is the same size as the mask
input with padding removed
extra_block_fn_params: tuple
Extra parameters to pass to the block function
pool_size: int
Maximum number of processes running the block function in parallel.
If None, number of available hardware threads, considering cpusets
restrictions. | [
"Perform",
"a",
"function",
"for",
"each",
"block",
"in",
"a",
"volume",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L382-L473 | train | 204,536 |
brainiak/brainiak | brainiak/searchlight/searchlight.py | Searchlight.run_searchlight | def run_searchlight(self, voxel_fn, pool_size=None):
"""Perform a function at each voxel which is set to True in the
user-provided mask. The mask passed to the searchlight function will be
further masked by the user-provided searchlight shape.
Parameters
----------
voxel_fn: function to apply at each voxel
Must be `serializeable using pickle
<https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled>`_.
Parameters
subj: list of 4D arrays containing subset of subject data
mask: 3D array containing subset of mask data
sl_rad: radius, in voxels, of the sphere inscribed in the
cube
bcast_var: shared data which is broadcast to all processes
Returns
Value of any pickle-able type
Returns
-------
A volume which is the same size as the mask, however a number of voxels
equal to the searchlight radius has been removed from each border of
the volume. This volume contains the values returned from the
searchlight function at each voxel which was set to True in the mask,
and None elsewhere.
"""
extra_block_fn_params = (voxel_fn, self.shape,
self.min_active_voxels_proportion)
block_fn_result = self.run_block_function(_singlenode_searchlight,
extra_block_fn_params,
pool_size)
return block_fn_result | python | def run_searchlight(self, voxel_fn, pool_size=None):
"""Perform a function at each voxel which is set to True in the
user-provided mask. The mask passed to the searchlight function will be
further masked by the user-provided searchlight shape.
Parameters
----------
voxel_fn: function to apply at each voxel
Must be `serializeable using pickle
<https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled>`_.
Parameters
subj: list of 4D arrays containing subset of subject data
mask: 3D array containing subset of mask data
sl_rad: radius, in voxels, of the sphere inscribed in the
cube
bcast_var: shared data which is broadcast to all processes
Returns
Value of any pickle-able type
Returns
-------
A volume which is the same size as the mask, however a number of voxels
equal to the searchlight radius has been removed from each border of
the volume. This volume contains the values returned from the
searchlight function at each voxel which was set to True in the mask,
and None elsewhere.
"""
extra_block_fn_params = (voxel_fn, self.shape,
self.min_active_voxels_proportion)
block_fn_result = self.run_block_function(_singlenode_searchlight,
extra_block_fn_params,
pool_size)
return block_fn_result | [
"def",
"run_searchlight",
"(",
"self",
",",
"voxel_fn",
",",
"pool_size",
"=",
"None",
")",
":",
"extra_block_fn_params",
"=",
"(",
"voxel_fn",
",",
"self",
".",
"shape",
",",
"self",
".",
"min_active_voxels_proportion",
")",
"block_fn_result",
"=",
"self",
".... | Perform a function at each voxel which is set to True in the
user-provided mask. The mask passed to the searchlight function will be
further masked by the user-provided searchlight shape.
Parameters
----------
voxel_fn: function to apply at each voxel
Must be `serializeable using pickle
<https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled>`_.
Parameters
subj: list of 4D arrays containing subset of subject data
mask: 3D array containing subset of mask data
sl_rad: radius, in voxels, of the sphere inscribed in the
cube
bcast_var: shared data which is broadcast to all processes
Returns
Value of any pickle-able type
Returns
-------
A volume which is the same size as the mask, however a number of voxels
equal to the searchlight radius has been removed from each border of
the volume. This volume contains the values returned from the
searchlight function at each voxel which was set to True in the mask,
and None elsewhere. | [
"Perform",
"a",
"function",
"at",
"each",
"voxel",
"which",
"is",
"set",
"to",
"True",
"in",
"the",
"user",
"-",
"provided",
"mask",
".",
"The",
"mask",
"passed",
"to",
"the",
"searchlight",
"function",
"will",
"be",
"further",
"masked",
"by",
"the",
"us... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/searchlight/searchlight.py#L475-L520 | train | 204,537 |
brainiak/brainiak | brainiak/fcma/util.py | _normalize_for_correlation | def _normalize_for_correlation(data, axis, return_nans=False):
"""normalize the data before computing correlation
The data will be z-scored and divided by sqrt(n)
along the assigned axis
Parameters
----------
data: 2D array
axis: int
specify which dimension of the data should be normalized
return_nans: bool, default:False
If False, return zeros for NaNs; if True, return NaNs
Returns
-------
data: 2D array
the normalized data
"""
shape = data.shape
data = zscore(data, axis=axis, ddof=0)
# if zscore fails (standard deviation is zero),
# optionally set all values to be zero
if not return_nans:
data = np.nan_to_num(data)
data = data / math.sqrt(shape[axis])
return data | python | def _normalize_for_correlation(data, axis, return_nans=False):
"""normalize the data before computing correlation
The data will be z-scored and divided by sqrt(n)
along the assigned axis
Parameters
----------
data: 2D array
axis: int
specify which dimension of the data should be normalized
return_nans: bool, default:False
If False, return zeros for NaNs; if True, return NaNs
Returns
-------
data: 2D array
the normalized data
"""
shape = data.shape
data = zscore(data, axis=axis, ddof=0)
# if zscore fails (standard deviation is zero),
# optionally set all values to be zero
if not return_nans:
data = np.nan_to_num(data)
data = data / math.sqrt(shape[axis])
return data | [
"def",
"_normalize_for_correlation",
"(",
"data",
",",
"axis",
",",
"return_nans",
"=",
"False",
")",
":",
"shape",
"=",
"data",
".",
"shape",
"data",
"=",
"zscore",
"(",
"data",
",",
"axis",
"=",
"axis",
",",
"ddof",
"=",
"0",
")",
"# if zscore fails (s... | normalize the data before computing correlation
The data will be z-scored and divided by sqrt(n)
along the assigned axis
Parameters
----------
data: 2D array
axis: int
specify which dimension of the data should be normalized
return_nans: bool, default:False
If False, return zeros for NaNs; if True, return NaNs
Returns
-------
data: 2D array
the normalized data | [
"normalize",
"the",
"data",
"before",
"computing",
"correlation"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/util.py#L32-L60 | train | 204,538 |
brainiak/brainiak | brainiak/fcma/util.py | compute_correlation | def compute_correlation(matrix1, matrix2, return_nans=False):
"""compute correlation between two sets of variables
Correlate the rows of matrix1 with the rows of matrix2.
If matrix1 == matrix2, it is auto-correlation computation
resulting in a symmetric correlation matrix.
The number of columns MUST agree between set1 and set2.
The correlation being computed here is
the Pearson's correlation coefficient, which can be expressed as
.. math:: corr(X, Y) = \\frac{cov(X, Y)}{\\sigma_X\\sigma_Y}
where cov(X, Y) is the covariance of variable X and Y, and
.. math:: \\sigma_X
is the standard deviation of variable X
Reducing the correlation computation to matrix multiplication
and using BLAS GEMM API wrapped by Scipy can speedup the numpy built-in
correlation computation (numpy.corrcoef) by one order of magnitude
.. math::
corr(X, Y)
&= \\frac{\\sum\\limits_{i=1}^n (x_i-\\bar{x})(y_i-\\bar{y})}{(n-1)
\\sqrt{\\frac{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}}{n-1}}
\\sqrt{\\frac{\\sum\\limits_{j=1}^{n} y_j^2-n\\bar{y}}{n-1}}}\\\\
&= \\sum\\limits_{i=1}^n(\\frac{(x_i-\\bar{x})}
{\\sqrt{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}}}
\\frac{(y_i-\\bar{y})}{\\sqrt{\\sum\\limits_{j=1}^n y_j^2-n\\bar{y}}})
By default (return_nans=False), returns zeros for vectors with NaNs.
If return_nans=True, convert zeros to NaNs (np.nan) in output.
Parameters
----------
matrix1: 2D array in shape [r1, c]
MUST be continuous and row-major
matrix2: 2D array in shape [r2, c]
MUST be continuous and row-major
return_nans: bool, default:False
If False, return zeros for NaNs; if True, return NaNs
Returns
-------
corr_data: 2D array in shape [r1, r2]
continuous and row-major in np.float32
"""
matrix1 = matrix1.astype(np.float32)
matrix2 = matrix2.astype(np.float32)
[r1, d1] = matrix1.shape
[r2, d2] = matrix2.shape
if d1 != d2:
raise ValueError('Dimension discrepancy')
# preprocess two components
matrix1 = _normalize_for_correlation(matrix1, 1,
return_nans=return_nans)
matrix2 = _normalize_for_correlation(matrix2, 1,
return_nans=return_nans)
corr_data = np.empty((r1, r2), dtype=np.float32, order='C')
# blas routine is column-major
blas.compute_single_matrix_multiplication('T', 'N',
r2, r1, d1,
1.0,
matrix2, d2,
matrix1, d1,
0.0,
corr_data,
r2)
return corr_data | python | def compute_correlation(matrix1, matrix2, return_nans=False):
"""compute correlation between two sets of variables
Correlate the rows of matrix1 with the rows of matrix2.
If matrix1 == matrix2, it is auto-correlation computation
resulting in a symmetric correlation matrix.
The number of columns MUST agree between set1 and set2.
The correlation being computed here is
the Pearson's correlation coefficient, which can be expressed as
.. math:: corr(X, Y) = \\frac{cov(X, Y)}{\\sigma_X\\sigma_Y}
where cov(X, Y) is the covariance of variable X and Y, and
.. math:: \\sigma_X
is the standard deviation of variable X
Reducing the correlation computation to matrix multiplication
and using BLAS GEMM API wrapped by Scipy can speedup the numpy built-in
correlation computation (numpy.corrcoef) by one order of magnitude
.. math::
corr(X, Y)
&= \\frac{\\sum\\limits_{i=1}^n (x_i-\\bar{x})(y_i-\\bar{y})}{(n-1)
\\sqrt{\\frac{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}}{n-1}}
\\sqrt{\\frac{\\sum\\limits_{j=1}^{n} y_j^2-n\\bar{y}}{n-1}}}\\\\
&= \\sum\\limits_{i=1}^n(\\frac{(x_i-\\bar{x})}
{\\sqrt{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}}}
\\frac{(y_i-\\bar{y})}{\\sqrt{\\sum\\limits_{j=1}^n y_j^2-n\\bar{y}}})
By default (return_nans=False), returns zeros for vectors with NaNs.
If return_nans=True, convert zeros to NaNs (np.nan) in output.
Parameters
----------
matrix1: 2D array in shape [r1, c]
MUST be continuous and row-major
matrix2: 2D array in shape [r2, c]
MUST be continuous and row-major
return_nans: bool, default:False
If False, return zeros for NaNs; if True, return NaNs
Returns
-------
corr_data: 2D array in shape [r1, r2]
continuous and row-major in np.float32
"""
matrix1 = matrix1.astype(np.float32)
matrix2 = matrix2.astype(np.float32)
[r1, d1] = matrix1.shape
[r2, d2] = matrix2.shape
if d1 != d2:
raise ValueError('Dimension discrepancy')
# preprocess two components
matrix1 = _normalize_for_correlation(matrix1, 1,
return_nans=return_nans)
matrix2 = _normalize_for_correlation(matrix2, 1,
return_nans=return_nans)
corr_data = np.empty((r1, r2), dtype=np.float32, order='C')
# blas routine is column-major
blas.compute_single_matrix_multiplication('T', 'N',
r2, r1, d1,
1.0,
matrix2, d2,
matrix1, d1,
0.0,
corr_data,
r2)
return corr_data | [
"def",
"compute_correlation",
"(",
"matrix1",
",",
"matrix2",
",",
"return_nans",
"=",
"False",
")",
":",
"matrix1",
"=",
"matrix1",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"matrix2",
"=",
"matrix2",
".",
"astype",
"(",
"np",
".",
"float32",
")",
... | compute correlation between two sets of variables
Correlate the rows of matrix1 with the rows of matrix2.
If matrix1 == matrix2, it is auto-correlation computation
resulting in a symmetric correlation matrix.
The number of columns MUST agree between set1 and set2.
The correlation being computed here is
the Pearson's correlation coefficient, which can be expressed as
.. math:: corr(X, Y) = \\frac{cov(X, Y)}{\\sigma_X\\sigma_Y}
where cov(X, Y) is the covariance of variable X and Y, and
.. math:: \\sigma_X
is the standard deviation of variable X
Reducing the correlation computation to matrix multiplication
and using BLAS GEMM API wrapped by Scipy can speedup the numpy built-in
correlation computation (numpy.corrcoef) by one order of magnitude
.. math::
corr(X, Y)
&= \\frac{\\sum\\limits_{i=1}^n (x_i-\\bar{x})(y_i-\\bar{y})}{(n-1)
\\sqrt{\\frac{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}}{n-1}}
\\sqrt{\\frac{\\sum\\limits_{j=1}^{n} y_j^2-n\\bar{y}}{n-1}}}\\\\
&= \\sum\\limits_{i=1}^n(\\frac{(x_i-\\bar{x})}
{\\sqrt{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}}}
\\frac{(y_i-\\bar{y})}{\\sqrt{\\sum\\limits_{j=1}^n y_j^2-n\\bar{y}}})
By default (return_nans=False), returns zeros for vectors with NaNs.
If return_nans=True, convert zeros to NaNs (np.nan) in output.
Parameters
----------
matrix1: 2D array in shape [r1, c]
MUST be continuous and row-major
matrix2: 2D array in shape [r2, c]
MUST be continuous and row-major
return_nans: bool, default:False
If False, return zeros for NaNs; if True, return NaNs
Returns
-------
corr_data: 2D array in shape [r1, r2]
continuous and row-major in np.float32 | [
"compute",
"correlation",
"between",
"two",
"sets",
"of",
"variables"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/util.py#L63-L134 | train | 204,539 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | _zscore | def _zscore(a):
""" Calculating z-score of data on the first axis.
If the numbers in any column are all equal, scipy.stats.zscore
will return NaN for this column. We shall correct them all to
be zeros.
Parameters
----------
a: numpy array
Returns
-------
zscore: numpy array
The z-scores of input "a", with any columns including non-finite
numbers replaced by all zeros.
"""
assert a.ndim > 1, 'a must have more than one dimensions'
zscore = scipy.stats.zscore(a, axis=0)
zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0
return zscore | python | def _zscore(a):
""" Calculating z-score of data on the first axis.
If the numbers in any column are all equal, scipy.stats.zscore
will return NaN for this column. We shall correct them all to
be zeros.
Parameters
----------
a: numpy array
Returns
-------
zscore: numpy array
The z-scores of input "a", with any columns including non-finite
numbers replaced by all zeros.
"""
assert a.ndim > 1, 'a must have more than one dimensions'
zscore = scipy.stats.zscore(a, axis=0)
zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0
return zscore | [
"def",
"_zscore",
"(",
"a",
")",
":",
"assert",
"a",
".",
"ndim",
">",
"1",
",",
"'a must have more than one dimensions'",
"zscore",
"=",
"scipy",
".",
"stats",
".",
"zscore",
"(",
"a",
",",
"axis",
"=",
"0",
")",
"zscore",
"[",
":",
",",
"np",
".",
... | Calculating z-score of data on the first axis.
If the numbers in any column are all equal, scipy.stats.zscore
will return NaN for this column. We shall correct them all to
be zeros.
Parameters
----------
a: numpy array
Returns
-------
zscore: numpy array
The z-scores of input "a", with any columns including non-finite
numbers replaced by all zeros. | [
"Calculating",
"z",
"-",
"score",
"of",
"data",
"on",
"the",
"first",
"axis",
".",
"If",
"the",
"numbers",
"in",
"any",
"column",
"are",
"all",
"equal",
"scipy",
".",
"stats",
".",
"zscore",
"will",
"return",
"NaN",
"for",
"this",
"column",
".",
"We",
... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L201-L220 | train | 204,540 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | BRSA.score | def score(self, X, design, scan_onsets=None):
""" Use the model and parameters estimated by fit function
from some data of a participant to evaluate the log
likelihood of some new data of the same participant.
Design matrix of the same set of experimental
conditions in the testing data should be provided, with each
column corresponding to the same condition as that column
in the design matrix of the training data.
Unknown nuisance time series will be marginalized, assuming
they follow the same spatial pattern as in the training
data. The hypothetical response captured by the design matrix
will be subtracted from data before the marginalization
when evaluating the log likelihood. For null model,
nothing will be subtracted before marginalization.
There is a difference between the form of likelihood function
used in fit() and score(). In fit(), the response amplitude
beta to design matrix X and the modulation beta0 by nuisance
regressor X0 are both marginalized, with X provided and X0
estimated from data. In score(), posterior estimation of
beta and beta0 from the fitting step are assumed unchanged
to testing data and X0 is marginalized.
The logic underlying score() is to transfer
as much as what we can learn from training data when
calculating a likelihood score for testing data.
If you z-scored your data during fit step, you should
z-score them for score function as well. If you did not
z-score in fitting, you should not z-score here either.
Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
fMRI data of new data of the same subject. The voxels should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling transform()
design : numpy array, shape=[time_points, conditions]
Design matrix expressing the hypothetical response of
the task conditions in data X.
scan_onsets : numpy array, shape=[number of runs].
A list of indices corresponding to the onsets of
scans in the data X. If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ll: float.
The log likelihood of the new data based on the model and its
parameters fit to the training data.
ll_null: float.
The log likelihood of the new data based on a null model
which assumes the same as the full model for everything
except for that there is no response to any of the
task conditions.
"""
assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \
'The shape of X is not consistent with the shape of data '\
'used in the fitting step. They should have the same number '\
'of voxels'
assert scan_onsets is None or (scan_onsets.ndim == 1 and
0 in scan_onsets), \
'scan_onsets should either be None or an array of indices '\
'If it is given, it should include at least 0'
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
else:
scan_onsets = np.int32(scan_onsets)
ll = self._score(Y=X, design=design, beta=self.beta_,
scan_onsets=scan_onsets, beta0=self.beta0_,
rho_e=self.rho_, sigma_e=self.sigma_,
rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_)
ll_null = self._score(Y=X, design=None, beta=None,
scan_onsets=scan_onsets, beta0=self.beta0_,
rho_e=self.rho_, sigma_e=self.sigma_,
rho_X0=self._rho_X0_,
sigma2_X0=self._sigma2_X0_)
return ll, ll_null | python | def score(self, X, design, scan_onsets=None):
""" Use the model and parameters estimated by fit function
from some data of a participant to evaluate the log
likelihood of some new data of the same participant.
Design matrix of the same set of experimental
conditions in the testing data should be provided, with each
column corresponding to the same condition as that column
in the design matrix of the training data.
Unknown nuisance time series will be marginalized, assuming
they follow the same spatial pattern as in the training
data. The hypothetical response captured by the design matrix
will be subtracted from data before the marginalization
when evaluating the log likelihood. For null model,
nothing will be subtracted before marginalization.
There is a difference between the form of likelihood function
used in fit() and score(). In fit(), the response amplitude
beta to design matrix X and the modulation beta0 by nuisance
regressor X0 are both marginalized, with X provided and X0
estimated from data. In score(), posterior estimation of
beta and beta0 from the fitting step are assumed unchanged
to testing data and X0 is marginalized.
The logic underlying score() is to transfer
as much as what we can learn from training data when
calculating a likelihood score for testing data.
If you z-scored your data during fit step, you should
z-score them for score function as well. If you did not
z-score in fitting, you should not z-score here either.
Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
fMRI data of new data of the same subject. The voxels should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling transform()
design : numpy array, shape=[time_points, conditions]
Design matrix expressing the hypothetical response of
the task conditions in data X.
scan_onsets : numpy array, shape=[number of runs].
A list of indices corresponding to the onsets of
scans in the data X. If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ll: float.
The log likelihood of the new data based on the model and its
parameters fit to the training data.
ll_null: float.
The log likelihood of the new data based on a null model
which assumes the same as the full model for everything
except for that there is no response to any of the
task conditions.
"""
assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \
'The shape of X is not consistent with the shape of data '\
'used in the fitting step. They should have the same number '\
'of voxels'
assert scan_onsets is None or (scan_onsets.ndim == 1 and
0 in scan_onsets), \
'scan_onsets should either be None or an array of indices '\
'If it is given, it should include at least 0'
if scan_onsets is None:
scan_onsets = np.array([0], dtype=int)
else:
scan_onsets = np.int32(scan_onsets)
ll = self._score(Y=X, design=design, beta=self.beta_,
scan_onsets=scan_onsets, beta0=self.beta0_,
rho_e=self.rho_, sigma_e=self.sigma_,
rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_)
ll_null = self._score(Y=X, design=None, beta=None,
scan_onsets=scan_onsets, beta0=self.beta0_,
rho_e=self.rho_, sigma_e=self.sigma_,
rho_X0=self._rho_X0_,
sigma2_X0=self._sigma2_X0_)
return ll, ll_null | [
"def",
"score",
"(",
"self",
",",
"X",
",",
"design",
",",
"scan_onsets",
"=",
"None",
")",
":",
"assert",
"X",
".",
"ndim",
"==",
"2",
"and",
"X",
".",
"shape",
"[",
"1",
"]",
"==",
"self",
".",
"beta_",
".",
"shape",
"[",
"1",
"]",
",",
"'T... | Use the model and parameters estimated by fit function
from some data of a participant to evaluate the log
likelihood of some new data of the same participant.
Design matrix of the same set of experimental
conditions in the testing data should be provided, with each
column corresponding to the same condition as that column
in the design matrix of the training data.
Unknown nuisance time series will be marginalized, assuming
they follow the same spatial pattern as in the training
data. The hypothetical response captured by the design matrix
will be subtracted from data before the marginalization
when evaluating the log likelihood. For null model,
nothing will be subtracted before marginalization.
There is a difference between the form of likelihood function
used in fit() and score(). In fit(), the response amplitude
beta to design matrix X and the modulation beta0 by nuisance
regressor X0 are both marginalized, with X provided and X0
estimated from data. In score(), posterior estimation of
beta and beta0 from the fitting step are assumed unchanged
to testing data and X0 is marginalized.
The logic underlying score() is to transfer
as much as what we can learn from training data when
calculating a likelihood score for testing data.
If you z-scored your data during fit step, you should
z-score them for score function as well. If you did not
z-score in fitting, you should not z-score here either.
Parameters
----------
X : numpy arrays, shape=[time_points, voxels]
fMRI data of new data of the same subject. The voxels should
match those used in the fit() function. If data are z-scored
(recommended) when fitting the model, data should be z-scored
as well when calling transform()
design : numpy array, shape=[time_points, conditions]
Design matrix expressing the hypothetical response of
the task conditions in data X.
scan_onsets : numpy array, shape=[number of runs].
A list of indices corresponding to the onsets of
scans in the data X. If not provided, data will be assumed
to be acquired in a continuous scan.
Returns
-------
ll: float.
The log likelihood of the new data based on the model and its
parameters fit to the training data.
ll_null: float.
The log likelihood of the new data based on a null model
which assumes the same as the full model for everything
except for that there is no response to any of the
task conditions. | [
"Use",
"the",
"model",
"and",
"parameters",
"estimated",
"by",
"fit",
"function",
"from",
"some",
"data",
"of",
"a",
"participant",
"to",
"evaluate",
"the",
"log",
"likelihood",
"of",
"some",
"new",
"data",
"of",
"the",
"same",
"participant",
".",
"Design",
... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L855-L932 | train | 204,541 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | BRSA._prepare_data_XY | def _prepare_data_XY(self, X, Y, D, F):
"""Prepares different forms of products of design matrix X
and data Y, or between themselves.
These products are re-used a lot during fitting.
So we pre-calculate them. Because these are reused,
it is in principle possible to update the fitting
as new data come in, by just incrementally adding
the products of new data and their corresponding parts
of design matrix to these pre-calculated terms.
"""
XTY, XTDY, XTFY = self._make_templates(D, F, X, Y)
YTY_diag = np.sum(Y * Y, axis=0)
YTDY_diag = np.sum(Y * np.dot(D, Y), axis=0)
YTFY_diag = np.sum(Y * np.dot(F, Y), axis=0)
XTX, XTDX, XTFX = self._make_templates(D, F, X, X)
return XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, \
XTDX, XTFX | python | def _prepare_data_XY(self, X, Y, D, F):
"""Prepares different forms of products of design matrix X
and data Y, or between themselves.
These products are re-used a lot during fitting.
So we pre-calculate them. Because these are reused,
it is in principle possible to update the fitting
as new data come in, by just incrementally adding
the products of new data and their corresponding parts
of design matrix to these pre-calculated terms.
"""
XTY, XTDY, XTFY = self._make_templates(D, F, X, Y)
YTY_diag = np.sum(Y * Y, axis=0)
YTDY_diag = np.sum(Y * np.dot(D, Y), axis=0)
YTFY_diag = np.sum(Y * np.dot(F, Y), axis=0)
XTX, XTDX, XTFX = self._make_templates(D, F, X, X)
return XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, \
XTDX, XTFX | [
"def",
"_prepare_data_XY",
"(",
"self",
",",
"X",
",",
"Y",
",",
"D",
",",
"F",
")",
":",
"XTY",
",",
"XTDY",
",",
"XTFY",
"=",
"self",
".",
"_make_templates",
"(",
"D",
",",
"F",
",",
"X",
",",
"Y",
")",
"YTY_diag",
"=",
"np",
".",
"sum",
"(... | Prepares different forms of products of design matrix X
and data Y, or between themselves.
These products are re-used a lot during fitting.
So we pre-calculate them. Because these are reused,
it is in principle possible to update the fitting
as new data come in, by just incrementally adding
the products of new data and their corresponding parts
of design matrix to these pre-calculated terms. | [
"Prepares",
"different",
"forms",
"of",
"products",
"of",
"design",
"matrix",
"X",
"and",
"data",
"Y",
"or",
"between",
"themselves",
".",
"These",
"products",
"are",
"re",
"-",
"used",
"a",
"lot",
"during",
"fitting",
".",
"So",
"we",
"pre",
"-",
"calcu... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L1006-L1025 | train | 204,542 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | BRSA._prepare_data_XYX0 | def _prepare_data_XYX0(self, X, Y, X_base, X_res, D, F, run_TRs,
no_DC=False):
"""Prepares different forms of products between design matrix X or
data Y or nuisance regressors X0.
These products are re-used a lot during fitting.
So we pre-calculate them.
no_DC means not inserting regressors for DC components
into nuisance regressor.
It will only take effect if X_base is not None.
"""
X_DC = self._gen_X_DC(run_TRs)
reg_sol = np.linalg.lstsq(X_DC, X)
if np.any(np.isclose(reg_sol[1], 0)):
raise ValueError('Your design matrix appears to have '
'included baseline time series.'
'Either remove them, or move them to'
' nuisance regressors.')
X_DC, X_base, idx_DC = self._merge_DC_to_base(X_DC, X_base,
no_DC)
if X_res is None:
X0 = X_base
else:
X0 = np.concatenate((X_base, X_res), axis=1)
n_X0 = X0.shape[1]
X0TX0, X0TDX0, X0TFX0 = self._make_templates(D, F, X0, X0)
XTX0, XTDX0, XTFX0 = self._make_templates(D, F, X, X0)
X0TY, X0TDY, X0TFY = self._make_templates(D, F, X0, Y)
return X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, idx_DC | python | def _prepare_data_XYX0(self, X, Y, X_base, X_res, D, F, run_TRs,
no_DC=False):
"""Prepares different forms of products between design matrix X or
data Y or nuisance regressors X0.
These products are re-used a lot during fitting.
So we pre-calculate them.
no_DC means not inserting regressors for DC components
into nuisance regressor.
It will only take effect if X_base is not None.
"""
X_DC = self._gen_X_DC(run_TRs)
reg_sol = np.linalg.lstsq(X_DC, X)
if np.any(np.isclose(reg_sol[1], 0)):
raise ValueError('Your design matrix appears to have '
'included baseline time series.'
'Either remove them, or move them to'
' nuisance regressors.')
X_DC, X_base, idx_DC = self._merge_DC_to_base(X_DC, X_base,
no_DC)
if X_res is None:
X0 = X_base
else:
X0 = np.concatenate((X_base, X_res), axis=1)
n_X0 = X0.shape[1]
X0TX0, X0TDX0, X0TFX0 = self._make_templates(D, F, X0, X0)
XTX0, XTDX0, XTFX0 = self._make_templates(D, F, X, X0)
X0TY, X0TDY, X0TFY = self._make_templates(D, F, X0, Y)
return X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \
X0TY, X0TDY, X0TFY, X0, X_base, n_X0, idx_DC | [
"def",
"_prepare_data_XYX0",
"(",
"self",
",",
"X",
",",
"Y",
",",
"X_base",
",",
"X_res",
",",
"D",
",",
"F",
",",
"run_TRs",
",",
"no_DC",
"=",
"False",
")",
":",
"X_DC",
"=",
"self",
".",
"_gen_X_DC",
"(",
"run_TRs",
")",
"reg_sol",
"=",
"np",
... | Prepares different forms of products between design matrix X or
data Y or nuisance regressors X0.
These products are re-used a lot during fitting.
So we pre-calculate them.
no_DC means not inserting regressors for DC components
into nuisance regressor.
It will only take effect if X_base is not None. | [
"Prepares",
"different",
"forms",
"of",
"products",
"between",
"design",
"matrix",
"X",
"or",
"data",
"Y",
"or",
"nuisance",
"regressors",
"X0",
".",
"These",
"products",
"are",
"re",
"-",
"used",
"a",
"lot",
"during",
"fitting",
".",
"So",
"we",
"pre",
... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L1043-L1072 | train | 204,543 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | BRSA._merge_DC_to_base | def _merge_DC_to_base(self, X_DC, X_base, no_DC):
""" Merge DC components X_DC to the baseline time series
X_base (By baseline, this means any fixed nuisance
regressors not updated during fitting, including DC
components and any nuisance regressors provided by
the user.
X_DC is always in the first few columns of X_base.
"""
if X_base is not None:
reg_sol = np.linalg.lstsq(X_DC, X_base)
if not no_DC:
if not np.any(np.isclose(reg_sol[1], 0)):
# No columns in X_base can be explained by the
# baseline regressors. So we insert them.
X_base = np.concatenate((X_DC, X_base), axis=1)
idx_DC = np.arange(0, X_DC.shape[1])
else:
logger.warning('Provided regressors for uninteresting '
'time series already include baseline. '
'No additional baseline is inserted.')
idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
else:
idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
else:
# If a set of regressors for non-interested signals is not
# provided, then we simply include one baseline for each run.
X_base = X_DC
idx_DC = np.arange(0, X_base.shape[1])
logger.info('You did not provide time series of no interest '
'such as DC component. Trivial regressors of'
' DC component are included for further modeling.'
' The final covariance matrix won''t '
'reflect these components.')
return X_DC, X_base, idx_DC | python | def _merge_DC_to_base(self, X_DC, X_base, no_DC):
""" Merge DC components X_DC to the baseline time series
X_base (By baseline, this means any fixed nuisance
regressors not updated during fitting, including DC
components and any nuisance regressors provided by
the user.
X_DC is always in the first few columns of X_base.
"""
if X_base is not None:
reg_sol = np.linalg.lstsq(X_DC, X_base)
if not no_DC:
if not np.any(np.isclose(reg_sol[1], 0)):
# No columns in X_base can be explained by the
# baseline regressors. So we insert them.
X_base = np.concatenate((X_DC, X_base), axis=1)
idx_DC = np.arange(0, X_DC.shape[1])
else:
logger.warning('Provided regressors for uninteresting '
'time series already include baseline. '
'No additional baseline is inserted.')
idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
else:
idx_DC = np.where(np.isclose(reg_sol[1], 0))[0]
else:
# If a set of regressors for non-interested signals is not
# provided, then we simply include one baseline for each run.
X_base = X_DC
idx_DC = np.arange(0, X_base.shape[1])
logger.info('You did not provide time series of no interest '
'such as DC component. Trivial regressors of'
' DC component are included for further modeling.'
' The final covariance matrix won''t '
'reflect these components.')
return X_DC, X_base, idx_DC | [
"def",
"_merge_DC_to_base",
"(",
"self",
",",
"X_DC",
",",
"X_base",
",",
"no_DC",
")",
":",
"if",
"X_base",
"is",
"not",
"None",
":",
"reg_sol",
"=",
"np",
".",
"linalg",
".",
"lstsq",
"(",
"X_DC",
",",
"X_base",
")",
"if",
"not",
"no_DC",
":",
"i... | Merge DC components X_DC to the baseline time series
X_base (By baseline, this means any fixed nuisance
regressors not updated during fitting, including DC
components and any nuisance regressors provided by
the user.
X_DC is always in the first few columns of X_base. | [
"Merge",
"DC",
"components",
"X_DC",
"to",
"the",
"baseline",
"time",
"series",
"X_base",
"(",
"By",
"baseline",
"this",
"means",
"any",
"fixed",
"nuisance",
"regressors",
"not",
"updated",
"during",
"fitting",
"including",
"DC",
"components",
"and",
"any",
"n... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L1074-L1107 | train | 204,544 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | BRSA._build_index_param | def _build_index_param(self, n_l, n_V, n_smooth):
""" Build dictionaries to retrieve each parameter
from the combined parameters.
"""
idx_param_sing = {'Cholesky': np.arange(n_l), 'a1': n_l}
# for simplified fitting
idx_param_fitU = {'Cholesky': np.arange(n_l),
'a1': np.arange(n_l, n_l + n_V)}
# for the likelihood function when we fit U (the shared covariance).
idx_param_fitV = {'log_SNR2': np.arange(n_V - 1),
'c_space': n_V - 1, 'c_inten': n_V,
'c_both': np.arange(n_V - 1, n_V - 1 + n_smooth)}
# for the likelihood function when we fit V (reflected by SNR of
# each voxel)
return idx_param_sing, idx_param_fitU, idx_param_fitV | python | def _build_index_param(self, n_l, n_V, n_smooth):
""" Build dictionaries to retrieve each parameter
from the combined parameters.
"""
idx_param_sing = {'Cholesky': np.arange(n_l), 'a1': n_l}
# for simplified fitting
idx_param_fitU = {'Cholesky': np.arange(n_l),
'a1': np.arange(n_l, n_l + n_V)}
# for the likelihood function when we fit U (the shared covariance).
idx_param_fitV = {'log_SNR2': np.arange(n_V - 1),
'c_space': n_V - 1, 'c_inten': n_V,
'c_both': np.arange(n_V - 1, n_V - 1 + n_smooth)}
# for the likelihood function when we fit V (reflected by SNR of
# each voxel)
return idx_param_sing, idx_param_fitU, idx_param_fitV | [
"def",
"_build_index_param",
"(",
"self",
",",
"n_l",
",",
"n_V",
",",
"n_smooth",
")",
":",
"idx_param_sing",
"=",
"{",
"'Cholesky'",
":",
"np",
".",
"arange",
"(",
"n_l",
")",
",",
"'a1'",
":",
"n_l",
"}",
"# for simplified fitting",
"idx_param_fitU",
"=... | Build dictionaries to retrieve each parameter
from the combined parameters. | [
"Build",
"dictionaries",
"to",
"retrieve",
"each",
"parameter",
"from",
"the",
"combined",
"parameters",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L1259-L1273 | train | 204,545 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | BRSA._score | def _score(self, Y, design, beta, scan_onsets, beta0, rho_e, sigma_e,
rho_X0, sigma2_X0):
""" Given the data Y, and the spatial pattern beta0
of nuisance time series, return the cross-validated score
of the data Y given all parameters of the subject estimated
during the first step.
It is assumed that the user has design matrix built for the
data Y. Both beta and beta0 are posterior expectation estimated
from training data with the estimated covariance matrix U and
SNR serving as prior. We marginalize X0 instead of fitting
it in this function because this function is for the purpose
of evaluating model no new data. We should avoid doing any
additional fitting when performing cross-validation.
The hypothetic response to the task will be subtracted, and
the unknown nuisance activity which contributes to the data
through beta0 will be marginalized.
"""
logger.info('Estimating cross-validated score for new data.')
n_T = Y.shape[0]
if design is not None:
Y = Y - np.dot(design, beta)
# The function works for both full model and null model.
# If design matrix is not provided, the whole data is
# used as input for _forward_step. If design matrix is provided,
# residual after subtracting design * beta is fed to _forward_step
T_X = np.diag(rho_X0)
Var_X = sigma2_X0 / (1 - rho_X0**2)
Var_dX = sigma2_X0
# Prior parmeters for X0: T_X is transitioning matrix, Var_X
# is the marginal variance of the first time point. Var_dX is the
# variance of the updating noise.
sigma2_e = sigma_e ** 2
# variance of voxel-specific updating noise component
scan_onsets = np.setdiff1d(scan_onsets, n_T).astype(int)
n_scan = scan_onsets.size
total_log_p = 0
for scan, onset in enumerate(scan_onsets):
# Forward step
if scan == n_scan - 1:
offset = n_T
else:
offset = scan_onsets[scan + 1]
_, _, _, log_p_data, _, _, _, _, _ = \
self._forward_step(
Y[onset:offset, :], T_X, Var_X, Var_dX, rho_e, sigma2_e,
beta0)
total_log_p += log_p_data
return total_log_p | python | def _score(self, Y, design, beta, scan_onsets, beta0, rho_e, sigma_e,
rho_X0, sigma2_X0):
""" Given the data Y, and the spatial pattern beta0
of nuisance time series, return the cross-validated score
of the data Y given all parameters of the subject estimated
during the first step.
It is assumed that the user has design matrix built for the
data Y. Both beta and beta0 are posterior expectation estimated
from training data with the estimated covariance matrix U and
SNR serving as prior. We marginalize X0 instead of fitting
it in this function because this function is for the purpose
of evaluating model no new data. We should avoid doing any
additional fitting when performing cross-validation.
The hypothetic response to the task will be subtracted, and
the unknown nuisance activity which contributes to the data
through beta0 will be marginalized.
"""
logger.info('Estimating cross-validated score for new data.')
n_T = Y.shape[0]
if design is not None:
Y = Y - np.dot(design, beta)
# The function works for both full model and null model.
# If design matrix is not provided, the whole data is
# used as input for _forward_step. If design matrix is provided,
# residual after subtracting design * beta is fed to _forward_step
T_X = np.diag(rho_X0)
Var_X = sigma2_X0 / (1 - rho_X0**2)
Var_dX = sigma2_X0
# Prior parmeters for X0: T_X is transitioning matrix, Var_X
# is the marginal variance of the first time point. Var_dX is the
# variance of the updating noise.
sigma2_e = sigma_e ** 2
# variance of voxel-specific updating noise component
scan_onsets = np.setdiff1d(scan_onsets, n_T).astype(int)
n_scan = scan_onsets.size
total_log_p = 0
for scan, onset in enumerate(scan_onsets):
# Forward step
if scan == n_scan - 1:
offset = n_T
else:
offset = scan_onsets[scan + 1]
_, _, _, log_p_data, _, _, _, _, _ = \
self._forward_step(
Y[onset:offset, :], T_X, Var_X, Var_dX, rho_e, sigma2_e,
beta0)
total_log_p += log_p_data
return total_log_p | [
"def",
"_score",
"(",
"self",
",",
"Y",
",",
"design",
",",
"beta",
",",
"scan_onsets",
",",
"beta0",
",",
"rho_e",
",",
"sigma_e",
",",
"rho_X0",
",",
"sigma2_X0",
")",
":",
"logger",
".",
"info",
"(",
"'Estimating cross-validated score for new data.'",
")"... | Given the data Y, and the spatial pattern beta0
of nuisance time series, return the cross-validated score
of the data Y given all parameters of the subject estimated
during the first step.
It is assumed that the user has design matrix built for the
data Y. Both beta and beta0 are posterior expectation estimated
from training data with the estimated covariance matrix U and
SNR serving as prior. We marginalize X0 instead of fitting
it in this function because this function is for the purpose
of evaluating model no new data. We should avoid doing any
additional fitting when performing cross-validation.
The hypothetic response to the task will be subtracted, and
the unknown nuisance activity which contributes to the data
through beta0 will be marginalized. | [
"Given",
"the",
"data",
"Y",
"and",
"the",
"spatial",
"pattern",
"beta0",
"of",
"nuisance",
"time",
"series",
"return",
"the",
"cross",
"-",
"validated",
"score",
"of",
"the",
"data",
"Y",
"given",
"all",
"parameters",
"of",
"the",
"subject",
"estimated",
... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L1586-L1633 | train | 204,546 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | BRSA._backward_step | def _backward_step(self, deltaY, deltaY_sigma2inv_rho_weightT,
sigma2_e, weight, mu, mu_Gamma_inv, Gamma_inv,
Lambda_0, Lambda_1, H):
""" backward step for HMM, assuming both the hidden state and noise
have 1-step dependence on the previous value.
"""
n_T = len(Gamma_inv)
# All the terms with hat before are parameters of posterior
# distributions of X conditioned on data from all time points,
# whereas the ones without hat calculated by _forward_step
# are mean and covariance of posterior of X conditioned on
# data up to the time point.
Gamma_inv_hat = [None] * n_T
mu_Gamma_inv_hat = [None] * n_T
mu_hat = [None] * n_T
mu_hat[-1] = mu[-1].copy()
mu_Gamma_inv_hat[-1] = mu_Gamma_inv[-1].copy()
Gamma_inv_hat[-1] = Gamma_inv[-1].copy()
for t in np.arange(n_T - 2, -1, -1):
tmp = np.linalg.solve(Gamma_inv_hat[t + 1] - Gamma_inv[t + 1]
+ Lambda_1, H)
Gamma_inv_hat[t] = Gamma_inv[t] + Lambda_0 - np.dot(H.T, tmp)
mu_Gamma_inv_hat[t] = mu_Gamma_inv[t] \
- deltaY_sigma2inv_rho_weightT[t, :] + np.dot(
mu_Gamma_inv_hat[t + 1] - mu_Gamma_inv[t + 1]
+ np.dot(deltaY[t, :] / sigma2_e, weight.T), tmp)
mu_hat[t] = np.linalg.solve(Gamma_inv_hat[t],
mu_Gamma_inv_hat[t])
return mu_hat, mu_Gamma_inv_hat, Gamma_inv_hat | python | def _backward_step(self, deltaY, deltaY_sigma2inv_rho_weightT,
sigma2_e, weight, mu, mu_Gamma_inv, Gamma_inv,
Lambda_0, Lambda_1, H):
""" backward step for HMM, assuming both the hidden state and noise
have 1-step dependence on the previous value.
"""
n_T = len(Gamma_inv)
# All the terms with hat before are parameters of posterior
# distributions of X conditioned on data from all time points,
# whereas the ones without hat calculated by _forward_step
# are mean and covariance of posterior of X conditioned on
# data up to the time point.
Gamma_inv_hat = [None] * n_T
mu_Gamma_inv_hat = [None] * n_T
mu_hat = [None] * n_T
mu_hat[-1] = mu[-1].copy()
mu_Gamma_inv_hat[-1] = mu_Gamma_inv[-1].copy()
Gamma_inv_hat[-1] = Gamma_inv[-1].copy()
for t in np.arange(n_T - 2, -1, -1):
tmp = np.linalg.solve(Gamma_inv_hat[t + 1] - Gamma_inv[t + 1]
+ Lambda_1, H)
Gamma_inv_hat[t] = Gamma_inv[t] + Lambda_0 - np.dot(H.T, tmp)
mu_Gamma_inv_hat[t] = mu_Gamma_inv[t] \
- deltaY_sigma2inv_rho_weightT[t, :] + np.dot(
mu_Gamma_inv_hat[t + 1] - mu_Gamma_inv[t + 1]
+ np.dot(deltaY[t, :] / sigma2_e, weight.T), tmp)
mu_hat[t] = np.linalg.solve(Gamma_inv_hat[t],
mu_Gamma_inv_hat[t])
return mu_hat, mu_Gamma_inv_hat, Gamma_inv_hat | [
"def",
"_backward_step",
"(",
"self",
",",
"deltaY",
",",
"deltaY_sigma2inv_rho_weightT",
",",
"sigma2_e",
",",
"weight",
",",
"mu",
",",
"mu_Gamma_inv",
",",
"Gamma_inv",
",",
"Lambda_0",
",",
"Lambda_1",
",",
"H",
")",
":",
"n_T",
"=",
"len",
"(",
"Gamma... | backward step for HMM, assuming both the hidden state and noise
have 1-step dependence on the previous value. | [
"backward",
"step",
"for",
"HMM",
"assuming",
"both",
"the",
"hidden",
"state",
"and",
"noise",
"have",
"1",
"-",
"step",
"dependence",
"on",
"the",
"previous",
"value",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L1743-L1773 | train | 204,547 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | GBRSA._set_SNR_grids | def _set_SNR_grids(self):
""" Set the grids and weights for SNR used in numerical integration
of SNR parameters.
"""
if self.SNR_prior == 'unif':
SNR_grids = np.linspace(0, 1, self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / (self.SNR_bins - 1)
SNR_weights[0] = SNR_weights[0] / 2.0
SNR_weights[-1] = SNR_weights[-1] / 2.0
elif self.SNR_prior == 'lognorm':
dist = scipy.stats.lognorm
alphas = np.arange(np.mod(self.SNR_bins, 2),
self.SNR_bins + 2, 2) / self.SNR_bins
# The goal here is to divide the area under the pdf curve
# to segments representing equal probabilities.
bounds = dist.interval(alphas, (self.logS_range,))
bounds = np.unique(bounds)
# bounds contain the boundaries which equally separate
# the probability mass of the distribution
SNR_grids = np.zeros(self.SNR_bins)
for i in np.arange(self.SNR_bins):
SNR_grids[i] = dist.expect(
lambda x: x, args=(self.logS_range,),
lb=bounds[i], ub=bounds[i + 1]) * self.SNR_bins
# Center of mass of each segment between consecutive
# bounds are set as the grids for SNR.
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
elif self.SNR_prior == 'exp':
SNR_grids = self._bin_exp(self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
else:
SNR_grids = np.ones(1)
SNR_weights = np.ones(1)
SNR_weights = SNR_weights / np.sum(SNR_weights)
return SNR_grids, SNR_weights | python | def _set_SNR_grids(self):
""" Set the grids and weights for SNR used in numerical integration
of SNR parameters.
"""
if self.SNR_prior == 'unif':
SNR_grids = np.linspace(0, 1, self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / (self.SNR_bins - 1)
SNR_weights[0] = SNR_weights[0] / 2.0
SNR_weights[-1] = SNR_weights[-1] / 2.0
elif self.SNR_prior == 'lognorm':
dist = scipy.stats.lognorm
alphas = np.arange(np.mod(self.SNR_bins, 2),
self.SNR_bins + 2, 2) / self.SNR_bins
# The goal here is to divide the area under the pdf curve
# to segments representing equal probabilities.
bounds = dist.interval(alphas, (self.logS_range,))
bounds = np.unique(bounds)
# bounds contain the boundaries which equally separate
# the probability mass of the distribution
SNR_grids = np.zeros(self.SNR_bins)
for i in np.arange(self.SNR_bins):
SNR_grids[i] = dist.expect(
lambda x: x, args=(self.logS_range,),
lb=bounds[i], ub=bounds[i + 1]) * self.SNR_bins
# Center of mass of each segment between consecutive
# bounds are set as the grids for SNR.
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
elif self.SNR_prior == 'exp':
SNR_grids = self._bin_exp(self.SNR_bins)
SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins
else:
SNR_grids = np.ones(1)
SNR_weights = np.ones(1)
SNR_weights = SNR_weights / np.sum(SNR_weights)
return SNR_grids, SNR_weights | [
"def",
"_set_SNR_grids",
"(",
"self",
")",
":",
"if",
"self",
".",
"SNR_prior",
"==",
"'unif'",
":",
"SNR_grids",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"1",
",",
"self",
".",
"SNR_bins",
")",
"SNR_weights",
"=",
"np",
".",
"ones",
"(",
"self",
... | Set the grids and weights for SNR used in numerical integration
of SNR parameters. | [
"Set",
"the",
"grids",
"and",
"weights",
"for",
"SNR",
"used",
"in",
"numerical",
"integration",
"of",
"SNR",
"parameters",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L4117-L4151 | train | 204,548 |
brainiak/brainiak | brainiak/reprsimil/brsa.py | GBRSA._matrix_flattened_grid | def _matrix_flattened_grid(self, X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX,
YTAcorrY_diag, XTAcorrY, X0TAY, XTAX0,
n_C, n_V, n_X0, n_grid):
""" We need to integrate parameters SNR and rho on 2-d discrete grids.
This function generates matrices which have only one dimension for
these two parameters, with each slice in that dimension
corresponding to each combination of the discrete grids of SNR
and discrete grids of rho.
"""
half_log_det_X0TAX0 = np.reshape(
np.repeat(self._half_log_det(X0TAX0)[None, :],
self.SNR_bins, axis=0), n_grid)
X0TAX0 = np.reshape(
np.repeat(X0TAX0[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_X0))
X0TAX0_i = np.reshape(np.repeat(
X0TAX0_i[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_X0))
s2XTAcorrX = np.reshape(
SNR_grids[:, None, None, None]**2 * XTAcorrX,
(n_grid, n_C, n_C))
YTAcorrY_diag = np.reshape(np.repeat(
YTAcorrY_diag[None, :, :],
self.SNR_bins, axis=0), (n_grid, n_V))
sXTAcorrY = np.reshape(SNR_grids[:, None, None, None]
* XTAcorrY, (n_grid, n_C, n_V))
X0TAY = np.reshape(np.repeat(X0TAY[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_V))
XTAX0 = np.reshape(np.repeat(XTAX0[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_C, n_X0))
return half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, \
YTAcorrY_diag, sXTAcorrY, X0TAY, XTAX0 | python | def _matrix_flattened_grid(self, X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX,
YTAcorrY_diag, XTAcorrY, X0TAY, XTAX0,
n_C, n_V, n_X0, n_grid):
""" We need to integrate parameters SNR and rho on 2-d discrete grids.
This function generates matrices which have only one dimension for
these two parameters, with each slice in that dimension
corresponding to each combination of the discrete grids of SNR
and discrete grids of rho.
"""
half_log_det_X0TAX0 = np.reshape(
np.repeat(self._half_log_det(X0TAX0)[None, :],
self.SNR_bins, axis=0), n_grid)
X0TAX0 = np.reshape(
np.repeat(X0TAX0[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_X0))
X0TAX0_i = np.reshape(np.repeat(
X0TAX0_i[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_X0))
s2XTAcorrX = np.reshape(
SNR_grids[:, None, None, None]**2 * XTAcorrX,
(n_grid, n_C, n_C))
YTAcorrY_diag = np.reshape(np.repeat(
YTAcorrY_diag[None, :, :],
self.SNR_bins, axis=0), (n_grid, n_V))
sXTAcorrY = np.reshape(SNR_grids[:, None, None, None]
* XTAcorrY, (n_grid, n_C, n_V))
X0TAY = np.reshape(np.repeat(X0TAY[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_X0, n_V))
XTAX0 = np.reshape(np.repeat(XTAX0[None, :, :, :],
self.SNR_bins, axis=0),
(n_grid, n_C, n_X0))
return half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, \
YTAcorrY_diag, sXTAcorrY, X0TAY, XTAX0 | [
"def",
"_matrix_flattened_grid",
"(",
"self",
",",
"X0TAX0",
",",
"X0TAX0_i",
",",
"SNR_grids",
",",
"XTAcorrX",
",",
"YTAcorrY_diag",
",",
"XTAcorrY",
",",
"X0TAY",
",",
"XTAX0",
",",
"n_C",
",",
"n_V",
",",
"n_X0",
",",
"n_grid",
")",
":",
"half_log_det_... | We need to integrate parameters SNR and rho on 2-d discrete grids.
This function generates matrices which have only one dimension for
these two parameters, with each slice in that dimension
corresponding to each combination of the discrete grids of SNR
and discrete grids of rho. | [
"We",
"need",
"to",
"integrate",
"parameters",
"SNR",
"and",
"rho",
"on",
"2",
"-",
"d",
"discrete",
"grids",
".",
"This",
"function",
"generates",
"matrices",
"which",
"have",
"only",
"one",
"dimension",
"for",
"these",
"two",
"parameters",
"with",
"each",
... | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/reprsimil/brsa.py#L4162-L4197 | train | 204,549 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM.fit | def fit(self, X):
"""Compute the Robust Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data of one subject.
"""
logger.info('Starting RSRM')
# Check that the regularizer value is positive
if 0.0 >= self.lam:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough timepoints to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment timepoints "
"between subjects.")
# Create a new random state
self.random_state_ = np.random.RandomState(self.rand_seed)
# Run RSRM
self.w_, self.r_, self.s_ = self._rsrm(X)
return self | python | def fit(self, X):
"""Compute the Robust Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data of one subject.
"""
logger.info('Starting RSRM')
# Check that the regularizer value is positive
if 0.0 >= self.lam:
raise ValueError("Gamma parameter should be positive.")
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects in the input "
"data to train the model.")
# Check for input data sizes
if X[0].shape[1] < self.features:
raise ValueError(
"There are not enough timepoints to train the model with "
"{0:d} features.".format(self.features))
# Check if all subjects have same number of TRs for alignment
number_trs = X[0].shape[1]
number_subjects = len(X)
for subject in range(number_subjects):
assert_all_finite(X[subject])
if X[subject].shape[1] != number_trs:
raise ValueError("Different number of alignment timepoints "
"between subjects.")
# Create a new random state
self.random_state_ = np.random.RandomState(self.rand_seed)
# Run RSRM
self.w_, self.r_, self.s_ = self._rsrm(X)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
")",
":",
"logger",
".",
"info",
"(",
"'Starting RSRM'",
")",
"# Check that the regularizer value is positive",
"if",
"0.0",
">=",
"self",
".",
"lam",
":",
"raise",
"ValueError",
"(",
"\"Gamma parameter should be positive.\"",
... | Compute the Robust Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data of one subject. | [
"Compute",
"the",
"Robust",
"Shared",
"Response",
"Model"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L114-L155 | train | 204,550 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM.transform | def transform(self, X):
"""Use the model to transform new data to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints_i]
Each element in the list contains the fMRI data of one subject.
Returns
-------
r : list of 2D arrays, element i has shape=[features_i, timepoints_i]
Shared responses from input data (X)
s : list of 2D arrays, element i has shape=[voxels_i, timepoints_i]
Individual data obtained from fitting model to input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
r = [None] * len(X)
s = [None] * len(X)
for subject in range(len(X)):
if X[subject] is not None:
r[subject], s[subject] = self._transform_new_data(X[subject],
subject)
return r, s | python | def transform(self, X):
"""Use the model to transform new data to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints_i]
Each element in the list contains the fMRI data of one subject.
Returns
-------
r : list of 2D arrays, element i has shape=[features_i, timepoints_i]
Shared responses from input data (X)
s : list of 2D arrays, element i has shape=[voxels_i, timepoints_i]
Individual data obtained from fitting model to input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
r = [None] * len(X)
s = [None] * len(X)
for subject in range(len(X)):
if X[subject] is not None:
r[subject], s[subject] = self._transform_new_data(X[subject],
subject)
return r, s | [
"def",
"transform",
"(",
"self",
",",
"X",
")",
":",
"# Check if the model exist",
"if",
"hasattr",
"(",
"self",
",",
"'w_'",
")",
"is",
"False",
":",
"raise",
"NotFittedError",
"(",
"\"The model fit has not been run yet.\"",
")",
"# Check the number of subjects",
"... | Use the model to transform new data to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints_i]
Each element in the list contains the fMRI data of one subject.
Returns
-------
r : list of 2D arrays, element i has shape=[features_i, timepoints_i]
Shared responses from input data (X)
s : list of 2D arrays, element i has shape=[voxels_i, timepoints_i]
Individual data obtained from fitting model to input data (X) | [
"Use",
"the",
"model",
"to",
"transform",
"new",
"data",
"to",
"Shared",
"Response",
"space"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L157-L191 | train | 204,551 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM._transform_new_data | def _transform_new_data(self, X, subject):
"""Transform new data for a subjects by projecting to the shared subspace and
computing the individual information.
Parameters
----------
X : array, shape=[voxels, timepoints]
The fMRI data of the subject.
subject : int
The subject id.
Returns
-------
R : array, shape=[features, timepoints]
Shared response from input data (X)
S : array, shape=[voxels, timepoints]
Individual data obtained from fitting model to input data (X)
"""
S = np.zeros_like(X)
R = None
for i in range(self.n_iter):
R = self.w_[subject].T.dot(X - S)
S = self._shrink(X - self.w_[subject].dot(R), self.lam)
return R, S | python | def _transform_new_data(self, X, subject):
"""Transform new data for a subjects by projecting to the shared subspace and
computing the individual information.
Parameters
----------
X : array, shape=[voxels, timepoints]
The fMRI data of the subject.
subject : int
The subject id.
Returns
-------
R : array, shape=[features, timepoints]
Shared response from input data (X)
S : array, shape=[voxels, timepoints]
Individual data obtained from fitting model to input data (X)
"""
S = np.zeros_like(X)
R = None
for i in range(self.n_iter):
R = self.w_[subject].T.dot(X - S)
S = self._shrink(X - self.w_[subject].dot(R), self.lam)
return R, S | [
"def",
"_transform_new_data",
"(",
"self",
",",
"X",
",",
"subject",
")",
":",
"S",
"=",
"np",
".",
"zeros_like",
"(",
"X",
")",
"R",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_iter",
")",
":",
"R",
"=",
"self",
".",
"w_",
"["... | Transform new data for a subjects by projecting to the shared subspace and
computing the individual information.
Parameters
----------
X : array, shape=[voxels, timepoints]
The fMRI data of the subject.
subject : int
The subject id.
Returns
-------
R : array, shape=[features, timepoints]
Shared response from input data (X)
S : array, shape=[voxels, timepoints]
Individual data obtained from fitting model to input data (X) | [
"Transform",
"new",
"data",
"for",
"a",
"subjects",
"by",
"projecting",
"to",
"the",
"shared",
"subspace",
"and",
"computing",
"the",
"individual",
"information",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L193-L220 | train | 204,552 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM.transform_subject | def transform_subject(self, X):
"""Transform a new subject using the existing model
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
s : 2D array, shape=[voxels, timepoints]
Individual term `S_{new}` for new subject
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.r_.shape[1]:
raise ValueError("The number of timepoints(TRs) does not match the"
"one in the model.")
s = np.zeros_like(X)
for i in range(self.n_iter):
w = self._update_transform_subject(X, s, self.r_)
s = self._shrink(X - w.dot(self.r_), self.lam)
return w, s | python | def transform_subject(self, X):
"""Transform a new subject using the existing model
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
s : 2D array, shape=[voxels, timepoints]
Individual term `S_{new}` for new subject
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.r_.shape[1]:
raise ValueError("The number of timepoints(TRs) does not match the"
"one in the model.")
s = np.zeros_like(X)
for i in range(self.n_iter):
w = self._update_transform_subject(X, s, self.r_)
s = self._shrink(X - w.dot(self.r_), self.lam)
return w, s | [
"def",
"transform_subject",
"(",
"self",
",",
"X",
")",
":",
"# Check if the model exist",
"if",
"hasattr",
"(",
"self",
",",
"'w_'",
")",
"is",
"False",
":",
"raise",
"NotFittedError",
"(",
"\"The model fit has not been run yet.\"",
")",
"# Check the number of TRs in... | Transform a new subject using the existing model
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
s : 2D array, shape=[voxels, timepoints]
Individual term `S_{new}` for new subject | [
"Transform",
"a",
"new",
"subject",
"using",
"the",
"existing",
"model"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L222-L254 | train | 204,553 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM._rsrm | def _rsrm(self, X):
"""Block-Coordinate Descent algorithm for fitting RSRM.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
"""
subjs = len(X)
voxels = [X[i].shape[0] for i in range(subjs)]
TRs = X[0].shape[1]
features = self.features
# Initialization
W = self._init_transforms(subjs, voxels, features, self.random_state_)
S = self._init_individual(subjs, voxels, TRs)
R = self._update_shared_response(X, S, W, features)
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(X, W, R, S, self.lam)
logger.info('Objective function %f' % objective)
# Main loop
for i in range(self.n_iter):
W = self._update_transforms(X, S, R)
S = self._update_individual(X, W, R, self.lam)
R = self._update_shared_response(X, S, W, features)
# Print objective function every iteration
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(X, W, R, S, self.lam)
logger.info('Objective function %f' % objective)
return W, R, S | python | def _rsrm(self, X):
"""Block-Coordinate Descent algorithm for fitting RSRM.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
"""
subjs = len(X)
voxels = [X[i].shape[0] for i in range(subjs)]
TRs = X[0].shape[1]
features = self.features
# Initialization
W = self._init_transforms(subjs, voxels, features, self.random_state_)
S = self._init_individual(subjs, voxels, TRs)
R = self._update_shared_response(X, S, W, features)
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(X, W, R, S, self.lam)
logger.info('Objective function %f' % objective)
# Main loop
for i in range(self.n_iter):
W = self._update_transforms(X, S, R)
S = self._update_individual(X, W, R, self.lam)
R = self._update_shared_response(X, S, W, features)
# Print objective function every iteration
if logger.isEnabledFor(logging.INFO):
objective = self._objective_function(X, W, R, S, self.lam)
logger.info('Objective function %f' % objective)
return W, R, S | [
"def",
"_rsrm",
"(",
"self",
",",
"X",
")",
":",
"subjs",
"=",
"len",
"(",
"X",
")",
"voxels",
"=",
"[",
"X",
"[",
"i",
"]",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"subjs",
")",
"]",
"TRs",
"=",
"X",
"[",
"0",
"]",
... | Block-Coordinate Descent algorithm for fitting RSRM.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject. | [
"Block",
"-",
"Coordinate",
"Descent",
"algorithm",
"for",
"fitting",
"RSRM",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L256-L302 | train | 204,554 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM._objective_function | def _objective_function(X, W, R, S, gamma):
"""Evaluate the objective function.
.. math:: \\sum_{i=1}^{N} 1/2 \\| X_i - W_i R - S_i \\|_F^2
.. math:: + /\\gamma * \\|S_i\\|_1
Parameters
----------
X : list of array, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components.
Returns
-------
func : float
The RSRM objective function evaluated on the parameters to this
function.
"""
subjs = len(X)
func = .0
for i in range(subjs):
func += 0.5 * np.sum((X[i] - W[i].dot(R) - S[i])**2) \
+ gamma * np.sum(np.abs(S[i]))
return func | python | def _objective_function(X, W, R, S, gamma):
"""Evaluate the objective function.
.. math:: \\sum_{i=1}^{N} 1/2 \\| X_i - W_i R - S_i \\|_F^2
.. math:: + /\\gamma * \\|S_i\\|_1
Parameters
----------
X : list of array, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components.
Returns
-------
func : float
The RSRM objective function evaluated on the parameters to this
function.
"""
subjs = len(X)
func = .0
for i in range(subjs):
func += 0.5 * np.sum((X[i] - W[i].dot(R) - S[i])**2) \
+ gamma * np.sum(np.abs(S[i]))
return func | [
"def",
"_objective_function",
"(",
"X",
",",
"W",
",",
"R",
",",
"S",
",",
"gamma",
")",
":",
"subjs",
"=",
"len",
"(",
"X",
")",
"func",
"=",
".0",
"for",
"i",
"in",
"range",
"(",
"subjs",
")",
":",
"func",
"+=",
"0.5",
"*",
"np",
".",
"sum"... | Evaluate the objective function.
.. math:: \\sum_{i=1}^{N} 1/2 \\| X_i - W_i R - S_i \\|_F^2
.. math:: + /\\gamma * \\|S_i\\|_1
Parameters
----------
X : list of array, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components.
Returns
-------
func : float
The RSRM objective function evaluated on the parameters to this
function. | [
"Evaluate",
"the",
"objective",
"function",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L346-L384 | train | 204,555 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM._update_individual | def _update_individual(X, W, R, gamma):
"""Update the individual components `S_i`.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components.
Returns
-------
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
"""
subjs = len(X)
S = []
for i in range(subjs):
S.append(RSRM._shrink(X[i] - W[i].dot(R), gamma))
return S | python | def _update_individual(X, W, R, gamma):
"""Update the individual components `S_i`.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components.
Returns
-------
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
"""
subjs = len(X)
S = []
for i in range(subjs):
S.append(RSRM._shrink(X[i] - W[i].dot(R), gamma))
return S | [
"def",
"_update_individual",
"(",
"X",
",",
"W",
",",
"R",
",",
"gamma",
")",
":",
"subjs",
"=",
"len",
"(",
"X",
")",
"S",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"subjs",
")",
":",
"S",
".",
"append",
"(",
"RSRM",
".",
"_shrink",
"("... | Update the individual components `S_i`.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
gamma : float, default: 1.0
Regularization parameter for the sparseness of the individual
components.
Returns
-------
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject. | [
"Update",
"the",
"individual",
"components",
"S_i",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L387-L417 | train | 204,556 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM._update_shared_response | def _update_shared_response(X, S, W, features):
"""Update the shared response `R`.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
features : int
The number of features in the model.
Returns
-------
R : array, shape=[features, timepoints]
The updated shared response.
"""
subjs = len(X)
TRs = X[0].shape[1]
R = np.zeros((features, TRs))
# Project the subject data with the individual component removed into
# the shared subspace and average over all subjects.
for i in range(subjs):
R += W[i].T.dot(X[i]-S[i])
R /= subjs
return R | python | def _update_shared_response(X, S, W, features):
"""Update the shared response `R`.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
features : int
The number of features in the model.
Returns
-------
R : array, shape=[features, timepoints]
The updated shared response.
"""
subjs = len(X)
TRs = X[0].shape[1]
R = np.zeros((features, TRs))
# Project the subject data with the individual component removed into
# the shared subspace and average over all subjects.
for i in range(subjs):
R += W[i].T.dot(X[i]-S[i])
R /= subjs
return R | [
"def",
"_update_shared_response",
"(",
"X",
",",
"S",
",",
"W",
",",
"features",
")",
":",
"subjs",
"=",
"len",
"(",
"X",
")",
"TRs",
"=",
"X",
"[",
"0",
"]",
".",
"shape",
"[",
"1",
"]",
"R",
"=",
"np",
".",
"zeros",
"(",
"(",
"features",
",... | Update the shared response `R`.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
features : int
The number of features in the model.
Returns
-------
R : array, shape=[features, timepoints]
The updated shared response. | [
"Update",
"the",
"shared",
"response",
"R",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L445-L478 | train | 204,557 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM._update_transforms | def _update_transforms(X, S, R):
"""Updates the mappings `W_i` for each subject.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.ß
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
"""
subjs = len(X)
W = []
for i in range(subjs):
W.append(RSRM._update_transform_subject(X[i], S[i], R))
return W | python | def _update_transforms(X, S, R):
"""Updates the mappings `W_i` for each subject.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.ß
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
"""
subjs = len(X)
W = []
for i in range(subjs):
W.append(RSRM._update_transform_subject(X[i], S[i], R))
return W | [
"def",
"_update_transforms",
"(",
"X",
",",
"S",
",",
"R",
")",
":",
"subjs",
"=",
"len",
"(",
"X",
")",
"W",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"subjs",
")",
":",
"W",
".",
"append",
"(",
"RSRM",
".",
"_update_transform_subject",
"("... | Updates the mappings `W_i` for each subject.
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, timepoints]
Each element in the list contains the fMRI data for alignment of
one subject.ß
S : list of array, element i has shape=[voxels_i, timepoints]
The individual component :math:`S_i` for each subject.
R : array, shape=[features, timepoints]
The shared response.
Returns
-------
W : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject. | [
"Updates",
"the",
"mappings",
"W_i",
"for",
"each",
"subject",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L509-L535 | train | 204,558 |
brainiak/brainiak | brainiak/funcalign/rsrm.py | RSRM._shrink | def _shrink(v, gamma):
"""Soft-shrinkage of an array with parameter gamma.
Parameters
----------
v : array
Array containing the values to be applied to the shrinkage operator
gamma : float
Shrinkage parameter.
Returns
-------
v : array
The same input array after the shrinkage operator was applied.
"""
pos = v > gamma
neg = v < -gamma
v[pos] -= gamma
v[neg] += gamma
v[np.logical_and(~pos, ~neg)] = .0
return v | python | def _shrink(v, gamma):
"""Soft-shrinkage of an array with parameter gamma.
Parameters
----------
v : array
Array containing the values to be applied to the shrinkage operator
gamma : float
Shrinkage parameter.
Returns
-------
v : array
The same input array after the shrinkage operator was applied.
"""
pos = v > gamma
neg = v < -gamma
v[pos] -= gamma
v[neg] += gamma
v[np.logical_and(~pos, ~neg)] = .0
return v | [
"def",
"_shrink",
"(",
"v",
",",
"gamma",
")",
":",
"pos",
"=",
"v",
">",
"gamma",
"neg",
"=",
"v",
"<",
"-",
"gamma",
"v",
"[",
"pos",
"]",
"-=",
"gamma",
"v",
"[",
"neg",
"]",
"+=",
"gamma",
"v",
"[",
"np",
".",
"logical_and",
"(",
"~",
"... | Soft-shrinkage of an array with parameter gamma.
Parameters
----------
v : array
Array containing the values to be applied to the shrinkage operator
gamma : float
Shrinkage parameter.
Returns
-------
v : array
The same input array after the shrinkage operator was applied. | [
"Soft",
"-",
"shrinkage",
"of",
"an",
"array",
"with",
"parameter",
"gamma",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/rsrm.py#L538-L561 | train | 204,559 |
brainiak/brainiak | examples/funcalign/srm_image_prediction_example_distributed.py | plot_confusion_matrix | def plot_confusion_matrix(cm, title="Confusion Matrix"):
"""Plots a confusion matrix for each subject
"""
import matplotlib.pyplot as plt
import math
plt.figure()
subjects = len(cm)
root_subjects = math.sqrt(subjects)
cols = math.ceil(root_subjects)
rows = math.ceil(subjects/cols)
classes = cm[0].shape[0]
for subject in range(subjects):
plt.subplot(rows, cols, subject+1)
plt.imshow(cm[subject], interpolation='nearest', cmap=plt.cm.bone)
plt.xticks(np.arange(classes), range(1, classes+1))
plt.yticks(np.arange(classes), range(1, classes+1))
cbar = plt.colorbar(ticks=[0.0, 1.0], shrink=0.6)
cbar.set_clim(0.0, 1.0)
plt.xlabel("Predicted")
plt.ylabel("True label")
plt.title("{0:d}".format(subject + 1))
plt.suptitle(title)
plt.tight_layout()
plt.show() | python | def plot_confusion_matrix(cm, title="Confusion Matrix"):
"""Plots a confusion matrix for each subject
"""
import matplotlib.pyplot as plt
import math
plt.figure()
subjects = len(cm)
root_subjects = math.sqrt(subjects)
cols = math.ceil(root_subjects)
rows = math.ceil(subjects/cols)
classes = cm[0].shape[0]
for subject in range(subjects):
plt.subplot(rows, cols, subject+1)
plt.imshow(cm[subject], interpolation='nearest', cmap=plt.cm.bone)
plt.xticks(np.arange(classes), range(1, classes+1))
plt.yticks(np.arange(classes), range(1, classes+1))
cbar = plt.colorbar(ticks=[0.0, 1.0], shrink=0.6)
cbar.set_clim(0.0, 1.0)
plt.xlabel("Predicted")
plt.ylabel("True label")
plt.title("{0:d}".format(subject + 1))
plt.suptitle(title)
plt.tight_layout()
plt.show() | [
"def",
"plot_confusion_matrix",
"(",
"cm",
",",
"title",
"=",
"\"Confusion Matrix\"",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"math",
"plt",
".",
"figure",
"(",
")",
"subjects",
"=",
"len",
"(",
"cm",
")",
"root_subjects",
"... | Plots a confusion matrix for each subject | [
"Plots",
"a",
"confusion",
"matrix",
"for",
"each",
"subject"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/examples/funcalign/srm_image_prediction_example_distributed.py#L52-L75 | train | 204,560 |
brainiak/brainiak | brainiak/image.py | mask_image | def mask_image(image: SpatialImage, mask: np.ndarray, data_type: type = None
) -> np.ndarray:
"""Mask image after optionally casting its type.
Parameters
----------
image
Image to mask. Can include time as the last dimension.
mask
Mask to apply. Must have the same shape as the image data.
data_type
Type to cast image to.
Returns
-------
np.ndarray
Masked image.
Raises
------
ValueError
Image data and masks have different shapes.
"""
image_data = image.get_data()
if image_data.shape[:3] != mask.shape:
raise ValueError("Image data and mask have different shapes.")
if data_type is not None:
cast_data = image_data.astype(data_type)
else:
cast_data = image_data
return cast_data[mask] | python | def mask_image(image: SpatialImage, mask: np.ndarray, data_type: type = None
) -> np.ndarray:
"""Mask image after optionally casting its type.
Parameters
----------
image
Image to mask. Can include time as the last dimension.
mask
Mask to apply. Must have the same shape as the image data.
data_type
Type to cast image to.
Returns
-------
np.ndarray
Masked image.
Raises
------
ValueError
Image data and masks have different shapes.
"""
image_data = image.get_data()
if image_data.shape[:3] != mask.shape:
raise ValueError("Image data and mask have different shapes.")
if data_type is not None:
cast_data = image_data.astype(data_type)
else:
cast_data = image_data
return cast_data[mask] | [
"def",
"mask_image",
"(",
"image",
":",
"SpatialImage",
",",
"mask",
":",
"np",
".",
"ndarray",
",",
"data_type",
":",
"type",
"=",
"None",
")",
"->",
"np",
".",
"ndarray",
":",
"image_data",
"=",
"image",
".",
"get_data",
"(",
")",
"if",
"image_data",... | Mask image after optionally casting its type.
Parameters
----------
image
Image to mask. Can include time as the last dimension.
mask
Mask to apply. Must have the same shape as the image data.
data_type
Type to cast image to.
Returns
-------
np.ndarray
Masked image.
Raises
------
ValueError
Image data and masks have different shapes. | [
"Mask",
"image",
"after",
"optionally",
"casting",
"its",
"type",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/image.py#L107-L137 | train | 204,561 |
brainiak/brainiak | brainiak/image.py | multimask_images | def multimask_images(images: Iterable[SpatialImage],
masks: Sequence[np.ndarray], image_type: type = None
) -> Iterable[Sequence[np.ndarray]]:
"""Mask images with multiple masks.
Parameters
----------
images:
Images to mask.
masks:
Masks to apply.
image_type:
Type to cast images to.
Yields
------
Sequence[np.ndarray]
For each mask, a masked image.
"""
for image in images:
yield [mask_image(image, mask, image_type) for mask in masks] | python | def multimask_images(images: Iterable[SpatialImage],
masks: Sequence[np.ndarray], image_type: type = None
) -> Iterable[Sequence[np.ndarray]]:
"""Mask images with multiple masks.
Parameters
----------
images:
Images to mask.
masks:
Masks to apply.
image_type:
Type to cast images to.
Yields
------
Sequence[np.ndarray]
For each mask, a masked image.
"""
for image in images:
yield [mask_image(image, mask, image_type) for mask in masks] | [
"def",
"multimask_images",
"(",
"images",
":",
"Iterable",
"[",
"SpatialImage",
"]",
",",
"masks",
":",
"Sequence",
"[",
"np",
".",
"ndarray",
"]",
",",
"image_type",
":",
"type",
"=",
"None",
")",
"->",
"Iterable",
"[",
"Sequence",
"[",
"np",
".",
"nd... | Mask images with multiple masks.
Parameters
----------
images:
Images to mask.
masks:
Masks to apply.
image_type:
Type to cast images to.
Yields
------
Sequence[np.ndarray]
For each mask, a masked image. | [
"Mask",
"images",
"with",
"multiple",
"masks",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/image.py#L140-L160 | train | 204,562 |
brainiak/brainiak | brainiak/image.py | mask_images | def mask_images(images: Iterable[SpatialImage], mask: np.ndarray,
image_type: type = None) -> Iterable[np.ndarray]:
"""Mask images.
Parameters
----------
images:
Images to mask.
mask:
Mask to apply.
image_type:
Type to cast images to.
Yields
------
np.ndarray
Masked image.
"""
for images in multimask_images(images, (mask,), image_type):
yield images[0] | python | def mask_images(images: Iterable[SpatialImage], mask: np.ndarray,
image_type: type = None) -> Iterable[np.ndarray]:
"""Mask images.
Parameters
----------
images:
Images to mask.
mask:
Mask to apply.
image_type:
Type to cast images to.
Yields
------
np.ndarray
Masked image.
"""
for images in multimask_images(images, (mask,), image_type):
yield images[0] | [
"def",
"mask_images",
"(",
"images",
":",
"Iterable",
"[",
"SpatialImage",
"]",
",",
"mask",
":",
"np",
".",
"ndarray",
",",
"image_type",
":",
"type",
"=",
"None",
")",
"->",
"Iterable",
"[",
"np",
".",
"ndarray",
"]",
":",
"for",
"images",
"in",
"m... | Mask images.
Parameters
----------
images:
Images to mask.
mask:
Mask to apply.
image_type:
Type to cast images to.
Yields
------
np.ndarray
Masked image. | [
"Mask",
"images",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/image.py#L163-L182 | train | 204,563 |
brainiak/brainiak | brainiak/image.py | MaskedMultiSubjectData.from_masked_images | def from_masked_images(cls: Type[T], masked_images: Iterable[np.ndarray],
n_subjects: int) -> T:
"""Create a new instance of MaskedMultiSubjecData from masked images.
Parameters
----------
masked_images : iterator
Images from multiple subjects to stack along 3rd dimension
n_subjects : int
Number of subjects; must match the number of images
Returns
-------
T
A new instance of MaskedMultiSubjectData
Raises
------
ValueError
Images have different shapes.
The number of images differs from n_subjects.
"""
images_iterator = iter(masked_images)
first_image = next(images_iterator)
first_image_shape = first_image.T.shape
result = np.empty((first_image_shape[0], first_image_shape[1],
n_subjects))
for n_images, image in enumerate(itertools.chain([first_image],
images_iterator)):
image = image.T
if image.shape != first_image_shape:
raise ValueError("Image {} has different shape from first "
"image: {} != {}".format(n_images,
image.shape,
first_image_shape))
result[:, :, n_images] = image
n_images += 1
if n_images != n_subjects:
raise ValueError("n_subjects != number of images: {} != {}"
.format(n_subjects, n_images))
return result.view(cls) | python | def from_masked_images(cls: Type[T], masked_images: Iterable[np.ndarray],
n_subjects: int) -> T:
"""Create a new instance of MaskedMultiSubjecData from masked images.
Parameters
----------
masked_images : iterator
Images from multiple subjects to stack along 3rd dimension
n_subjects : int
Number of subjects; must match the number of images
Returns
-------
T
A new instance of MaskedMultiSubjectData
Raises
------
ValueError
Images have different shapes.
The number of images differs from n_subjects.
"""
images_iterator = iter(masked_images)
first_image = next(images_iterator)
first_image_shape = first_image.T.shape
result = np.empty((first_image_shape[0], first_image_shape[1],
n_subjects))
for n_images, image in enumerate(itertools.chain([first_image],
images_iterator)):
image = image.T
if image.shape != first_image_shape:
raise ValueError("Image {} has different shape from first "
"image: {} != {}".format(n_images,
image.shape,
first_image_shape))
result[:, :, n_images] = image
n_images += 1
if n_images != n_subjects:
raise ValueError("n_subjects != number of images: {} != {}"
.format(n_subjects, n_images))
return result.view(cls) | [
"def",
"from_masked_images",
"(",
"cls",
":",
"Type",
"[",
"T",
"]",
",",
"masked_images",
":",
"Iterable",
"[",
"np",
".",
"ndarray",
"]",
",",
"n_subjects",
":",
"int",
")",
"->",
"T",
":",
"images_iterator",
"=",
"iter",
"(",
"masked_images",
")",
"... | Create a new instance of MaskedMultiSubjecData from masked images.
Parameters
----------
masked_images : iterator
Images from multiple subjects to stack along 3rd dimension
n_subjects : int
Number of subjects; must match the number of images
Returns
-------
T
A new instance of MaskedMultiSubjectData
Raises
------
ValueError
Images have different shapes.
The number of images differs from n_subjects. | [
"Create",
"a",
"new",
"instance",
"of",
"MaskedMultiSubjecData",
"from",
"masked",
"images",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/image.py#L40-L81 | train | 204,564 |
brainiak/brainiak | brainiak/image.py | SingleConditionSpec.extract_labels | def extract_labels(self) -> np.ndarray:
"""Extract condition labels.
Returns
-------
np.ndarray
The condition label of each epoch.
"""
condition_idxs, epoch_idxs, _ = np.where(self)
_, unique_epoch_idxs = np.unique(epoch_idxs, return_index=True)
return condition_idxs[unique_epoch_idxs] | python | def extract_labels(self) -> np.ndarray:
"""Extract condition labels.
Returns
-------
np.ndarray
The condition label of each epoch.
"""
condition_idxs, epoch_idxs, _ = np.where(self)
_, unique_epoch_idxs = np.unique(epoch_idxs, return_index=True)
return condition_idxs[unique_epoch_idxs] | [
"def",
"extract_labels",
"(",
"self",
")",
"->",
"np",
".",
"ndarray",
":",
"condition_idxs",
",",
"epoch_idxs",
",",
"_",
"=",
"np",
".",
"where",
"(",
"self",
")",
"_",
",",
"unique_epoch_idxs",
"=",
"np",
".",
"unique",
"(",
"epoch_idxs",
",",
"retu... | Extract condition labels.
Returns
-------
np.ndarray
The condition label of each epoch. | [
"Extract",
"condition",
"labels",
"."
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/image.py#L94-L104 | train | 204,565 |
brainiak/brainiak | brainiak/funcalign/srm.py | SRM.fit | def fit(self, X, y=None):
"""Compute the probabilistic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Probabilistic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
number_subjects = len(X)
number_subjects_vec = self.comm.allgather(number_subjects)
for rank in range(self.comm.Get_size()):
if number_subjects_vec[rank] != number_subjects:
raise ValueError(
"Not all ranks have same number of subjects")
# Collect size information
shape0 = np.zeros((number_subjects,), dtype=np.int)
shape1 = np.zeros((number_subjects,), dtype=np.int)
for subject in range(number_subjects):
if X[subject] is not None:
assert_all_finite(X[subject])
shape0[subject] = X[subject].shape[0]
shape1[subject] = X[subject].shape[1]
shape0 = self.comm.allreduce(shape0, op=MPI.SUM)
shape1 = self.comm.allreduce(shape1, op=MPI.SUM)
# Check if all subjects have same number of TRs
number_trs = np.min(shape1)
for subject in range(number_subjects):
if shape1[subject] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
if shape1[subject] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.sigma_s_, self.w_, self.mu_, self.rho2_, self.s_ = self._srm(X)
return self | python | def fit(self, X, y=None):
"""Compute the probabilistic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used
"""
logger.info('Starting Probabilistic SRM')
# Check the number of subjects
if len(X) <= 1:
raise ValueError("There are not enough subjects "
"({0:d}) to train the model.".format(len(X)))
# Check for input data sizes
number_subjects = len(X)
number_subjects_vec = self.comm.allgather(number_subjects)
for rank in range(self.comm.Get_size()):
if number_subjects_vec[rank] != number_subjects:
raise ValueError(
"Not all ranks have same number of subjects")
# Collect size information
shape0 = np.zeros((number_subjects,), dtype=np.int)
shape1 = np.zeros((number_subjects,), dtype=np.int)
for subject in range(number_subjects):
if X[subject] is not None:
assert_all_finite(X[subject])
shape0[subject] = X[subject].shape[0]
shape1[subject] = X[subject].shape[1]
shape0 = self.comm.allreduce(shape0, op=MPI.SUM)
shape1 = self.comm.allreduce(shape1, op=MPI.SUM)
# Check if all subjects have same number of TRs
number_trs = np.min(shape1)
for subject in range(number_subjects):
if shape1[subject] < self.features:
raise ValueError(
"There are not enough samples to train the model with "
"{0:d} features.".format(self.features))
if shape1[subject] != number_trs:
raise ValueError("Different number of samples between subjects"
".")
# Run SRM
self.sigma_s_, self.w_, self.mu_, self.rho2_, self.s_ = self._srm(X)
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"logger",
".",
"info",
"(",
"'Starting Probabilistic SRM'",
")",
"# Check the number of subjects",
"if",
"len",
"(",
"X",
")",
"<=",
"1",
":",
"raise",
"ValueError",
"(",
"\"There are no... | Compute the probabilistic Shared Response Model
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
y : not used | [
"Compute",
"the",
"probabilistic",
"Shared",
"Response",
"Model"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/srm.py#L182-L233 | train | 204,566 |
brainiak/brainiak | brainiak/funcalign/srm.py | SRM.transform | def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects
y : not used (as it is unsupervised learning)
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
if X[subject] is not None:
s[subject] = self.w_[subject].T.dot(X[subject])
return s | python | def transform(self, X, y=None):
"""Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects
y : not used (as it is unsupervised learning)
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X)
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of subjects
if len(X) != len(self.w_):
raise ValueError("The number of subjects does not match the one"
" in the model.")
s = [None] * len(X)
for subject in range(len(X)):
if X[subject] is not None:
s[subject] = self.w_[subject].T.dot(X[subject])
return s | [
"def",
"transform",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"# Check if the model exist",
"if",
"hasattr",
"(",
"self",
",",
"'w_'",
")",
"is",
"False",
":",
"raise",
"NotFittedError",
"(",
"\"The model fit has not been run yet.\"",
")",
"# Che... | Use the model to transform matrix to Shared Response space
Parameters
----------
X : list of 2D arrays, element i has shape=[voxels_i, samples_i]
Each element in the list contains the fMRI data of one subject
note that number of voxels and samples can vary across subjects
y : not used (as it is unsupervised learning)
Returns
-------
s : list of 2D arrays, element i has shape=[features_i, samples_i]
Shared responses from input data (X) | [
"Use",
"the",
"model",
"to",
"transform",
"matrix",
"to",
"Shared",
"Response",
"space"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/srm.py#L235-L266 | train | 204,567 |
brainiak/brainiak | brainiak/funcalign/srm.py | SRM.transform_subject | def transform_subject(self, X):
"""Transform a new subject using the existing model.
The subject is assumed to have recieved equivalent stimulation
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.s_.shape[1]:
raise ValueError("The number of timepoints(TRs) does not match the"
"one in the model.")
w = self._update_transform_subject(X, self.s_)
return w | python | def transform_subject(self, X):
"""Transform a new subject using the existing model.
The subject is assumed to have recieved equivalent stimulation
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.s_.shape[1]:
raise ValueError("The number of timepoints(TRs) does not match the"
"one in the model.")
w = self._update_transform_subject(X, self.s_)
return w | [
"def",
"transform_subject",
"(",
"self",
",",
"X",
")",
":",
"# Check if the model exist",
"if",
"hasattr",
"(",
"self",
",",
"'w_'",
")",
"is",
"False",
":",
"raise",
"NotFittedError",
"(",
"\"The model fit has not been run yet.\"",
")",
"# Check the number of TRs in... | Transform a new subject using the existing model.
The subject is assumed to have recieved equivalent stimulation
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject | [
"Transform",
"a",
"new",
"subject",
"using",
"the",
"existing",
"model",
".",
"The",
"subject",
"is",
"assumed",
"to",
"have",
"recieved",
"equivalent",
"stimulation"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/srm.py#L385-L413 | train | 204,568 |
brainiak/brainiak | brainiak/eventseg/event.py | EventSegment.fit | def fit(self, X, y=None):
"""Learn a segmentation on training data
Fits event patterns and a segmentation to training data. After
running this function, the learned event patterns can be used to
segment other datasets using find_events
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented. If a list is given, then all datasets
are segmented simultaneously with the same event patterns
y: not used (added to comply with BaseEstimator definition)
Returns
-------
self: the EventSegment object
"""
X = copy.deepcopy(X)
if type(X) is not list:
X = check_array(X)
X = [X]
n_train = len(X)
for i in range(n_train):
X[i] = X[i].T
self.classes_ = np.arange(self.n_events)
n_dim = X[0].shape[0]
for i in range(n_train):
assert (X[i].shape[0] == n_dim)
# Double-check that data is z-scored in time
for i in range(n_train):
X[i] = stats.zscore(X[i], axis=1, ddof=1)
# Initialize variables for fitting
log_gamma = []
for i in range(n_train):
log_gamma.append(np.zeros((X[i].shape[1],
self.n_events)))
step = 1
best_ll = float("-inf")
self.ll_ = np.empty((0, n_train))
while step <= self.n_iter:
iteration_var = self.step_var(step)
# Based on the current segmentation, compute the mean pattern
# for each event
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# Based on the current mean patterns, compute the event
# segmentation
self.ll_ = np.append(self.ll_, np.empty((1, n_train)), axis=0)
for i in range(n_train):
logprob = self._logprob_obs(X[i],
mean_pat, iteration_var)
log_gamma[i], self.ll_[-1, i] = self._forward_backward(logprob)
# If log-likelihood has started decreasing, undo last step and stop
if np.mean(self.ll_[-1, :]) < best_ll:
self.ll_ = self.ll_[:-1, :]
break
self.segments_ = [np.exp(lg) for lg in log_gamma]
self.event_var_ = iteration_var
self.event_pat_ = mean_pat
best_ll = np.mean(self.ll_[-1, :])
logger.debug("Fitting step %d, LL=%f", step, best_ll)
step += 1
return self | python | def fit(self, X, y=None):
"""Learn a segmentation on training data
Fits event patterns and a segmentation to training data. After
running this function, the learned event patterns can be used to
segment other datasets using find_events
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented. If a list is given, then all datasets
are segmented simultaneously with the same event patterns
y: not used (added to comply with BaseEstimator definition)
Returns
-------
self: the EventSegment object
"""
X = copy.deepcopy(X)
if type(X) is not list:
X = check_array(X)
X = [X]
n_train = len(X)
for i in range(n_train):
X[i] = X[i].T
self.classes_ = np.arange(self.n_events)
n_dim = X[0].shape[0]
for i in range(n_train):
assert (X[i].shape[0] == n_dim)
# Double-check that data is z-scored in time
for i in range(n_train):
X[i] = stats.zscore(X[i], axis=1, ddof=1)
# Initialize variables for fitting
log_gamma = []
for i in range(n_train):
log_gamma.append(np.zeros((X[i].shape[1],
self.n_events)))
step = 1
best_ll = float("-inf")
self.ll_ = np.empty((0, n_train))
while step <= self.n_iter:
iteration_var = self.step_var(step)
# Based on the current segmentation, compute the mean pattern
# for each event
seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0)
for lg in log_gamma]
mean_pat = np.empty((n_train, n_dim, self.n_events))
for i in range(n_train):
mean_pat[i, :, :] = X[i].dot(seg_prob[i])
mean_pat = np.mean(mean_pat, axis=0)
# Based on the current mean patterns, compute the event
# segmentation
self.ll_ = np.append(self.ll_, np.empty((1, n_train)), axis=0)
for i in range(n_train):
logprob = self._logprob_obs(X[i],
mean_pat, iteration_var)
log_gamma[i], self.ll_[-1, i] = self._forward_backward(logprob)
# If log-likelihood has started decreasing, undo last step and stop
if np.mean(self.ll_[-1, :]) < best_ll:
self.ll_ = self.ll_[:-1, :]
break
self.segments_ = [np.exp(lg) for lg in log_gamma]
self.event_var_ = iteration_var
self.event_pat_ = mean_pat
best_ll = np.mean(self.ll_[-1, :])
logger.debug("Fitting step %d, LL=%f", step, best_ll)
step += 1
return self | [
"def",
"fit",
"(",
"self",
",",
"X",
",",
"y",
"=",
"None",
")",
":",
"X",
"=",
"copy",
".",
"deepcopy",
"(",
"X",
")",
"if",
"type",
"(",
"X",
")",
"is",
"not",
"list",
":",
"X",
"=",
"check_array",
"(",
"X",
")",
"X",
"=",
"[",
"X",
"]"... | Learn a segmentation on training data
Fits event patterns and a segmentation to training data. After
running this function, the learned event patterns can be used to
segment other datasets using find_events
Parameters
----------
X: time by voxel ndarray, or a list of such ndarrays
fMRI data to be segmented. If a list is given, then all datasets
are segmented simultaneously with the same event patterns
y: not used (added to comply with BaseEstimator definition)
Returns
-------
self: the EventSegment object | [
"Learn",
"a",
"segmentation",
"on",
"training",
"data"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/eventseg/event.py#L108-L187 | train | 204,569 |
brainiak/brainiak | brainiak/eventseg/event.py | EventSegment._logprob_obs | def _logprob_obs(self, data, mean_pat, var):
"""Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
"""
n_vox = data.shape[0]
t = data.shape[1]
# z-score both data and mean patterns in space, so that Gaussians
# are measuring Pearson correlations and are insensitive to overall
# activity changes
data_z = stats.zscore(data, axis=0, ddof=1)
mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1)
logprob = np.empty((t, self.n_events))
if type(var) is not np.ndarray:
var = var * np.ones(self.n_events)
for k in range(self.n_events):
logprob[:, k] = -0.5 * n_vox * np.log(
2 * np.pi * var[k]) - 0.5 * np.sum(
(data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k]
logprob /= n_vox
return logprob | python | def _logprob_obs(self, data, mean_pat, var):
"""Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian
"""
n_vox = data.shape[0]
t = data.shape[1]
# z-score both data and mean patterns in space, so that Gaussians
# are measuring Pearson correlations and are insensitive to overall
# activity changes
data_z = stats.zscore(data, axis=0, ddof=1)
mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1)
logprob = np.empty((t, self.n_events))
if type(var) is not np.ndarray:
var = var * np.ones(self.n_events)
for k in range(self.n_events):
logprob[:, k] = -0.5 * n_vox * np.log(
2 * np.pi * var[k]) - 0.5 * np.sum(
(data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k]
logprob /= n_vox
return logprob | [
"def",
"_logprob_obs",
"(",
"self",
",",
"data",
",",
"mean_pat",
",",
"var",
")",
":",
"n_vox",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"t",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"# z-score both data and mean patterns in space, so that Gaussians",
"# ... | Log probability of observing each timepoint under each event model
Computes the log probability of each observed timepoint being
generated by the Gaussian distribution for each event pattern
Parameters
----------
data: voxel by time ndarray
fMRI data on which to compute log probabilities
mean_pat: voxel by event ndarray
Centers of the Gaussians for each event
var: float or 1D array of length equal to the number of events
Variance of the event Gaussians. If scalar, all events are
assumed to have the same variance
Returns
-------
logprob : time by event ndarray
Log probability of each timepoint under each event Gaussian | [
"Log",
"probability",
"of",
"observing",
"each",
"timepoint",
"under",
"each",
"event",
"model"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/eventseg/event.py#L189-L233 | train | 204,570 |
brainiak/brainiak | brainiak/eventseg/event.py | EventSegment._log | def _log(self, x):
"""Modified version of np.log that manually sets values <=0 to -inf
Parameters
----------
x: ndarray of floats
Input to the log function
Returns
-------
log_ma: ndarray of floats
log of x, with x<=0 values replaced with -inf
"""
xshape = x.shape
_x = x.flatten()
y = utils.masked_log(_x)
return y.reshape(xshape) | python | def _log(self, x):
"""Modified version of np.log that manually sets values <=0 to -inf
Parameters
----------
x: ndarray of floats
Input to the log function
Returns
-------
log_ma: ndarray of floats
log of x, with x<=0 values replaced with -inf
"""
xshape = x.shape
_x = x.flatten()
y = utils.masked_log(_x)
return y.reshape(xshape) | [
"def",
"_log",
"(",
"self",
",",
"x",
")",
":",
"xshape",
"=",
"x",
".",
"shape",
"_x",
"=",
"x",
".",
"flatten",
"(",
")",
"y",
"=",
"utils",
".",
"masked_log",
"(",
"_x",
")",
"return",
"y",
".",
"reshape",
"(",
"xshape",
")"
] | Modified version of np.log that manually sets values <=0 to -inf
Parameters
----------
x: ndarray of floats
Input to the log function
Returns
-------
log_ma: ndarray of floats
log of x, with x<=0 values replaced with -inf | [
"Modified",
"version",
"of",
"np",
".",
"log",
"that",
"manually",
"sets",
"values",
"<",
"=",
"0",
"to",
"-",
"inf"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/eventseg/event.py#L322-L339 | train | 204,571 |
brainiak/brainiak | brainiak/eventseg/event.py | EventSegment.set_event_patterns | def set_event_patterns(self, event_pat):
"""Set HMM event patterns manually
Rather than fitting the event patterns automatically using fit(), this
function allows them to be set explicitly. They can then be used to
find corresponding events in a new dataset, using find_events().
Parameters
----------
event_pat: voxel by event ndarray
"""
if event_pat.shape[1] != self.n_events:
raise ValueError(("Number of columns of event_pat must match "
"number of events"))
self.event_pat_ = event_pat.copy() | python | def set_event_patterns(self, event_pat):
"""Set HMM event patterns manually
Rather than fitting the event patterns automatically using fit(), this
function allows them to be set explicitly. They can then be used to
find corresponding events in a new dataset, using find_events().
Parameters
----------
event_pat: voxel by event ndarray
"""
if event_pat.shape[1] != self.n_events:
raise ValueError(("Number of columns of event_pat must match "
"number of events"))
self.event_pat_ = event_pat.copy() | [
"def",
"set_event_patterns",
"(",
"self",
",",
"event_pat",
")",
":",
"if",
"event_pat",
".",
"shape",
"[",
"1",
"]",
"!=",
"self",
".",
"n_events",
":",
"raise",
"ValueError",
"(",
"(",
"\"Number of columns of event_pat must match \"",
"\"number of events\"",
")"... | Set HMM event patterns manually
Rather than fitting the event patterns automatically using fit(), this
function allows them to be set explicitly. They can then be used to
find corresponding events in a new dataset, using find_events().
Parameters
----------
event_pat: voxel by event ndarray | [
"Set",
"HMM",
"event",
"patterns",
"manually"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/eventseg/event.py#L341-L355 | train | 204,572 |
brainiak/brainiak | brainiak/eventseg/event.py | EventSegment.calc_weighted_event_var | def calc_weighted_event_var(self, D, weights, event_pat):
"""Computes normalized weighted variance around event pattern
Utility function for computing variance in a training set of weighted
event examples. For each event, the sum of squared differences for all
timepoints from the event pattern is computed, and then the weights
specify how much each of these differences contributes to the
variance (normalized by the number of voxels).
Parameters
----------
D : timepoint by voxel ndarray
fMRI data for which to compute event variances
weights : timepoint by event ndarray
specifies relative weights of timepoints for each event
event_pat : voxel by event ndarray
mean event patterns to compute variance around
Returns
-------
ev_var : ndarray of variances for each event
"""
Dz = stats.zscore(D, axis=1, ddof=1)
ev_var = np.empty(event_pat.shape[1])
for e in range(event_pat.shape[1]):
# Only compute variances for weights > 0.1% of max weight
nz = weights[:, e] > np.max(weights[:, e])/1000
sumsq = np.dot(weights[nz, e],
np.sum(np.square(Dz[nz, :] -
event_pat[:, e]), axis=1))
ev_var[e] = sumsq/(np.sum(weights[nz, e]) -
np.sum(np.square(weights[nz, e])) /
np.sum(weights[nz, e]))
ev_var = ev_var / D.shape[1]
return ev_var | python | def calc_weighted_event_var(self, D, weights, event_pat):
"""Computes normalized weighted variance around event pattern
Utility function for computing variance in a training set of weighted
event examples. For each event, the sum of squared differences for all
timepoints from the event pattern is computed, and then the weights
specify how much each of these differences contributes to the
variance (normalized by the number of voxels).
Parameters
----------
D : timepoint by voxel ndarray
fMRI data for which to compute event variances
weights : timepoint by event ndarray
specifies relative weights of timepoints for each event
event_pat : voxel by event ndarray
mean event patterns to compute variance around
Returns
-------
ev_var : ndarray of variances for each event
"""
Dz = stats.zscore(D, axis=1, ddof=1)
ev_var = np.empty(event_pat.shape[1])
for e in range(event_pat.shape[1]):
# Only compute variances for weights > 0.1% of max weight
nz = weights[:, e] > np.max(weights[:, e])/1000
sumsq = np.dot(weights[nz, e],
np.sum(np.square(Dz[nz, :] -
event_pat[:, e]), axis=1))
ev_var[e] = sumsq/(np.sum(weights[nz, e]) -
np.sum(np.square(weights[nz, e])) /
np.sum(weights[nz, e]))
ev_var = ev_var / D.shape[1]
return ev_var | [
"def",
"calc_weighted_event_var",
"(",
"self",
",",
"D",
",",
"weights",
",",
"event_pat",
")",
":",
"Dz",
"=",
"stats",
".",
"zscore",
"(",
"D",
",",
"axis",
"=",
"1",
",",
"ddof",
"=",
"1",
")",
"ev_var",
"=",
"np",
".",
"empty",
"(",
"event_pat"... | Computes normalized weighted variance around event pattern
Utility function for computing variance in a training set of weighted
event examples. For each event, the sum of squared differences for all
timepoints from the event pattern is computed, and then the weights
specify how much each of these differences contributes to the
variance (normalized by the number of voxels).
Parameters
----------
D : timepoint by voxel ndarray
fMRI data for which to compute event variances
weights : timepoint by event ndarray
specifies relative weights of timepoints for each event
event_pat : voxel by event ndarray
mean event patterns to compute variance around
Returns
-------
ev_var : ndarray of variances for each event | [
"Computes",
"normalized",
"weighted",
"variance",
"around",
"event",
"pattern"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/eventseg/event.py#L431-L467 | train | 204,573 |
brainiak/brainiak | brainiak/eventseg/event.py | EventSegment.model_prior | def model_prior(self, t):
"""Returns the prior probability of the HMM
Runs forward-backward without any data, showing the prior distribution
of the model (for comparison with a posterior).
Parameters
----------
t: int
Number of timepoints
Returns
-------
segments : time by event ndarray
segments[t,e] = prior probability that timepoint t is in event e
test_ll : float
Log-likelihood of model (data-independent term)"""
lg, test_ll = self._forward_backward(np.zeros((t, self.n_events)))
segments = np.exp(lg)
return segments, test_ll | python | def model_prior(self, t):
"""Returns the prior probability of the HMM
Runs forward-backward without any data, showing the prior distribution
of the model (for comparison with a posterior).
Parameters
----------
t: int
Number of timepoints
Returns
-------
segments : time by event ndarray
segments[t,e] = prior probability that timepoint t is in event e
test_ll : float
Log-likelihood of model (data-independent term)"""
lg, test_ll = self._forward_backward(np.zeros((t, self.n_events)))
segments = np.exp(lg)
return segments, test_ll | [
"def",
"model_prior",
"(",
"self",
",",
"t",
")",
":",
"lg",
",",
"test_ll",
"=",
"self",
".",
"_forward_backward",
"(",
"np",
".",
"zeros",
"(",
"(",
"t",
",",
"self",
".",
"n_events",
")",
")",
")",
"segments",
"=",
"np",
".",
"exp",
"(",
"lg",... | Returns the prior probability of the HMM
Runs forward-backward without any data, showing the prior distribution
of the model (for comparison with a posterior).
Parameters
----------
t: int
Number of timepoints
Returns
-------
segments : time by event ndarray
segments[t,e] = prior probability that timepoint t is in event e
test_ll : float
Log-likelihood of model (data-independent term) | [
"Returns",
"the",
"prior",
"probability",
"of",
"the",
"HMM"
] | 408f12dec2ff56559a26873a848a09e4c8facfeb | https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/eventseg/event.py#L469-L491 | train | 204,574 |
kinegratii/borax | borax/utils.py | chain_getattr | def chain_getattr(obj, attr, value=None):
"""Get chain attribute for an object.
"""
try:
return _resolve_value(safe_chain_getattr(obj, attr))
except AttributeError:
return value | python | def chain_getattr(obj, attr, value=None):
"""Get chain attribute for an object.
"""
try:
return _resolve_value(safe_chain_getattr(obj, attr))
except AttributeError:
return value | [
"def",
"chain_getattr",
"(",
"obj",
",",
"attr",
",",
"value",
"=",
"None",
")",
":",
"try",
":",
"return",
"_resolve_value",
"(",
"safe_chain_getattr",
"(",
"obj",
",",
"attr",
")",
")",
"except",
"AttributeError",
":",
"return",
"value"
] | Get chain attribute for an object. | [
"Get",
"chain",
"attribute",
"for",
"an",
"object",
"."
] | 921649f9277e3f657b6dea5a80e67de9ee5567f6 | https://github.com/kinegratii/borax/blob/921649f9277e3f657b6dea5a80e67de9ee5567f6/borax/utils.py#L27-L33 | train | 204,575 |
kinegratii/borax | borax/calendars/festivals.py | iter_festival_countdown | def iter_festival_countdown(countdown: Optional[int] = None, date_obj: MDate = None,
lang: str = 'zh-Hans') -> FestivalCountdownIterable:
"""Return countdown of festivals.
"""
factory = FestivalFactory(lang=lang)
return factory.iter_festival_countdown(countdown, date_obj) | python | def iter_festival_countdown(countdown: Optional[int] = None, date_obj: MDate = None,
lang: str = 'zh-Hans') -> FestivalCountdownIterable:
"""Return countdown of festivals.
"""
factory = FestivalFactory(lang=lang)
return factory.iter_festival_countdown(countdown, date_obj) | [
"def",
"iter_festival_countdown",
"(",
"countdown",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"date_obj",
":",
"MDate",
"=",
"None",
",",
"lang",
":",
"str",
"=",
"'zh-Hans'",
")",
"->",
"FestivalCountdownIterable",
":",
"factory",
"=",
"FestivalFa... | Return countdown of festivals. | [
"Return",
"countdown",
"of",
"festivals",
"."
] | 921649f9277e3f657b6dea5a80e67de9ee5567f6 | https://github.com/kinegratii/borax/blob/921649f9277e3f657b6dea5a80e67de9ee5567f6/borax/calendars/festivals.py#L324-L329 | train | 204,576 |
kinegratii/borax | borax/calendars/lunardate.py | parse_year_days | def parse_year_days(year_info):
"""Parse year days from a year info.
"""
leap_month, leap_days = _parse_leap(year_info)
res = leap_days
for month in range(1, 13):
res += (year_info >> (16 - month)) % 2 + 29
return res | python | def parse_year_days(year_info):
"""Parse year days from a year info.
"""
leap_month, leap_days = _parse_leap(year_info)
res = leap_days
for month in range(1, 13):
res += (year_info >> (16 - month)) % 2 + 29
return res | [
"def",
"parse_year_days",
"(",
"year_info",
")",
":",
"leap_month",
",",
"leap_days",
"=",
"_parse_leap",
"(",
"year_info",
")",
"res",
"=",
"leap_days",
"for",
"month",
"in",
"range",
"(",
"1",
",",
"13",
")",
":",
"res",
"+=",
"(",
"year_info",
">>",
... | Parse year days from a year info. | [
"Parse",
"year",
"days",
"from",
"a",
"year",
"info",
"."
] | 921649f9277e3f657b6dea5a80e67de9ee5567f6 | https://github.com/kinegratii/borax/blob/921649f9277e3f657b6dea5a80e67de9ee5567f6/borax/calendars/lunardate.py#L71-L78 | train | 204,577 |
kinegratii/borax | borax/calendars/lunardate.py | _iter_year_month | def _iter_year_month(year_info):
""" Iter the month days in a lunar year.
"""
# info => month, days, leap
leap_month, leap_days = _parse_leap(year_info)
months = [(i, 0) for i in range(1, 13)]
if leap_month > 0:
months.insert(leap_month, (leap_month, 1))
for month, leap in months:
if leap:
days = leap_days
else:
days = (year_info >> (16 - month)) % 2 + 29
yield month, days, leap | python | def _iter_year_month(year_info):
""" Iter the month days in a lunar year.
"""
# info => month, days, leap
leap_month, leap_days = _parse_leap(year_info)
months = [(i, 0) for i in range(1, 13)]
if leap_month > 0:
months.insert(leap_month, (leap_month, 1))
for month, leap in months:
if leap:
days = leap_days
else:
days = (year_info >> (16 - month)) % 2 + 29
yield month, days, leap | [
"def",
"_iter_year_month",
"(",
"year_info",
")",
":",
"# info => month, days, leap",
"leap_month",
",",
"leap_days",
"=",
"_parse_leap",
"(",
"year_info",
")",
"months",
"=",
"[",
"(",
"i",
",",
"0",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"13",
... | Iter the month days in a lunar year. | [
"Iter",
"the",
"month",
"days",
"in",
"a",
"lunar",
"year",
"."
] | 921649f9277e3f657b6dea5a80e67de9ee5567f6 | https://github.com/kinegratii/borax/blob/921649f9277e3f657b6dea5a80e67de9ee5567f6/borax/calendars/lunardate.py#L84-L98 | train | 204,578 |
realitix/vulkan | generator/generate.py | model_typedefs | def model_typedefs(vk, model):
"""Fill the model with typedefs
model['typedefs'] = {'name': 'type', ...}
"""
model['typedefs'] = {}
# bitmasks and basetypes
bitmasks = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'bitmask']
basetypes = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'basetype']
for typedef in bitmasks + basetypes:
if not typedef.get('type'):
continue
model['typedefs'][typedef['name']] = typedef['type']
# handles
handles = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'handle']
for handle in handles:
if 'name' not in handle or 'type' not in handle:
continue
n = handle['name']
t = handle['type']
if t == 'VK_DEFINE_HANDLE':
model['typedefs']['struct %s_T' % n] = '*%s' % n
if t == 'VK_DEFINE_HANDLE':
model['typedefs'][n] = 'uint64_t'
# custom plaform dependant
for name in ['Display', 'xcb_connection_t', 'wl_display', 'wl_surface',
'MirConnection', 'MirSurface', 'ANativeWindow',
'SECURITY_ATTRIBUTES']:
model['typedefs'][name] = 'struct %s' % name
model['typedefs'].update({
'Window': 'uint32_t', 'VisualID': 'uint32_t',
'xcb_window_t': 'uint32_t', 'xcb_visualid_t': 'uint32_t'
}) | python | def model_typedefs(vk, model):
"""Fill the model with typedefs
model['typedefs'] = {'name': 'type', ...}
"""
model['typedefs'] = {}
# bitmasks and basetypes
bitmasks = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'bitmask']
basetypes = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'basetype']
for typedef in bitmasks + basetypes:
if not typedef.get('type'):
continue
model['typedefs'][typedef['name']] = typedef['type']
# handles
handles = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'handle']
for handle in handles:
if 'name' not in handle or 'type' not in handle:
continue
n = handle['name']
t = handle['type']
if t == 'VK_DEFINE_HANDLE':
model['typedefs']['struct %s_T' % n] = '*%s' % n
if t == 'VK_DEFINE_HANDLE':
model['typedefs'][n] = 'uint64_t'
# custom plaform dependant
for name in ['Display', 'xcb_connection_t', 'wl_display', 'wl_surface',
'MirConnection', 'MirSurface', 'ANativeWindow',
'SECURITY_ATTRIBUTES']:
model['typedefs'][name] = 'struct %s' % name
model['typedefs'].update({
'Window': 'uint32_t', 'VisualID': 'uint32_t',
'xcb_window_t': 'uint32_t', 'xcb_visualid_t': 'uint32_t'
}) | [
"def",
"model_typedefs",
"(",
"vk",
",",
"model",
")",
":",
"model",
"[",
"'typedefs'",
"]",
"=",
"{",
"}",
"# bitmasks and basetypes",
"bitmasks",
"=",
"[",
"x",
"for",
"x",
"in",
"vk",
"[",
"'registry'",
"]",
"[",
"'types'",
"]",
"[",
"'type'",
"]",
... | Fill the model with typedefs
model['typedefs'] = {'name': 'type', ...} | [
"Fill",
"the",
"model",
"with",
"typedefs"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L55-L98 | train | 204,579 |
realitix/vulkan | generator/generate.py | model_macros | def model_macros(vk, model):
"""Fill the model with macros
model['macros'] = {'name': value, ...}
"""
model['macros'] = {}
# API Macros
macros = [x for x in vk['registry']['enums']
if x.get('@type') not in ('bitmask', 'enum')]
# TODO: Check theses values
special_values = {'1000.0f': '1000.0',
'(~0U)': 0xffffffff,
'(~0ULL)': -1,
'(~0U-1)': 0xfffffffe,
'(~0U-2)': 0xfffffffd}
for macro in macros[0]['enum']:
if '@name' not in macro or '@value' not in macro:
continue
name = macro['@name']
value = macro['@value']
if value in special_values:
value = special_values[value]
model['macros'][name] = value
# Extension Macros
for ext in get_extensions_filtered(vk):
model['macros'][ext['@name']] = 1
for req in ext['require']:
for enum in req['enum']:
ename = enum['@name']
evalue = parse_constant(enum, int(ext['@number']))
if enum.get('@extends') == 'VkResult':
model['enums']['VkResult'][ename] = evalue
else:
model['macros'][ename] = evalue | python | def model_macros(vk, model):
"""Fill the model with macros
model['macros'] = {'name': value, ...}
"""
model['macros'] = {}
# API Macros
macros = [x for x in vk['registry']['enums']
if x.get('@type') not in ('bitmask', 'enum')]
# TODO: Check theses values
special_values = {'1000.0f': '1000.0',
'(~0U)': 0xffffffff,
'(~0ULL)': -1,
'(~0U-1)': 0xfffffffe,
'(~0U-2)': 0xfffffffd}
for macro in macros[0]['enum']:
if '@name' not in macro or '@value' not in macro:
continue
name = macro['@name']
value = macro['@value']
if value in special_values:
value = special_values[value]
model['macros'][name] = value
# Extension Macros
for ext in get_extensions_filtered(vk):
model['macros'][ext['@name']] = 1
for req in ext['require']:
for enum in req['enum']:
ename = enum['@name']
evalue = parse_constant(enum, int(ext['@number']))
if enum.get('@extends') == 'VkResult':
model['enums']['VkResult'][ename] = evalue
else:
model['macros'][ename] = evalue | [
"def",
"model_macros",
"(",
"vk",
",",
"model",
")",
":",
"model",
"[",
"'macros'",
"]",
"=",
"{",
"}",
"# API Macros",
"macros",
"=",
"[",
"x",
"for",
"x",
"in",
"vk",
"[",
"'registry'",
"]",
"[",
"'enums'",
"]",
"if",
"x",
".",
"get",
"(",
"'@t... | Fill the model with macros
model['macros'] = {'name': value, ...} | [
"Fill",
"the",
"model",
"with",
"macros"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L186-L227 | train | 204,580 |
realitix/vulkan | generator/generate.py | model_funcpointers | def model_funcpointers(vk, model):
"""Fill the model with function pointer
model['funcpointers'] = {'pfn_name': 'struct_name'}
"""
model['funcpointers'] = {}
funcs = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'funcpointer']
structs = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'struct']
for f in funcs:
pfn_name = f['name']
for s in structs:
if 'member' not in s:
continue
for m in s['member']:
if m['type'] == pfn_name:
struct_name = s['@name']
model['funcpointers'][pfn_name] = struct_name | python | def model_funcpointers(vk, model):
"""Fill the model with function pointer
model['funcpointers'] = {'pfn_name': 'struct_name'}
"""
model['funcpointers'] = {}
funcs = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'funcpointer']
structs = [x for x in vk['registry']['types']['type']
if x.get('@category') == 'struct']
for f in funcs:
pfn_name = f['name']
for s in structs:
if 'member' not in s:
continue
for m in s['member']:
if m['type'] == pfn_name:
struct_name = s['@name']
model['funcpointers'][pfn_name] = struct_name | [
"def",
"model_funcpointers",
"(",
"vk",
",",
"model",
")",
":",
"model",
"[",
"'funcpointers'",
"]",
"=",
"{",
"}",
"funcs",
"=",
"[",
"x",
"for",
"x",
"in",
"vk",
"[",
"'registry'",
"]",
"[",
"'types'",
"]",
"[",
"'type'",
"]",
"if",
"x",
".",
"... | Fill the model with function pointer
model['funcpointers'] = {'pfn_name': 'struct_name'} | [
"Fill",
"the",
"model",
"with",
"function",
"pointer"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L230-L251 | train | 204,581 |
realitix/vulkan | generator/generate.py | model_exceptions | def model_exceptions(vk, model):
"""Fill the model with exceptions and errors
model['exceptions'] = {val: 'name',...}
model['errors'] = {val: 'name',...}
"""
model['exceptions'] = {}
model['errors'] = {}
all_codes = model['enums']['VkResult']
success_names = set()
error_names = set()
commands = [x for x in vk['registry']['commands']['command']]
for command in commands:
successes = command.get('@successcodes', '').split(',')
errors = command.get('@errorcodes', '').split(',')
success_names.update(successes)
error_names.update(errors)
for key, value in all_codes.items():
if key.startswith('VK_RESULT') or key == 'VK_SUCCESS':
continue
name = inflection.camelize(key.lower())
if key in success_names:
model['exceptions'][value] = name
elif key in error_names:
model['errors'][value] = name
else:
print('Warning: return code %s unused' % key) | python | def model_exceptions(vk, model):
"""Fill the model with exceptions and errors
model['exceptions'] = {val: 'name',...}
model['errors'] = {val: 'name',...}
"""
model['exceptions'] = {}
model['errors'] = {}
all_codes = model['enums']['VkResult']
success_names = set()
error_names = set()
commands = [x for x in vk['registry']['commands']['command']]
for command in commands:
successes = command.get('@successcodes', '').split(',')
errors = command.get('@errorcodes', '').split(',')
success_names.update(successes)
error_names.update(errors)
for key, value in all_codes.items():
if key.startswith('VK_RESULT') or key == 'VK_SUCCESS':
continue
name = inflection.camelize(key.lower())
if key in success_names:
model['exceptions'][value] = name
elif key in error_names:
model['errors'][value] = name
else:
print('Warning: return code %s unused' % key) | [
"def",
"model_exceptions",
"(",
"vk",
",",
"model",
")",
":",
"model",
"[",
"'exceptions'",
"]",
"=",
"{",
"}",
"model",
"[",
"'errors'",
"]",
"=",
"{",
"}",
"all_codes",
"=",
"model",
"[",
"'enums'",
"]",
"[",
"'VkResult'",
"]",
"success_names",
"=",
... | Fill the model with exceptions and errors
model['exceptions'] = {val: 'name',...}
model['errors'] = {val: 'name',...} | [
"Fill",
"the",
"model",
"with",
"exceptions",
"and",
"errors"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L254-L285 | train | 204,582 |
realitix/vulkan | generator/generate.py | model_constructors | def model_constructors(vk, model):
"""Fill the model with constructors
model['constructors'] = [{'name': 'x', 'members': [{'name': 'y'}].}]
"""
model['constructors'] = []
structs = [x for x in vk['registry']['types']['type']
if x.get('@category') in {'struct', 'union'}]
def parse_len(member):
mlen = member.get('@len')
if not mlen:
return None
if ',' in mlen:
mlen = mlen.split(',')[0]
if 'latex' in mlen or 'null-terminated' in mlen:
return None
return mlen
for struct in structs:
if 'member' not in struct:
continue
model['constructors'].append({
'name': struct['@name'],
'members': [{
'name': x['name'],
'type': x['type'],
'default': x.get('@values'),
'len': parse_len(x)
} for x in struct['member']]
def model_constructors(vk, model):
    """Populate ``model['constructors']``.

    Each entry describes one struct/union from the registry: its name
    plus one dict per member holding name, type, default value
    (``@values``) and the optional length attribute.
    """
    def member_len(member):
        # '@len' may hold several comma-separated specs; only the first
        # matters, and latex / null-terminated specs carry no usable length.
        raw = member.get('@len')
        if not raw:
            return None
        if ',' in raw:
            raw = raw.split(',')[0]
        return None if 'latex' in raw or 'null-terminated' in raw else raw

    model['constructors'] = []
    for struct in vk['registry']['types']['type']:
        if struct.get('@category') not in {'struct', 'union'}:
            continue
        if 'member' not in struct:
            continue
        members = [{
            'name': m['name'],
            'type': m['type'],
            'default': m.get('@values'),
            'len': member_len(m),
        } for m in struct['member']]
        model['constructors'].append(
            {'name': struct['@name'], 'members': members})
"def",
"model_constructors",
"(",
"vk",
",",
"model",
")",
":",
"model",
"[",
"'constructors'",
"]",
"=",
"[",
"]",
"structs",
"=",
"[",
"x",
"for",
"x",
"in",
"vk",
"[",
"'registry'",
"]",
"[",
"'types'",
"]",
"[",
"'type'",
"]",
"if",
"x",
".",
... | Fill the model with constructors
model['constructors'] = [{'name': 'x', 'members': [{'name': 'y'}].}] | [
"Fill",
"the",
"model",
"with",
"constructors"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L288-L322 | train | 204,583 |
realitix/vulkan | generator/generate.py | model_ext_functions | def model_ext_functions(vk, model):
"""Fill the model with extensions functions"""
model['ext_functions'] = {'instance': {}, 'device': {}}
# invert the alias to better lookup
alias = {v: k for k, v in model['alias'].items()}
for extension in get_extensions_filtered(vk):
for req in extension['require']:
if not req.get('command'):
continue
ext_type = extension['@type']
for x in req['command']:
name = x['@name']
if name in alias.keys():
model['ext_functions'][ext_type][name] = alias[name]
else:
def model_ext_functions(vk, model):
    """Populate ``model['ext_functions']`` with extension commands.

    Commands are grouped by extension type ('instance' or 'device') and
    mapped onto their aliased name when ``model['alias']`` declares one.
    """
    model['ext_functions'] = {'instance': {}, 'device': {}}
    # model['alias'] maps alias -> target; invert it for direct lookups.
    reverse_alias = {v: k for k, v in model['alias'].items()}
    for extension in get_extensions_filtered(vk):
        for req in extension['require']:
            commands = req.get('command')
            if not commands:
                continue
            ext_type = extension['@type']
            for entry in commands:
                name = entry['@name']
                model['ext_functions'][ext_type][name] = reverse_alias.get(name, name)
"def",
"model_ext_functions",
"(",
"vk",
",",
"model",
")",
":",
"model",
"[",
"'ext_functions'",
"]",
"=",
"{",
"'instance'",
":",
"{",
"}",
",",
"'device'",
":",
"{",
"}",
"}",
"# invert the alias to better lookup",
"alias",
"=",
"{",
"v",
":",
"k",
"f... | Fill the model with extensions functions | [
"Fill",
"the",
"model",
"with",
"extensions",
"functions"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L448-L466 | train | 204,584 |
realitix/vulkan | generator/generate.py | model_alias | def model_alias(vk, model):
"""Fill the model with alias since V1"""
model['alias'] = {}
# types
for s in vk['registry']['types']['type']:
if s.get('@category', None) == 'handle' and s.get('@alias'):
model['alias'][s['@alias']] = s['@name']
# commands
for c in vk['registry']['commands']['command']:
if c.get('@alias'):
def model_alias(vk, model):
    """Populate ``model['alias']`` (alias name -> actual name).

    Covers handle types and commands that declare an ``@alias``
    attribute in the registry.
    """
    aliases = {}
    # Handle types.
    for type_entry in vk['registry']['types']['type']:
        if type_entry.get('@category', None) == 'handle' and type_entry.get('@alias'):
            aliases[type_entry['@alias']] = type_entry['@name']
    # Commands.
    for command in vk['registry']['commands']['command']:
        if command.get('@alias'):
            aliases[command['@alias']] = command['@name']
    model['alias'] = aliases
"def",
"model_alias",
"(",
"vk",
",",
"model",
")",
":",
"model",
"[",
"'alias'",
"]",
"=",
"{",
"}",
"# types",
"for",
"s",
"in",
"vk",
"[",
"'registry'",
"]",
"[",
"'types'",
"]",
"[",
"'type'",
"]",
":",
"if",
"s",
".",
"get",
"(",
"'@category... | Fill the model with alias since V1 | [
"Fill",
"the",
"model",
"with",
"alias",
"since",
"V1"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L469-L481 | train | 204,585 |
realitix/vulkan | generator/generate.py | format_vk | def format_vk(vk):
"""Format vk before using it"""
# Force extension require to be a list
for ext in get_extensions_filtered(vk):
req = ext['require']
if not isinstance(req, list):
def format_vk(vk):
    """Normalize the parsed spec so later passes can rely on its shape.

    An extension's 'require' entry may be a single dict when only one
    <require> element exists; wrap it in a one-item list so every caller
    can iterate it uniformly.
    """
    for extension in get_extensions_filtered(vk):
        requirements = extension['require']
        if not isinstance(requirements, list):
            extension['require'] = [requirements]
"def",
"format_vk",
"(",
"vk",
")",
":",
"# Force extension require to be a list",
"for",
"ext",
"in",
"get_extensions_filtered",
"(",
"vk",
")",
":",
"req",
"=",
"ext",
"[",
"'require'",
"]",
"if",
"not",
"isinstance",
"(",
"req",
",",
"list",
")",
":",
"... | Format vk before using it | [
"Format",
"vk",
"before",
"using",
"it"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L496-L503 | train | 204,586 |
realitix/vulkan | generator/generate.py | generate_py | def generate_py():
"""Generate the python output file"""
model = {}
vk = init()
format_vk(vk)
model_alias(vk, model)
model_typedefs(vk, model)
model_enums(vk, model)
model_macros(vk, model)
model_funcpointers(vk, model)
model_exceptions(vk, model)
model_constructors(vk, model)
model_functions(vk, model)
model_ext_functions(vk, model)
env = jinja2.Environment(
autoescape=False,
trim_blocks=True,
lstrip_blocks=True,
loader=jinja2.FileSystemLoader(HERE)
)
out_file = path.join(HERE, path.pardir, 'vulkan', '_vulkan.py')
with open(out_file, 'w') as out:
def generate_py():
    """Build the model from the spec and render the python binding file."""
    model = {}
    vk = init()
    format_vk(vk)

    # Order matters: later passes read entries added by earlier ones
    # (e.g. model_ext_functions relies on model['alias']).
    model_alias(vk, model)
    model_typedefs(vk, model)
    model_enums(vk, model)
    model_macros(vk, model)
    model_funcpointers(vk, model)
    model_exceptions(vk, model)
    model_constructors(vk, model)
    model_functions(vk, model)
    model_ext_functions(vk, model)

    env = jinja2.Environment(
        autoescape=False,
        trim_blocks=True,
        lstrip_blocks=True,
        loader=jinja2.FileSystemLoader(HERE)
    )
    template = env.get_template('vulkan.template.py')
    target = path.join(HERE, path.pardir, 'vulkan', '_vulkan.py')
    with open(target, 'w') as out:
        out.write(template.render(model=model))
"def",
"generate_py",
"(",
")",
":",
"model",
"=",
"{",
"}",
"vk",
"=",
"init",
"(",
")",
"format_vk",
"(",
"vk",
")",
"model_alias",
"(",
"vk",
",",
"model",
")",
"model_typedefs",
"(",
"vk",
",",
"model",
")",
"model_enums",
"(",
"vk",
",",
"mode... | Generate the python output file | [
"Generate",
"the",
"python",
"output",
"file"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L506-L531 | train | 204,587 |
realitix/vulkan | generator/generate.py | generate_cdef | def generate_cdef():
"""Generate the cdef output file"""
include_libc_path = path.join(HERE, 'fake_libc_include')
include_vulkan_path = path.join(HERE, 'vulkan_include')
out_file = path.join(HERE, path.pardir, 'vulkan', 'vulkan.cdef.h')
header = path.join(include_vulkan_path, 'vulkan.h')
command = ['cpp',
'-std=c99',
'-P',
'-nostdinc',
'-I' + include_libc_path,
'-I' + include_vulkan_path,
'-o' + out_file,
'-DVK_USE_PLATFORM_XCB_KHR',
'-DVK_USE_PLATFORM_WAYLAND_KHR',
'-DVK_USE_PLATFORM_ANDROID_KHR',
'-DVK_USE_PLATFORM_WIN32_KHR',
'-DVK_USE_PLATFORM_XLIB_KHR',
header]
def generate_cdef():
    """Preprocess vulkan.h with cpp into the vulkan.cdef.h output file.

    Fake libc includes keep everything but the Vulkan declarations out
    of the output; every supported platform define is enabled.
    """
    fake_libc = path.join(HERE, 'fake_libc_include')
    vulkan_inc = path.join(HERE, 'vulkan_include')
    target = path.join(HERE, path.pardir, 'vulkan', 'vulkan.cdef.h')
    platform_defines = [
        '-DVK_USE_PLATFORM_XCB_KHR',
        '-DVK_USE_PLATFORM_WAYLAND_KHR',
        '-DVK_USE_PLATFORM_ANDROID_KHR',
        '-DVK_USE_PLATFORM_WIN32_KHR',
        '-DVK_USE_PLATFORM_XLIB_KHR',
    ]
    command = ['cpp', '-std=c99', '-P', '-nostdinc',
               '-I' + fake_libc,
               '-I' + vulkan_inc,
               '-o' + target]
    command += platform_defines
    command.append(path.join(vulkan_inc, 'vulkan.h'))
    subprocess.run(command, check=True)
"def",
"generate_cdef",
"(",
")",
":",
"include_libc_path",
"=",
"path",
".",
"join",
"(",
"HERE",
",",
"'fake_libc_include'",
")",
"include_vulkan_path",
"=",
"path",
".",
"join",
"(",
"HERE",
",",
"'vulkan_include'",
")",
"out_file",
"=",
"path",
".",
"joi... | Generate the cdef output file | [
"Generate",
"the",
"cdef",
"output",
"file"
] | 07285387092aaa61d2d71fa2913d60a73f022cbe | https://github.com/realitix/vulkan/blob/07285387092aaa61d2d71fa2913d60a73f022cbe/generator/generate.py#L534-L554 | train | 204,588 |
stphivos/django-mock-queries | django_mock_queries/mocks.py | mock_django_connection | def mock_django_connection(disabled_features=None):
""" Overwrite the Django database configuration with a mocked version.
This is a helper function that does the actual monkey patching.
"""
db = connections.databases['default']
db['PASSWORD'] = '****'
db['USER'] = '**Database disabled for unit tests**'
ConnectionHandler.__getitem__ = MagicMock(name='mock_connection')
# noinspection PyUnresolvedReferences
mock_connection = ConnectionHandler.__getitem__.return_value
if disabled_features:
for feature in disabled_features:
setattr(mock_connection.features, feature, False)
mock_ops = mock_connection.ops
# noinspection PyUnusedLocal
def compiler(queryset, connection, using, **kwargs):
result = MagicMock(name='mock_connection.ops.compiler()')
# noinspection PyProtectedMember
result.execute_sql.side_effect = NotSupportedError(
"Mock database tried to execute SQL for {} model.".format(
queryset.model._meta.object_name))
result.has_results.side_effect = result.execute_sql.side_effect
return result
mock_ops.compiler.return_value.side_effect = compiler
mock_ops.integer_field_range.return_value = (-sys.maxsize - 1, sys.maxsize)
mock_ops.max_name_length.return_value = sys.maxsize
def mock_django_connection(disabled_features=None):
    """ Overwrite the Django database configuration with a mocked version.
    This is a helper function that does the actual monkey patching.

    :param disabled_features: iterable of connection feature names to
        force to False on the mocked connection.
    """
    # Mangle the credentials so any accidental real connection fails loudly.
    default_db = connections.databases['default']
    default_db['PASSWORD'] = '****'
    default_db['USER'] = '**Database disabled for unit tests**'
    ConnectionHandler.__getitem__ = MagicMock(name='mock_connection')
    # noinspection PyUnresolvedReferences
    conn = ConnectionHandler.__getitem__.return_value
    for feature in (disabled_features or ()):
        setattr(conn.features, feature, False)

    ops = conn.ops

    # Any attempt to compile SQL against the mock raises NotSupportedError.
    # noinspection PyUnusedLocal
    def fake_compiler(queryset, connection, using, **kwargs):
        result = MagicMock(name='mock_connection.ops.compiler()')
        # noinspection PyProtectedMember
        result.execute_sql.side_effect = NotSupportedError(
            "Mock database tried to execute SQL for {} model.".format(
                queryset.model._meta.object_name))
        result.has_results.side_effect = result.execute_sql.side_effect
        return result

    ops.compiler.return_value.side_effect = fake_compiler
    ops.integer_field_range.return_value = (-sys.maxsize - 1, sys.maxsize)
    ops.max_name_length.return_value = sys.maxsize
    Model.refresh_from_db = Mock()
"def",
"mock_django_connection",
"(",
"disabled_features",
"=",
"None",
")",
":",
"db",
"=",
"connections",
".",
"databases",
"[",
"'default'",
"]",
"db",
"[",
"'PASSWORD'",
"]",
"=",
"'****'",
"db",
"[",
"'USER'",
"]",
"=",
"'**Database disabled for unit tests*... | Overwrite the Django database configuration with a mocked version.
This is a helper function that does the actual monkey patching. | [
"Overwrite",
"the",
"Django",
"database",
"configuration",
"with",
"a",
"mocked",
"version",
"."
] | 1522a0debfa78f4a986818d92eef826410becc85 | https://github.com/stphivos/django-mock-queries/blob/1522a0debfa78f4a986818d92eef826410becc85/django_mock_queries/mocks.py#L73-L103 | train | 204,589 |
stphivos/django-mock-queries | django_mock_queries/mocks.py | find_all_models | def find_all_models(models):
""" Yield all models and their parents. """
for model in models:
yield model
# noinspection PyProtectedMember
for parent in model._meta.parents.keys():
for parent_model in find_all_models((parent,)):
def find_all_models(models):
    """ Yield each given model followed by all of its ancestor models. """
    for model in models:
        yield model
        # noinspection PyProtectedMember
        for parent in model._meta.parents:
            yield from find_all_models((parent,))
"def",
"find_all_models",
"(",
"models",
")",
":",
"for",
"model",
"in",
"models",
":",
"yield",
"model",
"# noinspection PyProtectedMember",
"for",
"parent",
"in",
"model",
".",
"_meta",
".",
"parents",
".",
"keys",
"(",
")",
":",
"for",
"parent_model",
"in... | Yield all models and their parents. | [
"Yield",
"all",
"models",
"and",
"their",
"parents",
"."
] | 1522a0debfa78f4a986818d92eef826410becc85 | https://github.com/stphivos/django-mock-queries/blob/1522a0debfa78f4a986818d92eef826410becc85/django_mock_queries/mocks.py#L174-L181 | train | 204,590 |
stphivos/django-mock-queries | django_mock_queries/mocks.py | mocked_relations | def mocked_relations(*models):
""" Mock all related field managers to make pure unit tests possible.
The resulting patcher can be used just like one from the mock module:
As a test method decorator, a test class decorator, a context manager,
or by just calling start() and stop().
@mocked_relations(Dataset):
def test_dataset(self):
dataset = Dataset()
check = dataset.content_checks.create() # returns a ContentCheck object
"""
patchers = []
for model in find_all_models(models):
if isinstance(model.save, Mock):
# already mocked, so skip it
continue
model_name = model._meta.object_name
patchers.append(_patch_save(model, model_name))
if hasattr(model, 'objects'):
patchers.append(_patch_objects(model, model_name))
for related_object in chain(model._meta.related_objects,
model._meta.many_to_many):
name = related_object.name
if name not in model.__dict__ and related_object.one_to_many:
name += '_set'
if name in model.__dict__:
# Only mock direct relations, not inherited ones.
if getattr(model, name, None):
patchers.append(_patch_relation(
model, name, related_object
))
def mocked_relations(*models):
    """ Mock all related field managers to make pure unit tests possible.

    The resulting patcher can be used just like one from the mock module:
    As a test method decorator, a test class decorator, a context manager,
    or by just calling start() and stop().

    @mocked_relations(Dataset):
    def test_dataset(self):
        dataset = Dataset()
        check = dataset.content_checks.create() # returns a ContentCheck object
    """
    patchers = []
    for model in find_all_models(models):
        # A mocked save() means this model was already processed.
        if isinstance(model.save, Mock):
            continue
        model_name = model._meta.object_name
        patchers.append(_patch_save(model, model_name))
        if hasattr(model, 'objects'):
            patchers.append(_patch_objects(model, model_name))
        relations = chain(model._meta.related_objects,
                          model._meta.many_to_many)
        for relation in relations:
            name = relation.name
            if relation.one_to_many and name not in model.__dict__:
                name += '_set'
            # Only mock direct relations, not inherited ones.
            if name in model.__dict__ and getattr(model, name, None):
                patchers.append(_patch_relation(model, name, relation))
    return PatcherChain(patchers, pass_mocks=False)
"def",
"mocked_relations",
"(",
"*",
"models",
")",
":",
"patchers",
"=",
"[",
"]",
"for",
"model",
"in",
"find_all_models",
"(",
"models",
")",
":",
"if",
"isinstance",
"(",
"model",
".",
"save",
",",
"Mock",
")",
":",
"# already mocked, so skip it",
"con... | Mock all related field managers to make pure unit tests possible.
The resulting patcher can be used just like one from the mock module:
As a test method decorator, a test class decorator, a context manager,
or by just calling start() and stop().
@mocked_relations(Dataset):
def test_dataset(self):
dataset = Dataset()
check = dataset.content_checks.create() # returns a ContentCheck object | [
"Mock",
"all",
"related",
"field",
"managers",
"to",
"make",
"pure",
"unit",
"tests",
"possible",
"."
] | 1522a0debfa78f4a986818d92eef826410becc85 | https://github.com/stphivos/django-mock-queries/blob/1522a0debfa78f4a986818d92eef826410becc85/django_mock_queries/mocks.py#L211-L250 | train | 204,591 |
stphivos/django-mock-queries | django_mock_queries/mocks.py | PatcherChain.decorate_callable | def decorate_callable(self, target):
""" Called as a decorator. """
# noinspection PyUnusedLocal
def absorb_mocks(test_case, *args):
return target(test_case)
should_absorb = not (self.pass_mocks or isinstance(target, type))
result = absorb_mocks if should_absorb else target
for patcher in self.patchers:
result = patcher(result)
def decorate_callable(self, target):
    """ Apply every patcher in the chain when used as a decorator. """
    # noinspection PyUnusedLocal
    def absorb_mocks(test_case, *args):
        # Swallow the mock arguments the patchers would otherwise pass on.
        return target(test_case)

    decorated = target
    # Classes keep their signature; plain callables absorb the mocks
    # unless the chain was asked to pass them through.
    if not self.pass_mocks and not isinstance(target, type):
        decorated = absorb_mocks
    for patcher in self.patchers:
        decorated = patcher(decorated)
    return decorated
"def",
"decorate_callable",
"(",
"self",
",",
"target",
")",
":",
"# noinspection PyUnusedLocal",
"def",
"absorb_mocks",
"(",
"test_case",
",",
"*",
"args",
")",
":",
"return",
"target",
"(",
"test_case",
")",
"should_absorb",
"=",
"not",
"(",
"self",
".",
"... | Called as a decorator. | [
"Called",
"as",
"a",
"decorator",
"."
] | 1522a0debfa78f4a986818d92eef826410becc85 | https://github.com/stphivos/django-mock-queries/blob/1522a0debfa78f4a986818d92eef826410becc85/django_mock_queries/mocks.py#L294-L305 | train | 204,592 |
data-8/datascience | datascience/tables.py | _zero_on_type_error | def _zero_on_type_error(column_fn):
"""Wrap a function on an np.ndarray to return 0 on a type error."""
if not column_fn:
return column_fn
if not callable(column_fn):
raise TypeError('column functions must be callable')
@functools.wraps(column_fn)
def wrapped(column):
try:
return column_fn(column)
except TypeError:
if isinstance(column, np.ndarray):
return column.dtype.type() # A typed zero value
else:
raise
return wrapped | python | def _zero_on_type_error(column_fn):
"""Wrap a function on an np.ndarray to return 0 on a type error."""
if not column_fn:
return column_fn
if not callable(column_fn):
raise TypeError('column functions must be callable')
@functools.wraps(column_fn)
def wrapped(column):
try:
return column_fn(column)
except TypeError:
if isinstance(column, np.ndarray):
return column.dtype.type() # A typed zero value
else:
raise
return wrapped | [
"def",
"_zero_on_type_error",
"(",
"column_fn",
")",
":",
"if",
"not",
"column_fn",
":",
"return",
"column_fn",
"if",
"not",
"callable",
"(",
"column_fn",
")",
":",
"raise",
"TypeError",
"(",
"'column functions must be callable'",
")",
"@",
"functools",
".",
"wr... | Wrap a function on an np.ndarray to return 0 on a type error. | [
"Wrap",
"a",
"function",
"on",
"an",
"np",
".",
"ndarray",
"to",
"return",
"0",
"on",
"a",
"type",
"error",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2778-L2793 | train | 204,593 |
data-8/datascience | datascience/tables.py | _varargs_labels_as_list | def _varargs_labels_as_list(label_list):
"""Return a list of labels for a list of labels or singleton list of list
of labels."""
if len(label_list) == 0:
return []
elif not _is_non_string_iterable(label_list[0]):
# Assume everything is a label. If not, it'll be caught later.
return label_list
elif len(label_list) == 1:
return label_list[0]
else:
raise ValueError("Labels {} contain more than list.".format(label_list),
"Pass just one list of labels.") | python | def _varargs_labels_as_list(label_list):
"""Return a list of labels for a list of labels or singleton list of list
of labels."""
if len(label_list) == 0:
return []
elif not _is_non_string_iterable(label_list[0]):
# Assume everything is a label. If not, it'll be caught later.
return label_list
elif len(label_list) == 1:
return label_list[0]
else:
raise ValueError("Labels {} contain more than list.".format(label_list),
"Pass just one list of labels.") | [
"def",
"_varargs_labels_as_list",
"(",
"label_list",
")",
":",
"if",
"len",
"(",
"label_list",
")",
"==",
"0",
":",
"return",
"[",
"]",
"elif",
"not",
"_is_non_string_iterable",
"(",
"label_list",
"[",
"0",
"]",
")",
":",
"# Assume everything is a label. If not... | Return a list of labels for a list of labels or singleton list of list
of labels. | [
"Return",
"a",
"list",
"of",
"labels",
"for",
"a",
"list",
"of",
"labels",
"or",
"singleton",
"list",
"of",
"list",
"of",
"labels",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2831-L2843 | train | 204,594 |
data-8/datascience | datascience/tables.py | _assert_same | def _assert_same(values):
"""Assert that all values are identical and return the unique value."""
assert len(values) > 0
first, rest = values[0], values[1:]
for v in rest:
assert v == first
return first | python | def _assert_same(values):
"""Assert that all values are identical and return the unique value."""
assert len(values) > 0
first, rest = values[0], values[1:]
for v in rest:
assert v == first
return first | [
"def",
"_assert_same",
"(",
"values",
")",
":",
"assert",
"len",
"(",
"values",
")",
">",
"0",
"first",
",",
"rest",
"=",
"values",
"[",
"0",
"]",
",",
"values",
"[",
"1",
":",
"]",
"for",
"v",
"in",
"rest",
":",
"assert",
"v",
"==",
"first",
"... | Assert that all values are identical and return the unique value. | [
"Assert",
"that",
"all",
"values",
"are",
"identical",
"and",
"return",
"the",
"unique",
"value",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2845-L2851 | train | 204,595 |
data-8/datascience | datascience/tables.py | _collected_label | def _collected_label(collect, label):
"""Label of a collected column."""
if not collect.__name__.startswith('<'):
return label + ' ' + collect.__name__
else:
return label | python | def _collected_label(collect, label):
"""Label of a collected column."""
if not collect.__name__.startswith('<'):
return label + ' ' + collect.__name__
else:
return label | [
"def",
"_collected_label",
"(",
"collect",
",",
"label",
")",
":",
"if",
"not",
"collect",
".",
"__name__",
".",
"startswith",
"(",
"'<'",
")",
":",
"return",
"label",
"+",
"' '",
"+",
"collect",
".",
"__name__",
"else",
":",
"return",
"label"
] | Label of a collected column. | [
"Label",
"of",
"a",
"collected",
"column",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2854-L2859 | train | 204,596 |
data-8/datascience | datascience/tables.py | _is_non_string_iterable | def _is_non_string_iterable(value):
"""Whether a value is iterable."""
if isinstance(value, str):
return False
if hasattr(value, '__iter__'):
return True
if isinstance(value, collections.abc.Sequence):
return True
return False | python | def _is_non_string_iterable(value):
"""Whether a value is iterable."""
if isinstance(value, str):
return False
if hasattr(value, '__iter__'):
return True
if isinstance(value, collections.abc.Sequence):
return True
return False | [
"def",
"_is_non_string_iterable",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"False",
"if",
"hasattr",
"(",
"value",
",",
"'__iter__'",
")",
":",
"return",
"True",
"if",
"isinstance",
"(",
"value",
",",
"col... | Whether a value is iterable. | [
"Whether",
"a",
"value",
"is",
"iterable",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2862-L2870 | train | 204,597 |
data-8/datascience | datascience/tables.py | _vertical_x | def _vertical_x(axis, ticks=None, max_width=5):
"""Switch labels to vertical if they are long."""
if ticks is None:
ticks = axis.get_xticks()
if (np.array(ticks) == np.rint(ticks)).all():
ticks = np.rint(ticks).astype(np.int)
if max([len(str(tick)) for tick in ticks]) > max_width:
axis.set_xticklabels(ticks, rotation='vertical') | python | def _vertical_x(axis, ticks=None, max_width=5):
"""Switch labels to vertical if they are long."""
if ticks is None:
ticks = axis.get_xticks()
if (np.array(ticks) == np.rint(ticks)).all():
ticks = np.rint(ticks).astype(np.int)
if max([len(str(tick)) for tick in ticks]) > max_width:
axis.set_xticklabels(ticks, rotation='vertical') | [
"def",
"_vertical_x",
"(",
"axis",
",",
"ticks",
"=",
"None",
",",
"max_width",
"=",
"5",
")",
":",
"if",
"ticks",
"is",
"None",
":",
"ticks",
"=",
"axis",
".",
"get_xticks",
"(",
")",
"if",
"(",
"np",
".",
"array",
"(",
"ticks",
")",
"==",
"np",... | Switch labels to vertical if they are long. | [
"Switch",
"labels",
"to",
"vertical",
"if",
"they",
"are",
"long",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2872-L2879 | train | 204,598 |
data-8/datascience | datascience/tables.py | Table.read_table | def read_table(cls, filepath_or_buffer, *args, **vargs):
"""Read a table from a file or web address.
filepath_or_buffer -- string or file handle / StringIO; The string
could be a URL. Valid URL schemes include http,
ftp, s3, and file.
"""
# Look for .csv at the end of the path; use "," as a separator if found
try:
path = urllib.parse.urlparse(filepath_or_buffer).path
if 'data8.berkeley.edu' in filepath_or_buffer:
raise ValueError('data8.berkeley.edu requires authentication, '
'which is not supported.')
except AttributeError:
path = filepath_or_buffer
try:
if 'sep' not in vargs and path.endswith('.csv'):
vargs['sep'] = ','
except AttributeError:
pass
df = pandas.read_table(filepath_or_buffer, *args, **vargs)
def read_table(cls, filepath_or_buffer, *args, **vargs):
    """Read a table from a file or web address.

    filepath_or_buffer -- string or file handle / StringIO; The string
                          could be a URL. Valid URL schemes include http,
                          ftp, s3, and file.
    """
    try:
        path = urllib.parse.urlparse(filepath_or_buffer).path
        if 'data8.berkeley.edu' in filepath_or_buffer:
            raise ValueError('data8.berkeley.edu requires authentication, '
                             'which is not supported.')
    except AttributeError:
        # Not a string: assume a file handle / buffer was passed.
        path = filepath_or_buffer

    # Default to a comma separator for .csv paths.
    try:
        if 'sep' not in vargs and path.endswith('.csv'):
            vargs['sep'] = ','
    except AttributeError:
        pass

    frame = pandas.read_table(filepath_or_buffer, *args, **vargs)
    return cls.from_df(frame)
"def",
"read_table",
"(",
"cls",
",",
"filepath_or_buffer",
",",
"*",
"args",
",",
"*",
"*",
"vargs",
")",
":",
"# Look for .csv at the end of the path; use \",\" as a separator if found",
"try",
":",
"path",
"=",
"urllib",
".",
"parse",
".",
"urlparse",
"(",
"fil... | Read a table from a file or web address.
filepath_or_buffer -- string or file handle / StringIO; The string
could be a URL. Valid URL schemes include http,
ftp, s3, and file. | [
"Read",
"a",
"table",
"from",
"a",
"file",
"or",
"web",
"address",
"."
] | 4cee38266903ca169cea4a53b8cc39502d85c464 | https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L111-L133 | train | 204,599 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.