text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update_classifier(self, data, labels, w, classes): """Update the classifier parameters theta and bias Parameters data : list of 2D arrays, element i has shape=[voxels_i, samples_i] Each element in the list contains the fMRI data of one subject for the classification task. labels : list of arrays of int, element i has shape=[samples_i] Each element in the list contains the labels for the data samples in data_sup. w : list of 2D array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. classes : int The number of classes in the classifier. Returns ------- theta : array, shape=[features, classes] The MLR parameter for the class planes. bias : array shape=[classes,] The MLR parameter for class biases. """
# Stack the data and labels for training the classifier data_stacked, labels_stacked, weights = \ SSSRM._stack_list(data, labels, w) features = w[0].shape[1] total_samples = weights.size data_th = S.shared(data_stacked.astype(theano.config.floatX)) val_ = S.shared(labels_stacked) total_samples_S = S.shared(total_samples) theta_th = T.matrix(name='theta', dtype=theano.config.floatX) bias_th = T.col(name='bias', dtype=theano.config.floatX) constf2 = S.shared(self.alpha / self.gamma, allow_downcast=True) weights_th = S.shared(weights) log_p_y_given_x = \ T.log(T.nnet.softmax((theta_th.T.dot(data_th.T)).T + bias_th.T)) f = -constf2 * T.sum((log_p_y_given_x[T.arange(total_samples_S), val_]) / weights_th) + 0.5 * T.sum(theta_th ** 2) manifold = Product((Euclidean(features, classes), Euclidean(classes, 1))) problem = Problem(manifold=manifold, cost=f, arg=[theta_th, bias_th], verbosity=0) solver = ConjugateGradient(mingradnorm=1e-6) solution = solver.solve(problem) theta = solution[0] bias = solution[1] del constf2 del theta_th del bias_th del data_th del val_ del solver del solution return theta, bias
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _compute_shared_response(data, w): """ Compute the shared response S Parameters data : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. w : list of 2D arrays, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. Returns ------- s : array, shape=[features, samples] The shared response for the subjects data with the mappings in w. """
s = np.zeros((w[0].shape[1], data[0].shape[1])) for m in range(len(w)): s = s + w[m].T.dot(data[m]) s /= len(w) return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _objective_function(self, data_align, data_sup, labels, w, s, theta, bias): """Compute the objective function of the Semi-Supervised SRM See :eq:`sssrm-eq`. Parameters data_align : list of 2D arrays, element i has shape=[voxels_i, n_align] Each element in the list contains the fMRI data for alignment of one subject. There are n_align samples for each subject. data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i] Each element in the list contains the fMRI data of one subject for the classification task. labels : list of arrays of int, element i has shape=[samples_i] Each element in the list contains the labels for the data samples in data_sup. w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. s : array, shape=[features, samples] The shared response. theta : array, shape=[classes, features] The MLR class plane parameters. bias : array, shape=[classes] The MLR class biases. Returns ------- f_val : float The SS-SRM objective function evaluated based on the parameters to this function. """
subjects = len(data_align) # Compute the SRM loss f_val = 0.0 for subject in range(subjects): samples = data_align[subject].shape[1] f_val += (1 - self.alpha) * (0.5 / samples) \ * np.linalg.norm(data_align[subject] - w[subject].dot(s), 'fro')**2 # Compute the MLR loss f_val += self._loss_lr(data_sup, labels, w, theta, bias) return f_val
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _objective_function_subject(self, data_align, data_sup, labels, w, s, theta, bias): """Compute the objective function for one subject. .. math:: (1-C)*Loss_{SRM}_i(W_i,S;X_i) .. math:: + C/\\gamma * Loss_{MLR_i}(\\theta, bias; {(W_i^T*Z_i, y_i}) .. math:: + R(\\theta) Parameters data_align : 2D array, shape=[voxels_i, samples_align] Contains the fMRI data for alignment of subject i. data_sup : 2D array, shape=[voxels_i, samples_i] Contains the fMRI data of one subject for the classification task. labels : array of int, shape=[samples_i] The labels for the data samples in data_sup. w : array, shape=[voxels_i, features] The orthogonal transform (mapping) :math:`W_i` for subject i. s : array, shape=[features, samples] The shared response. theta : array, shape=[classes, features] The MLR class plane parameters. bias : array, shape=[classes] The MLR class biases. Returns ------- f_val : float The SS-SRM objective function for subject i evaluated on the parameters to this function. """
# Compute the SRM loss f_val = 0.0 samples = data_align.shape[1] f_val += (1 - self.alpha) * (0.5 / samples) \ * np.linalg.norm(data_align - w.dot(s), 'fro')**2 # Compute the MLR loss f_val += self._loss_lr_subject(data_sup, labels, w, theta, bias) return f_val
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _stack_list(data, data_labels, w): """Construct a numpy array by stacking arrays in a list Parameter data : list of 2D arrays, element i has shape=[voxels_i, samples_i] Each element in the list contains the fMRI data of one subject for the classification task. data_labels : list of arrays of int, element i has shape=[samples_i] Each element in the list contains the labels for the samples in data. w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. Returns ------- data_stacked : 2D array, shape=[samples, features] The data samples from all subjects are stacked into a single 2D array, where "samples" is the sum of samples_i. labels_stacked : array, shape=[samples,] The labels from all subjects are stacked into a single array, where "samples" is the sum of samples_i. weights : array, shape=[samples,] The number of samples of the subject that are related to that sample. They become a weight per sample in the MLR loss. """
labels_stacked = utils.concatenate_not_none(data_labels) weights = np.empty((labels_stacked.size,)) data_shared = [None] * len(data) curr_samples = 0 for s in range(len(data)): if data[s] is not None: subject_samples = data[s].shape[1] curr_samples_end = curr_samples + subject_samples weights[curr_samples:curr_samples_end] = subject_samples data_shared[s] = w[s].T.dot(data[s]) curr_samples += data[s].shape[1] data_stacked = utils.concatenate_not_none(data_shared, axis=1).T return data_stacked, labels_stacked, weights
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _singlenode_searchlight(l, msk, mysl_rad, bcast_var, extra_params): """Run searchlight function on block data in parallel. `extra_params` contains: - Searchlight function. - `Shape` mask. - Minimum active voxels proportion required to run the searchlight function. """
voxel_fn = extra_params[0] shape_mask = extra_params[1] min_active_voxels_proportion = extra_params[2] outmat = np.empty(msk.shape, dtype=np.object)[mysl_rad:-mysl_rad, mysl_rad:-mysl_rad, mysl_rad:-mysl_rad] for i in range(0, outmat.shape[0]): for j in range(0, outmat.shape[1]): for k in range(0, outmat.shape[2]): if msk[i+mysl_rad, j+mysl_rad, k+mysl_rad]: searchlight_slice = np.s_[ i:i+2*mysl_rad+1, j:j+2*mysl_rad+1, k:k+2*mysl_rad+1] voxel_fn_mask = msk[searchlight_slice] * shape_mask if (min_active_voxels_proportion == 0 or np.count_nonzero(voxel_fn_mask) / voxel_fn_mask.size > min_active_voxels_proportion): outmat[i, j, k] = voxel_fn( [ll[searchlight_slice] for ll in l], msk[searchlight_slice] * shape_mask, mysl_rad, bcast_var) return outmat
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_ownership(self, data): """Determine on which rank each subject currently resides Parameters data: list of 4D arrays with subject data Returns ------- list of ranks indicating the owner of each subject """
rank = self.comm.rank B = [(rank, idx) for (idx, c) in enumerate(data) if c is not None] C = self.comm.allreduce(B) ownership = [None] * len(data) for c in C: ownership[c[1]] = c[0] return ownership
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_blocks(self, mask): """Divide the volume into a set of blocks Ignore blocks that have no active voxels in the mask Parameters mask: a boolean 3D array which is true at every active voxel Returns ------- list of tuples containing block information: - a triple containing top left point of the block and - a triple containing the size in voxels of the block """
blocks = [] outerblk = self.max_blk_edge + 2*self.sl_rad for i in range(0, mask.shape[0], self.max_blk_edge): for j in range(0, mask.shape[1], self.max_blk_edge): for k in range(0, mask.shape[2], self.max_blk_edge): block_shape = mask[i:i+outerblk, j:j+outerblk, k:k+outerblk ].shape if np.any( mask[i+self.sl_rad:i+block_shape[0]-self.sl_rad, j+self.sl_rad:j+block_shape[1]-self.sl_rad, k+self.sl_rad:k+block_shape[2]-self.sl_rad]): blocks.append(((i, j, k), block_shape)) return blocks
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_block_data(self, mat, block): """Retrieve a block from a 3D or 4D volume Parameters mat: a 3D or 4D volume block: a tuple containing block information: - a triple containing the lowest-coordinate voxel in the block - a triple containing the size in voxels of the block Returns ------- In the case of a 3D array, a 3D subarray at the block location In the case of a 4D array, a 4D subarray at the block location, including the entire fourth dimension. """
(pt, sz) = block if len(mat.shape) == 3: return mat[pt[0]:pt[0]+sz[0], pt[1]:pt[1]+sz[1], pt[2]:pt[2]+sz[2]].copy() elif len(mat.shape) == 4: return mat[pt[0]:pt[0]+sz[0], pt[1]:pt[1]+sz[1], pt[2]:pt[2]+sz[2], :].copy()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _split_volume(self, mat, blocks): """Convert a volume into a list of block data Parameters mat: A 3D or 4D array to be split blocks: a list of tuples containing block information: - a triple containing the top left point of the block and - a triple containing the size in voxels of the block Returns ------- A list of the subarrays corresponding to each block """
return [self._get_block_data(mat, block) for block in blocks]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _scatter_list(self, data, owner): """Distribute a list from one rank to other ranks in a cyclic manner Parameters data: list of pickle-able data owner: rank that owns the data Returns ------- A list containing the data in a cyclic layout across ranks """
rank = self.comm.rank size = self.comm.size subject_submatrices = [] nblocks = self.comm.bcast(len(data) if rank == owner else None, root=owner) # For each submatrix for idx in range(0, nblocks, size): padded = None extra = max(0, idx+size - nblocks) # Pad with "None" so scatter can go to all processes if data is not None: padded = data[idx:idx+size] if extra > 0: padded = padded + [None]*extra # Scatter submatrices to all processes mytrans = self.comm.scatter(padded, root=owner) # Contribute submatrix to subject list if mytrans is not None: subject_submatrices += [mytrans] return subject_submatrices
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def distribute(self, subjects, mask): """Distribute data to MPI ranks Parameters subjects : list of 4D arrays containing data for one or more subjects. Each entry of the list must be present on at most one rank, and the other ranks contain a "None" at this list location. For example, for 3 ranks you may lay out the data in the following manner: Rank 0: [Subj0, None, None] Rank 1: [None, Subj1, None] Rank 2: [None, None, Subj2] Or alternatively, you may lay out the data in this manner: Rank 0: [Subj0, Subj1, Subj2] Rank 1: [None, None, None] Rank 2: [None, None, None] mask: 3D array with "True" entries at active vertices """
if mask.ndim != 3: raise ValueError('mask should be a 3D array') for (idx, subj) in enumerate(subjects): if subj is not None: if subj.ndim != 4: raise ValueError('subjects[{}] must be 4D'.format(idx)) self.mask = mask rank = self.comm.rank # Get/set ownership ownership = self._get_ownership(subjects) all_blocks = self._get_blocks(mask) if rank == 0 else None all_blocks = self.comm.bcast(all_blocks) # Divide data and mask splitsubj = [self._split_volume(s, all_blocks) if s is not None else None for s in subjects] submasks = self._split_volume(mask, all_blocks) # Scatter points, data, and mask self.blocks = self._scatter_list(all_blocks, 0) self.submasks = self._scatter_list(submasks, 0) self.subproblems = [self._scatter_list(s, ownership[s_idx]) for (s_idx, s) in enumerate(splitsubj)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_block_function(self, block_fn, extra_block_fn_params=None, pool_size=None): """Perform a function for each block in a volume. Parameters block_fn: function to apply to each block: Parameters data: list of 4D arrays containing subset of subject data, which is padded with sl_rad voxels. mask: 3D array containing subset of mask data sl_rad: radius, in voxels, of the sphere inscribed in the cube bcast_var: shared data which is broadcast to all processes extra_params: extra parameters Returns 3D array which is the same size as the mask input with padding removed extra_block_fn_params: tuple Extra parameters to pass to the block function pool_size: int Maximum number of processes running the block function in parallel. If None, number of available hardware threads, considering cpusets restrictions. """
rank = self.comm.rank results = [] usable_cpus = usable_cpu_count() if pool_size is None: processes = usable_cpus else: processes = min(pool_size, usable_cpus) if processes > 1: with Pool(processes) as pool: for idx, block in enumerate(self.blocks): result = pool.apply_async( block_fn, ([subproblem[idx] for subproblem in self.subproblems], self.submasks[idx], self.sl_rad, self.bcast_var, extra_block_fn_params)) results.append((block[0], result)) local_outputs = [(result[0], result[1].get()) for result in results] else: # If we only are using one CPU core, no need to create a Pool, # cause an underlying fork(), and send the data to that process. # Just do it here in serial. This will save copying the memory # and will stop a fork() which can cause problems in some MPI # implementations. for idx, block in enumerate(self.blocks): subprob_list = [subproblem[idx] for subproblem in self.subproblems] result = block_fn( subprob_list, self.submasks[idx], self.sl_rad, self.bcast_var, extra_block_fn_params) results.append((block[0], result)) local_outputs = [(result[0], result[1]) for result in results] # Collect results global_outputs = self.comm.gather(local_outputs) # Coalesce results outmat = np.empty(self.mask.shape, dtype=np.object) if rank == 0: for go_rank in global_outputs: for (pt, mat) in go_rank: coords = np.s_[ pt[0]+self.sl_rad:pt[0]+self.sl_rad+mat.shape[0], pt[1]+self.sl_rad:pt[1]+self.sl_rad+mat.shape[1], pt[2]+self.sl_rad:pt[2]+self.sl_rad+mat.shape[2] ] outmat[coords] = mat return outmat
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def run_searchlight(self, voxel_fn, pool_size=None): """Perform a function at each voxel which is set to True in the user-provided mask. The mask passed to the searchlight function will be further masked by the user-provided searchlight shape. Parameters voxel_fn: function to apply at each voxel Must be `serializeable using pickle <https://docs.python.org/3/library/pickle.html#what-can-be-pickled-and-unpickled>`_. Parameters subj: list of 4D arrays containing subset of subject data mask: 3D array containing subset of mask data sl_rad: radius, in voxels, of the sphere inscribed in the cube bcast_var: shared data which is broadcast to all processes Returns Value of any pickle-able type Returns ------- A volume which is the same size as the mask, however a number of voxels equal to the searchlight radius has been removed from each border of the volume. This volume contains the values returned from the searchlight function at each voxel which was set to True in the mask, and None elsewhere. """
extra_block_fn_params = (voxel_fn, self.shape, self.min_active_voxels_proportion) block_fn_result = self.run_block_function(_singlenode_searchlight, extra_block_fn_params, pool_size) return block_fn_result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _normalize_for_correlation(data, axis, return_nans=False): """normalize the data before computing correlation The data will be z-scored and divided by sqrt(n) along the assigned axis Parameters data: 2D array axis: int specify which dimension of the data should be normalized return_nans: bool, default:False If False, return zeros for NaNs; if True, return NaNs Returns ------- data: 2D array the normalized data """
shape = data.shape data = zscore(data, axis=axis, ddof=0) # if zscore fails (standard deviation is zero), # optionally set all values to be zero if not return_nans: data = np.nan_to_num(data) data = data / math.sqrt(shape[axis]) return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_correlation(matrix1, matrix2, return_nans=False): """compute correlation between two sets of variables Correlate the rows of matrix1 with the rows of matrix2. If matrix1 == matrix2, it is auto-correlation computation resulting in a symmetric correlation matrix. The number of columns MUST agree between set1 and set2. The correlation being computed here is the Pearson's correlation coefficient, which can be expressed as .. math:: corr(X, Y) = \\frac{cov(X, Y)}{\\sigma_X\\sigma_Y} where cov(X, Y) is the covariance of variable X and Y, and .. math:: \\sigma_X is the standard deviation of variable X Reducing the correlation computation to matrix multiplication and using BLAS GEMM API wrapped by Scipy can speedup the numpy built-in correlation computation (numpy.corrcoef) by one order of magnitude .. math:: corr(X, Y) &= \\frac{\\sum\\limits_{i=1}^n (x_i-\\bar{x})(y_i-\\bar{y})}{(n-1) \\sqrt{\\frac{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}}{n-1}} \\sqrt{\\frac{\\sum\\limits_{j=1}^{n} y_j^2-n\\bar{y}}{n-1}}}\\\\ &= \\sum\\limits_{i=1}^n(\\frac{(x_i-\\bar{x})} {\\sqrt{\\sum\\limits_{j=1}^n x_j^2-n\\bar{x}}} \\frac{(y_i-\\bar{y})}{\\sqrt{\\sum\\limits_{j=1}^n y_j^2-n\\bar{y}}}) By default (return_nans=False), returns zeros for vectors with NaNs. If return_nans=True, convert zeros to NaNs (np.nan) in output. Parameters matrix1: 2D array in shape [r1, c] MUST be continuous and row-major matrix2: 2D array in shape [r2, c] MUST be continuous and row-major return_nans: bool, default:False If False, return zeros for NaNs; if True, return NaNs Returns ------- corr_data: 2D array in shape [r1, r2] continuous and row-major in np.float32 """
matrix1 = matrix1.astype(np.float32) matrix2 = matrix2.astype(np.float32) [r1, d1] = matrix1.shape [r2, d2] = matrix2.shape if d1 != d2: raise ValueError('Dimension discrepancy') # preprocess two components matrix1 = _normalize_for_correlation(matrix1, 1, return_nans=return_nans) matrix2 = _normalize_for_correlation(matrix2, 1, return_nans=return_nans) corr_data = np.empty((r1, r2), dtype=np.float32, order='C') # blas routine is column-major blas.compute_single_matrix_multiplication('T', 'N', r2, r1, d1, 1.0, matrix2, d2, matrix1, d1, 0.0, corr_data, r2) return corr_data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _zscore(a): """ Calculating z-score of data on the first axis. If the numbers in any column are all equal, scipy.stats.zscore will return NaN for this column. We shall correct them all to be zeros. Parameters a: numpy array Returns ------- zscore: numpy array The z-scores of input "a", with any columns including non-finite numbers replaced by all zeros. """
assert a.ndim > 1, 'a must have more than one dimensions' zscore = scipy.stats.zscore(a, axis=0) zscore[:, np.logical_not(np.all(np.isfinite(zscore), axis=0))] = 0 return zscore
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def score(self, X, design, scan_onsets=None): """ Use the model and parameters estimated by fit function from some data of a participant to evaluate the log likelihood of some new data of the same participant. Design matrix of the same set of experimental conditions in the testing data should be provided, with each column corresponding to the same condition as that column in the design matrix of the training data. Unknown nuisance time series will be marginalized, assuming they follow the same spatial pattern as in the training data. The hypothetical response captured by the design matrix will be subtracted from data before the marginalization when evaluating the log likelihood. For null model, nothing will be subtracted before marginalization. There is a difference between the form of likelihood function used in fit() and score(). In fit(), the response amplitude beta to design matrix X and the modulation beta0 by nuisance regressor X0 are both marginalized, with X provided and X0 estimated from data. In score(), posterior estimation of beta and beta0 from the fitting step are assumed unchanged to testing data and X0 is marginalized. The logic underlying score() is to transfer as much as what we can learn from training data when calculating a likelihood score for testing data. If you z-scored your data during fit step, you should z-score them for score function as well. If you did not z-score in fitting, you should not z-score here either. Parameters X : numpy arrays, shape=[time_points, voxels] fMRI data of new data of the same subject. The voxels should match those used in the fit() function. 
If data are z-scored (recommended) when fitting the model, data should be z-scored as well when calling transform() design : numpy array, shape=[time_points, conditions] Design matrix expressing the hypothetical response of the task conditions in data X. scan_onsets : numpy array, shape=[number of runs]. A list of indices corresponding to the onsets of scans in the data X. If not provided, data will be assumed to be acquired in a continuous scan. Returns ------- ll: float. The log likelihood of the new data based on the model and its parameters fit to the training data. ll_null: float. The log likelihood of the new data based on a null model which assumes the same as the full model for everything except for that there is no response to any of the task conditions. """
assert X.ndim == 2 and X.shape[1] == self.beta_.shape[1], \ 'The shape of X is not consistent with the shape of data '\ 'used in the fitting step. They should have the same number '\ 'of voxels' assert scan_onsets is None or (scan_onsets.ndim == 1 and 0 in scan_onsets), \ 'scan_onsets should either be None or an array of indices '\ 'If it is given, it should include at least 0' if scan_onsets is None: scan_onsets = np.array([0], dtype=int) else: scan_onsets = np.int32(scan_onsets) ll = self._score(Y=X, design=design, beta=self.beta_, scan_onsets=scan_onsets, beta0=self.beta0_, rho_e=self.rho_, sigma_e=self.sigma_, rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_) ll_null = self._score(Y=X, design=None, beta=None, scan_onsets=scan_onsets, beta0=self.beta0_, rho_e=self.rho_, sigma_e=self.sigma_, rho_X0=self._rho_X0_, sigma2_X0=self._sigma2_X0_) return ll, ll_null
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _prepare_data_XY(self, X, Y, D, F): """Prepares different forms of products of design matrix X and data Y, or between themselves. These products are re-used a lot during fitting. So we pre-calculate them. Because these are reused, it is in principle possible to update the fitting as new data come in, by just incrementally adding the products of new data and their corresponding parts of design matrix to these pre-calculated terms. """
XTY, XTDY, XTFY = self._make_templates(D, F, X, Y) YTY_diag = np.sum(Y * Y, axis=0) YTDY_diag = np.sum(Y * np.dot(D, Y), axis=0) YTFY_diag = np.sum(Y * np.dot(F, Y), axis=0) XTX, XTDX, XTFX = self._make_templates(D, F, X, X) return XTY, XTDY, XTFY, YTY_diag, YTDY_diag, YTFY_diag, XTX, \ XTDX, XTFX
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _prepare_data_XYX0(self, X, Y, X_base, X_res, D, F, run_TRs, no_DC=False): """Prepares different forms of products between design matrix X or data Y or nuisance regressors X0. These products are re-used a lot during fitting. So we pre-calculate them. no_DC means not inserting regressors for DC components into nuisance regressor. It will only take effect if X_base is not None. """
X_DC = self._gen_X_DC(run_TRs) reg_sol = np.linalg.lstsq(X_DC, X) if np.any(np.isclose(reg_sol[1], 0)): raise ValueError('Your design matrix appears to have ' 'included baseline time series.' 'Either remove them, or move them to' ' nuisance regressors.') X_DC, X_base, idx_DC = self._merge_DC_to_base(X_DC, X_base, no_DC) if X_res is None: X0 = X_base else: X0 = np.concatenate((X_base, X_res), axis=1) n_X0 = X0.shape[1] X0TX0, X0TDX0, X0TFX0 = self._make_templates(D, F, X0, X0) XTX0, XTDX0, XTFX0 = self._make_templates(D, F, X, X0) X0TY, X0TDY, X0TFY = self._make_templates(D, F, X0, Y) return X0TX0, X0TDX0, X0TFX0, XTX0, XTDX0, XTFX0, \ X0TY, X0TDY, X0TFY, X0, X_base, n_X0, idx_DC
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _merge_DC_to_base(self, X_DC, X_base, no_DC): """ Merge DC components X_DC to the baseline time series X_base (By baseline, this means any fixed nuisance regressors not updated during fitting, including DC components and any nuisance regressors provided by the user. X_DC is always in the first few columns of X_base. """
if X_base is not None: reg_sol = np.linalg.lstsq(X_DC, X_base) if not no_DC: if not np.any(np.isclose(reg_sol[1], 0)): # No columns in X_base can be explained by the # baseline regressors. So we insert them. X_base = np.concatenate((X_DC, X_base), axis=1) idx_DC = np.arange(0, X_DC.shape[1]) else: logger.warning('Provided regressors for uninteresting ' 'time series already include baseline. ' 'No additional baseline is inserted.') idx_DC = np.where(np.isclose(reg_sol[1], 0))[0] else: idx_DC = np.where(np.isclose(reg_sol[1], 0))[0] else: # If a set of regressors for non-interested signals is not # provided, then we simply include one baseline for each run. X_base = X_DC idx_DC = np.arange(0, X_base.shape[1]) logger.info('You did not provide time series of no interest ' 'such as DC component. Trivial regressors of' ' DC component are included for further modeling.' ' The final covariance matrix won''t ' 'reflect these components.') return X_DC, X_base, idx_DC
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _build_index_param(self, n_l, n_V, n_smooth): """ Build dictionaries to retrieve each parameter from the combined parameters. """
idx_param_sing = {'Cholesky': np.arange(n_l), 'a1': n_l} # for simplified fitting idx_param_fitU = {'Cholesky': np.arange(n_l), 'a1': np.arange(n_l, n_l + n_V)} # for the likelihood function when we fit U (the shared covariance). idx_param_fitV = {'log_SNR2': np.arange(n_V - 1), 'c_space': n_V - 1, 'c_inten': n_V, 'c_both': np.arange(n_V - 1, n_V - 1 + n_smooth)} # for the likelihood function when we fit V (reflected by SNR of # each voxel) return idx_param_sing, idx_param_fitU, idx_param_fitV
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _score(self, Y, design, beta, scan_onsets, beta0, rho_e, sigma_e, rho_X0, sigma2_X0): """ Given the data Y, and the spatial pattern beta0 of nuisance time series, return the cross-validated score of the data Y given all parameters of the subject estimated during the first step. It is assumed that the user has design matrix built for the data Y. Both beta and beta0 are posterior expectation estimated from training data with the estimated covariance matrix U and SNR serving as prior. We marginalize X0 instead of fitting it in this function because this function is for the purpose of evaluating model no new data. We should avoid doing any additional fitting when performing cross-validation. The hypothetic response to the task will be subtracted, and the unknown nuisance activity which contributes to the data through beta0 will be marginalized. """
logger.info('Estimating cross-validated score for new data.') n_T = Y.shape[0] if design is not None: Y = Y - np.dot(design, beta) # The function works for both full model and null model. # If design matrix is not provided, the whole data is # used as input for _forward_step. If design matrix is provided, # residual after subtracting design * beta is fed to _forward_step T_X = np.diag(rho_X0) Var_X = sigma2_X0 / (1 - rho_X0**2) Var_dX = sigma2_X0 # Prior parmeters for X0: T_X is transitioning matrix, Var_X # is the marginal variance of the first time point. Var_dX is the # variance of the updating noise. sigma2_e = sigma_e ** 2 # variance of voxel-specific updating noise component scan_onsets = np.setdiff1d(scan_onsets, n_T).astype(int) n_scan = scan_onsets.size total_log_p = 0 for scan, onset in enumerate(scan_onsets): # Forward step if scan == n_scan - 1: offset = n_T else: offset = scan_onsets[scan + 1] _, _, _, log_p_data, _, _, _, _, _ = \ self._forward_step( Y[onset:offset, :], T_X, Var_X, Var_dX, rho_e, sigma2_e, beta0) total_log_p += log_p_data return total_log_p
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _backward_step(self, deltaY, deltaY_sigma2inv_rho_weightT, sigma2_e, weight, mu, mu_Gamma_inv, Gamma_inv, Lambda_0, Lambda_1, H): """ backward step for HMM, assuming both the hidden state and noise have 1-step dependence on the previous value. """
n_T = len(Gamma_inv) # All the terms with hat before are parameters of posterior # distributions of X conditioned on data from all time points, # whereas the ones without hat calculated by _forward_step # are mean and covariance of posterior of X conditioned on # data up to the time point. Gamma_inv_hat = [None] * n_T mu_Gamma_inv_hat = [None] * n_T mu_hat = [None] * n_T mu_hat[-1] = mu[-1].copy() mu_Gamma_inv_hat[-1] = mu_Gamma_inv[-1].copy() Gamma_inv_hat[-1] = Gamma_inv[-1].copy() for t in np.arange(n_T - 2, -1, -1): tmp = np.linalg.solve(Gamma_inv_hat[t + 1] - Gamma_inv[t + 1] + Lambda_1, H) Gamma_inv_hat[t] = Gamma_inv[t] + Lambda_0 - np.dot(H.T, tmp) mu_Gamma_inv_hat[t] = mu_Gamma_inv[t] \ - deltaY_sigma2inv_rho_weightT[t, :] + np.dot( mu_Gamma_inv_hat[t + 1] - mu_Gamma_inv[t + 1] + np.dot(deltaY[t, :] / sigma2_e, weight.T), tmp) mu_hat[t] = np.linalg.solve(Gamma_inv_hat[t], mu_Gamma_inv_hat[t]) return mu_hat, mu_Gamma_inv_hat, Gamma_inv_hat
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_SNR_grids(self): """ Set the grids and weights for SNR used in numerical integration of SNR parameters. """
if self.SNR_prior == 'unif': SNR_grids = np.linspace(0, 1, self.SNR_bins) SNR_weights = np.ones(self.SNR_bins) / (self.SNR_bins - 1) SNR_weights[0] = SNR_weights[0] / 2.0 SNR_weights[-1] = SNR_weights[-1] / 2.0 elif self.SNR_prior == 'lognorm': dist = scipy.stats.lognorm alphas = np.arange(np.mod(self.SNR_bins, 2), self.SNR_bins + 2, 2) / self.SNR_bins # The goal here is to divide the area under the pdf curve # to segments representing equal probabilities. bounds = dist.interval(alphas, (self.logS_range,)) bounds = np.unique(bounds) # bounds contain the boundaries which equally separate # the probability mass of the distribution SNR_grids = np.zeros(self.SNR_bins) for i in np.arange(self.SNR_bins): SNR_grids[i] = dist.expect( lambda x: x, args=(self.logS_range,), lb=bounds[i], ub=bounds[i + 1]) * self.SNR_bins # Center of mass of each segment between consecutive # bounds are set as the grids for SNR. SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins elif self.SNR_prior == 'exp': SNR_grids = self._bin_exp(self.SNR_bins) SNR_weights = np.ones(self.SNR_bins) / self.SNR_bins else: SNR_grids = np.ones(1) SNR_weights = np.ones(1) SNR_weights = SNR_weights / np.sum(SNR_weights) return SNR_grids, SNR_weights
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _matrix_flattened_grid(self, X0TAX0, X0TAX0_i, SNR_grids, XTAcorrX, YTAcorrY_diag, XTAcorrY, X0TAY, XTAX0, n_C, n_V, n_X0, n_grid): """ We need to integrate parameters SNR and rho on 2-d discrete grids. This function generates matrices which have only one dimension for these two parameters, with each slice in that dimension corresponding to each combination of the discrete grids of SNR and discrete grids of rho. """
half_log_det_X0TAX0 = np.reshape( np.repeat(self._half_log_det(X0TAX0)[None, :], self.SNR_bins, axis=0), n_grid) X0TAX0 = np.reshape( np.repeat(X0TAX0[None, :, :, :], self.SNR_bins, axis=0), (n_grid, n_X0, n_X0)) X0TAX0_i = np.reshape(np.repeat( X0TAX0_i[None, :, :, :], self.SNR_bins, axis=0), (n_grid, n_X0, n_X0)) s2XTAcorrX = np.reshape( SNR_grids[:, None, None, None]**2 * XTAcorrX, (n_grid, n_C, n_C)) YTAcorrY_diag = np.reshape(np.repeat( YTAcorrY_diag[None, :, :], self.SNR_bins, axis=0), (n_grid, n_V)) sXTAcorrY = np.reshape(SNR_grids[:, None, None, None] * XTAcorrY, (n_grid, n_C, n_V)) X0TAY = np.reshape(np.repeat(X0TAY[None, :, :, :], self.SNR_bins, axis=0), (n_grid, n_X0, n_V)) XTAX0 = np.reshape(np.repeat(XTAX0[None, :, :, :], self.SNR_bins, axis=0), (n_grid, n_C, n_X0)) return half_log_det_X0TAX0, X0TAX0, X0TAX0_i, s2XTAcorrX, \ YTAcorrY_diag, sXTAcorrY, X0TAY, XTAX0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self, X): """Compute the Robust Shared Response Model Parameters X : list of 2D arrays, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data of one subject. """
logger.info('Starting RSRM') # Check that the regularizer value is positive if 0.0 >= self.lam: raise ValueError("Gamma parameter should be positive.") # Check the number of subjects if len(X) <= 1: raise ValueError("There are not enough subjects in the input " "data to train the model.") # Check for input data sizes if X[0].shape[1] < self.features: raise ValueError( "There are not enough timepoints to train the model with " "{0:d} features.".format(self.features)) # Check if all subjects have same number of TRs for alignment number_trs = X[0].shape[1] number_subjects = len(X) for subject in range(number_subjects): assert_all_finite(X[subject]) if X[subject].shape[1] != number_trs: raise ValueError("Different number of alignment timepoints " "between subjects.") # Create a new random state self.random_state_ = np.random.RandomState(self.rand_seed) # Run RSRM self.w_, self.r_, self.s_ = self._rsrm(X) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform(self, X): """Use the model to transform new data to Shared Response space Parameters X : list of 2D arrays, element i has shape=[voxels_i, timepoints_i] Each element in the list contains the fMRI data of one subject. Returns ------- r : list of 2D arrays, element i has shape=[features_i, timepoints_i] Shared responses from input data (X) s : list of 2D arrays, element i has shape=[voxels_i, timepoints_i] Individual data obtained from fitting model to input data (X) """
# Check if the model exist if hasattr(self, 'w_') is False: raise NotFittedError("The model fit has not been run yet.") # Check the number of subjects if len(X) != len(self.w_): raise ValueError("The number of subjects does not match the one" " in the model.") r = [None] * len(X) s = [None] * len(X) for subject in range(len(X)): if X[subject] is not None: r[subject], s[subject] = self._transform_new_data(X[subject], subject) return r, s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _transform_new_data(self, X, subject): """Transform new data for a subject by projecting to the shared subspace and computing the individual information. Parameters X : array, shape=[voxels, timepoints] The fMRI data of the subject. subject : int The subject id. Returns ------- R : array, shape=[features, timepoints] Shared response from input data (X) S : array, shape=[voxels, timepoints] Individual data obtained from fitting model to input data (X) """
S = np.zeros_like(X) R = None for i in range(self.n_iter): R = self.w_[subject].T.dot(X - S) S = self._shrink(X - self.w_[subject].dot(R), self.lam) return R, S
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform_subject(self, X): """Transform a new subject using the existing model Parameters X : 2D array, shape=[voxels, timepoints] The fMRI data of the new subject. Returns ------- w : 2D array, shape=[voxels, features] Orthogonal mapping `W_{new}` for new subject s : 2D array, shape=[voxels, timepoints] Individual term `S_{new}` for new subject """
# Check if the model exist if hasattr(self, 'w_') is False: raise NotFittedError("The model fit has not been run yet.") # Check the number of TRs in the subject if X.shape[1] != self.r_.shape[1]: raise ValueError("The number of timepoints(TRs) does not match the" "one in the model.") s = np.zeros_like(X) for i in range(self.n_iter): w = self._update_transform_subject(X, s, self.r_) s = self._shrink(X - w.dot(self.r_), self.lam) return w, s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _rsrm(self, X): """Block-Coordinate Descent algorithm for fitting RSRM. Parameters X : list of 2D arrays, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data for alignment of one subject. Returns ------- W : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. R : array, shape=[features, timepoints] The shared response. S : list of array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject. """
subjs = len(X) voxels = [X[i].shape[0] for i in range(subjs)] TRs = X[0].shape[1] features = self.features # Initialization W = self._init_transforms(subjs, voxels, features, self.random_state_) S = self._init_individual(subjs, voxels, TRs) R = self._update_shared_response(X, S, W, features) if logger.isEnabledFor(logging.INFO): objective = self._objective_function(X, W, R, S, self.lam) logger.info('Objective function %f' % objective) # Main loop for i in range(self.n_iter): W = self._update_transforms(X, S, R) S = self._update_individual(X, W, R, self.lam) R = self._update_shared_response(X, S, W, features) # Print objective function every iteration if logger.isEnabledFor(logging.INFO): objective = self._objective_function(X, W, R, S, self.lam) logger.info('Objective function %f' % objective) return W, R, S
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _objective_function(X, W, R, S, gamma): """Evaluate the objective function. .. math:: \\sum_{i=1}^{N} 1/2 \\| X_i - W_i R - S_i \\|_F^2 .. math:: + \\gamma * \\|S_i\\|_1 Parameters X : list of array, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data for alignment of one subject. W : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. R : array, shape=[features, timepoints] The shared response. S : list of array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject. gamma : float, default: 1.0 Regularization parameter for the sparseness of the individual components. Returns ------- func : float The RSRM objective function evaluated on the parameters to this function. """
subjs = len(X) func = .0 for i in range(subjs): func += 0.5 * np.sum((X[i] - W[i].dot(R) - S[i])**2) \ + gamma * np.sum(np.abs(S[i])) return func
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update_individual(X, W, R, gamma): """Update the individual components `S_i`. Parameters X : list of 2D arrays, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data for alignment of one subject. W : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. R : array, shape=[features, timepoints] The shared response. gamma : float, default: 1.0 Regularization parameter for the sparseness of the individual components. Returns ------- S : list of array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject. """
subjs = len(X) S = [] for i in range(subjs): S.append(RSRM._shrink(X[i] - W[i].dot(R), gamma)) return S
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update_shared_response(X, S, W, features): """Update the shared response `R`. Parameters X : list of 2D arrays, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data for alignment of one subject. S : list of array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject. W : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. features : int The number of features in the model. Returns ------- R : array, shape=[features, timepoints] The updated shared response. """
subjs = len(X) TRs = X[0].shape[1] R = np.zeros((features, TRs)) # Project the subject data with the individual component removed into # the shared subspace and average over all subjects. for i in range(subjs): R += W[i].T.dot(X[i]-S[i]) R /= subjs return R
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _update_transforms(X, S, R): """Updates the mappings `W_i` for each subject. Parameters X : list of 2D arrays, element i has shape=[voxels_i, timepoints] Each element in the list contains the fMRI data for alignment of one subject. S : list of array, element i has shape=[voxels_i, timepoints] The individual component :math:`S_i` for each subject. R : array, shape=[features, timepoints] The shared response. Returns ------- W : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. """
subjs = len(X) W = [] for i in range(subjs): W.append(RSRM._update_transform_subject(X[i], S[i], R)) return W
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _shrink(v, gamma): """Soft-shrinkage of an array with parameter gamma. Parameters v : array Array containing the values to be applied to the shrinkage operator gamma : float Shrinkage parameter. Returns ------- v : array The same input array after the shrinkage operator was applied. """
pos = v > gamma neg = v < -gamma v[pos] -= gamma v[neg] += gamma v[np.logical_and(~pos, ~neg)] = .0 return v
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot_confusion_matrix(cm, title="Confusion Matrix"): """Plots a confusion matrix for each subject """
import matplotlib.pyplot as plt import math plt.figure() subjects = len(cm) root_subjects = math.sqrt(subjects) cols = math.ceil(root_subjects) rows = math.ceil(subjects/cols) classes = cm[0].shape[0] for subject in range(subjects): plt.subplot(rows, cols, subject+1) plt.imshow(cm[subject], interpolation='nearest', cmap=plt.cm.bone) plt.xticks(np.arange(classes), range(1, classes+1)) plt.yticks(np.arange(classes), range(1, classes+1)) cbar = plt.colorbar(ticks=[0.0, 1.0], shrink=0.6) cbar.set_clim(0.0, 1.0) plt.xlabel("Predicted") plt.ylabel("True label") plt.title("{0:d}".format(subject + 1)) plt.suptitle(title) plt.tight_layout() plt.show()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mask_image(image: SpatialImage, mask: np.ndarray, data_type: type = None ) -> np.ndarray: """Mask image after optionally casting its type. Parameters image Image to mask. Can include time as the last dimension. mask Mask to apply. Must have the same shape as the image data. data_type Type to cast image to. Returns ------- np.ndarray Masked image. Raises ------ ValueError Image data and masks have different shapes. """
image_data = image.get_data() if image_data.shape[:3] != mask.shape: raise ValueError("Image data and mask have different shapes.") if data_type is not None: cast_data = image_data.astype(data_type) else: cast_data = image_data return cast_data[mask]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def multimask_images(images: Iterable[SpatialImage], masks: Sequence[np.ndarray], image_type: type = None ) -> Iterable[Sequence[np.ndarray]]: """Mask images with multiple masks. Parameters images: Images to mask. masks: Masks to apply. image_type: Type to cast images to. Yields ------ Sequence[np.ndarray] For each mask, a masked image. """
for image in images: yield [mask_image(image, mask, image_type) for mask in masks]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mask_images(images: Iterable[SpatialImage], mask: np.ndarray, image_type: type = None) -> Iterable[np.ndarray]: """Mask images. Parameters images: Images to mask. mask: Mask to apply. image_type: Type to cast images to. Yields ------ np.ndarray Masked image. """
for images in multimask_images(images, (mask,), image_type): yield images[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_masked_images(cls: Type[T], masked_images: Iterable[np.ndarray], n_subjects: int) -> T: """Create a new instance of MaskedMultiSubjectData from masked images. Parameters masked_images : iterator Images from multiple subjects to stack along 3rd dimension n_subjects : int Number of subjects; must match the number of images Returns ------- T A new instance of MaskedMultiSubjectData Raises ------ ValueError Images have different shapes. The number of images differs from n_subjects. """
images_iterator = iter(masked_images) first_image = next(images_iterator) first_image_shape = first_image.T.shape result = np.empty((first_image_shape[0], first_image_shape[1], n_subjects)) for n_images, image in enumerate(itertools.chain([first_image], images_iterator)): image = image.T if image.shape != first_image_shape: raise ValueError("Image {} has different shape from first " "image: {} != {}".format(n_images, image.shape, first_image_shape)) result[:, :, n_images] = image n_images += 1 if n_images != n_subjects: raise ValueError("n_subjects != number of images: {} != {}" .format(n_subjects, n_images)) return result.view(cls)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_labels(self) -> np.ndarray: """Extract condition labels. Returns ------- np.ndarray The condition label of each epoch. """
condition_idxs, epoch_idxs, _ = np.where(self) _, unique_epoch_idxs = np.unique(epoch_idxs, return_index=True) return condition_idxs[unique_epoch_idxs]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self, X, y=None): """Compute the probabilistic Shared Response Model Parameters X : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. y : not used """
logger.info('Starting Probabilistic SRM') # Check the number of subjects if len(X) <= 1: raise ValueError("There are not enough subjects " "({0:d}) to train the model.".format(len(X))) # Check for input data sizes number_subjects = len(X) number_subjects_vec = self.comm.allgather(number_subjects) for rank in range(self.comm.Get_size()): if number_subjects_vec[rank] != number_subjects: raise ValueError( "Not all ranks have same number of subjects") # Collect size information shape0 = np.zeros((number_subjects,), dtype=np.int) shape1 = np.zeros((number_subjects,), dtype=np.int) for subject in range(number_subjects): if X[subject] is not None: assert_all_finite(X[subject]) shape0[subject] = X[subject].shape[0] shape1[subject] = X[subject].shape[1] shape0 = self.comm.allreduce(shape0, op=MPI.SUM) shape1 = self.comm.allreduce(shape1, op=MPI.SUM) # Check if all subjects have same number of TRs number_trs = np.min(shape1) for subject in range(number_subjects): if shape1[subject] < self.features: raise ValueError( "There are not enough samples to train the model with " "{0:d} features.".format(self.features)) if shape1[subject] != number_trs: raise ValueError("Different number of samples between subjects" ".") # Run SRM self.sigma_s_, self.w_, self.mu_, self.rho2_, self.s_ = self._srm(X) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform(self, X, y=None): """Use the model to transform matrix to Shared Response space Parameters X : list of 2D arrays, element i has shape=[voxels_i, samples_i] Each element in the list contains the fMRI data of one subject note that number of voxels and samples can vary across subjects y : not used (as it is unsupervised learning) Returns ------- s : list of 2D arrays, element i has shape=[features_i, samples_i] Shared responses from input data (X) """
# Check if the model exist if hasattr(self, 'w_') is False: raise NotFittedError("The model fit has not been run yet.") # Check the number of subjects if len(X) != len(self.w_): raise ValueError("The number of subjects does not match the one" " in the model.") s = [None] * len(X) for subject in range(len(X)): if X[subject] is not None: s[subject] = self.w_[subject].T.dot(X[subject]) return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transform_subject(self, X): """Transform a new subject using the existing model. The subject is assumed to have received equivalent stimulation Parameters X : 2D array, shape=[voxels, timepoints] The fMRI data of the new subject. Returns ------- w : 2D array, shape=[voxels, features] Orthogonal mapping `W_{new}` for new subject """
# Check if the model exist if hasattr(self, 'w_') is False: raise NotFittedError("The model fit has not been run yet.") # Check the number of TRs in the subject if X.shape[1] != self.s_.shape[1]: raise ValueError("The number of timepoints(TRs) does not match the" "one in the model.") w = self._update_transform_subject(X, self.s_) return w
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self, X, y=None): """Learn a segmentation on training data Fits event patterns and a segmentation to training data. After running this function, the learned event patterns can be used to segment other datasets using find_events Parameters X: time by voxel ndarray, or a list of such ndarrays fMRI data to be segmented. If a list is given, then all datasets are segmented simultaneously with the same event patterns y: not used (added to comply with BaseEstimator definition) Returns ------- self: the EventSegment object """
X = copy.deepcopy(X) if type(X) is not list: X = check_array(X) X = [X] n_train = len(X) for i in range(n_train): X[i] = X[i].T self.classes_ = np.arange(self.n_events) n_dim = X[0].shape[0] for i in range(n_train): assert (X[i].shape[0] == n_dim) # Double-check that data is z-scored in time for i in range(n_train): X[i] = stats.zscore(X[i], axis=1, ddof=1) # Initialize variables for fitting log_gamma = [] for i in range(n_train): log_gamma.append(np.zeros((X[i].shape[1], self.n_events))) step = 1 best_ll = float("-inf") self.ll_ = np.empty((0, n_train)) while step <= self.n_iter: iteration_var = self.step_var(step) # Based on the current segmentation, compute the mean pattern # for each event seg_prob = [np.exp(lg) / np.sum(np.exp(lg), axis=0) for lg in log_gamma] mean_pat = np.empty((n_train, n_dim, self.n_events)) for i in range(n_train): mean_pat[i, :, :] = X[i].dot(seg_prob[i]) mean_pat = np.mean(mean_pat, axis=0) # Based on the current mean patterns, compute the event # segmentation self.ll_ = np.append(self.ll_, np.empty((1, n_train)), axis=0) for i in range(n_train): logprob = self._logprob_obs(X[i], mean_pat, iteration_var) log_gamma[i], self.ll_[-1, i] = self._forward_backward(logprob) # If log-likelihood has started decreasing, undo last step and stop if np.mean(self.ll_[-1, :]) < best_ll: self.ll_ = self.ll_[:-1, :] break self.segments_ = [np.exp(lg) for lg in log_gamma] self.event_var_ = iteration_var self.event_pat_ = mean_pat best_ll = np.mean(self.ll_[-1, :]) logger.debug("Fitting step %d, LL=%f", step, best_ll) step += 1 return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _logprob_obs(self, data, mean_pat, var): """Log probability of observing each timepoint under each event model Computes the log probability of each observed timepoint being generated by the Gaussian distribution for each event pattern Parameters data: voxel by time ndarray fMRI data on which to compute log probabilities mean_pat: voxel by event ndarray Centers of the Gaussians for each event var: float or 1D array of length equal to the number of events Variance of the event Gaussians. If scalar, all events are assumed to have the same variance Returns ------- logprob : time by event ndarray Log probability of each timepoint under each event Gaussian """
n_vox = data.shape[0] t = data.shape[1] # z-score both data and mean patterns in space, so that Gaussians # are measuring Pearson correlations and are insensitive to overall # activity changes data_z = stats.zscore(data, axis=0, ddof=1) mean_pat_z = stats.zscore(mean_pat, axis=0, ddof=1) logprob = np.empty((t, self.n_events)) if type(var) is not np.ndarray: var = var * np.ones(self.n_events) for k in range(self.n_events): logprob[:, k] = -0.5 * n_vox * np.log( 2 * np.pi * var[k]) - 0.5 * np.sum( (data_z.T - mean_pat_z[:, k]).T ** 2, axis=0) / var[k] logprob /= n_vox return logprob
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _log(self, x): """Modified version of np.log that manually sets values <=0 to -inf Parameters x: ndarray of floats Input to the log function Returns ------- log_ma: ndarray of floats log of x, with x<=0 values replaced with -inf """
xshape = x.shape _x = x.flatten() y = utils.masked_log(_x) return y.reshape(xshape)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_event_patterns(self, event_pat): """Set HMM event patterns manually Rather than fitting the event patterns automatically using fit(), this function allows them to be set explicitly. They can then be used to find corresponding events in a new dataset, using find_events(). Parameters event_pat: voxel by event ndarray """
if event_pat.shape[1] != self.n_events: raise ValueError(("Number of columns of event_pat must match " "number of events")) self.event_pat_ = event_pat.copy()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calc_weighted_event_var(self, D, weights, event_pat): """Computes normalized weighted variance around event pattern Utility function for computing variance in a training set of weighted event examples. For each event, the sum of squared differences for all timepoints from the event pattern is computed, and then the weights specify how much each of these differences contributes to the variance (normalized by the number of voxels). Parameters D : timepoint by voxel ndarray fMRI data for which to compute event variances weights : timepoint by event ndarray specifies relative weights of timepoints for each event event_pat : voxel by event ndarray mean event patterns to compute variance around Returns ------- ev_var : ndarray of variances for each event """
Dz = stats.zscore(D, axis=1, ddof=1) ev_var = np.empty(event_pat.shape[1]) for e in range(event_pat.shape[1]): # Only compute variances for weights > 0.1% of max weight nz = weights[:, e] > np.max(weights[:, e])/1000 sumsq = np.dot(weights[nz, e], np.sum(np.square(Dz[nz, :] - event_pat[:, e]), axis=1)) ev_var[e] = sumsq/(np.sum(weights[nz, e]) - np.sum(np.square(weights[nz, e])) / np.sum(weights[nz, e])) ev_var = ev_var / D.shape[1] return ev_var
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def model_prior(self, t): """Returns the prior probability of the HMM Runs forward-backward without any data, showing the prior distribution of the model (for comparison with a posterior). Parameters t: int Number of timepoints Returns ------- segments : time by event ndarray segments[t,e] = prior probability that timepoint t is in event e test_ll : float Log-likelihood of model (data-independent term)"""
lg, test_ll = self._forward_backward(np.zeros((t, self.n_events))) segments = np.exp(lg) return segments, test_ll
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def chain_getattr(obj, attr, value=None): """Get chain attribute for an object. """
try: return _resolve_value(safe_chain_getattr(obj, attr)) except AttributeError: return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_festival_countdown(countdown: Optional[int] = None, date_obj: MDate = None, lang: str = 'zh-Hans') -> FestivalCountdownIterable: """Return countdown of festivals. """
factory = FestivalFactory(lang=lang) return factory.iter_festival_countdown(countdown, date_obj)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_year_days(year_info): """Parse year days from a year info. """
leap_month, leap_days = _parse_leap(year_info) res = leap_days for month in range(1, 13): res += (year_info >> (16 - month)) % 2 + 29 return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _iter_year_month(year_info): """ Iter the month days in a lunar year. """
# info => month, days, leap leap_month, leap_days = _parse_leap(year_info) months = [(i, 0) for i in range(1, 13)] if leap_month > 0: months.insert(leap_month, (leap_month, 1)) for month, leap in months: if leap: days = leap_days else: days = (year_info >> (16 - month)) % 2 + 29 yield month, days, leap
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def model_typedefs(vk, model): """Fill the model with typedefs """
model['typedefs'] = {} # bitmasks and basetypes bitmasks = [x for x in vk['registry']['types']['type'] if x.get('@category') == 'bitmask'] basetypes = [x for x in vk['registry']['types']['type'] if x.get('@category') == 'basetype'] for typedef in bitmasks + basetypes: if not typedef.get('type'): continue model['typedefs'][typedef['name']] = typedef['type'] # handles handles = [x for x in vk['registry']['types']['type'] if x.get('@category') == 'handle'] for handle in handles: if 'name' not in handle or 'type' not in handle: continue n = handle['name'] t = handle['type'] if t == 'VK_DEFINE_HANDLE': model['typedefs']['struct %s_T' % n] = '*%s' % n if t == 'VK_DEFINE_HANDLE': model['typedefs'][n] = 'uint64_t' # custom plaform dependant for name in ['Display', 'xcb_connection_t', 'wl_display', 'wl_surface', 'MirConnection', 'MirSurface', 'ANativeWindow', 'SECURITY_ATTRIBUTES']: model['typedefs'][name] = 'struct %s' % name model['typedefs'].update({ 'Window': 'uint32_t', 'VisualID': 'uint32_t', 'xcb_window_t': 'uint32_t', 'xcb_visualid_t': 'uint32_t' })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def model_macros(vk, model): """Fill the model with macros """
model['macros'] = {} # API Macros macros = [x for x in vk['registry']['enums'] if x.get('@type') not in ('bitmask', 'enum')] # TODO: Check theses values special_values = {'1000.0f': '1000.0', '(~0U)': 0xffffffff, '(~0ULL)': -1, '(~0U-1)': 0xfffffffe, '(~0U-2)': 0xfffffffd} for macro in macros[0]['enum']: if '@name' not in macro or '@value' not in macro: continue name = macro['@name'] value = macro['@value'] if value in special_values: value = special_values[value] model['macros'][name] = value # Extension Macros for ext in get_extensions_filtered(vk): model['macros'][ext['@name']] = 1 for req in ext['require']: for enum in req['enum']: ename = enum['@name'] evalue = parse_constant(enum, int(ext['@number'])) if enum.get('@extends') == 'VkResult': model['enums']['VkResult'][ename] = evalue else: model['macros'][ename] = evalue
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def model_funcpointers(vk, model): """Fill the model with function pointer model['funcpointers'] = {'pfn_name': 'struct_name'} """
model['funcpointers'] = {} funcs = [x for x in vk['registry']['types']['type'] if x.get('@category') == 'funcpointer'] structs = [x for x in vk['registry']['types']['type'] if x.get('@category') == 'struct'] for f in funcs: pfn_name = f['name'] for s in structs: if 'member' not in s: continue for m in s['member']: if m['type'] == pfn_name: struct_name = s['@name'] model['funcpointers'][pfn_name] = struct_name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def model_exceptions(vk, model): """Fill the model with exceptions and errors """
model['exceptions'] = {} model['errors'] = {} all_codes = model['enums']['VkResult'] success_names = set() error_names = set() commands = [x for x in vk['registry']['commands']['command']] for command in commands: successes = command.get('@successcodes', '').split(',') errors = command.get('@errorcodes', '').split(',') success_names.update(successes) error_names.update(errors) for key, value in all_codes.items(): if key.startswith('VK_RESULT') or key == 'VK_SUCCESS': continue name = inflection.camelize(key.lower()) if key in success_names: model['exceptions'][value] = name elif key in error_names: model['errors'][value] = name else: print('Warning: return code %s unused' % key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def model_constructors(vk, model): """Fill the model with constructors model['constructors'] = [{'name': 'x', 'members': [{'name': 'y'}].}] """
model['constructors'] = [] structs = [x for x in vk['registry']['types']['type'] if x.get('@category') in {'struct', 'union'}] def parse_len(member): mlen = member.get('@len') if not mlen: return None if ',' in mlen: mlen = mlen.split(',')[0] if 'latex' in mlen or 'null-terminated' in mlen: return None return mlen for struct in structs: if 'member' not in struct: continue model['constructors'].append({ 'name': struct['@name'], 'members': [{ 'name': x['name'], 'type': x['type'], 'default': x.get('@values'), 'len': parse_len(x) } for x in struct['member']] })
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def model_ext_functions(vk, model): """Fill the model with extensions functions"""
model['ext_functions'] = {'instance': {}, 'device': {}} # invert the alias to better lookup alias = {v: k for k, v in model['alias'].items()} for extension in get_extensions_filtered(vk): for req in extension['require']: if not req.get('command'): continue ext_type = extension['@type'] for x in req['command']: name = x['@name'] if name in alias.keys(): model['ext_functions'][ext_type][name] = alias[name] else: model['ext_functions'][ext_type][name] = name
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def model_alias(vk, model): """Fill the model with alias since V1"""
model['alias'] = {} # types for s in vk['registry']['types']['type']: if s.get('@category', None) == 'handle' and s.get('@alias'): model['alias'][s['@alias']] = s['@name'] # commands for c in vk['registry']['commands']['command']: if c.get('@alias'): model['alias'][c['@alias']] = c['@name']
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_vk(vk): """Format vk before using it"""
# Force extension require to be a list for ext in get_extensions_filtered(vk): req = ext['require'] if not isinstance(req, list): ext['require'] = [req]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_py(): """Generate the python output file"""
model = {} vk = init() format_vk(vk) model_alias(vk, model) model_typedefs(vk, model) model_enums(vk, model) model_macros(vk, model) model_funcpointers(vk, model) model_exceptions(vk, model) model_constructors(vk, model) model_functions(vk, model) model_ext_functions(vk, model) env = jinja2.Environment( autoescape=False, trim_blocks=True, lstrip_blocks=True, loader=jinja2.FileSystemLoader(HERE) ) out_file = path.join(HERE, path.pardir, 'vulkan', '_vulkan.py') with open(out_file, 'w') as out: out.write(env.get_template('vulkan.template.py').render(model=model))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate_cdef(): """Generate the cdef output file"""
include_libc_path = path.join(HERE, 'fake_libc_include') include_vulkan_path = path.join(HERE, 'vulkan_include') out_file = path.join(HERE, path.pardir, 'vulkan', 'vulkan.cdef.h') header = path.join(include_vulkan_path, 'vulkan.h') command = ['cpp', '-std=c99', '-P', '-nostdinc', '-I' + include_libc_path, '-I' + include_vulkan_path, '-o' + out_file, '-DVK_USE_PLATFORM_XCB_KHR', '-DVK_USE_PLATFORM_WAYLAND_KHR', '-DVK_USE_PLATFORM_ANDROID_KHR', '-DVK_USE_PLATFORM_WIN32_KHR', '-DVK_USE_PLATFORM_XLIB_KHR', header] subprocess.run(command, check=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mock_django_connection(disabled_features=None): """ Overwrite the Django database configuration with a mocked version. This is a helper function that does the actual monkey patching. """
db = connections.databases['default'] db['PASSWORD'] = '****' db['USER'] = '**Database disabled for unit tests**' ConnectionHandler.__getitem__ = MagicMock(name='mock_connection') # noinspection PyUnresolvedReferences mock_connection = ConnectionHandler.__getitem__.return_value if disabled_features: for feature in disabled_features: setattr(mock_connection.features, feature, False) mock_ops = mock_connection.ops # noinspection PyUnusedLocal def compiler(queryset, connection, using, **kwargs): result = MagicMock(name='mock_connection.ops.compiler()') # noinspection PyProtectedMember result.execute_sql.side_effect = NotSupportedError( "Mock database tried to execute SQL for {} model.".format( queryset.model._meta.object_name)) result.has_results.side_effect = result.execute_sql.side_effect return result mock_ops.compiler.return_value.side_effect = compiler mock_ops.integer_field_range.return_value = (-sys.maxsize - 1, sys.maxsize) mock_ops.max_name_length.return_value = sys.maxsize Model.refresh_from_db = Mock()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_all_models(models): """ Yield all models and their parents. """
for model in models: yield model # noinspection PyProtectedMember for parent in model._meta.parents.keys(): for parent_model in find_all_models((parent,)): yield parent_model
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mocked_relations(*models): """ Mock all related field managers to make pure unit tests possible. The resulting patcher can be used just like one from the mock module: As a test method decorator, a test class decorator, a context manager, or by just calling start() and stop(). @mocked_relations(Dataset): def test_dataset(self): dataset = Dataset() check = dataset.content_checks.create() # returns a ContentCheck object """
patchers = [] for model in find_all_models(models): if isinstance(model.save, Mock): # already mocked, so skip it continue model_name = model._meta.object_name patchers.append(_patch_save(model, model_name)) if hasattr(model, 'objects'): patchers.append(_patch_objects(model, model_name)) for related_object in chain(model._meta.related_objects, model._meta.many_to_many): name = related_object.name if name not in model.__dict__ and related_object.one_to_many: name += '_set' if name in model.__dict__: # Only mock direct relations, not inherited ones. if getattr(model, name, None): patchers.append(_patch_relation( model, name, related_object )) return PatcherChain(patchers, pass_mocks=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decorate_callable(self, target): """ Called as a decorator. """
# noinspection PyUnusedLocal def absorb_mocks(test_case, *args): return target(test_case) should_absorb = not (self.pass_mocks or isinstance(target, type)) result = absorb_mocks if should_absorb else target for patcher in self.patchers: result = patcher(result) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _zero_on_type_error(column_fn): """Wrap a function on an np.ndarray to return 0 on a type error."""
if not column_fn: return column_fn if not callable(column_fn): raise TypeError('column functions must be callable') @functools.wraps(column_fn) def wrapped(column): try: return column_fn(column) except TypeError: if isinstance(column, np.ndarray): return column.dtype.type() # A typed zero value else: raise return wrapped
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _varargs_labels_as_list(label_list): """Return a list of labels for a list of labels or singleton list of list of labels."""
if len(label_list) == 0: return [] elif not _is_non_string_iterable(label_list[0]): # Assume everything is a label. If not, it'll be caught later. return label_list elif len(label_list) == 1: return label_list[0] else: raise ValueError("Labels {} contain more than list.".format(label_list), "Pass just one list of labels.")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _assert_same(values): """Assert that all values are identical and return the unique value."""
assert len(values) > 0 first, rest = values[0], values[1:] for v in rest: assert v == first return first
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _collected_label(collect, label): """Label of a collected column."""
if not collect.__name__.startswith('<'): return label + ' ' + collect.__name__ else: return label
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_non_string_iterable(value): """Whether a value is iterable."""
if isinstance(value, str): return False if hasattr(value, '__iter__'): return True if isinstance(value, collections.abc.Sequence): return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _vertical_x(axis, ticks=None, max_width=5): """Switch labels to vertical if they are long."""
if ticks is None: ticks = axis.get_xticks() if (np.array(ticks) == np.rint(ticks)).all(): ticks = np.rint(ticks).astype(np.int) if max([len(str(tick)) for tick in ticks]) > max_width: axis.set_xticklabels(ticks, rotation='vertical')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_table(cls, filepath_or_buffer, *args, **vargs): """Read a table from a file or web address. filepath_or_buffer -- string or file handle / StringIO; The string could be a URL. Valid URL schemes include http, ftp, s3, and file. """
# Look for .csv at the end of the path; use "," as a separator if found try: path = urllib.parse.urlparse(filepath_or_buffer).path if 'data8.berkeley.edu' in filepath_or_buffer: raise ValueError('data8.berkeley.edu requires authentication, ' 'which is not supported.') except AttributeError: path = filepath_or_buffer try: if 'sep' not in vargs and path.endswith('.csv'): vargs['sep'] = ',' except AttributeError: pass df = pandas.read_table(filepath_or_buffer, *args, **vargs) return cls.from_df(df)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _with_columns(self, columns): """Create a table from a sequence of columns, copying column labels."""
table = type(self)() for label, column in zip(self.labels, columns): self._add_column_and_format(table, label, column) return table
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_column_and_format(self, table, label, column): """Add a column to table, copying the formatter from self."""
label = self._as_label(label) table[label] = column if label in self._formats: table._formats[label] = self._formats[label]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_df(cls, df): """Convert a Pandas DataFrame into a Table."""
t = cls() labels = df.columns for label in df.columns: t.append_column(label, df[label]) return t
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_array(cls, arr): """Convert a structured NumPy array into a Table."""
return cls().with_columns([(f, arr[f]) for f in arr.dtype.names])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def column(self, index_or_label): """Return the values of a column as an array. table.column(label) is equivalent to table[label]. ['c', 'd'] array([2, 4]) Args: label (int or str): The index or label of a column Returns: An instance of ``numpy.array``. Raises: ``ValueError``: When the ``index_or_label`` is not in the table. """
if (isinstance(index_or_label, str) and index_or_label not in self.labels): raise ValueError( 'The column "{}" is not in the table. The table contains ' 'these columns: {}' .format(index_or_label, ', '.join(self.labels)) ) if (isinstance(index_or_label, int) and not 0 <= index_or_label < len(self.labels)): raise ValueError( 'The index {} is not in the table. Only indices between ' '0 and {} are valid' .format(index_or_label, len(self.labels) - 1) ) return self._columns[self._as_label(index_or_label)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def values(self): """Return data in `self` as a numpy array. If all columns are the same dtype, the resulting array will have this dtype. If there are >1 dtypes in columns, then the resulting array will have dtype `object`. """
dtypes = [col.dtype for col in self.columns] if len(set(dtypes)) > 1: dtype = object else: dtype = None return np.array(self.columns, dtype=dtype).T
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply(self, fn, *column_or_columns): """Apply ``fn`` to each element or elements of ``column_or_columns``. If no ``column_or_columns`` provided, `fn`` is applied to each row. Args: ``fn`` (function) -- The function to apply. ``column_or_columns``: Columns containing the arguments to ``fn`` as either column labels (``str``) or column indices (``int``). The number of columns must match the number of arguments that ``fn`` expects. Raises: ``ValueError`` -- if ``column_label`` is not an existing column in the table. ``TypeError`` -- if insufficent number of ``column_label`` passed to ``fn``. Returns: An array consisting of results of applying ``fn`` to elements specified by ``column_label`` in each row. letter | count | points a | 9 | 1 b | 3 | 2 c | 3 | 2 z | 1 | 10 array([0, 1, 1, 9]) array([ 9, 6, 6, 10]) Traceback (most recent call last): TypeError: <lambda>() takes 1 positional argument but 2 were given Traceback (most recent call last): ValueError: The column "counts" is not in the table. The table contains these columns: letter, count, points Whole rows are passed to the function if no columns are specified. array([18, 6, 6, 2]) """
if not column_or_columns: return np.array([fn(row) for row in self.rows]) else: if len(column_or_columns) == 1 and \ _is_non_string_iterable(column_or_columns[0]): warnings.warn( "column lists are deprecated; pass each as an argument", FutureWarning) column_or_columns = column_or_columns[0] rows = zip(*self.select(*column_or_columns).columns) return np.array([fn(*row) for row in rows])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_format(self, column_or_columns, formatter): """Set the format of a column."""
if inspect.isclass(formatter): formatter = formatter() if callable(formatter) and not hasattr(formatter, 'format_column'): formatter = _formats.FunctionFormatter(formatter) if not hasattr(formatter, 'format_column'): raise Exception('Expected Formatter or function: ' + str(formatter)) for label in self._as_labels(column_or_columns): if formatter.converts_values: self[label] = formatter.convert_column(self[label]) self._formats[label] = formatter return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def move_to_start(self, column_label): """Move a column to the first in order."""
self._columns.move_to_end(column_label, last=False) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def append(self, row_or_table): """Append a row or all rows of a table. An appended table must have all columns of self."""
if not row_or_table: return if isinstance(row_or_table, Table): t = row_or_table columns = list(t.select(self.labels)._columns.values()) n = t.num_rows else: if (len(list(row_or_table)) != self.num_columns): raise Exception('Row should have '+ str(self.num_columns) + " columns") columns, n = [[value] for value in row_or_table], 1 for i, column in enumerate(self._columns): if self.num_rows: self._columns[column] = np.append(self[column], columns[i]) else: self._columns[column] = np.array(columns[i]) self._num_rows += n return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def append_column(self, label, values): """Appends a column to the table or replaces a column. ``__setitem__`` is aliased to this method: ``table.append_column('new_col', make_array(1, 2, 3))`` is equivalent to ``table['new_col'] = make_array(1, 2, 3)``. Args: ``label`` (str): The label of the new column. ``values`` (single value or list/array): If a single value, every value in the new column is ``values``. If a list or array, the new column contains the values in ``values``, which must be the same length as the table. Returns: Original table with new or replaced column Raises: ``ValueError``: If - ``label`` is not a string. - ``values`` is a list/array and does not have the same length as the number of rows in the table. letter | count | points a | 9 | 1 b | 3 | 2 c | 3 | 2 z | 1 | 10 letter | count | points | new_col1 a | 9 | 1 | 10 b | 3 | 2 | 20 c | 3 | 2 | 30 z | 1 | 10 | 40 letter | count | points | new_col1 | new_col2 a | 9 | 1 | 10 | hello b | 3 | 2 | 20 | hello c | 3 | 2 | 30 | hello z | 1 | 10 | 40 | hello Traceback (most recent call last): ValueError: The column label must be a string, but a int was given Traceback (most recent call last): ValueError: Column length mismatch. New column does not have the same number of rows as table. """
# TODO(sam): Allow append_column to take in a another table, copying # over formatter as needed. if not isinstance(label, str): raise ValueError('The column label must be a string, but a ' '{} was given'.format(label.__class__.__name__)) if not isinstance(values, np.ndarray): # Coerce a single value to a sequence if not _is_non_string_iterable(values): values = [values] * max(self.num_rows, 1) values = np.array(tuple(values)) if self.num_rows != 0 and len(values) != self.num_rows: raise ValueError('Column length mismatch. New column does not have ' 'the same number of rows as table.') else: self._num_rows = len(values) self._columns[label] = values
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove(self, row_or_row_indices): """Removes a row or multiple rows of a table in place."""
if not row_or_row_indices: return if isinstance(row_or_row_indices, int): rows_remove = [row_or_row_indices] else: rows_remove = row_or_row_indices for col in self._columns: self._columns[col] = [elem for i, elem in enumerate(self[col]) if i not in rows_remove] return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(self, *, shallow=False): """Return a copy of a table."""
table = type(self)() for label in self.labels: if shallow: column = self[label] else: column = np.copy(self[label]) self._add_column_and_format(table, label, column) return table
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def select(self, *column_or_columns): """Return a table with only the columns in ``column_or_columns``. Args: ``column_or_columns``: Columns to select from the ``Table`` as either column labels (``str``) or column indices (``int``). Returns: A new instance of ``Table`` containing only selected columns. The columns of the new ``Table`` are in the order given in ``column_or_columns``. Raises: ``KeyError`` if any of ``column_or_columns`` are not in the table. Number of petals | Name | Weight 8 | lotus | 10 34 | sunflower | 5 5 | rose | 6 Number of petals | Weight 8 | 10 34 | 5 5 | 6 Number of petals | Name | Weight 8 | lotus | 10 34 | sunflower | 5 5 | rose | 6 Number of petals | Weight 8 | 10 34 | 5 5 | 6 """
labels = self._varargs_as_labels(column_or_columns) table = type(self)() for label in labels: self._add_column_and_format(table, label, np.copy(self[label])) return table
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def drop(self, *column_or_columns): """Return a Table with only columns other than selected label or labels. Args: ``column_or_columns`` (string or list of strings): The header names or indices of the columns to be dropped. ``column_or_columns`` must be an existing header name, or a valid column index. Returns: An instance of ``Table`` with given columns removed. burgers | prices | calories cheeseburger | 6 | 743 hamburger | 5 | 651 veggie burger | 5 | 582 burgers | calories cheeseburger | 743 hamburger | 651 veggie burger | 582 prices 6 5 5 prices 6 5 5 prices 6 5 5 prices 6 5 5 burgers | calories cheeseburger | 743 hamburger | 651 veggie burger | 582 """
exclude = _varargs_labels_as_list(column_or_columns) return self.select([c for (i, c) in enumerate(self.labels) if i not in exclude and c not in exclude])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def where(self, column_or_label, value_or_predicate=None, other=None): """ Return a new ``Table`` containing rows where ``value_or_predicate`` returns True for values in ``column_or_label``. Args: ``column_or_label``: A column of the ``Table`` either as a label (``str``) or an index (``int``). Can also be an array of booleans; only the rows where the array value is ``True`` are kept. ``value_or_predicate``: If a function, it is applied to every value in ``column_or_label``. Only the rows where ``value_or_predicate`` returns True are kept. If a single value, only the rows where the values in ``column_or_label`` are equal to ``value_or_predicate`` are kept. ``other``: Optional additional column label for ``value_or_predicate`` to make pairwise comparisons. See the examples below for usage. When ``other`` is supplied, ``value_or_predicate`` must be a callable function. Returns: If ``value_or_predicate`` is a function, returns a new ``Table`` containing only the rows where ``value_or_predicate(val)`` is True for the ``val``s in ``column_or_label``. If ``value_or_predicate`` is a value, returns a new ``Table`` containing only the rows where the values in ``column_or_label`` are equal to ``value_or_predicate``. If ``column_or_label`` is an array of booleans, returns a new ``Table`` containing only the rows where ``column_or_label`` is ``True``. Color | Shape | Amount | Price Red | Round | 4 | 1.3 Green | Rectangular | 6 | 1.2 Blue | Rectangular | 12 | 2 Red | Round | 7 | 1.75 Green | Rectangular | 9 | 0 Green | Round | 2 | 3 Use a value to select matching rows Color | Shape | Amount | Price Red | Round | 4 | 1.3 In general, a higher order predicate function such as the functions in ``datascience.predicates.are`` can be used. 
Color | Shape | Amount | Price Red | Round | 4 | 1.3 Color | Shape | Amount | Price Blue | Rectangular | 12 | 2 Red | Round | 7 | 1.75 Green | Round | 2 | 3 Use the optional argument ``other`` to apply predicates to compare columns. Color | Shape | Amount | Price Green | Round | 2 | 3 Color | Shape | Amount | Price """
column = self._get_column(column_or_label) if other is not None: assert callable(value_or_predicate), "Predicate required for 3-arg where" predicate = value_or_predicate other = self._get_column(other) column = [predicate(y)(x) for x, y in zip(column, other)] elif value_or_predicate is not None: if not callable(value_or_predicate): predicate = _predicates.are.equal_to(value_or_predicate) else: predicate = value_or_predicate column = [predicate(x) for x in column] return self.take(np.nonzero(column)[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sort(self, column_or_label, descending=False, distinct=False): """Return a Table of rows sorted according to the values in a column. Args: ``column_or_label``: the column whose values are used for sorting. ``descending``: if True, sorting will be in descending, rather than ascending order. ``distinct``: if True, repeated values in ``column_or_label`` will be omitted. Returns: An instance of ``Table`` containing rows sorted based on the values in ``column_or_label``. Color | Shape | Amount | Price Red | Round | 4 | 1.3 Green | Rectangular | 6 | 1.3 Blue | Rectangular | 12 | 2 Red | Round | 7 | 1.75 Green | Rectangular | 9 | 1.4 Green | Round | 2 | 1 Color | Shape | Amount | Price Green | Round | 2 | 1 Red | Round | 4 | 1.3 Green | Rectangular | 6 | 1.3 Red | Round | 7 | 1.75 Green | Rectangular | 9 | 1.4 Blue | Rectangular | 12 | 2 Color | Shape | Amount | Price Blue | Rectangular | 12 | 2 Green | Rectangular | 9 | 1.4 Red | Round | 7 | 1.75 Green | Rectangular | 6 | 1.3 Red | Round | 4 | 1.3 Green | Round | 2 | 1 Color | Shape | Amount | Price Green | Round | 2 | 1 Red | Round | 4 | 1.3 Green | Rectangular | 6 | 1.3 Green | Rectangular | 9 | 1.4 Red | Round | 7 | 1.75 Blue | Rectangular | 12 | 2 Color | Shape | Amount | Price Green | Round | 2 | 1 Red | Round | 4 | 1.3 Green | Rectangular | 9 | 1.4 Red | Round | 7 | 1.75 Blue | Rectangular | 12 | 2 """
column = self._get_column(column_or_label) if distinct: _, row_numbers = np.unique(column, return_index=True) else: row_numbers = np.argsort(column, axis=0, kind='mergesort') assert (row_numbers < self.num_rows).all(), row_numbers if descending: row_numbers = np.array(row_numbers[::-1]) return self.take(row_numbers)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def group(self, column_or_label, collect=None): """Group rows by unique values in a column; count or aggregate others. Args: ``column_or_label``: values to group (column label or index, or array) ``collect``: a function applied to values in other columns for each group Returns: A Table with each row corresponding to a unique value in ``column_or_label``, where the first column contains the unique values from ``column_or_label``, and the second contains counts for each of the unique values. If ``collect`` is provided, a Table is returned with all original columns, each containing values calculated by first grouping rows according to ``column_or_label``, then applying ``collect`` to each set of grouped values in the other columns. Note: The grouped column will appear first in the result table. If ``collect`` does not accept arguments with one of the column types, that column will be empty in the resulting table. Color | Shape | Amount | Price Red | Round | 4 | 1.3 Green | Rectangular | 6 | 1.3 Blue | Rectangular | 12 | 2 Red | Round | 7 | 1.75 Green | Rectangular | 9 | 1.4 Green | Round | 2 | 1 Color | count Blue | 1 Green | 3 Red | 2 Color | Shape max | Amount max | Price max Blue | Rectangular | 12 | 2 Green | Round | 9 | 1.4 Red | Round | 7 | 1.75 Shape | Color sum | Amount sum | Price sum Rectangular | | 27 | 4.7 Round | | 13 | 4.05 """
# Assume that a call to group with a list of labels is a call to groups if _is_non_string_iterable(column_or_label) and \ len(column_or_label) != self._num_rows: return self.groups(column_or_label, collect) self = self.copy(shallow=True) collect = _zero_on_type_error(collect) # Remove column used for grouping column = self._get_column(column_or_label) if isinstance(column_or_label, str) or isinstance(column_or_label, numbers.Integral): column_label = self._as_label(column_or_label) del self[column_label] else: column_label = self._unused_label('group') # Group by column groups = self.index_by(column) keys = sorted(groups.keys()) # Generate grouped columns if collect is None: labels = [column_label, 'count' if column_label != 'count' else self._unused_label('count')] columns = [keys, [len(groups[k]) for k in keys]] else: columns, labels = [], [] for i, label in enumerate(self.labels): labels.append(_collected_label(collect, label)) c = [collect(np.array([row[i] for row in groups[k]])) for k in keys] columns.append(c) grouped = type(self)().with_columns(zip(labels, columns)) assert column_label == self._unused_label(column_label) grouped[column_label] = keys grouped.move_to_start(column_label) return grouped
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def groups(self, labels, collect=None): """Group rows by multiple columns, count or aggregate others. Args: ``labels``: list of column names (or indices) to group on ``collect``: a function applied to values in other columns for each group Returns: A Table with each row corresponding to a unique combination of values in the columns specified in ``labels``, where the first columns are those specified in ``labels``, followed by a column of counts for each of the unique values. If ``collect`` is provided, a Table is returned with all original columns, each containing values calculated by first grouping rows according to to values in the ``labels`` column, then applying ``collect`` to each set of grouped values in the other columns. Note: The grouped columns will appear first in the result table. If ``collect`` does not accept arguments with one of the column types, that column will be empty in the resulting table. Color | Shape | Amount | Price Red | Round | 4 | 1.3 Green | Rectangular | 6 | 1.3 Blue | Rectangular | 12 | 2 Red | Round | 7 | 1.75 Green | Rectangular | 9 | 1.4 Green | Round | 2 | 1 Color | Shape | count Blue | Rectangular | 1 Green | Rectangular | 2 Green | Round | 1 Red | Round | 2 Color | Shape | Amount sum | Price sum Blue | Rectangular | 12 | 2 Green | Rectangular | 15 | 2.7 Green | Round | 2 | 1 Red | Round | 11 | 3.05 """
# Assume that a call to groups with one label is a call to group if not _is_non_string_iterable(labels): return self.group(labels, collect=collect) collect = _zero_on_type_error(collect) columns = [] labels = self._as_labels(labels) for label in labels: if label not in self.labels: raise ValueError("All labels must exist in the table") columns.append(self._get_column(label)) grouped = self.group(list(zip(*columns)), lambda s: s) grouped._columns.popitem(last=False) # Discard the column of tuples # Flatten grouping values and move them to front counts = [len(v) for v in grouped[0]] for label in labels[::-1]: grouped[label] = grouped.apply(_assert_same, label) grouped.move_to_start(label) # Aggregate other values if collect is None: count = 'count' if 'count' not in labels else self._unused_label('count') return grouped.select(labels).with_column(count, counts) else: for label in grouped.labels: if label in labels: continue column = [collect(v) for v in grouped[label]] del grouped[label] grouped[_collected_label(collect, label)] = column return grouped
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pivot_bin(self, pivot_columns, value_column, bins=None, **vargs) : """Form a table with columns formed by the unique tuples in pivot_columns containing counts per bin of the values associated with each tuple in the value_column. By default, bins are chosen to contain all values in the value_column. The following named arguments from numpy.histogram can be applied to specialize bin widths: Args: ``bins`` (int or sequence of scalars): If bins is an int, it defines the number of equal-width bins in the given range (10, by default). If bins is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. ``range`` ((float, float)): The lower and upper range of the bins. If not provided, range contains all values in the table. Values outside the range are ignored. ``normed`` (bool): If False, the result will contain the number of samples in each bin. If True, the result is normalized such that the integral over the range is 1. """
pivot_columns = _as_labels(pivot_columns)
selected = self.select(pivot_columns + [value_column])
# One row per unique pivot tuple, holding that group's raw values.
grouped = selected.groups(pivot_columns, collect=lambda x:x)

# refine bins by taking a histogram over all the data
if bins is not None:
    vargs['bins'] = bins
_, rbins = np.histogram(self[value_column],**vargs)
# create a table with these bins a first column and counts for each group
vargs['bins'] = rbins
binned = type(self)().with_column('bin',rbins)
for group in grouped.rows:
    # Column label joins the pivot values with '-'; the row's last entry
    # holds the group's collected values.
    col_label = "-".join(map(str,group[0:-1]))
    col_vals = group[-1]
    counts,_ = np.histogram(col_vals,**vargs)
    # Pad with a trailing 0 so counts align with the bin-edge column,
    # which has one more entry than there are bins.
    binned[col_label] = np.append(counts,0)
return binned
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stack(self, key, labels=None): """Takes k original columns and returns two columns, with col. 1 of all column names and col. 2 of all associated data. """
rows, labels = [], labels or self.labels for row in self.rows: [rows.append((getattr(row, key), k, v)) for k, v in row.asdict().items() if k != key and k in labels] return type(self)([key, 'column', 'value']).with_rows(rows)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def join(self, column_label, other, other_label=None): """Creates a new table with the columns of self and other, containing rows for all values of a column that appear in both tables. Args: ``column_label`` (``str``): label of column in self that is used to join rows of ``other``. ``other``: Table object to join with self on matching values of ``column_label``. Kwargs: ``other_label`` (``str``): default None, assumes ``column_label``. Otherwise in ``other`` used to join rows. Returns: New table self joined with ``other`` by matching values in ``column_label`` and ``other_label``. If the resulting join is empty, returns None. a | b | c 9 | 1 | 3 3 | 2 | 4 3 | 2 | 5 1 | 10 | 6 a | d | e 9 | 1 | 3 1 | 2 | 4 1 | 2 | 5 1 | 10 | 6 a | b | c | d | e 1 | 10 | 6 | 2 | 4 1 | 10 | 6 | 2 | 5 1 | 10 | 6 | 10 | 6 9 | 1 | 3 | 1 | 3 a | b | c | d | e 1 | 10 | 6 | 2 | 4 1 | 10 | 6 | 2 | 5 1 | 10 | 6 | 10 | 6 9 | 1 | 3 | 1 | 3 a | b | c | a_2 | e 1 | 10 | 6 | 9 | 3 a | d | e 9 | 1 | 3 1 | 2 | 4 1 | 2 | 5 1 | 10 | 6 a | b | c 9 | 1 | 3 3 | 2 | 4 3 | 2 | 5 1 | 10 | 6 """
# An empty table on either side cannot produce any joined rows.
if self.num_rows == 0 or other.num_rows == 0:
    return None
# Default: join on the same label in both tables.
if not other_label:
    other_label = column_label

# Index both tables by their join value for constant-time match lookup.
self_rows = self.index_by(column_label)
other_rows = other.index_by(other_label)

# Gather joined rows from self_rows that have join values in other_rows
joined_rows = []
for v, rows in self_rows.items():
    if v in other_rows:
        # Cartesian product of the matching rows from both sides.
        joined_rows += [row + o for row in rows for o in other_rows[v]]
if not joined_rows:
    return None

# Build joined table
self_labels = list(self.labels)
# Rename any of other's labels that clash with self's labels.
other_labels = [self._unused_label(s) for s in other.labels]
other_labels_map = dict(zip(other.labels, other_labels))
joined = type(self)(self_labels + other_labels).with_rows(joined_rows)

# Copy formats from both tables
joined._formats.update(self._formats)
for label in other._formats:
    joined._formats[other_labels_map[label]] = other._formats[label]

# Remove redundant column, but perhaps save its formatting
del joined[other_labels_map[other_label]]
if column_label not in self._formats and other_label in other._formats:
    joined._formats[column_label] = other._formats[other_label]

# Join column first, rows sorted by join value.
return joined.move_to_start(column_label).sort(column_label)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stats(self, ops=(min, max, np.median, sum)): """Compute statistics for each column and place them in a table."""
names = [op.__name__ for op in ops] ops = [_zero_on_type_error(op) for op in ops] columns = [[op(column) for op in ops] for column in self.columns] table = type(self)().with_columns(zip(self.labels, columns)) stats = table._unused_label('statistic') table[stats] = names table.move_to_start(stats) return table
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _as_label(self, index_or_label): """Convert index to label."""
if isinstance(index_or_label, str): return index_or_label if isinstance(index_or_label, numbers.Integral): return self.labels[index_or_label] else: raise ValueError(str(index_or_label) + ' is not a label or index')