def merge_subcluster(self, nominee_cluster, threshold):
    """Check if a cluster is worthy enough to be merged. If yes, then merge."""
    new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
    new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
    new_n = self.n_samples_ + nominee_cluster.n_samples_
    new_centroid = (1 / new_n) * new_ls
    new_norm = np.dot(new_centroid, new_centroid)
    dot_product = -2 * new_n * new_norm
    sq_radius = (new_ss + dot_product) / new_n + new_norm
    if sq_radius <= threshold ** 2:
        (self.n_samples_, self.linear_sum_, self.squared_sum_,
         self.centroid_, self.sq_norm_) = (new_n, new_ls, new_ss,
                                           new_centroid, new_norm)
        return True
    return False
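A minimal standalone sketch of the merge test above (hypothetical helper name): with n samples, linear sum LS and squared sum SS, the squared radius of a CF subcluster is SS/n - ||LS/n||**2, so the merge is accepted when this quantity for the pooled statistics stays at or below threshold**2.

    import numpy as np

    def merged_sq_radius(n1, ls1, ss1, n2, ls2, ss2):
        # Pool the sufficient statistics of the two subclusters.
        n, ls, ss = n1 + n2, ls1 + ls2, ss1 + ss2
        centroid = ls / n
        # Mean squared distance of the pooled samples to the pooled centroid.
        return ss / n - np.dot(centroid, centroid)

    # merge_subcluster accepts the merge when
    # merged_sq_radius(...) <= threshold ** 2.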
@property
def radius(self):
    """Return radius of the subcluster."""
    dot_product = -2 * np.dot(self.linear_sum_, self.centroid_)
    return sqrt((self.squared_sum_ + dot_product) / self.n_samples_ +
                self.sq_norm_)
def fit(self, X, y=None):
    """Build a CF Tree for the input data.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data.
    """
    self.fit_, self.partial_fit_ = True, False
    return self._fit(X)
def _get_leaves(self):
    """Retrieve the leaves of the CF Node.

    Returns
    -------
    leaves : array-like
        List of the leaf nodes.
    """
    leaf_ptr = self.dummy_leaf_.next_leaf_
    leaves = []
    while leaf_ptr is not None:
        leaves.append(leaf_ptr)
        leaf_ptr = leaf_ptr.next_leaf_
    return leaves
def partial_fit(self, X=None, y=None):
    """Online learning. Prevents rebuilding of CFTree from scratch.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features), None
        Input data. If X is not provided, only the global clustering
        step is done.
    """
    self.partial_fit_, self.fit_ = True, False
    if X is None:
        # Perform just the final global clustering step.
        self._global_clustering()
        return self
    else:
        self._check_fit(X)
        return self._fit(X)
def predict(self, X):
    """Predict data using the ``centroids_`` of subclusters.

    Avoid computation of the row norms of X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    labels : ndarray, shape (n_samples,)
        Labelled data.
    """
    X = check_array(X, accept_sparse='csr')
    self._check_fit(X)
    reduced_distance = safe_sparse_dot(X, self.subcluster_centers_.T)
    reduced_distance *= -2
    reduced_distance += self._subcluster_norms
    return self.subcluster_labels_[np.argmin(reduced_distance, axis=1)]
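The `reduced_distance` trick in `predict` works because ||x - c||^2 = ||x||^2 - 2<x, c> + ||c||^2, and the ||x||^2 term is constant across candidate centroids, so dropping it cannot change the argmin. A small self-contained check on hypothetical data:

    import numpy as np

    rng = np.random.RandomState(0)
    X = rng.rand(5, 3)
    centers = rng.rand(4, 3)
    # Drop the per-row ||x||^2 term: it is constant within each argmin.
    reduced = -2 * (X @ centers.T) + (centers ** 2).sum(axis=1)
    full = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(axis=-1)
    assert (reduced.argmin(axis=1) == full.argmin(axis=1)).all()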
def transform(self, X):
    """Transform X into subcluster centroids dimension.

    Each dimension represents the distance from the sample point to each
    cluster centroid.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Input data.

    Returns
    -------
    X_trans : {array-like, sparse matrix}, shape (n_samples, n_clusters)
        Transformed data.
    """
    check_is_fitted(self, 'subcluster_centers_')
    return euclidean_distances(X, self.subcluster_centers_)
def _global_clustering(self, X=None):
    """Global clustering for the subclusters obtained after fitting."""
    clusterer = self.n_clusters
    centroids = self.subcluster_centers_
    compute_labels = (X is not None) and self.compute_labels
    not_enough_centroids = False
    if isinstance(clusterer, int):
        clusterer = AgglomerativeClustering(n_clusters=self.n_clusters)
        # There is no need to perform the global clustering step if
        # there are fewer subclusters than requested clusters.
        if len(centroids) < self.n_clusters:
            not_enough_centroids = True
    elif clusterer is not None and not hasattr(clusterer, 'fit_predict'):
        raise ValueError('n_clusters should be an instance of ClusterMixin '
                         'or an int')
    # To use in predict to avoid recalculation.
    self._subcluster_norms = row_norms(self.subcluster_centers_, squared=True)
    if clusterer is None or not_enough_centroids:
        self.subcluster_labels_ = np.arange(len(centroids))
        if not_enough_centroids:
            warnings.warn('Number of subclusters found (%d) by Birch is less '
                          'than (%d). Decrease the threshold.'
                          % (len(centroids), self.n_clusters))
    else:
        # The global clustering step that clusters the subclusters of
        # the leaves. It assumes the centroids of the subclusters as
        # samples and finds the final centroids.
        self.subcluster_labels_ = clusterer.fit_predict(
            self.subcluster_centers_)
    if compute_labels:
        self.labels_ = self.predict(X)
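For context, a brief usage sketch of the public Birch estimator these methods belong to (toy random data, illustrative parameter values):

    import numpy as np
    from sklearn.cluster import Birch

    X = np.random.RandomState(0).rand(100, 2)
    brc = Birch(threshold=0.1, n_clusters=3).fit(X)
    print(brc.predict(X[:5]))            # labels of the first five samples
    print(brc.subcluster_centers_.shape) # centroids of the CF-tree leaves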
def fit(self, X, y=None):
    """Creates an affinity matrix for X using the selected affinity, then
    applies spectral clustering to this affinity matrix.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        OR, if affinity=='precomputed', a precomputed affinity matrix of
        shape (n_samples, n_samples)
    """
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                    dtype=np.float64)
    if X.shape[0] == X.shape[1] and self.affinity != 'precomputed':
        warnings.warn('The spectral clustering API has changed. ``fit`` '
                      'now constructs an affinity matrix from data. To use '
                      'a custom affinity matrix, set '
                      '``affinity=precomputed``.')
    if self.affinity == 'nearest_neighbors':
        connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                        include_self=True,
                                        n_jobs=self.n_jobs)
        self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
    elif self.affinity == 'precomputed':
        self.affinity_matrix_ = X
    else:
        params = self.kernel_params
        if params is None:
            params = {}
        if not callable(self.affinity):
            params['gamma'] = self.gamma
            params['degree'] = self.degree
            params['coef0'] = self.coef0
        self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                 filter_params=True,
                                                 **params)
    random_state = check_random_state(self.random_state)
    self.labels_ = spectral_clustering(self.affinity_matrix_,
                                       n_clusters=self.n_clusters,
                                       eigen_solver=self.eigen_solver,
                                       random_state=random_state,
                                       n_init=self.n_init,
                                       eigen_tol=self.eigen_tol,
                                       assign_labels=self.assign_labels)
    return self
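A short usage example of the public SpectralClustering estimator this `fit` belongs to (toy random data):

    import numpy as np
    from sklearn.cluster import SpectralClustering

    X = np.random.RandomState(0).rand(30, 2)
    sc = SpectralClustering(n_clusters=2, affinity='rbf',
                            random_state=0).fit(X)
    print(sc.labels_)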
def fit(self, X, y=None):
    """Creates a biclustering for X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    """
    X = check_array(X, accept_sparse='csr', dtype=np.float64)
    self._check_parameters()
    self._fit(X)
    return self
def _svd(self, array, n_components, n_discard):
    """Returns first `n_components` left and right singular vectors u and v,
    discarding the first `n_discard`."""
    if self.svd_method == 'randomized':
        kwargs = {}
        if self.n_svd_vecs is not None:
            kwargs['n_oversamples'] = self.n_svd_vecs
        u, _, vt = randomized_svd(array, n_components,
                                  random_state=self.random_state, **kwargs)
    elif self.svd_method == 'arpack':
        u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
        if np.any(np.isnan(vt)):
            # Some eigenvalues of A^T A are negative, causing svds to
            # fail; recover v as the eigenvectors of A^T A instead.
            A = safe_sparse_dot(array.T, array)
            random_state = check_random_state(self.random_state)
            # Initialize with [-1, 1] as in ARPACK.
            v0 = random_state.uniform(-1, 1, A.shape[0])
            _, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
            vt = v.T
        if np.any(np.isnan(u)):
            # Same workaround for the left singular vectors, via A A^T.
            A = safe_sparse_dot(array, array.T)
            random_state = check_random_state(self.random_state)
            v0 = random_state.uniform(-1, 1, A.shape[0])
            _, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
    assert_all_finite(u)
    assert_all_finite(vt)
    u = u[:, n_discard:]
    vt = vt[n_discard:]
    return u, vt.T
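The `randomized` branch above delegates to scikit-learn's public randomized_svd helper; a short illustration on a random matrix:

    import numpy as np
    from sklearn.utils.extmath import randomized_svd

    A = np.random.RandomState(0).rand(50, 30)
    U, s, Vt = randomized_svd(A, n_components=5, random_state=0)
    print(U.shape, s.shape, Vt.shape)   # (50, 5) (5,) (5, 30)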
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
    """Find the ``n_best`` vectors that are best approximated by piecewise
    constant vectors.

    The piecewise vectors are found by k-means; the best is chosen
    according to Euclidean distance.
    """
    def make_piecewise(v):
        centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
        return centroid[labels].ravel()
    piecewise_vectors = np.apply_along_axis(make_piecewise, axis=1,
                                            arr=vectors)
    dists = np.apply_along_axis(norm, axis=1,
                                arr=(vectors - piecewise_vectors))
    result = vectors[np.argsort(dists)[:n_best]]
    return result
def _project_and_cluster(self, data, vectors, n_clusters):
    """Project ``data`` to ``vectors`` and cluster the result."""
    projected = safe_sparse_dot(data, vectors)
    _, labels = self._k_means(projected, n_clusters)
    return labels
def fit(self, X, y=None):
    """Create affinity matrix from negative euclidean distances, then
    apply affinity propagation clustering.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features) or (n_samples, n_samples)
        Data matrix or, if affinity is ``precomputed``, matrix of
        similarities / affinities.
    """
    X = check_array(X, accept_sparse='csr')
    if self.affinity == 'precomputed':
        self.affinity_matrix_ = X
    elif self.affinity == 'euclidean':
        self.affinity_matrix_ = -euclidean_distances(X, squared=True)
    else:
        raise ValueError("Affinity must be 'precomputed' or 'euclidean'. "
                         "Got %s instead" % str(self.affinity))
    (self.cluster_centers_indices_, self.labels_, self.n_iter_) = \
        affinity_propagation(self.affinity_matrix_, self.preference,
                             max_iter=self.max_iter,
                             convergence_iter=self.convergence_iter,
                             damping=self.damping, copy=self.copy,
                             verbose=self.verbose, return_n_iter=True)
    if self.affinity != 'precomputed':
        self.cluster_centers_ = X[self.cluster_centers_indices_].copy()
    return self
def predict(self, X):
    """Predict the closest cluster each sample in X belongs to.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        New data to predict.

    Returns
    -------
    labels : array, shape (n_samples,)
        Index of the cluster each sample belongs to.
    """
    check_is_fitted(self, 'cluster_centers_indices_')
    if not hasattr(self, 'cluster_centers_'):
        raise ValueError("Predict method is not supported when "
                         "affinity='precomputed'.")
    return pairwise_distances_argmin(X, self.cluster_centers_)
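A brief usage example of the public AffinityPropagation estimator (toy data):

    import numpy as np
    from sklearn.cluster import AffinityPropagation

    X = np.array([[1, 2], [1, 4], [1, 0],
                  [4, 2], [4, 4], [4, 0]], dtype=float)
    ap = AffinityPropagation().fit(X)    # affinity='euclidean' by default
    print(ap.labels_, ap.cluster_centers_indices_)
    print(ap.predict([[0, 0], [4, 4]]))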
def _fit(self, X, skip_num_points=0):
    """Fit the model using X as training data.

    Note that sparse arrays can only be handled by method='exact'. It is
    recommended that you convert your sparse array to dense (e.g.
    `X.toarray()`) if it fits in memory, or otherwise use a dimensionality
    reduction technique (e.g. TruncatedSVD).

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance matrix.
        Otherwise it contains a sample per row. Note that when
        method='barnes_hut', X cannot be a sparse array and if need be
        will be converted to a 32-bit float array. method='exact' allows
        sparse arrays and 64-bit floating point inputs.

    skip_num_points : int (optional, default: 0)
        This does not compute the gradient for points with indices below
        `skip_num_points`. This is useful when computing transforms of new
        data where you'd like to keep the old data fixed.
    """
    if self.method not in ['barnes_hut', 'exact']:
        raise ValueError("'method' must be 'barnes_hut' or 'exact'")
    if self.angle < 0.0 or self.angle > 1.0:
        raise ValueError("'angle' must be between 0.0 - 1.0")
    if self.metric == 'precomputed':
        if isinstance(self.init, string_types) and self.init == 'pca':
            raise ValueError('The parameter init="pca" cannot be used '
                             'with metric="precomputed".')
        if X.shape[0] != X.shape[1]:
            raise ValueError('X should be a square distance matrix')
        if np.any(X < 0):
            raise ValueError('All distances should be positive, the '
                             'precomputed distances given as X are not '
                             'correct')
    if self.method == 'barnes_hut' and sp.issparse(X):
        raise TypeError('A sparse matrix was passed, but dense data is '
                        'required for method="barnes_hut". Use X.toarray() '
                        'to convert to a dense numpy array if the array is '
                        'small enough for it to fit in memory. Otherwise '
                        'consider dimensionality reduction techniques '
                        '(e.g. TruncatedSVD)')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=[np.float32, np.float64])
    if self.method == 'barnes_hut' and self.n_components > 3:
        raise ValueError("'n_components' should be less than 4 for the "
                         "barnes_hut algorithm as it relies on quad-tree "
                         "or oct-tree.")
    random_state = check_random_state(self.random_state)
    if self.early_exaggeration < 1.0:
        raise ValueError('early_exaggeration must be at least 1, but is '
                         '{}'.format(self.early_exaggeration))
    if self.n_iter < 250:
        raise ValueError('n_iter should be at least 250')
    n_samples = X.shape[0]
    neighbors_nn = None
    if self.method == 'exact':
        if self.metric == 'precomputed':
            distances = X
        else:
            if self.verbose:
                print('[t-SNE] Computing pairwise distances...')
            if self.metric == 'euclidean':
                distances = pairwise_distances(X, metric=self.metric,
                                               squared=True)
            else:
                distances = pairwise_distances(X, metric=self.metric)
            if np.any(distances < 0):
                raise ValueError('All distances should be positive, the '
                                 'metric given is not correct')
        # Compute the joint probability distribution for the input space.
        P = _joint_probabilities(distances, self.perplexity, self.verbose)
        assert np.all(np.isfinite(P)), 'All probabilities should be finite'
        assert np.all(P >= 0), 'All probabilities should be non-negative'
        assert np.all(P <= 1), ('All probabilities should be less than or '
                                'equal to one')
    else:
        # Find the number of nearest neighbors to find, proportional to
        # the perplexity.
        k = min(n_samples - 1, int(3.0 * self.perplexity + 1))
        if self.verbose:
            print('[t-SNE] Computing {} nearest neighbors...'.format(k))
        neighbors_method = 'ball_tree'
        if self.metric == 'precomputed':
            neighbors_method = 'brute'
        knn = NearestNeighbors(algorithm=neighbors_method, n_neighbors=k,
                               metric=self.metric)
        t0 = time()
        knn.fit(X)
        duration = time() - t0
        if self.verbose:
            print('[t-SNE] Indexed {} samples in {:.3f}s...'.format(
                n_samples, duration))
        t0 = time()
        distances_nn, neighbors_nn = knn.kneighbors(None, n_neighbors=k)
        duration = time() - t0
        if self.verbose:
            print('[t-SNE] Computed neighbors for {} samples in '
                  '{:.3f}s...'.format(n_samples, duration))
        # Free the memory used by the ball tree.
        del knn
        if self.metric == 'euclidean':
            # knn returns euclidean distances, but we need them squared.
            distances_nn **= 2
        P = _joint_probabilities_nn(distances_nn, neighbors_nn,
                                    self.perplexity, self.verbose)
    if isinstance(self.init, np.ndarray):
        X_embedded = self.init
    elif self.init == 'pca':
        pca = PCA(n_components=self.n_components, svd_solver='randomized',
                  random_state=random_state)
        X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
    elif self.init == 'random':
        X_embedded = 1e-4 * random_state.randn(
            n_samples, self.n_components).astype(np.float32)
    else:
        raise ValueError("'init' must be 'pca', 'random', or a numpy array")
    # Degrees of freedom of the Student's t-distribution.
    degrees_of_freedom = max(self.n_components - 1.0, 1)
    return self._tsne(P, degrees_of_freedom, n_samples, random_state,
                      X_embedded=X_embedded, neighbors=neighbors_nn,
                      skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded,
          neighbors=None, skip_num_points=0):
    """Runs t-SNE."""
    params = X_embedded.ravel()
    opt_args = {
        'it': 0,
        'n_iter_check': self._N_ITER_CHECK,
        'min_grad_norm': self.min_grad_norm,
        'learning_rate': self.learning_rate,
        'verbose': self.verbose,
        'kwargs': dict(skip_num_points=skip_num_points),
        'args': [P, degrees_of_freedom, n_samples, self.n_components],
        'n_iter_without_progress': self._EXPLORATION_N_ITER,
        'n_iter': self._EXPLORATION_N_ITER,
        'momentum': 0.5,
    }
    if self.method == 'barnes_hut':
        obj_func = _kl_divergence_bh
        opt_args['kwargs']['angle'] = self.angle
        # Repeat verbose argument for _kl_divergence_bh.
        opt_args['kwargs']['verbose'] = self.verbose
    else:
        obj_func = _kl_divergence
    # Early exaggeration phase: exaggerate P with a lower momentum of 0.5.
    P *= self.early_exaggeration
    params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                  **opt_args)
    if self.verbose:
        print('[t-SNE] KL divergence after %d iterations with early '
              'exaggeration: %f' % (it + 1, kl_divergence))
    # Final optimization: remove the exaggeration and raise momentum to 0.8.
    P /= self.early_exaggeration
    remaining = self.n_iter - self._EXPLORATION_N_ITER
    if it < self._EXPLORATION_N_ITER or remaining > 0:
        opt_args['n_iter'] = self.n_iter
        opt_args['it'] = it + 1
        opt_args['momentum'] = 0.8
        opt_args['n_iter_without_progress'] = self.n_iter_without_progress
        params, kl_divergence, it = _gradient_descent(obj_func, params,
                                                      **opt_args)
    # Save the final number of iterations.
    self.n_iter_ = it
    if self.verbose:
        print('[t-SNE] Error after %d iterations: %f'
              % (it + 1, kl_divergence))
    X_embedded = params.reshape(n_samples, self.n_components)
    self.kl_divergence_ = kl_divergence
    return X_embedded
def fit_transform(self, X, y=None):
    """Fit X into an embedded space and return that transformed output.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance matrix.
        Otherwise it contains a sample per row.

    Returns
    -------
    X_new : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space.
    """
    embedding = self._fit(X)
    self.embedding_ = embedding
    return self.embedding_
def fit(self, X, y=None):
    """Fit X into an embedded space.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        If the metric is 'precomputed' X must be a square distance matrix.
        Otherwise it contains a sample per row. If the method is 'exact',
        X may be a sparse matrix of type 'csr', 'csc' or 'coo'.
    """
    self.fit_transform(X)
    return self
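A short usage example of the public TSNE estimator these methods implement (random toy data):

    import numpy as np
    from sklearn.manifold import TSNE

    X = np.random.RandomState(0).rand(100, 20)
    X_embedded = TSNE(n_components=2, random_state=0).fit_transform(X)
    print(X_embedded.shape)              # (100, 2)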
def fit(self, X, y=None, init=None):
    """Computes the position of the points in the embedding space.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        Input data. If ``dissimilarity=='precomputed'``, the input should
        be the dissimilarity matrix.

    init : ndarray, shape (n_samples,), optional, default: None
        Starting configuration of the embedding to initialize the SMACOF
        algorithm. By default, the algorithm is initialized with a
        randomly chosen array.
    """
    self.fit_transform(X, init=init)
    return self
def fit_transform(self, X, y=None, init=None):
    """Fit the data from X, and return the embedded coordinates.

    Parameters
    ----------
    X : array, shape (n_samples, n_features) or (n_samples, n_samples)
        Input data. If ``dissimilarity=='precomputed'``, the input should
        be the dissimilarity matrix.

    init : ndarray, shape (n_samples,), optional, default: None
        Starting configuration of the embedding to initialize the SMACOF
        algorithm. By default, the algorithm is initialized with a
        randomly chosen array.
    """
    X = check_array(X)
    if X.shape[0] == X.shape[1] and self.dissimilarity != 'precomputed':
        warnings.warn("The MDS API has changed. ``fit`` now constructs a "
                      "dissimilarity matrix from data. To use a custom "
                      "dissimilarity matrix, set "
                      "``dissimilarity='precomputed'``.")
    if self.dissimilarity == 'precomputed':
        self.dissimilarity_matrix_ = X
    elif self.dissimilarity == 'euclidean':
        self.dissimilarity_matrix_ = euclidean_distances(X)
    else:
        raise ValueError("Proximity must be 'precomputed' or 'euclidean'. "
                         "Got %s instead" % str(self.dissimilarity))
    self.embedding_, self.stress_, self.n_iter_ = smacof(
        self.dissimilarity_matrix_, metric=self.metric,
        n_components=self.n_components, init=init, n_init=self.n_init,
        n_jobs=self.n_jobs, max_iter=self.max_iter, verbose=self.verbose,
        eps=self.eps, random_state=self.random_state, return_n_iter=True)
    return self.embedding_
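A short usage example of the public MDS estimator (random toy data):

    import numpy as np
    from sklearn.manifold import MDS

    X = np.random.RandomState(0).rand(20, 5)
    emb = MDS(n_components=2, random_state=0).fit_transform(X)
    print(emb.shape)                     # (20, 2)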
def fit(self, X, y=None):
    """Compute the embedding vectors for data X.

    Parameters
    ----------
    X : array-like of shape [n_samples, n_features]
        Training set.

    Returns
    -------
    self : returns an instance of self.
    """
    self._fit_transform(X)
    return self
def fit_transform(self, X, y=None):
    """Compute the embedding vectors for data X and transform X.

    Parameters
    ----------
    X : array-like of shape [n_samples, n_features]
        Training set.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    self._fit_transform(X)
    return self.embedding_
def transform(self, X):
    """Transform new points into embedding space.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    X_new : array, shape = [n_samples, n_components]

    Notes
    -----
    Because of scaling performed by this method, it is discouraged to use
    it together with methods that are not scale-invariant (like SVMs).
    """
    check_is_fitted(self, 'nbrs_')
    X = check_array(X)
    ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
                                return_distance=False)
    weights = barycenter_weights(X, self.nbrs_._fit_X[ind], reg=self.reg)
    X_new = np.empty((X.shape[0], self.n_components))
    for i in range(X.shape[0]):
        X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
    return X_new
def _get_affinity_matrix(self, X, Y=None):
    """Calculate the affinity matrix from data.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

        If affinity is "precomputed":
        X : array-like, shape (n_samples, n_samples)
            Interpret X as a precomputed adjacency graph computed from
            samples.

    Returns
    -------
    affinity_matrix, shape (n_samples, n_samples)
    """
    if self.affinity == 'precomputed':
        self.affinity_matrix_ = X
        return self.affinity_matrix_
    if self.affinity == 'nearest_neighbors':
        if sparse.issparse(X):
            warnings.warn('Nearest neighbors affinity currently does not '
                          'support sparse input, falling back to rbf '
                          'affinity')
            self.affinity = 'rbf'
        else:
            self.n_neighbors_ = (self.n_neighbors
                                 if self.n_neighbors is not None
                                 else max(int(X.shape[0] / 10), 1))
            self.affinity_matrix_ = kneighbors_graph(
                X, self.n_neighbors_, include_self=True, n_jobs=self.n_jobs)
            # Currently only symmetric affinity matrices are supported.
            self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
                                           self.affinity_matrix_.T)
            return self.affinity_matrix_
    if self.affinity == 'rbf':
        self.gamma_ = (self.gamma if self.gamma is not None
                       else 1.0 / X.shape[1])
        self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
        return self.affinity_matrix_
    self.affinity_matrix_ = self.affinity(X)
    return self.affinity_matrix_
def fit(self, X, y=None):
    """Fit the model from data in X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

        If affinity is "precomputed":
        X : array-like, shape (n_samples, n_samples)
            Interpret X as a precomputed adjacency graph computed from
            samples.

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    X = check_array(X, ensure_min_samples=2, estimator=self)
    random_state = check_random_state(self.random_state)
    if isinstance(self.affinity, six.string_types):
        if self.affinity not in set(('nearest_neighbors', 'rbf',
                                     'precomputed')):
            raise ValueError("%s is not a valid affinity. Expected "
                             "'precomputed', 'rbf', 'nearest_neighbors' "
                             "or a callable." % self.affinity)
    elif not callable(self.affinity):
        raise ValueError("'affinity' is expected to be an affinity name "
                         "or a callable. Got: %s" % self.affinity)
    affinity_matrix = self._get_affinity_matrix(X)
    self.embedding_ = spectral_embedding(affinity_matrix,
                                         n_components=self.n_components,
                                         eigen_solver=self.eigen_solver,
                                         random_state=random_state)
    return self
def fit_transform(self, X, y=None):
    """Fit the model from data in X and transform X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

        If affinity is "precomputed":
        X : array-like, shape (n_samples, n_samples)
            Interpret X as a precomputed adjacency graph computed from
            samples.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    self.fit(X)
    return self.embedding_
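A short usage example of the public SpectralEmbedding estimator (random toy data):

    import numpy as np
    from sklearn.manifold import SpectralEmbedding

    X = np.random.RandomState(0).rand(40, 6)
    emb = SpectralEmbedding(n_components=2, affinity='rbf').fit_transform(X)
    print(emb.shape)                     # (40, 2)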
def reconstruction_error(self):
    """Compute the reconstruction error for the embedding.

    Returns
    -------
    reconstruction_error : float

    Notes
    -----
    The cost function of an isomap embedding is

        E = frobenius_norm[K(D) - K(D_fit)] / n_samples

    where D is the matrix of distances for the input data X, D_fit is the
    matrix of distances for the output embedding X_fit, and K is the
    isomap kernel:

        K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)
    """
    G = -0.5 * self.dist_matrix_ ** 2
    G_center = KernelCenterer().fit_transform(G)
    evals = self.kernel_pca_.lambdas_
    return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
    """Compute the embedding vectors for data X.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
        Sample data, shape = (n_samples, n_features), in the form of a
        numpy array, precomputed tree, or NearestNeighbors object.

    Returns
    -------
    self : returns an instance of self.
    """
    self._fit_transform(X)
    return self
def fit_transform(self, X, y=None):
    """Fit the model from data in X and transform X.

    Parameters
    ----------
    X : {array-like, sparse matrix, BallTree, KDTree}
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    self._fit_transform(X)
    return self.embedding_
def transform(self, X):
    """Transform X.

    This is implemented by linking the points X into the graph of geodesic
    distances of the training data. First the `n_neighbors` nearest
    neighbors of X are found in the training data, and from these the
    shortest geodesic distances from each point in X to each point in the
    training data are computed in order to construct the kernel. The
    embedding of X is the projection of this kernel onto the embedding
    vectors of the training set.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Returns
    -------
    X_new : array-like, shape (n_samples, n_components)
    """
    X = check_array(X)
    distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
    # Create the graph of shortest distances from X to self.training_data_
    # via the nearest neighbors of X.
    G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
    for i in range(X.shape[0]):
        G_X[i] = np.min(self.dist_matrix_[indices[i]] +
                        distances[i][:, None], 0)
    G_X **= 2
    G_X *= -0.5
    return self.kernel_pca_.transform(G_X)
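A short usage example of the public Isomap estimator these methods implement (random toy data):

    import numpy as np
    from sklearn.manifold import Isomap

    X = np.random.RandomState(0).rand(50, 10)
    iso = Isomap(n_neighbors=5, n_components=2).fit(X)
    print(iso.transform(X[:3]).shape)    # (3, 2)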
def replace(self, name=_void, kind=_void, annotation=_void, default=_void,
            _partial_kwarg=_void):
    """Creates a customized copy of the Parameter."""
    if name is _void:
        name = self._name
    if kind is _void:
        kind = self._kind
    if annotation is _void:
        annotation = self._annotation
    if default is _void:
        default = self._default
    if _partial_kwarg is _void:
        _partial_kwarg = self._partial_kwarg
    return type(self)(name, kind, default=default, annotation=annotation,
                      _partial_kwarg=_partial_kwarg)
def __init__(self, parameters=None, return_annotation=_empty,
             __validate_parameters__=True):
    """Constructs Signature from the given list of Parameter objects and
    'return_annotation'. All arguments are optional."""
    if parameters is None:
        params = OrderedDict()
    elif __validate_parameters__:
        params = OrderedDict()
        top_kind = _POSITIONAL_ONLY
        for idx, param in enumerate(parameters):
            kind = param.kind
            if kind < top_kind:
                msg = 'wrong parameter order: {0} before {1}'
                msg = msg.format(top_kind, param.kind)
                raise ValueError(msg)
            else:
                top_kind = kind
            name = param.name
            if name is None:
                name = str(idx)
                param = param.replace(name=name)
            if name in params:
                msg = 'duplicate parameter name: {0!r}'.format(name)
                raise ValueError(msg)
            params[name] = param
    else:
        params = OrderedDict((param.name, param) for param in parameters)
    self._parameters = params
    self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
    """Constructs Signature for the given python function."""
    if not isinstance(func, types.FunctionType):
        raise TypeError('{0!r} is not a Python function'.format(func))
    Parameter = cls._parameter_cls
    # Parameter information.
    func_code = func.__code__
    pos_count = func_code.co_argcount
    arg_names = func_code.co_varnames
    positional = tuple(arg_names[:pos_count])
    keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
    keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
    annotations = getattr(func, '__annotations__', {})
    defaults = func.__defaults__
    kwdefaults = getattr(func, '__kwdefaults__', None)
    if defaults:
        pos_default_count = len(defaults)
    else:
        pos_default_count = 0
    parameters = []
    # Non-keyword-only parameters without defaults.
    non_default_count = pos_count - pos_default_count
    for name in positional[:non_default_count]:
        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=_POSITIONAL_OR_KEYWORD))
    # Non-keyword-only parameters with defaults.
    for offset, name in enumerate(positional[non_default_count:]):
        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=_POSITIONAL_OR_KEYWORD,
                                    default=defaults[offset]))
    # *args (CO_VARARGS flag).
    if func_code.co_flags & 4:
        name = arg_names[pos_count + keyword_only_count]
        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=_VAR_POSITIONAL))
    # Keyword-only parameters.
    for name in keyword_only:
        default = _empty
        if kwdefaults is not None:
            default = kwdefaults.get(name, _empty)
        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=_KEYWORD_ONLY, default=default))
    # **kwargs (CO_VARKEYWORDS flag).
    if func_code.co_flags & 8:
        index = pos_count + keyword_only_count
        if func_code.co_flags & 4:
            index += 1
        name = arg_names[index]
        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=_VAR_KEYWORD))
    return cls(parameters,
               return_annotation=annotations.get('return', _empty),
               __validate_parameters__=False)
def replace(self, parameters=_void, return_annotation=_void):
    """Creates a customized copy of the Signature.

    Pass 'parameters' and/or 'return_annotation' arguments to override
    them in the new copy.
    """
    if parameters is _void:
        parameters = self.parameters.values()
    if return_annotation is _void:
        return_annotation = self._return_annotation
    return type(self)(parameters, return_annotation=return_annotation)
def _bind(self, args, kwargs, partial=False):
    """Private method. Don't use directly."""
    arguments = OrderedDict()
    parameters = iter(self.parameters.values())
    parameters_ex = ()
    arg_vals = iter(args)
    if partial:
        # Support for binding arguments to 'functools.partial' objects.
        for param_name, param in self.parameters.items():
            if param._partial_kwarg and param_name not in kwargs:
                kwargs[param_name] = param.default
    while True:
        # Iterate through the positional arguments and their
        # corresponding parameters.
        try:
            arg_val = next(arg_vals)
        except StopIteration:
            # No more positional arguments.
            try:
                param = next(parameters)
            except StopIteration:
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # That's OK, just an empty *args.
                    break
                elif param.name in kwargs:
                    if param.kind == _POSITIONAL_ONLY:
                        msg = ('{arg!r} parameter is positional only, '
                               'but was passed as a keyword')
                        msg = msg.format(arg=param.name)
                        raise TypeError(msg)
                    parameters_ex = (param,)
                    break
                elif (param.kind == _VAR_KEYWORD or
                        param.default is not _empty):
                    # That's fine too: we have a default value.
                    parameters_ex = (param,)
                    break
                elif partial:
                    parameters_ex = (param,)
                    break
                else:
                    msg = '{arg!r} parameter lacking default value'
                    msg = msg.format(arg=param.name)
                    raise TypeError(msg)
        else:
            # We have a positional argument to process.
            try:
                param = next(parameters)
            except StopIteration:
                raise TypeError('too many positional arguments')
            else:
                if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                    raise TypeError('too many positional arguments')
                if param.kind == _VAR_POSITIONAL:
                    # An '*args'-like parameter: consume all remaining
                    # positional arguments.
                    values = [arg_val]
                    values.extend(arg_vals)
                    arguments[param.name] = tuple(values)
                    break
                if param.name in kwargs:
                    raise TypeError('multiple values for argument '
                                    '{arg!r}'.format(arg=param.name))
                arguments[param.name] = arg_val
    # Now process the keyword arguments.
    kwargs_param = None
    for param in itertools.chain(parameters_ex, parameters):
        if param.kind == _POSITIONAL_ONLY:
            raise TypeError('{arg!r} parameter is positional only, but was '
                            'passed as a keyword'.format(arg=param.name))
        if param.kind == _VAR_KEYWORD:
            # Memorize the '**kwargs'-like parameter.
            kwargs_param = param
            continue
        param_name = param.name
        try:
            arg_val = kwargs.pop(param_name)
        except KeyError:
            if (not partial and param.kind != _VAR_POSITIONAL and
                    param.default is _empty):
                raise TypeError('{arg!r} parameter lacking default '
                                'value'.format(arg=param_name))
        else:
            arguments[param_name] = arg_val
    if kwargs:
        if kwargs_param is not None:
            # Route the remaining keywords into '**kwargs'.
            arguments[kwargs_param.name] = kwargs
        else:
            raise TypeError('too many keyword arguments')
    return self._bound_arguments_cls(self, arguments)
def bind(self, *args, **kwargs):
    """Get a BoundArguments object that maps the passed `args` and
    `kwargs` to the function's signature. Raises `TypeError` if the
    passed arguments can not be bound."""
    return self._bind(args, kwargs)
def bind_partial(self, *args, **kwargs):
    """Get a BoundArguments object that partially maps the passed `args`
    and `kwargs` to the function's signature. Raises `TypeError` if the
    passed arguments can not be bound."""
    return self._bind(args, kwargs, partial=True)
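These Signature/Parameter classes mirror the standard library API (they are a backport); on Python 3 the same binding behavior is available directly through the inspect module:

    import inspect

    def f(a, b=2, *args, c, **kw):
        pass

    sig = inspect.signature(f)
    print(sig)              # (a, b=2, *args, c, **kw)
    ba = sig.bind(1, c=3)   # raises TypeError if the arguments don't bind
    ba.apply_defaults()
    print(ba.arguments)     # {'a': 1, 'b': 2, 'args': (), 'c': 3, 'kw': {}}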
def __init__(self, hash_name='md5', coerce_mmap=False):
    """
    Parameters
    ----------
    hash_name : string
        The hash algorithm to be used.
    coerce_mmap : boolean
        Make no difference between np.memmap and np.ndarray objects.
    """
    self.coerce_mmap = coerce_mmap
    Hasher.__init__(self, hash_name=hash_name)
    # Delayed import of numpy, to avoid tight coupling.
    import numpy as np
    self.np = np
    if hasattr(np, 'getbuffer'):
        self._getbuffer = np.getbuffer
    else:
        self._getbuffer = memoryview
def save(self, obj):
    """Subclass the save method, to hash ndarray subclasses rather than
    pickling them. Of course, this is a total abuse of the Pickler class."""
    if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
        # The hash update function requires a C-contiguous buffer.
        if obj.shape == ():
            # 0d arrays need to be flattened before viewing as bytes.
            obj_c_contiguous = obj.flatten()
        elif obj.flags.c_contiguous:
            obj_c_contiguous = obj
        elif obj.flags.f_contiguous:
            obj_c_contiguous = obj.T
        else:
            # Non-contiguous arrays: flatten (makes a contiguous copy).
            obj_c_contiguous = obj.flatten()
        self._hash.update(
            self._getbuffer(obj_c_contiguous.view(self.np.uint8)))
        # Also hash the class, dtype and layout, to distinguish objects
        # with the same binary content but different types.
        if self.coerce_mmap and isinstance(obj, self.np.memmap):
            # Hash memmaps as regular ndarrays when requested.
            klass = self.np.ndarray
        else:
            klass = obj.__class__
        obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))
    elif isinstance(obj, self.np.dtype):
        klass = obj.__class__
        obj = (klass, ('HASHED', obj.descr))
    Hasher.save(self, obj)
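This hasher powers joblib's public hashing helper; a quick illustration:

    import numpy as np
    import joblib

    a = np.arange(10)
    print(joblib.hash(a))                    # md5 digest by default
    print(joblib.hash(a, hash_name='sha1'))  # alternative hash algorithm
    # coerce_mmap=True makes np.memmap arrays hash like plain ndarrays.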
def close(self):
    """Flush and close the file.

    May be called more than once without error. Once the file is closed,
    any other operation on it will raise a ValueError.
    """
    with self._lock:
        if self._mode == _MODE_CLOSED:
            return
        try:
            if self._mode in (_MODE_READ, _MODE_READ_EOF):
                self._decompressor = None
            elif self._mode == _MODE_WRITE:
                self._fp.write(self._compressor.flush())
                self._compressor = None
        finally:
            try:
                if self._closefp:
                    self._fp.close()
            finally:
                self._fp = None
                self._closefp = False
                self._mode = _MODE_CLOSED
                self._buffer = ''
                self._buffer_offset = 0
@property
def closed(self):
    """True if this file is closed."""
    return self._mode == _MODE_CLOSED
def fileno(self):
    """Return the file descriptor for the underlying file."""
    self._check_not_closed()
    return self._fp.fileno()
def seekable(self):
    """Return whether the file supports seeking."""
    return self.readable() and self._fp.seekable()
def readable(self):
    """Return whether the file was opened for reading."""
    self._check_not_closed()
    return self._mode in (_MODE_READ, _MODE_READ_EOF)
def writable(self):
    """Return whether the file was opened for writing."""
    self._check_not_closed()
    return self._mode == _MODE_WRITE
def read(self, size=-1):
    """Read up to size uncompressed bytes from the file.

    If size is negative or omitted, read until EOF is reached.
    Returns b'' if the file is already at EOF.
    """
    with self._lock:
        self._check_can_read()
        if size == 0:
            return ''
        elif size < 0:
            return self._read_all()
        else:
            return self._read_block(size)
def readinto(self, b):
    """Read up to len(b) bytes into b.

    Returns the number of bytes read (0 for EOF).
    """
    with self._lock:
        return io.BufferedIOBase.readinto(self, b)
def write(self, data):
    """Write a byte string to the file.

    Returns the number of uncompressed bytes written, which is always
    len(data). Note that due to buffering, the file on disk may not
    reflect the data written until close() is called.
    """
    with self._lock:
        self._check_can_write()
        if isinstance(data, memoryview):
            data = data.tobytes()
        compressed = self._compressor.compress(data)
        self._fp.write(compressed)
        self._pos += len(data)
        return len(data)
def seek(self, offset, whence=0):
    """Change the file position.

    The new position is specified by offset, relative to the position
    indicated by whence. Values for whence are:

        0: start of stream (default); offset must not be negative
        1: current stream position
        2: end of stream; offset must not be positive

    Returns the new file position.

    Note that seeking is emulated, so depending on the parameters, this
    operation may be extremely slow.
    """
    with self._lock:
        self._check_can_seek()
        # Recalculate offset as an absolute file position.
        if whence == 0:
            pass
        elif whence == 1:
            offset = self._pos + offset
        elif whence == 2:
            # Seeking relative to EOF: we need to know the file's size.
            if self._size < 0:
                self._read_all(return_data=False)
            offset = self._size + offset
        else:
            raise ValueError('Invalid value for whence: %s' % (whence,))
        # Make offset the number of bytes to skip forward.
        if offset < self._pos:
            self._rewind()
        else:
            offset -= self._pos
        # Read and discard data until we reach the desired position.
        self._read_block(offset, return_data=False)
        return self._pos
def tell(self):
    """Return the current file position."""
    with self._lock:
        self._check_not_closed()
        return self._pos
def _initialize_backend(self):
    """Build a process or thread pool and return the number of workers."""
    try:
        n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
                                         **self._backend_args)
        if self.timeout is not None and not self._backend.supports_timeout:
            warnings.warn("The backend class {!r} does not support timeout. "
                          "You have set 'timeout={}' in Parallel but the "
                          "'timeout' parameter will not be used.".format(
                              self._backend.__class__.__name__,
                              self.timeout))
    except FallbackToBackend as e:
        # Recursively initialize the backend in case of requested fallback.
        self._backend = e.backend
        n_jobs = self._initialize_backend()
    return n_jobs
def _dispatch(self, batch):
    """Queue the batch for computing, with or without multiprocessing.

    WARNING: this method is not thread-safe: it should be only called
    indirectly via dispatch_one_batch.
    """
    if self._aborting:
        return
    self.n_dispatched_tasks += len(batch)
    self.n_dispatched_batches += 1
    dispatch_timestamp = time.time()
    cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
    job = self._backend.apply_async(batch, callback=cb)
    self._jobs.append(job)
def dispatch_next(self):
    """Dispatch more data for parallel processing.

    This method is meant to be called concurrently by the multiprocessing
    callback. We rely on the thread-safety of dispatch_one_batch to
    protect against concurrent consumption of the unprotected iterator.
    """
    if not self.dispatch_one_batch(self._original_iterator):
        self._iterating = False
        self._original_iterator = None
def dispatch_one_batch(self, iterator):
    """Prefetch the tasks for the next batch and dispatch them.

    The effective size of the batch is computed here. If there are no
    more jobs to dispatch, return False, else return True.

    The iterator consumption and dispatching is protected by the same
    lock, so calling this function should be thread safe.
    """
    if self.batch_size == 'auto':
        batch_size = self._backend.compute_batch_size()
    else:
        # Fixed batch size strategy.
        batch_size = self.batch_size
    with self._lock:
        tasks = BatchedCalls(itertools.islice(iterator, batch_size))
        if len(tasks) == 0:
            # No more tasks available in the iterator: tell caller to stop.
            return False
        else:
            self._dispatch(tasks)
            return True
def _print(self, msg, msg_args):
    """Display the message on stdout or stderr depending on verbosity."""
    if not self.verbose:
        return
    if self.verbose < 50:
        writer = sys.stderr.write
    else:
        writer = sys.stdout.write
    msg = msg % msg_args
    writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
    """Display the progress of the parallel execution only a fraction of
    the time, controlled by self.verbose."""
    if not self.verbose:
        return
    elapsed_time = time.time() - self._start_time
    # The original job iterator becoming None is a sign that we have
    # finished dispatching tasks.
    if self._original_iterator is not None:
        if _verbosity_filter(self.n_dispatched_batches, self.verbose):
            return
        self._print('Done %3i tasks | elapsed: %s',
                    (self.n_completed_tasks,
                     short_format_time(elapsed_time)))
    else:
        index = self.n_completed_tasks
        total_tasks = self.n_dispatched_tasks
        # We always display the first loop.
        if not index == 0:
            # Display depending on the number of remaining items: print a
            # message as soon as we finish dispatching (cursor is 0).
            cursor = (total_tasks - index + 1 - self._pre_dispatch_amount)
            frequency = (total_tasks // self.verbose) + 1
            is_last_item = (index + 1 == total_tasks)
            if is_last_item or cursor % frequency:
                return
        remaining_time = ((elapsed_time / index) *
                          (self.n_dispatched_tasks - index * 1.0))
        self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
                    (index, total_tasks,
                     short_format_time(elapsed_time),
                     short_format_time(remaining_time)))
def register(self, type, reduce_func):
    """Attach a reducer function to a given type in the dispatch table."""
    if hasattr(Pickler, 'dispatch'):
        # Python 2 pickler: the dispatch table is a class-level mapping
        # from types to saver methods.
        def dispatcher(self, obj):
            reduced = reduce_func(obj)
            self.save_reduce(obj=obj, *reduced)
        self.dispatch[type] = dispatcher
    else:
        # Python 3 pickler: use the per-instance dispatch_table.
        self.dispatch_table[type] = reduce_func
def __init__(self, subclass, shape, order, dtype, allow_mmap=False):
    """Constructor. Store the useful information for later."""
    self.subclass = subclass
    self.shape = shape
    self.order = order
    self.dtype = dtype
    self.allow_mmap = allow_mmap
def write_array(self, array, pickler):
    """Write array bytes to pickler file handle.

    This function is an adaptation of the numpy write_array function
    available in version 1.10.1 in numpy/lib/format.py.
    """
    # Set the buffer size to 16 MiB to hide the Python loop overhead.
    buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
    if array.dtype.hasobject:
        # The array contains Python objects, so the data cannot be
        # written out directly; pickle it with protocol 2 instead.
        pickle.dump(array, pickler.file_handle, protocol=2)
    else:
        for chunk in pickler.np.nditer(array,
                                       flags=['external_loop',
                                              'buffered',
                                              'zerosize_ok'],
                                       buffersize=buffersize,
                                       order=self.order):
            pickler.file_handle.write(chunk.tostring('C'))
def read_array(self, unpickler):
    """Read array from unpickler file handle.

    This function is an adaptation of the numpy read_array function
    available in version 1.10.1 in numpy/lib/format.py.
    """
    if len(self.shape) == 0:
        count = 1
    else:
        count = unpickler.np.multiply.reduce(self.shape)
    if self.dtype.hasobject:
        # The array contained Python objects; unpickle the data.
        array = pickle.load(unpickler.file_handle)
    else:
        if (not PY3_OR_LATER and
                unpickler.np.compat.isfileobj(unpickler.file_handle)):
            # In Python 2, gzip.GzipFile is considered a file, so
            # numpy.fromfile() can be used directly.
            array = unpickler.np.fromfile(unpickler.file_handle,
                                          dtype=self.dtype, count=count)
        else:
            # Read the bytes in chunks to avoid allocating the whole
            # buffer at once.
            max_read_count = (BUFFER_SIZE //
                              min(BUFFER_SIZE, self.dtype.itemsize))
            array = unpickler.np.empty(count, dtype=self.dtype)
            for i in range(0, count, max_read_count):
                read_count = min(max_read_count, count - i)
                read_size = int(read_count * self.dtype.itemsize)
                data = _read_bytes(unpickler.file_handle, read_size,
                                   'array data')
                array[i:i + read_count] = unpickler.np.frombuffer(
                    data, dtype=self.dtype, count=read_count)
                del data
        if self.order == 'F':
            array.shape = self.shape[::-1]
            array = array.transpose()
        else:
            array.shape = self.shape
    return array
def read_mmap(self, unpickler):
    """Read an array using numpy memmap."""
    offset = unpickler.file_handle.tell()
    if unpickler.mmap_mode == 'w+':
        unpickler.mmap_mode = 'r+'
    marray = make_memmap(unpickler.filename, dtype=self.dtype,
                         shape=self.shape, order=self.order,
                         mode=unpickler.mmap_mode, offset=offset)
    # Advance the file handle past the memmapped data.
    unpickler.file_handle.seek(offset + marray.nbytes)
    return marray
def read(self, unpickler):
    """Read the array corresponding to this wrapper.

    Use the unpickler to get all information to correctly read the array.

    Parameters
    ----------
    unpickler : NumpyUnpickler

    Returns
    -------
    array : numpy.ndarray
    """
    # Only use memmap mode if requested and allowed.
    if unpickler.mmap_mode is not None and self.allow_mmap:
        array = self.read_mmap(unpickler)
    else:
        array = self.read_array(unpickler)
    # Manage the array subclass case.
    if (hasattr(array, '__array_prepare__') and
            self.subclass not in (unpickler.np.ndarray,
                                  unpickler.np.memmap)):
        # We need to reconstruct another subclass.
        new_array = unpickler.np.core.multiarray._reconstruct(
            self.subclass, (0,), 'b')
        return new_array.__array_prepare__(array)
    else:
        return array
def _create_array_wrapper(self, array):
    """Create and return a numpy array wrapper from a numpy array."""
    order = ('F' if (array.flags.f_contiguous and
                     not array.flags.c_contiguous) else 'C')
    allow_mmap = not self.buffered and not array.dtype.hasobject
    wrapper = NumpyArrayWrapper(type(array), array.shape, order,
                                array.dtype, allow_mmap=allow_mmap)
    return wrapper
def save(self, obj):
    """Subclass the Pickler `save` method.

    This is a total abuse of the Pickler class in order to use the numpy
    persistence function `save` instead of the default pickle
    implementation. The numpy array is replaced by a custom wrapper in
    the pickle persistence stack and the serialized array is written
    right after in the file. Warning: the file produced does not follow
    the pickle format. As such it can not be read with `pickle.load`.
    """
    if self.np is not None and type(obj) in (self.np.ndarray,
                                             self.np.matrix,
                                             self.np.memmap):
        if type(obj) is self.np.memmap:
            # Pickling doesn't work with memmapped arrays.
            obj = self.np.asanyarray(obj)
        # The array wrapper is pickled instead of the real array.
        wrapper = self._create_array_wrapper(obj)
        Pickler.save(self, wrapper)
        # A framer was introduced with pickle protocol 4; we want the
        # wrapper object to be written before the numpy array buffer in
        # the pickle file.
        if self.proto >= 4:
            self.framer.commit_frame(force=True)
        # The array bytes are written right after the wrapper.
        wrapper.write_array(obj, self)
        return
    return Pickler.save(self, obj)
def load_build(self):
    """Called to set the state of a newly created object.

    We capture it to replace our place-holder objects, NDArrayWrapper or
    NumpyArrayWrapper, by the array we are interested in. We replace them
    directly in the stack of pickler. NDArrayWrapper is used for backward
    compatibility with joblib <= 0.9.
    """
    Unpickler.load_build(self)
    if isinstance(self.stack[-1], (NDArrayWrapper, NumpyArrayWrapper)):
        if self.np is None:
            raise ImportError("Trying to unpickle an ndarray, but numpy "
                              "didn't import correctly")
        array_wrapper = self.stack.pop()
        # If an NDArrayWrapper is found, switch to compatibility mode.
        if isinstance(array_wrapper, NDArrayWrapper):
            self.compat_mode = True
        self.stack.append(array_wrapper.read(self))
def configure(self, n_jobs=1, parallel=None, **backend_args):
    """Reconfigure the backend and return the number of workers.

    This makes it possible to reuse an existing backend instance for
    successive independent calls to Parallel with different parameters.
    """
    self.parallel = parallel
    return self.effective_n_jobs(n_jobs)
def compute_batch_size(self):
    """Determine the optimal batch size."""
    return 1
def get_exceptions(self):
    """List of exception types to be captured."""
    return []
def abort_everything(self, ensure_ready=True):
    """Abort any running tasks.

    This is called when an exception has been raised when executing a
    task and all the remaining tasks will be ignored; they can therefore
    be aborted to spare computation resources.

    If ensure_ready is True, the backend should be left in an operating
    state as future tasks might be re-submitted via that same backend
    instance.

    If ensure_ready is False, the implementer of this method can decide
    to leave the backend in a closed / terminated state as no new tasks
    are expected to be submitted to this backend.

    Setting ensure_ready to False is an optimization that can be
    leveraged when aborting tasks by killing processes from a local
    process pool managed by the backend itself: if we expect no new
    tasks, there is no point in re-creating a new working pool.
    """
    pass
def effective_n_jobs(self, n_jobs):
    """Determine the number of jobs which are going to run in parallel."""
    if n_jobs == 0:
        raise ValueError('n_jobs == 0 in Parallel has no meaning')
    return 1
def apply_async(self, func, callback=None):
    """Schedule a func to be run."""
    result = ImmediateResult(func)
    if callback:
        callback(result)
    return result
def effective_n_jobs(self, n_jobs):
    """Determine the number of jobs which are going to run in parallel."""
    if n_jobs == 0:
        raise ValueError('n_jobs == 0 in Parallel has no meaning')
    elif mp is None or n_jobs is None:
        # Multiprocessing is not available or disabled: fall back to
        # sequential mode.
        return 1
    elif n_jobs < 0:
        n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
    return n_jobs
def terminate(self):
    """Shutdown the process or thread pool."""
    if self._pool is not None:
        self._pool.close()
        self._pool.terminate()
        self._pool = None
def apply_async(self, func, callback=None):
    """Schedule a func to be run."""
    return self._pool.apply_async(SafeFunction(func), callback=callback)
def abort_everything(self, ensure_ready=True):
    """Shutdown the pool and restart a new one with the same parameters."""
    self.terminate()
    if ensure_ready:
        self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel,
                       **self.parallel._backend_args)
def compute_batch_size(self):
    """Determine the optimal batch size."""
    old_batch_size = self._effective_batch_size
    batch_duration = self._smoothed_batch_duration
    if 0 < batch_duration < self.MIN_IDEAL_BATCH_DURATION:
        # The current batch size is too small: batches complete too
        # quickly for their duration to hide the scheduling overhead.
        ideal_batch_size = int(old_batch_size *
                               self.MIN_IDEAL_BATCH_DURATION /
                               batch_duration)
        # Multiply by two to limit oscillations between min and max.
        batch_size = max(2 * ideal_batch_size, 1)
        self._effective_batch_size = batch_size
        if self.parallel.verbose >= 10:
            self.parallel._print(
                'Batch computation too fast (%.4fs.) '
                'Setting batch_size=%d.', (batch_duration, batch_size))
    elif (batch_duration > self.MAX_IDEAL_BATCH_DURATION and
            old_batch_size >= 2):
        # The current batch size is too big: long-running batches risk
        # leaving some workers idle near the end, so halve it.
        batch_size = old_batch_size // 2
        self._effective_batch_size = batch_size
        if self.parallel.verbose >= 10:
            self.parallel._print(
                'Batch computation too slow (%.4fs.) '
                'Setting batch_size=%d.', (batch_duration, batch_size))
    else:
        # No batch size adjustment.
        batch_size = old_batch_size
    if batch_size != old_batch_size:
        # Reset the smoothed mean batch duration estimate: it is only
        # meaningful while the batch size stays constant.
        self._smoothed_batch_duration = 0
    return batch_size
def batch_completed(self, batch_size, duration):
    """Callback indicating how long it took to run a batch."""
    if batch_size == self._effective_batch_size:
        # Update the streaming estimate of the batch duration.
        old_duration = self._smoothed_batch_duration
        if old_duration == 0:
            # First record of duration for this batch size after the
            # last reset.
            new_duration = duration
        else:
            # Exponentially weighted moving average of the duration.
            new_duration = 0.8 * old_duration + 0.2 * duration
        self._smoothed_batch_duration = new_duration
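A standalone sketch of the smoothing rule used by batch_completed (hypothetical function name): an exponentially weighted moving average that weighs history at 0.8 and the newest observation at 0.2.

    def smoothed_duration(old_duration, duration):
        # The first observation seeds the average; later ones blend in.
        if old_duration == 0:
            return duration
        return 0.8 * old_duration + 0.2 * duration

    # compute_batch_size then grows the batch while this smoothed duration
    # stays under MIN_IDEAL_BATCH_DURATION and halves it above
    # MAX_IDEAL_BATCH_DURATION.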
def configure(self, n_jobs=1, parallel=None, **backend_args):
    """Build a process or thread pool and return the number of workers."""
    n_jobs = self.effective_n_jobs(n_jobs)
    if n_jobs == 1:
        # Avoid unnecessary overhead and use the sequential backend.
        raise FallbackToBackend(SequentialBackend())
    self.parallel = parallel
    self._pool = ThreadPool(n_jobs)
    return n_jobs
def effective_n_jobs(self, n_jobs):
    """Determine the number of jobs which are going to run in parallel.

    This also checks if we are attempting to create a nested parallel
    loop.
    """
    if mp is None:
        return 1
    if mp.current_process().daemon:
        # Daemonic processes cannot have children.
        if n_jobs != 1:
            warnings.warn('Multiprocessing-backed parallel loops cannot '
                          'be nested, setting n_jobs=1', stacklevel=3)
        return 1
    if not isinstance(threading.current_thread(), threading._MainThread):
        warnings.warn('Multiprocessing-backed parallel loops cannot be '
                      'nested below threads, setting n_jobs=1',
                      stacklevel=3)
        return 1
    return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs)
def configure(self, n_jobs=1, parallel=None, **backend_args):
    """Build a process or thread pool and return the number of workers."""
    n_jobs = self.effective_n_jobs(n_jobs)
    if n_jobs == 1:
        raise FallbackToBackend(SequentialBackend())
    already_forked = int(os.environ.get(self.JOBLIB_SPAWNED_PROCESS, 0))
    if already_forked:
        raise ImportError(
            '[joblib] Attempting to do parallel computing without '
            'protecting your import on a system that does not support '
            'forking. To use parallel-computing in a script, you must '
            'protect your main loop using "if __name__ == \'__main__\'". '
            'Please see the joblib documentation on Parallel for more '
            'information')
    # Set an environment variable to avoid infinite loops.
    os.environ[self.JOBLIB_SPAWNED_PROCESS] = '1'
    # Make sure to free as much memory as possible before forking.
    gc.collect()
    self._pool = MemmapingPool(n_jobs, **backend_args)
    self.parallel = parallel
    return n_jobs
def terminate(self):
    """Shutdown the process or thread pool."""
    super(MultiprocessingBackend, self).terminate()
    if self.JOBLIB_SPAWNED_PROCESS in os.environ:
        del os.environ[self.JOBLIB_SPAWNED_PROCESS]
def __init__(self, filename, subclass, allow_mmap=True):
    """Constructor. Store the useful information for later."""
    self.filename = filename
    self.subclass = subclass
    self.allow_mmap = allow_mmap
def read(self, unpickler):
    """Reconstruct the array."""
    filename = os.path.join(unpickler._dirname, self.filename)
    # Use getattr instead of self.allow_mmap to ensure backward
    # compatibility with NDArrayWrapper instances pickled with
    # joblib < 0.9.0.
    allow_mmap = getattr(self, 'allow_mmap', True)
    memmap_kwargs = ({} if not allow_mmap
                     else {'mmap_mode': unpickler.mmap_mode})
    array = unpickler.np.load(filename, **memmap_kwargs)
    # Reconstruct subclasses. This does not work with old versions of
    # numpy.
    if (hasattr(array, '__array_prepare__') and
            self.subclass not in (unpickler.np.ndarray,
                                  unpickler.np.memmap)):
        # We need to reconstruct another subclass.
        new_array = unpickler.np.core.multiarray._reconstruct(
            self.subclass, (0,), 'b')
        return new_array.__array_prepare__(array)
    else:
        return array
def __init__(self, filename, init_args, state):
    """Constructor. Store the useful information for later."""
    self.filename = filename
    self.state = state
    self.init_args = init_args
def read(self, unpickler):
    """Reconstruct the array from the meta-information and the z-file."""
    filename = os.path.join(unpickler._dirname, self.filename)
    array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
    with open(filename, 'rb') as f:
        data = read_zfile(f)
    state = self.state + (data,)
    array.__setstate__(state)
    return array
def __init__(self, filename, file_handle, mmap_mode=None):
    """Constructor."""
    self._filename = os.path.basename(filename)
    self._dirname = os.path.dirname(filename)
    self.mmap_mode = mmap_mode
    self.file_handle = self._open_pickle(file_handle)
    Unpickler.__init__(self, self.file_handle)
    try:
        import numpy as np
    except ImportError:
        np = None
    self.np = np
def load_build(self):
    """Set the state of a newly created object.

    We capture it to replace our place-holder objects, NDArrayWrapper, by
    the array we are interested in. We replace them directly in the stack
    of pickler.
    """
    Unpickler.load_build(self)
    if isinstance(self.stack[-1], NDArrayWrapper):
        if self.np is None:
            raise ImportError("Trying to unpickle an ndarray, but numpy "
                              "didn't import correctly")
        nd_array_wrapper = self.stack.pop()
        array = nd_array_wrapper.read(self)
        self.stack.append(array)
def __init__(self, depth=3):
    """
    Parameters
    ----------
    depth : int, optional
        The depth of objects printed.
    """
    self.depth = depth
def format(self, obj, indent=0):
    """Return the formatted representation of the object."""
    return pformat(obj, indent=indent, depth=self.depth)
def __call__(self, msg='', total=False):
    """Print the time elapsed between the last call and the current call,
    with an optional message."""
    if not total:
        time_lapse = time.time() - self.last_time
        full_msg = '%s: %s' % (msg, format_time(time_lapse))
    else:
        time_lapse = time.time() - self.start_time
        full_msg = '%s: %.2fs, %.1f min' % (msg, time_lapse,
                                            time_lapse / 60)
    print(full_msg, file=sys.stderr)
    if self.logfile is not None:
        try:
            with open(self.logfile, 'a') as f:
                print(full_msg, file=f)
        except:
            # Multiprocessing writing to files can create race conditions.
            # Rather fail silently than crash the calculation.
            pass
    self.last_time = time.time()
def get(self):
    """Read value from cache and return it."""
    return _load_output(self._output_dir, _get_func_fullname(self.func),
                        timestamp=self.timestamp, metadata=self.metadata,
                        mmap_mode=self.mmap_mode, verbose=self.verbose)
def clear(self):
    """Clear value from cache."""
    shutil.rmtree(self._output_dir, ignore_errors=True)
def __init__(self, func, cachedir, ignore=None, mmap_mode=None,
             compress=False, verbose=1, timestamp=None):
    """
    Parameters
    ----------
    func : callable
        The function to decorate.
    cachedir : string
        The path of the base directory to use as a data store.
    ignore : list or None
        List of variable names to ignore.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        The memmapping mode used when loading numpy arrays from cache.
        See numpy.load for the meaning of the arguments.
    compress : boolean, or integer
        Whether to zip the stored data on disk. If an integer is given,
        it should be between 1 and 9, and sets the amount of compression.
        Note that compressed arrays cannot be read by memmapping.
    verbose : int, optional
        Verbosity flag; controls the debug messages that are issued as
        functions are evaluated. The higher, the more verbose.
    timestamp : float, optional
        The reference time from which times in tracing messages are
        reported.
    """
    Logger.__init__(self)
    self.mmap_mode = mmap_mode
    self.func = func
    if ignore is None:
        ignore = []
    self.ignore = ignore
    self._verbose = verbose
    self.cachedir = cachedir
    self.compress = compress
    if compress and self.mmap_mode is not None:
        warnings.warn('Compressed results cannot be memmapped',
                      stacklevel=2)
    if timestamp is None:
        timestamp = time.time()
    self.timestamp = timestamp
    mkdirp(self.cachedir)
    try:
        functools.update_wrapper(self, func)
    except:
        # Objects like ufunc don't like that.
        pass
    if inspect.isfunction(func):
        doc = pydoc.TextDoc().document(func)
        doc = doc.replace('\n', '\n\n', 1)
        doc = re.sub('\x08.', '', doc)
    else:
        doc = func.__doc__
    self.__doc__ = 'Memoized version of %s' % doc
def _cached_call(self, args, kwargs):
    """Call wrapped function and cache result, or read cache if available.

    This function returns the wrapped function output and some metadata.

    Returns
    -------
    output : value or tuple
        What is returned by the wrapped function.
    argument_hash : string
        Hash of the function arguments.
    metadata : dict
        Some metadata about the wrapped function call
        (see _persist_input()).
    """
    output_dir, argument_hash = self._get_output_dir(*args, **kwargs)
    metadata = None
    output_pickle_path = os.path.join(output_dir, 'output.pkl')
    if not (self._check_previous_func_code(stacklevel=4) and
            os.path.isfile(output_pickle_path)):
        if self._verbose > 10:
            _, name = get_func_name(self.func)
            self.warn('Computing func %s, argument hash %s in directory %s'
                      % (name, argument_hash, output_dir))
        out, metadata = self.call(*args, **kwargs)
        if self.mmap_mode is not None:
            # Memmap the output at the first call to be consistent with
            # later calls.
            out = _load_output(output_dir, _get_func_fullname(self.func),
                               timestamp=self.timestamp,
                               mmap_mode=self.mmap_mode,
                               verbose=self._verbose)
    else:
        try:
            t0 = time.time()
            out = _load_output(output_dir, _get_func_fullname(self.func),
                               timestamp=self.timestamp,
                               metadata=metadata,
                               mmap_mode=self.mmap_mode,
                               verbose=self._verbose)
            if self._verbose > 4:
                t = time.time() - t0
                _, name = get_func_name(self.func)
                msg = '%s cache loaded - %s' % (name, format_time(t))
                print(max(0, (80 - len(msg))) * '_' + msg)
        except Exception:
            # Loading the cached result failed: recompute.
            _, signature = format_signature(self.func, *args, **kwargs)
            self.warn('Exception while loading results for {}\n {}'.format(
                signature, traceback.format_exc()))
            out, metadata = self.call(*args, **kwargs)
            argument_hash = None
    return (out, argument_hash, metadata)
def call_and_shelve(self, *args, **kwargs):
    """Call wrapped function, cache result and return a reference.

    This method returns a reference to the cached result instead of the
    result itself. The reference object is small and pickleable, making
    it easy to send or store. Call .get() on the reference object to get
    the result.

    Returns
    -------
    cached_result : MemorizedResult or NotMemorizedResult
        Reference to the value returned by the wrapped function. The
        class "NotMemorizedResult" is used when there is no cache
        activated (e.g. cachedir=None in Memory).
    """
    _, argument_hash, metadata = self._cached_call(args, kwargs)
    return MemorizedResult(self.cachedir, self.func, argument_hash,
                           metadata=metadata, verbose=self._verbose - 1,
                           timestamp=self.timestamp)
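For context, how call_and_shelve is reached through joblib's public Memory API (a temporary cache directory is used for the example):

    import tempfile
    from joblib import Memory

    memory = Memory(tempfile.mkdtemp(), verbose=0)

    @memory.cache
    def square(x):
        return x ** 2

    ref = square.call_and_shelve(4)  # MemorizedResult, cheap to pickle
    print(ref.get())                 # 16, loaded from the cache
    ref.clear()                      # drop this entry from the cache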
def __reduce__(self):
    """We don't store the timestamp when pickling, to avoid the hash
    depending on it. In addition, when unpickling, we run __init__."""
    return (self.__class__, (self.func, self.cachedir, self.ignore,
                             self.mmap_mode, self.compress, self._verbose))
def _get_output_dir(self, *args, **kwargs):
    """Return the directory in which the results of the function called
    with the given arguments are persisted."""
    argument_hash = self._get_argument_hash(*args, **kwargs)
    output_dir = os.path.join(self._get_func_dir(self.func),
                              argument_hash)
    return output_dir, argument_hash
def _get_func_dir(self, mkdir=True):
    """Get the directory corresponding to the cache for the function."""
    func_dir = _cache_key_to_dir(self.cachedir, self.func, None)
    if mkdir:
        mkdirp(func_dir)
    return func_dir
def _hash_func(self):
    """Hash a function to key the online cache."""
    func_code_h = hash(getattr(self.func, '__code__', None))
    return id(self.func), hash(self.func), func_code_h