text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def BSR_Row_WriteScalar(A, i, x):
    """Write a scalar at each nonzero location in row i of BSR matrix A.

    Parameters
    ----------
    A : bsr_matrix
        Input matrix.
    i : int
        Row number.
    x : float
        Scalar written over every nonzero of row i.

    Returns
    -------
    None.  A is modified in place: all nonzeros in row i are overwritten
    with x.
    """
    bs = A.blocksize[0]
    # Which block-row holds row i, and the row's offset inside that block.
    block_row, local_row = divmod(i, bs)
    start = A.indptr[block_row]
    stop = A.indptr[block_row + 1]

    # Basic slicing yields a view into A.data, so fancy-index assignment
    # below writes straight through to the matrix storage.
    row_view = A.data[start:stop, local_row, :]
    nz_blocks, nz_cols = row_view.nonzero()
    row_view[nz_blocks, nz_cols] = x
def BSR_Row_WriteVect(A, i, x):
    """Overwrite the nonzeros in row i of BSR matrix A with the vector x.

    len(x) must equal nnz(A[i, :]); this is guaranteed when x comes from
    a matching Get_BSR_Row call.

    Parameters
    ----------
    A : bsr_matrix
        Matrix assumed to be in BSR format.
    i : int
        Row number.
    x : array
        Values written over the nonzeros of row i, in order.

    Returns
    -------
    None.  A is modified in place.
    """
    bs = A.blocksize[0]
    block_row, local_row = divmod(i, bs)
    start = A.indptr[block_row]
    stop = A.indptr[block_row + 1]

    # MATLAB-style flattening of x to a 1-D vector.
    x = x.__array__().reshape((max(x.shape),))

    # View into A.data covering row i across all stored blocks; the fancy
    # assignment writes through to the matrix storage.
    row_view = A.data[start:stop, local_row, :]
    nz_blocks, nz_cols = row_view.nonzero()
    row_view[nz_blocks, nz_cols] = x
def direct_interpolation(A, C, splitting):
    """Create prolongator using direct interpolation.

    Parameters
    ----------
    A : csr_matrix
        NxN matrix in CSR format.
    C : csr_matrix
        Strength-of-connection matrix; must have zero diagonal.
    splitting : array
        C/F splitting stored in an array of length N.

    Returns
    -------
    P : csr_matrix
        Prolongator using direct interpolation.
    """
    if not isspmatrix_csr(A):
        raise TypeError('expected csr_matrix for A')
    if not isspmatrix_csr(C):
        raise TypeError('expected csr_matrix for C')

    # Interpolation weights come from the entries of A, restricted to the
    # sparsity pattern of C: set C's values to 1 and multiply elementwise.
    C = C.copy()
    C.data[:] = 1.0
    C = C.multiply(A)

    n = A.shape[0]

    # Pass 1: count nonzeros per row of P, building its row pointer.
    Pp = np.empty_like(A.indptr)
    amg_core.rs_direct_interpolation_pass1(n, C.indptr, C.indices,
                                           splitting, Pp)

    # Pass 2: fill in column indices and interpolation weights.
    nnz = Pp[-1]
    Pj = np.empty(nnz, dtype=Pp.dtype)
    Px = np.empty(nnz, dtype=A.dtype)
    amg_core.rs_direct_interpolation_pass2(n,
                                           A.indptr, A.indices, A.data,
                                           C.indptr, C.indices, C.data,
                                           splitting,
                                           Pp, Pj, Px)

    return csr_matrix((Px, Pj, Pp))
def apply_givens(Q, v, k):
    """Apply the first k Givens rotations in Q to v.

    Parameters
    ----------
    Q : list
        List of consecutive 2x2 Givens rotations.
    v : array
        Vector to apply the rotations to; modified in place.
    k : int
        Number of rotations to apply.

    Notes
    -----
    Specialized for GMRES: rotation j acts on entries j and j+1 of v.
    """
    for idx, rot in enumerate(Q[:k]):
        v[idx:idx + 2] = np.dot(rot, v[idx:idx + 2])
def diffusion_stencil_2d(epsilon=1.0, theta=0.0, type='FE'):
    """Rotated anisotropic diffusion stencil in 2D.

    Discretizes -div Q A Q^T grad u with
    Q = [[cos(theta), -sin(theta)], [sin(theta), cos(theta)]] and
    A = [[1, 0], [0, epsilon]].

    Parameters
    ----------
    epsilon : float, optional
        Anisotropic diffusion coefficient; epsilon=1.0 is isotropic.
    theta : float, optional
        Rotation angle in radians.
    type : {'FE', 'FD'}
        Q1 finite element ('FE') or second-order finite difference ('FD')
        discretization.

    Returns
    -------
    stencil : numpy array
        A 3x3 diffusion stencil.

    Raises
    ------
    ValueError
        If `type` is neither 'FE' nor 'FD'.

    See Also
    --------
    stencil_grid, poisson
    """
    eps = float(epsilon)  # for brevity
    theta = float(theta)

    C = np.cos(theta)
    S = np.sin(theta)
    CS = C * S
    CC = C ** 2
    SS = S ** 2

    if type == 'FE':
        # FE approximation to:
        #   -(eps c^2 + s^2) u_xx - 2(eps - 1) c s u_xy - (c^2 + eps s^2) u_yy
        a = (-1 * eps - 1) * CC + (-1 * eps - 1) * SS + (3 * eps - 3) * CS
        b = (2 * eps - 4) * CC + (-4 * eps + 2) * SS
        c = (-1 * eps - 1) * CC + (-1 * eps - 1) * SS + (-3 * eps + 3) * CS
        d = (-4 * eps + 2) * CC + (2 * eps - 4) * SS
        e = (8 * eps + 8) * CC + (8 * eps + 8) * SS

        stencil = np.array([[a, b, c],
                            [d, e, d],
                            [c, b, a]]) / 6.0
    elif type == 'FD':
        # FD approximation to the same operator:
        #   [  1/2(eps-1)cs   -(c^2+eps s^2)  -1/2(eps-1)cs ]
        #   [ -(eps c^2+s^2)    2(eps+1)      -(eps c^2+s^2)]
        #   [ -1/2(eps-1)cs   -(c^2+eps s^2)   1/2(eps-1)cs ]
        a = 0.5 * (eps - 1) * CS
        b = -(eps * SS + CC)
        c = -a
        d = -(eps * CC + SS)
        e = 2.0 * (eps + 1)

        stencil = np.array([[a, b, c],
                            [d, e, d],
                            [c, b, a]])
    else:
        # Bug fix: an unsupported `type` previously fell through to the
        # final `return stencil` with `stencil` undefined (NameError).
        raise ValueError("type must be 'FE' or 'FD'")

    return stencil
def _symbolic_rotation_helper():
    """Use SymPy to generate the 3D rotation matrix and products for diffusion_stencil_3d."""
    from sympy import Matrix, symbols

    cpsi, spsi, cth, sth, cphi, sphi = \
        symbols('cpsi, spsi, cth, sth, cphi, sphi')

    # Three elementary rotations composed into the full rotation Q.
    Rpsi = Matrix([[cpsi, spsi, 0], [-spsi, cpsi, 0], [0, 0, 1]])
    Rth = Matrix([[1, 0, 0], [0, cth, sth], [0, -sth, cth]])
    Rphi = Matrix([[cphi, sphi, 0], [-sphi, cphi, 0], [0, 0, 1]])
    Q = Rpsi * Rth * Rphi

    # Anisotropic diffusion tensor, rotated: D = Q A Q^T.
    epsy, epsz = symbols('epsy, epsz')
    A = Matrix([[1, 0, 0], [0, epsy, 0], [0, 0, epsz]])
    D = Q * A * Q.T

    for row in range(3):
        for col in range(3):
            print('D[%d, %d] = %s' % (row, col, D[row, col]))
def _symbolic_product_helper():
    """Use SymPy to generate the 3D products for diffusion_stencil_3d."""
    from sympy import Matrix, symbols

    entries = symbols('D11, D12, D13, D21, D22, D23, D31, D32, D33')
    D11, D12, D13, D21, D22, D23, D31, D32, D33 = entries

    D = Matrix([[D11, D12, D13],
                [D21, D22, D23],
                [D31, D32, D33]])

    # Symbolic div( D grad(.) ) expanded as a scalar expression.
    grad = Matrix([['dx', 'dy', 'dz']]).T
    div = grad.T
    a = div * D * grad

    print(a[0])
def make_system(A, x, b, formats=None):
    """Return A,x,b suitable for relaxation or raise an exception.

    Parameters
    ----------
    A : sparse-matrix
        n x n system.
    x : array
        n-vector, initial guess.
    b : array
        n-vector, right-hand side.
    formats : list, optional
        Desired sparse matrix formats; default is no change to A's format.

    Returns
    -------
    (A, x, b) with A in the desired sparse format and x, b raveled to
    (n,) vectors.

    Notes
    -----
    Performs rudimentary checks: compatible dimensions, matching dtypes,
    and contiguity of x (which is updated in place by the relaxation
    kernels).
    """
    if formats is not None:
        if formats == ['csr']:
            if sparse.isspmatrix_bsr(A):
                A = A.tocsr()
            elif not sparse.isspmatrix_csr(A):
                warn('implicit conversion to CSR',
                     sparse.SparseEfficiencyWarning)
                A = sparse.csr_matrix(A)
        elif not (sparse.isspmatrix(A) and A.format in formats):
            # Convert to the first acceptable format.
            A = sparse.csr_matrix(A).asformat(formats[0])

    if not isinstance(x, np.ndarray):
        raise ValueError('expected numpy array for argument x')
    if not isinstance(b, np.ndarray):
        raise ValueError('expected numpy array for argument b')

    M, N = A.shape
    if M != N:
        raise ValueError('expected square matrix')
    if x.shape not in [(M,), (M, 1)]:
        raise ValueError('x has invalid dimensions')
    if b.shape not in [(M,), (M, 1)]:
        raise ValueError('b has invalid dimensions')
    if A.dtype != x.dtype or A.dtype != b.dtype:
        raise TypeError('arguments A, x, and b must have the same dtype')

    # x is modified in place by the C kernels, so it must be contiguous.
    if not x.flags.carray:
        raise ValueError('x must be contiguous in memory')

    return A, np.ravel(x), np.ravel(b)
def sor(A, x, b, omega, iterations=1, sweep='forward'):
    """Perform SOR iteration on the linear system Ax=b.

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    omega : scalar
        Damping parameter.
    iterations : int
        Number of iterations to perform.
    sweep : {'forward','backward','symmetric'}
        Direction of sweep.

    Notes
    -----
    When omega=1.0, SOR is equivalent to Gauss-Seidel.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])

    previous = np.empty_like(x)
    for _ in range(iterations):
        previous[:] = x
        gauss_seidel(A, x, b, iterations=1, sweep=sweep)
        # Blend: x <- omega * x_gs + (1 - omega) * x_old
        x *= omega
        previous *= (1 - omega)
        x += previous
def schwarz(A, x, b, iterations=1, subdomain=None, subdomain_ptr=None,
            inv_subblock=None, inv_subblock_ptr=None, sweep='forward'):
    """Perform overlapping multiplicative Schwarz on the linear system Ax=b.

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    iterations : int
        Number of iterations to perform.
    subdomain : int array, optional
        Linear array containing each subdomain's elements.
    subdomain_ptr : int array, optional
        Pointer into subdomain: subdomain[subdomain_ptr[i]:subdomain_ptr[i+1]]
        holds the _sorted_ indices of subdomain i.
    inv_subblock : array, optional
        Linear array of each subdomain's inverted diagonal block of A.
    inv_subblock_ptr : int array, optional
        Pointer into inv_subblock, row-major per subdomain.
    sweep : {'forward','backward','symmetric'}
        Direction of sweep.

    Returns
    -------
    Nothing, x is modified in place.

    Notes
    -----
    If subdomain is None, a point-wise iteration takes place with the
    overlapping region defined by each dof's matrix-graph neighbors.
    Currently only supports CSR matrices.
    """
    A, x, b = make_system(A, x, b, formats=['csr'])
    # C kernel assumes sorted column indices within each row.
    A.sort_indices()

    if subdomain is None and inv_subblock is not None:
        raise ValueError("inv_subblock must be None if subdomain is None")

    # If no subdomains are defined, default is to use the sparsity pattern
    # of A to define the overlapping regions; results are cached on A.
    (subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr) = \
        schwarz_parameters(A, subdomain, subdomain_ptr, inv_subblock,
                           inv_subblock_ptr)

    if sweep == 'forward':
        row_start, row_stop, row_step = 0, subdomain_ptr.shape[0]-1, 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = subdomain_ptr.shape[0]-2, -1, -1
    elif sweep == 'symmetric':
        # One forward plus one backward pass per iteration, reusing the
        # already-computed subdomain data.
        for iter in range(iterations):
            schwarz(A, x, b, iterations=1, subdomain=subdomain,
                    subdomain_ptr=subdomain_ptr, inv_subblock=inv_subblock,
                    inv_subblock_ptr=inv_subblock_ptr, sweep='forward')
            schwarz(A, x, b, iterations=1, subdomain=subdomain,
                    subdomain_ptr=subdomain_ptr, inv_subblock=inv_subblock,
                    inv_subblock_ptr=inv_subblock_ptr, sweep='backward')
        return
    else:
        raise ValueError("valid sweep directions are 'forward',\
 'backward', and 'symmetric'")

    # Call C code, need to make sure that subdomains are sorted and unique
    for iter in range(iterations):
        amg_core.overlapping_schwarz_csr(A.indptr, A.indices, A.data,
                                         x, b,
                                         inv_subblock, inv_subblock_ptr,
                                         subdomain, subdomain_ptr,
                                         subdomain_ptr.shape[0]-1,
                                         A.shape[0],
                                         row_start, row_stop, row_step)
def gauss_seidel(A, x, b, iterations=1, sweep='forward'):
    """Perform Gauss-Seidel iteration on the linear system Ax=b.

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    iterations : int
        Number of iterations to perform.
    sweep : {'forward','backward','symmetric'}
        Direction of sweep.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])

    if sparse.isspmatrix_csr(A):
        blocksize = 1
    else:
        R, C = A.blocksize
        if R != C:
            raise ValueError('BSR blocks must be square')
        blocksize = R

    nrows = int(len(x) / blocksize)
    if sweep == 'forward':
        row_start, row_stop, row_step = 0, nrows, 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = nrows - 1, -1, -1
    elif sweep == 'symmetric':
        # Forward then backward pass per iteration.
        for _ in range(iterations):
            gauss_seidel(A, x, b, iterations=1, sweep='forward')
            gauss_seidel(A, x, b, iterations=1, sweep='backward')
        return
    else:
        raise ValueError("valid sweep directions are 'forward',\
 'backward', and 'symmetric'")

    if sparse.isspmatrix_csr(A):
        for _ in range(iterations):
            amg_core.gauss_seidel(A.indptr, A.indices, A.data,
                                  x, b, row_start, row_stop, row_step)
    else:
        for _ in range(iterations):
            amg_core.bsr_gauss_seidel(A.indptr, A.indices, np.ravel(A.data),
                                      x, b, row_start, row_stop, row_step, R)
def jacobi(A, x, b, iterations=1, omega=1.0):
    """Perform Jacobi iteration on the linear system Ax=b.

    Parameters
    ----------
    A : csr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    iterations : int
        Number of iterations to perform.
    omega : scalar
        Damping parameter.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])

    (row_start, row_stop, row_step) = slice(None).indices(A.shape[0])
    if (row_stop - row_start) * row_step <= 0:
        return  # empty sweep: no work to do

    temp = np.empty_like(x)

    # Create uniform type, converting possibly-complex scalars to
    # length-1 arrays for the C kernel.
    [omega] = type_prep(A.dtype, [omega])

    if sparse.isspmatrix_csr(A):
        for _ in range(iterations):
            amg_core.jacobi(A.indptr, A.indices, A.data,
                            x, b, temp,
                            row_start, row_stop, row_step, omega)
        return

    R, C = A.blocksize
    if R != C:
        raise ValueError('BSR blocks must be square')
    # Switch from dof indices to block-row indices.
    row_start = int(row_start / R)
    row_stop = int(row_stop / R)
    for _ in range(iterations):
        amg_core.bsr_jacobi(A.indptr, A.indices, np.ravel(A.data),
                            x, b, temp,
                            row_start, row_stop, row_step, R, omega)
def block_jacobi(A, x, b, Dinv=None, blocksize=1, iterations=1, omega=1.0):
    """Perform block Jacobi iteration on the linear system Ax=b.

    Parameters
    ----------
    A : csr_matrix or bsr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    Dinv : array, optional
        Block diagonal inverses of A, shape (N/blocksize, blocksize,
        blocksize); computed if not supplied.
    blocksize : int
        Desired dimension of blocks.
    iterations : int
        Number of iterations to perform.
    omega : scalar
        Damping parameter.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
    A = A.tobsr(blocksize=(blocksize, blocksize))

    nblocks = int(A.shape[0] / blocksize)
    if Dinv is None:
        Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
    elif Dinv.shape[0] != nblocks:
        raise ValueError('Dinv and A have incompatible dimensions')
    elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
        raise ValueError('Dinv and blocksize are incompatible')

    (row_start, row_stop, row_step) = slice(None).indices(nblocks)
    if (row_stop - row_start) * row_step <= 0:
        return  # empty sweep: no work to do

    temp = np.empty_like(x)

    # Create uniform type, converting possibly-complex scalars to
    # length-1 arrays for the C kernel.
    [omega] = type_prep(A.dtype, [omega])

    for _ in range(iterations):
        amg_core.block_jacobi(A.indptr, A.indices, np.ravel(A.data),
                              x, b, np.ravel(Dinv), temp,
                              row_start, row_stop, row_step,
                              omega, blocksize)
def block_gauss_seidel(A, x, b, iterations=1, sweep='forward', blocksize=1,
                       Dinv=None):
    """Perform block Gauss-Seidel iteration on the linear system Ax=b.

    Parameters
    ----------
    A : csr_matrix, bsr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    iterations : int
        Number of iterations to perform.
    sweep : {'forward','backward','symmetric'}
        Direction of sweep.
    blocksize : int
        Desired dimension of blocks.
    Dinv : array, optional
        Block diagonal inverses of A, shape (N/blocksize, blocksize,
        blocksize); computed if not supplied.
    """
    A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
    A = A.tobsr(blocksize=(blocksize, blocksize))

    nblocks = int(A.shape[0] / blocksize)
    if Dinv is None:
        Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
    elif Dinv.shape[0] != nblocks:
        raise ValueError('Dinv and A have incompatible dimensions')
    elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
        raise ValueError('Dinv and blocksize are incompatible')

    if sweep == 'forward':
        row_start, row_stop, row_step = 0, int(len(x) / blocksize), 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = int(len(x) / blocksize) - 1, -1, -1
    elif sweep == 'symmetric':
        # Forward then backward pass per iteration, reusing Dinv.
        for _ in range(iterations):
            block_gauss_seidel(A, x, b, iterations=1, sweep='forward',
                               blocksize=blocksize, Dinv=Dinv)
            block_gauss_seidel(A, x, b, iterations=1, sweep='backward',
                               blocksize=blocksize, Dinv=Dinv)
        return
    else:
        raise ValueError("valid sweep directions are 'forward',\
 'backward', and 'symmetric'")

    for _ in range(iterations):
        amg_core.block_gauss_seidel(A.indptr, A.indices, np.ravel(A.data),
                                    x, b, np.ravel(Dinv),
                                    row_start, row_stop, row_step,
                                    blocksize)
def polynomial(A, x, b, coefficients, iterations=1):
    """Apply a polynomial smoother to the system Ax=b.

    The smoother has the form x[:] = x + p(A) (b - A*x), where p(A) is a
    polynomial in A whose scalar coefficients are given in descending
    order by `coefficients`:

    - Richardson:  p(A) = c_0                 -> [c_0]
    - Linear:      p(A) = c_1*A + c_0         -> [c_1, c_0]
    - Quadratic:   p(A) = c_2*A^2 + c_1*A+c_0 -> [c_2, c_1, c_0]

    Horner's rule is applied to avoid computing A^k directly.

    Parameters
    ----------
    A : sparse matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    coefficients : array_like
        Polynomial coefficients in descending order.
    iterations : int
        Number of iterations to perform.
    """
    A, x, b = make_system(A, x, b, formats=None)

    # Fix: the import was previously executed inside the iteration loop;
    # hoist it so it runs once per call.
    from pyamg.util.linalg import norm

    for _ in range(iterations):
        # When x is exactly zero the residual is b; skip one matvec.
        if norm(x) == 0:
            residual = b
        else:
            residual = (b - A * x)

        # Horner evaluation of h = p(A) * residual.
        h = coefficients[0] * residual
        for c in coefficients[1:]:
            h = c * residual + A * h

        x += h
def gauss_seidel_indexed(A, x, b, indices, iterations=1, sweep='forward'):
    """Perform indexed Gauss-Seidel iteration on the linear system Ax=b.

    The sequence in which unknowns are relaxed is given explicitly by
    `indices`, unlike standard Gauss-Seidel which sweeps all variables in
    increasing or decreasing order.  Useful for specialized smoothers,
    e.g. F-smoothing in classical AMG.

    Parameters
    ----------
    A : csr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    indices : ndarray
        Row indices to relax.  NOTE: not range-checked here; the C kernel
        assumes they are valid rows of A.
    iterations : int
        Number of iterations to perform.
    sweep : {'forward','backward','symmetric'}
        Direction of sweep.
    """
    A, x, b = make_system(A, x, b, formats=['csr'])
    indices = np.asarray(indices, dtype='intc')

    if sweep == 'forward':
        row_start, row_stop, row_step = 0, len(indices), 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = len(indices) - 1, -1, -1
    elif sweep == 'symmetric':
        for _ in range(iterations):
            gauss_seidel_indexed(A, x, b, indices, iterations=1,
                                 sweep='forward')
            gauss_seidel_indexed(A, x, b, indices, iterations=1,
                                 sweep='backward')
        return
    else:
        raise ValueError("valid sweep directions are 'forward',\
 'backward', and 'symmetric'")

    for _ in range(iterations):
        amg_core.gauss_seidel_indexed(A.indptr, A.indices, A.data,
                                      x, b, indices,
                                      row_start, row_stop, row_step)
def jacobi_ne(A, x, b, iterations=1, omega=1.0):
    """Perform Jacobi iterations on the linear system A A.H x = A.H b.

    Also known as Cimmino relaxation.

    Parameters
    ----------
    A : csr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    iterations : int
        Number of iterations to perform.
    omega : scalar
        Damping parameter.

    References
    ----------
    Brandt & Ta'asan (1985); Kaczmarz (1937); Cimmino (1938).
    """
    A, x, b = make_system(A, x, b, formats=['csr'])

    (row_start, row_stop, row_step) = slice(None).indices(A.shape[0])
    temp = np.zeros_like(x)

    # Inverse diagonal of A*A.H (norm_eq=2 selects the normal-equations
    # diagonal).
    Dinv = get_diagonal(A, norm_eq=2, inv=True)

    # Create uniform type, converting possibly-complex scalars to
    # length-1 arrays for the C kernel.
    [omega] = type_prep(A.dtype, [omega])

    for _ in range(iterations):
        delta = (np.ravel(b - A * x) * np.ravel(Dinv)).astype(A.dtype)
        amg_core.jacobi_ne(A.indptr, A.indices, A.data,
                           x, b, delta, temp,
                           row_start, row_stop, row_step, omega)
def gauss_seidel_ne(A, x, b, iterations=1, sweep='forward', omega=1.0,
                    Dinv=None):
    """Perform Gauss-Seidel iterations on the linear system A A.H x = b.

    Also known as Kaczmarz relaxation.

    Parameters
    ----------
    A : csr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    iterations : int
        Number of iterations to perform.
    sweep : {'forward','backward','symmetric'}
        Direction of sweep.
    omega : float
        Relaxation parameter, typically in (0, 2); omega != 1.0 makes this
        SOR on A A.H.
    Dinv : ndarray, optional
        Inverse of diag(A A.H) (length N); computed if not supplied.
    """
    A, x, b = make_system(A, x, b, formats=['csr'])

    # Inverse diagonal of A*A.H.
    if Dinv is None:
        Dinv = np.ravel(get_diagonal(A, norm_eq=2, inv=True))

    if sweep == 'forward':
        row_start, row_stop, row_step = 0, len(x), 1
    elif sweep == 'backward':
        row_start, row_stop, row_step = len(x) - 1, -1, -1
    elif sweep == 'symmetric':
        # Forward then backward pass per iteration, reusing Dinv.
        for _ in range(iterations):
            gauss_seidel_ne(A, x, b, iterations=1, sweep='forward',
                            omega=omega, Dinv=Dinv)
            gauss_seidel_ne(A, x, b, iterations=1, sweep='backward',
                            omega=omega, Dinv=Dinv)
        return
    else:
        raise ValueError("valid sweep directions are 'forward',\
 'backward', and 'symmetric'")

    for _ in range(iterations):
        amg_core.gauss_seidel_ne(A.indptr, A.indices, A.data,
                                 x, b,
                                 row_start, row_stop, row_step,
                                 Dinv, omega)
def gauss_seidel_nr(A, x, b, iterations=1, sweep='forward', omega=1.0,
                    Dinv=None):
    """Perform Gauss-Seidel iterations on the linear system A.H A x = A.H b.

    Parameters
    ----------
    A : csr_matrix
        Sparse NxN matrix.
    x : ndarray
        Approximate solution (length N); modified in place.
    b : ndarray
        Right-hand side (length N).
    iterations : int
        Number of iterations to perform.
    sweep : {'forward','backward','symmetric'}
        Direction of sweep.
    omega : float
        Relaxation parameter, typically in (0, 2); omega != 1.0 makes this
        SOR on A.H A.
    Dinv : ndarray, optional
        Inverse of diag(A.H A) (length N); computed if not supplied.

    References
    ----------
    Saad, "Iterative Methods for Sparse Linear Systems", 2nd ed.,
    SIAM, pp. 247-9, 2003.
    """
    A, x, b = make_system(A, x, b, formats=['csc'])

    # Inverse diagonal of A.H*A.
    if Dinv is None:
        Dinv = np.ravel(get_diagonal(A, norm_eq=1, inv=True))

    if sweep == 'forward':
        col_start, col_stop, col_step = 0, len(x), 1
    elif sweep == 'backward':
        col_start, col_stop, col_step = len(x) - 1, -1, -1
    elif sweep == 'symmetric':
        # Forward then backward pass per iteration, reusing Dinv.
        for _ in range(iterations):
            gauss_seidel_nr(A, x, b, iterations=1, sweep='forward',
                            omega=omega, Dinv=Dinv)
            gauss_seidel_nr(A, x, b, iterations=1, sweep='backward',
                            omega=omega, Dinv=Dinv)
        return
    else:
        raise ValueError("valid sweep directions are 'forward',\
 'backward', and 'symmetric'")

    # Initial residual; the C kernel keeps it updated across columns.
    r = b - A * x

    for _ in range(iterations):
        amg_core.gauss_seidel_nr(A.indptr, A.indices, A.data,
                                 x, r,
                                 col_start, col_stop, col_step,
                                 Dinv, omega)
def schwarz_parameters(A, subdomain=None, subdomain_ptr=None,
                       inv_subblock=None, inv_subblock_ptr=None):
    """Set Schwarz parameters.

    Helper for Schwarz relaxation: computes the subdomain partition and the
    pseudo-inverse of each subdomain's diagonal block, caching the result on
    A so that pre- and post-smoothing share one (costly) setup.

    Parameters
    ----------
    A : csr_matrix
    subdomain, subdomain_ptr : int arrays, optional
        CSR-style description: subdomain[subdomain_ptr[i]:subdomain_ptr[i+1]]
        holds the sorted indices of subdomain i.
    inv_subblock, inv_subblock_ptr : arrays, optional
        Flattened row-major pseudo-inverses of the subdomain blocks.

    Returns
    -------
    tuple
        (subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr), also
        stored as A.schwarz_parameters.
    """
    # Reuse cached parameters when they match the requested subdomains.
    if hasattr(A, 'schwarz_parameters'):
        if subdomain is not None and subdomain_ptr is not None:
            if np.array(A.schwarz_parameters[0] == subdomain).all() and \
               np.array(A.schwarz_parameters[1] == subdomain_ptr).all():
                return A.schwarz_parameters
        else:
            return A.schwarz_parameters

    # Default: overlapping regions defined by A's sparsity pattern.
    if subdomain is None or subdomain_ptr is None:
        subdomain_ptr = A.indptr.copy()
        subdomain = A.indices.copy()

    # Extract and invert each subdomain's block of A.
    if inv_subblock is None or inv_subblock_ptr is None:
        inv_subblock_ptr = np.zeros(subdomain_ptr.shape,
                                    dtype=A.indices.dtype)
        blocksize = (subdomain_ptr[1:] - subdomain_ptr[:-1])
        inv_subblock_ptr[1:] = np.cumsum(blocksize * blocksize)

        # Extract each block column from A.
        inv_subblock = np.zeros((inv_subblock_ptr[-1],), dtype=A.dtype)
        amg_core.extract_subblocks(A.indptr, A.indices, A.data, inv_subblock,
                                   inv_subblock_ptr, subdomain, subdomain_ptr,
                                   int(subdomain_ptr.shape[0] - 1),
                                   A.shape[0])

        # Tolerance below which singular values are treated as zero in
        # *gelss below.
        # Fix: np.float and np.longfloat were deprecated in NumPy 1.20 and
        # removed in 1.24; use the builtin float and np.longdouble.
        t = A.dtype.char
        eps = np.finfo(float).eps
        feps = np.finfo(np.single).eps
        geps = np.finfo(np.longdouble).eps
        _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
        cond = {0: feps * 1e3, 1: eps * 1e6,
                2: geps * 1e6}[_array_precision[t]]

        # Invert each block with the least-squares solver gelss, which
        # tolerates (nearly) singular subdomain blocks.
        my_pinv, = la.get_lapack_funcs(['gelss'],
                                       (np.ones((1,), dtype=A.dtype)))
        for i in range(subdomain_ptr.shape[0] - 1):
            m = blocksize[i]
            # Fix: was sp.eye — the NumPy re-export was removed from SciPy;
            # np.eye returns the same dense identity.
            rhs = np.eye(m, m, dtype=A.dtype)
            j0 = inv_subblock_ptr[i]
            j1 = inv_subblock_ptr[i + 1]
            gelssoutput = my_pinv(inv_subblock[j0:j1].reshape(m, m),
                                  rhs, cond=cond,
                                  overwrite_a=True, overwrite_b=True)
            inv_subblock[j0:j1] = np.ravel(gelssoutput[1])

    A.schwarz_parameters = (subdomain, subdomain_ptr,
                            inv_subblock, inv_subblock_ptr)
    return A.schwarz_parameters
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None, residuals=None): """Conjugate Gradient algorithm. Solves the linear system Ax = b. Left preconditioning is supported. Parameters A : array, matrix, sparse matrix, LinearOperator n x n, linear system to solve b : array, matrix right hand side, shape is (n,) or (n,1) x0 : array, matrix initial guess, default is a vector of zeros tol : float relative convergence tolerance, i.e. tol is scaled by the preconditioner norm of r_0, or ||r_0||_M. maxiter : int maximum number of allowed iterations xtype : type dtype for the solution, default is automatic type detection M : array, matrix, sparse matrix, LinearOperator n x n, inverted preconditioner, i.e. solve M A x = M b. callback : function User-supplied function is called after each iteration as callback(xk), where xk is the current solution vector residuals : list residuals contains the residual norm history, including the initial residual. The preconditioner norm is used, instead of the Euclidean norm. Returns ------- (xNew, info) xNew : an updated guess to the solution of Ax = b info : halting status of cg == ======================================= 0 successful exit >0 convergence to tolerance not achieved, return iteration count instead. <0 numerical breakdown, or illegal input == ======================================= Notes ----- The LinearOperator class is in scipy.sparse.linalg.interface. Use this class if you prefer to define A or M as a mat-vec routine as opposed to explicitly constructing the matrix. A.psolve(..) is still supported as a legacy. The residual in the preconditioner norm is both used for halting and returned in the residuals list. Examples -------- 10.9370700187 References .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems, Second Edition", SIAM, pp. 
262-67, 2003 http://www-users.cs.umn.edu/~saad/books.html """
def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
       callback=None, residuals=None):
    """Conjugate Gradient for Ax = b with optional left preconditioning.

    Halting uses the preconditioner norm ||r||_M, scaled by ||r_0||_M.
    Returns (x, info): info == 0 on success, -1 on numerical breakdown,
    otherwise the iteration count at which maxiter was reached.
    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    # Ensure that warnings are always reissued from this function.
    # FIX: use a raw string -- '\.' inside a plain literal is an invalid
    # escape sequence (DeprecationWarning/SyntaxWarning in Python 3).
    import warnings
    warnings.filterwarnings('always', module=r'pyamg\.krylov\._cg')

    # determine maxiter
    if maxiter is None:
        maxiter = int(1.3*len(b)) + 2
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')

    # setup method
    r = b - A*x
    z = M*r
    p = z.copy()
    rz = np.inner(r.conjugate(), z)

    # use preconditioner norm
    normr = np.sqrt(rz)

    if residuals is not None:
        residuals[:] = [normr]  # initial residual

    # Check initial guess ( scaling by b, if b != 0,
    #   must account for case when norm(b) is very small)
    normb = norm(b)
    if normb == 0.0:
        normb = 1.0
    if normr < tol*normb:
        return (postprocess(x), 0)

    # Scale tol by ||r_0||_M
    if normr != 0.0:
        tol = tol*normr

    # How often should r be recomputed
    recompute_r = 8

    iter = 0

    while True:
        Ap = A*p

        rz_old = rz

        # Step numbers below follow Saad's pseudocode
        pAp = np.inner(Ap.conjugate(), p)

        # check curvature of A
        if pAp < 0.0:
            warn("\nIndefinite matrix detected in CG, aborting\n")
            return (postprocess(x), -1)

        alpha = rz/pAp                                  # 3
        x += alpha * p                                  # 4

        # Periodically recompute the true residual to limit drift
        if np.mod(iter, recompute_r) and iter > 0:      # 5
            r -= alpha * Ap
        else:
            r = b - A*x

        z = M*r                                         # 6
        rz = np.inner(r.conjugate(), z)

        # check curvature of M
        if rz < 0.0:
            warn("\nIndefinite preconditioner detected in CG, aborting\n")
            return (postprocess(x), -1)

        beta = rz/rz_old                                # 7
        p *= beta                                       # 8
        p += z

        iter += 1

        # use preconditioner norm
        normr = np.sqrt(rz)
        if residuals is not None:
            residuals.append(normr)
        if callback is not None:
            callback(x)

        if normr < tol:
            return (postprocess(x), 0)
        elif rz == 0.0:
            # important to test after testing normr < tol; rz == 0.0 is an
            # indicator of convergence when r = 0.0
            # FIX: join the message without a backslash continuation, which
            # previously embedded the source indentation in the warning text
            warn("\nSingular preconditioner detected in CG, "
                 "ceasing iterations\n")
            return (postprocess(x), -1)

        if iter == maxiter:
            return (postprocess(x), iter)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None, callback=None, residuals=None): """Biconjugate Gradient Algorithm with Stabilization. Solves the linear system Ax = b. Left preconditioning is supported. Parameters A : array, matrix, sparse matrix, LinearOperator n x n, linear system to solve b : array, matrix right hand side, shape is (n,) or (n,1) x0 : array, matrix initial guess, default is a vector of zeros tol : float relative convergence tolerance, i.e. tol is scaled by ||r_0||_2 maxiter : int maximum number of allowed iterations xtype : type dtype for the solution, default is automatic type detection M : array, matrix, sparse matrix, LinearOperator n x n, inverted preconditioner, i.e. solve M A A.H x = M b. callback : function User-supplied function is called after each iteration as callback(xk), where xk is the current solution vector residuals : list residuals has the residual norm history, including the initial residual, appended to it Returns ------- (xNew, info) xNew : an updated guess to the solution of Ax = b info : halting status of bicgstab == ====================================== 0 successful exit >0 convergence to tolerance not achieved, return iteration count instead. <0 numerical breakdown, or illegal input == ====================================== Notes ----- The LinearOperator class is in scipy.sparse.linalg.interface. Use this class if you prefer to define A or M as a mat-vec routine as opposed to explicitly constructing the matrix. A.psolve(..) is still supported as a legacy. Examples -------- 4.68163045309 References .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems, Second Edition", SIAM, pp. 231-234, 2003 http://www-users.cs.umn.edu/~saad/books.html """
# Convert inputs to linear system, with error checking A, M, x, b, postprocess = make_system(A, M, x0, b) # Ensure that warnings are always reissued from this function import warnings warnings.filterwarnings('always', module='pyamg\.krylov\._bicgstab') # Check iteration numbers if maxiter is None: maxiter = len(x) + 5 elif maxiter < 1: raise ValueError('Number of iterations must be positive') # Prep for method r = b - A*x normr = norm(r) if residuals is not None: residuals[:] = [normr] # Check initial guess ( scaling by b, if b != 0, # must account for case when norm(b) is very small) normb = norm(b) if normb == 0.0: normb = 1.0 if normr < tol*normb: return (postprocess(x), 0) # Scale tol by ||r_0||_2 if normr != 0.0: tol = tol*normr # Is this a one dimensional matrix? if A.shape[0] == 1: entry = np.ravel(A*np.array([1.0], dtype=xtype)) return (postprocess(b/entry), 0) rstar = r.copy() p = r.copy() rrstarOld = np.inner(rstar.conjugate(), r) iter = 0 # Begin BiCGStab while True: Mp = M*p AMp = A*Mp # alpha = (r_j, rstar) / (A*M*p_j, rstar) alpha = rrstarOld/np.inner(rstar.conjugate(), AMp) # s_j = r_j - alpha*A*M*p_j s = r - alpha*AMp Ms = M*s AMs = A*Ms # omega = (A*M*s_j, s_j)/(A*M*s_j, A*M*s_j) omega = np.inner(AMs.conjugate(), s)/np.inner(AMs.conjugate(), AMs) # x_{j+1} = x_j + alpha*M*p_j + omega*M*s_j x = x + alpha*Mp + omega*Ms # r_{j+1} = s_j - omega*A*M*s r = s - omega*AMs # beta_j = (r_{j+1}, rstar)/(r_j, rstar) * (alpha/omega) rrstarNew = np.inner(rstar.conjugate(), r) beta = (rrstarNew / rrstarOld) * (alpha / omega) rrstarOld = rrstarNew # p_{j+1} = r_{j+1} + beta*(p_j - omega*A*M*p) p = r + beta*(p - omega*AMp) iter += 1 normr = norm(r) if residuals is not None: residuals.append(normr) if callback is not None: callback(x) if normr < tol: return (postprocess(x), 0) if iter == maxiter: return (postprocess(x), iter)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def operator_complexity(self): """Operator complexity of this multigrid hierarchy. Defined as: Number of nonzeros in the matrix on all levels / Number of nonzeros in the matrix on the finest level """
return sum([level.A.nnz for level in self.levels]) /\ float(self.levels[0].A.nnz)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def grid_complexity(self): """Grid complexity of this multigrid hierarchy. Defined as: Number of unknowns on all levels / Number of unknowns on the finest level """
return sum([level.A.shape[0] for level in self.levels]) /\ float(self.levels[0].A.shape[0])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def aspreconditioner(self, cycle='V'): """Create a preconditioner using this multigrid cycle. Parameters cycle : {'V','W','F','AMLI'} Type of multigrid cycle to perform in each iteration. Returns ------- precond : LinearOperator Preconditioner suitable for the iterative solvers in defined in the scipy.sparse.linalg module (e.g. cg, gmres) and any other solver that uses the LinearOperator interface. Refer to the LinearOperator documentation in scipy.sparse.linalg See Also -------- multilevel_solver.solve, scipy.sparse.linalg.LinearOperator Examples -------- """
from scipy.sparse.linalg import LinearOperator shape = self.levels[0].A.shape dtype = self.levels[0].A.dtype def matvec(b): return self.solve(b, maxiter=1, cycle=cycle, tol=1e-12) return LinearOperator(shape, matvec, dtype=dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __solve(self, lvl, x, b, cycle): """Multigrid cycling. Parameters lvl : int Solve problem on level `lvl` x : numpy array Initial guess `x` and return correction b : numpy array Right-hand side for Ax=b cycle : {'V','W','F','AMLI'} Recursively called cycling function. The Defines the cycling used: cycle = 'V', V-cycle cycle = 'W', W-cycle cycle = 'F', F-cycle cycle = 'AMLI', AMLI-cycle """
A = self.levels[lvl].A self.levels[lvl].presmoother(A, x, b) residual = b - A * x coarse_b = self.levels[lvl].R * residual coarse_x = np.zeros_like(coarse_b) if lvl == len(self.levels) - 2: coarse_x[:] = self.coarse_solver(self.levels[-1].A, coarse_b) else: if cycle == 'V': self.__solve(lvl + 1, coarse_x, coarse_b, 'V') elif cycle == 'W': self.__solve(lvl + 1, coarse_x, coarse_b, cycle) self.__solve(lvl + 1, coarse_x, coarse_b, cycle) elif cycle == 'F': self.__solve(lvl + 1, coarse_x, coarse_b, cycle) self.__solve(lvl + 1, coarse_x, coarse_b, 'V') elif cycle == "AMLI": # Run nAMLI AMLI cycles, which compute "optimal" corrections by # orthogonalizing the coarse-grid corrections in the A-norm nAMLI = 2 Ac = self.levels[lvl + 1].A p = np.zeros((nAMLI, coarse_b.shape[0]), dtype=coarse_b.dtype) beta = np.zeros((nAMLI, nAMLI), dtype=coarse_b.dtype) for k in range(nAMLI): # New search direction --> M^{-1}*residual p[k, :] = 1 self.__solve(lvl + 1, p[k, :].reshape(coarse_b.shape), coarse_b, cycle) # Orthogonalize new search direction to old directions for j in range(k): # loops from j = 0...(k-1) beta[k, j] = np.inner(p[j, :].conj(), Ac * p[k, :]) /\ np.inner(p[j, :].conj(), Ac * p[j, :]) p[k, :] -= beta[k, j] * p[j, :] # Compute step size Ap = Ac * p[k, :] alpha = np.inner(p[k, :].conj(), np.ravel(coarse_b)) /\ np.inner(p[k, :].conj(), Ap) # Update solution coarse_x += alpha * p[k, :].reshape(coarse_x.shape) # Update residual coarse_b -= alpha * Ap.reshape(coarse_b.shape) else: raise TypeError('Unrecognized cycle type (%s)' % cycle) x += self.levels[lvl].P * coarse_x # coarse grid correction self.levels[lvl].postsmoother(A, x, b)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def maximal_independent_set(G, algo='serial', k=None): """Compute a maximal independent vertex set for a graph. Parameters G : sparse matrix Symmetric matrix, preferably in sparse CSR or CSC format The nonzeros of G represent the edges of an undirected graph. algo : {'serial', 'parallel'} Algorithm used to compute the MIS * serial : greedy serial algorithm * parallel : variant of Luby's parallel MIS algorithm Returns ------- S : array S[i] = 1 if vertex i is in the MIS S[i] = 0 otherwise Notes ----- Diagonal entries in the G (self loops) will be ignored. Luby's algorithm is significantly more expensive than the greedy serial algorithm. """
G = asgraph(G) N = G.shape[0] mis = np.empty(N, dtype='intc') mis[:] = -1 if k is None: if algo == 'serial': fn = amg_core.maximal_independent_set_serial fn(N, G.indptr, G.indices, -1, 1, 0, mis) elif algo == 'parallel': fn = amg_core.maximal_independent_set_parallel fn(N, G.indptr, G.indices, -1, 1, 0, mis, sp.rand(N), -1) else: raise ValueError('unknown algorithm (%s)' % algo) else: fn = amg_core.maximal_independent_set_k_parallel fn(N, G.indptr, G.indices, k, mis, sp.rand(N), -1) return mis
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def vertex_coloring(G, method='MIS'): """Compute a vertex coloring of a graph. Parameters G : sparse matrix Symmetric matrix, preferably in sparse CSR or CSC format The nonzeros of G represent the edges of an undirected graph. method : string Algorithm used to compute the vertex coloring: * 'MIS' - Maximal Independent Set * 'JP' - Jones-Plassmann (parallel) * 'LDF' - Largest-Degree-First (parallel) Returns ------- coloring : array An array of vertex colors (integers beginning at 0) Notes ----- Diagonal entries in the G (self loops) will be ignored. """
G = asgraph(G) N = G.shape[0] coloring = np.empty(N, dtype='intc') if method == 'MIS': fn = amg_core.vertex_coloring_mis fn(N, G.indptr, G.indices, coloring) elif method == 'JP': fn = amg_core.vertex_coloring_jones_plassmann fn(N, G.indptr, G.indices, coloring, sp.rand(N)) elif method == 'LDF': fn = amg_core.vertex_coloring_LDF fn(N, G.indptr, G.indices, coloring, sp.rand(N)) else: raise ValueError('unknown method (%s)' % method) return coloring
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bellman_ford(G, seeds, maxiter=None): """Bellman-Ford iteration. Parameters G : sparse matrix Returns ------- distances : array nearest_seed : array References CLR """
G = asgraph(G) N = G.shape[0] if maxiter is not None and maxiter < 0: raise ValueError('maxiter must be positive') if G.dtype == complex: raise ValueError('Bellman-Ford algorithm only defined for real\ weights') seeds = np.asarray(seeds, dtype='intc') distances = np.empty(N, dtype=G.dtype) distances[:] = max_value(G.dtype) distances[seeds] = 0 nearest_seed = np.empty(N, dtype='intc') nearest_seed[:] = -1 nearest_seed[seeds] = seeds old_distances = np.empty_like(distances) iter = 0 while maxiter is None or iter < maxiter: old_distances[:] = distances amg_core.bellman_ford(N, G.indptr, G.indices, G.data, distances, nearest_seed) if (old_distances == distances).all(): break return (distances, nearest_seed)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lloyd_cluster(G, seeds, maxiter=10): """Perform Lloyd clustering on graph with weighted edges. Parameters G : csr_matrix, csc_matrix A sparse NxN matrix where each nonzero entry G[i,j] is the distance between nodes i and j. seeds : int array If seeds is an integer, then its value determines the number of clusters. Otherwise, seeds is an array of unique integers between 0 and N-1 that will be used as the initial seeds for clustering. maxiter : int The maximum number of iterations to perform. Returns ------- distances : array final distances clusters : int array id of each cluster of points seeds : int array index of each seed Notes ----- If G has complex values, abs(G) is used instead. """
G = asgraph(G) N = G.shape[0] if G.dtype.kind == 'c': # complex dtype G = np.abs(G) # interpret seeds argument if np.isscalar(seeds): seeds = np.random.permutation(N)[:seeds] seeds = seeds.astype('intc') else: seeds = np.array(seeds, dtype='intc') if len(seeds) < 1: raise ValueError('at least one seed is required') if seeds.min() < 0: raise ValueError('invalid seed index (%d)' % seeds.min()) if seeds.max() >= N: raise ValueError('invalid seed index (%d)' % seeds.max()) clusters = np.empty(N, dtype='intc') distances = np.empty(N, dtype=G.dtype) for i in range(maxiter): last_seeds = seeds.copy() amg_core.lloyd_cluster(N, G.indptr, G.indices, G.data, len(seeds), distances, clusters, seeds) if (seeds == last_seeds).all(): break return (distances, clusters, seeds)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def breadth_first_search(G, seed): """Breadth First search of a graph. Parameters G : csr_matrix, csc_matrix A sparse NxN matrix where each nonzero entry G[i,j] is the distance between nodes i and j. seed : int Index of the seed location Returns ------- order : int array Breadth first order level : int array Final levels Examples -------- 0---2 | / | / 1---4---7---8---9 | /| / | / | / 3/ 6/ | | 5 [4,6], [4,7], [6,7], [7,8], [8,9]]) [0 1 1 2 2 3 3 3 4 5] [0 1 2 3 4 5 6 7 8 9] """
G = asgraph(G) N = G.shape[0] order = np.empty(N, G.indptr.dtype) level = np.empty(N, G.indptr.dtype) level[:] = -1 BFS = amg_core.breadth_first_search BFS(G.indptr, G.indices, int(seed), order, level) return order, level
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def connected_components(G): """Compute the connected components of a graph. The connected components of a graph G, which is represented by a symmetric sparse matrix, are labeled with the integers 0,1,..(K-1) where K is the number of components. Parameters G : symmetric matrix, preferably in sparse CSR or CSC format The nonzeros of G represent the edges of an undirected graph. Returns ------- components : ndarray An array of component labels for each vertex of the graph. Notes ----- If the nonzero structure of G is not symmetric, then the result is undefined. Examples -------- [0 0 0] [0 0 1] [0 1 2] [0 0 1 1] """
G = asgraph(G) N = G.shape[0] components = np.empty(N, G.indptr.dtype) fn = amg_core.connected_components fn(N, G.indptr, G.indices, components) return components
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def symmetric_rcm(A): """Symmetric Reverse Cutthill-McKee. Parameters A : sparse matrix Sparse matrix Returns ------- B : sparse matrix Permuted matrix with reordering Notes ----- Get a pseudo-peripheral node, then call BFS Examples -------- See Also -------- pseudo_peripheral_node """
n = A.shape[0] root, order, level = pseudo_peripheral_node(A) Perm = sparse.identity(n, format='csr') p = level.argsort() Perm = Perm[p, :] return Perm * A * Perm.T
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
 def pseudo_peripheral_node(A): """Find a pseudo peripheral node. Parameters A : sparse matrix Sparse matrix Returns ------- x : int Location of the node order : array BFS ordering level : array BFS levels Notes ----- Algorithm in Saad """
from pyamg.graph import breadth_first_search n = A.shape[0] valence = np.diff(A.indptr) # select an initial node x, set delta = 0 x = int(np.random.rand() * n) delta = 0 while True: # do a level-set traversal from x order, level = breadth_first_search(A, x) # select a node y in the last level with min degree maxlevel = level.max() lastnodes = np.where(level == maxlevel)[0] lastnodesvalence = valence[lastnodes] minlastnodesvalence = lastnodesvalence.min() y = np.where(lastnodesvalence == minlastnodesvalence)[0][0] y = lastnodes[y] # if d(x,y)>delta, set, and go to bfs above if level[y] > delta: x = y delta = level[y] else: return x, order, level
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def profile_solver(ml, accel=None, **kwargs): """Profile a particular multilevel object. Parameters ml : multilevel Fully constructed multilevel object accel : function pointer Pointer to a valid Krylov solver (e.g. gmres, cg) Returns ------- residuals : array Array of residuals for each iteration See Also -------- multilevel.psolve, multilevel.solve Examples -------- """
A = ml.levels[0].A b = A * sp.rand(A.shape[0], 1) residuals = [] if accel is None: ml.solve(b, residuals=residuals, **kwargs) else: def callback(x): residuals.append(norm(np.ravel(b) - np.ravel(A*x))) M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V')) accel(A, b, M=M, callback=callback, **kwargs) return np.asarray(residuals)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def diag_sparse(A): """Return a diagonal. If A is a sparse matrix (e.g. csr_matrix or csc_matrix) - return the diagonal of A as an array Otherwise - return a csr_matrix with A on the diagonal Parameters A : sparse matrix or 1d array General sparse matrix or array of diagonal entries Returns ------- B : array or sparse matrix Diagonal sparse is returned as csr if A is dense otherwise return an array of the diagonal Examples -------- [[ 2. 0. 0.] [ 0. 2. 0.] [ 0. 0. 2.]] """
if isspmatrix(A): return A.diagonal() else: if(np.ndim(A) != 1): raise ValueError('input diagonal array expected to be 1d') return csr_matrix((np.asarray(A), np.arange(len(A)), np.arange(len(A)+1)), (len(A), len(A)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scale_rows(A, v, copy=True): """Scale the sparse rows of a matrix. Parameters A : sparse matrix Sparse matrix with M rows v : array_like Array of M scales copy : {True,False} - If copy=True, then the matrix is copied to a new and different return matrix (e.g. B=scale_rows(A,v)) - If copy=False, then the matrix is overwritten deeply (e.g. scale_rows(A,v,copy=False) overwrites A) Returns ------- A : sparse matrix Scaled sparse matrix in original format See Also -------- scipy.sparse._sparsetools.csr_scale_rows, scale_columns Notes ----- - if A is a csc_matrix, the transpose A.T is passed to scale_columns - if A is not csr, csc, or bsr, it is converted to csr and sent to scale_rows Examples -------- """
v = np.ravel(v) M, N = A.shape if not isspmatrix(A): raise ValueError('scale rows needs a sparse matrix') if M != len(v): raise ValueError('scale vector has incompatible shape') if copy: A = A.copy() A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype)) else: v = np.asarray(v, dtype=A.dtype) if isspmatrix_csr(A): csr_scale_rows(M, N, A.indptr, A.indices, A.data, v) elif isspmatrix_bsr(A): R, C = A.blocksize bsr_scale_rows(int(M/R), int(N/C), R, C, A.indptr, A.indices, np.ravel(A.data), v) elif isspmatrix_csc(A): pyamg.amg_core.csc_scale_rows(M, N, A.indptr, A.indices, A.data, v) else: fmt = A.format A = scale_rows(csr_matrix(A), v).asformat(fmt) return A
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scale_columns(A, v, copy=True): """Scale the sparse columns of a matrix. Parameters A : sparse matrix Sparse matrix with N rows v : array_like Array of N scales copy : {True,False} - If copy=True, then the matrix is copied to a new and different return matrix (e.g. B=scale_columns(A,v)) - If copy=False, then the matrix is overwritten deeply (e.g. scale_columns(A,v,copy=False) overwrites A) Returns ------- A : sparse matrix Scaled sparse matrix in original format See Also -------- scipy.sparse._sparsetools.csr_scale_columns, scale_rows Notes ----- - if A is a csc_matrix, the transpose A.T is passed to scale_rows - if A is not csr, csc, or bsr, it is converted to csr and sent to scale_rows Examples -------- [[ 10. -5. 0. 0.] [ -5. 10. -5. 0.] [ 0. -5. 10. -5.] [ 0. 0. -5. 10.] [ 0. 0. 0. -5.]] """
v = np.ravel(v) M, N = A.shape if not isspmatrix(A): raise ValueError('scale columns needs a sparse matrix') if N != len(v): raise ValueError('scale vector has incompatible shape') if copy: A = A.copy() A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype)) else: v = np.asarray(v, dtype=A.dtype) if isspmatrix_csr(A): csr_scale_columns(M, N, A.indptr, A.indices, A.data, v) elif isspmatrix_bsr(A): R, C = A.blocksize bsr_scale_columns(int(M/R), int(N/C), R, C, A.indptr, A.indices, np.ravel(A.data), v) elif isspmatrix_csc(A): pyamg.amg_core.csc_scale_columns(M, N, A.indptr, A.indices, A.data, v) else: fmt = A.format A = scale_columns(csr_matrix(A), v).asformat(fmt) return A
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def type_prep(upcast_type, varlist): """Upcast variables to a type. Loop over all elements of varlist and convert them to upcasttype are wrapped into (1,0) arrays. This is desirable when passing the numpy complex data type to C routines and complex scalars aren't handled correctly Parameters upcast_type : data type e.g. complex, float64 or complex128 varlist : list list may contain arrays, mat's, sparse matrices, or scalars the elements may be float, int or complex Returns ------- Returns upcast-ed varlist to upcast_type Notes ----- Useful when harmonizing the types of variables, such as if A and b are complex, but x,y and z are not. Examples -------- """
varlist = to_type(upcast_type, varlist) for i in range(len(varlist)): if np.isscalar(varlist[i]): varlist[i] = np.array([varlist[i]]) return varlist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_type(upcast_type, varlist): """Loop over all elements of varlist and convert them to upcasttype. Parameters upcast_type : data type e.g. complex, float64 or complex128 varlist : list list may contain arrays, mat's, sparse matrices, or scalars the elements may be float, int or complex Returns ------- Returns upcast-ed varlist to upcast_type Notes ----- Useful when harmonizing the types of variables, such as if A and b are complex, but x,y and z are not. Examples -------- """
# convert_type = type(np.array([0], upcast_type)[0]) for i in range(len(varlist)): # convert scalars to complex if np.isscalar(varlist[i]): varlist[i] = np.array([varlist[i]], upcast_type)[0] else: # convert sparse and dense mats to complex try: if varlist[i].dtype != upcast_type: varlist[i] = varlist[i].astype(upcast_type) except AttributeError: warn('Failed to cast in to_type') pass return varlist
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_block_diag(A, blocksize, inv_flag=True): """Return the block diagonal of A, in array form. Parameters A : csr_matrix assumed to be square blocksize : int square block size for the diagonal inv_flag : bool if True, return the inverse of the block diagonal Returns ------- block_diag : array block diagonal of A in array form, array size is (A.shape[0]/blocksize, blocksize, blocksize) Examples -------- [[[ 0. 1.] [ 6. 7.]] <BLANKLINE> [[ 14. 15.] [ 20. 21.]] <BLANKLINE> [[ 28. 29.] [ 34. 35.]]] """
if not isspmatrix(A): raise TypeError('Expected sparse matrix') if A.shape[0] != A.shape[1]: raise ValueError("Expected square matrix") if sp.mod(A.shape[0], blocksize) != 0: raise ValueError("blocksize and A.shape must be compatible") # If the block diagonal of A already exists, return that if hasattr(A, 'block_D_inv') and inv_flag: if (A.block_D_inv.shape[1] == blocksize) and\ (A.block_D_inv.shape[2] == blocksize) and \ (A.block_D_inv.shape[0] == int(A.shape[0]/blocksize)): return A.block_D_inv elif hasattr(A, 'block_D') and (not inv_flag): if (A.block_D.shape[1] == blocksize) and\ (A.block_D.shape[2] == blocksize) and \ (A.block_D.shape[0] == int(A.shape[0]/blocksize)): return A.block_D # Convert to BSR if not isspmatrix_bsr(A): A = bsr_matrix(A, blocksize=(blocksize, blocksize)) if A.blocksize != (blocksize, blocksize): A = A.tobsr(blocksize=(blocksize, blocksize)) # Peel off block diagonal by extracting block entries from the now BSR # matrix A A = A.asfptype() block_diag = sp.zeros((int(A.shape[0]/blocksize), blocksize, blocksize), dtype=A.dtype) AAIJ = (sp.arange(1, A.indices.shape[0]+1), A.indices, A.indptr) shape = (int(A.shape[0]/blocksize), int(A.shape[0]/blocksize)) diag_entries = csr_matrix(AAIJ, shape=shape).diagonal() diag_entries -= 1 nonzero_mask = (diag_entries != -1) diag_entries = diag_entries[nonzero_mask] if diag_entries.shape != (0,): block_diag[nonzero_mask, :, :] = A.data[diag_entries, :, :] if inv_flag: # Invert each block if block_diag.shape[1] < 7: # This specialized routine lacks robustness for large matrices pyamg.amg_core.pinv_array(block_diag.ravel(), block_diag.shape[0], block_diag.shape[1], 'T') else: pinv_array(block_diag) A.block_D_inv = block_diag else: A.block_D = block_diag return block_diag
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def amalgamate(A, blocksize): """Amalgamate matrix A. Parameters A : csr_matrix Matrix to amalgamate blocksize : int blocksize to use while amalgamating Returns ------- A_amal : csr_matrix Amalgamated matrix A, first, convert A to BSR with square blocksize and then return a CSR matrix of ones using the resulting BSR indptr and indices Notes ----- inverse operation of UnAmal for square matrices Examples -------- matrix([[1, 0, 2, 0], [0, 3, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]) matrix([[ 1., 1.], [ 0., 0.]]) """
if blocksize == 1: return A elif sp.mod(A.shape[0], blocksize) != 0: raise ValueError("Incompatible blocksize") A = A.tobsr(blocksize=(blocksize, blocksize)) A.sort_indices() subI = (np.ones(A.indices.shape), A.indices, A.indptr) shape = (int(A.shape[0]/A.blocksize[0]), int(A.shape[1]/A.blocksize[1])) return csr_matrix(subI, shape=shape)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def UnAmal(A, RowsPerBlock, ColsPerBlock): """Unamalgamate a CSR A with blocks of 1's. This operation is equivalent to replacing each entry of A with ones(RowsPerBlock, ColsPerBlock), i.e., this is equivalent to setting all of A's nonzeros to 1 and then doing a Kronecker product between A and ones(RowsPerBlock, ColsPerBlock). Parameters A : csr_matrix Amalgamted matrix RowsPerBlock : int Give A blocks of size (RowsPerBlock, ColsPerBlock) ColsPerBlock : int Give A blocks of size (RowsPerBlock, ColsPerBlock) Returns ------- A : bsr_matrix Returns A.data[:] = 1, followed by a Kronecker product of A and ones(RowsPerBlock, ColsPerBlock) Examples -------- matrix([[1, 0, 2], [0, 0, 3], [4, 5, 6]]) matrix([[ 1., 1., 0., 0., 1., 1.], [ 1., 1., 0., 0., 1., 1.], [ 0., 0., 0., 0., 1., 1.], [ 0., 0., 0., 0., 1., 1.], [ 1., 1., 1., 1., 1., 1.], [ 1., 1., 1., 1., 1., 1.]]) """
data = np.ones((A.indices.shape[0], RowsPerBlock, ColsPerBlock)) blockI = (data, A.indices, A.indptr) shape = (RowsPerBlock*A.shape[0], ColsPerBlock*A.shape[1]) return bsr_matrix(blockI, shape=shape)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_table(table, title='', delim='|', centering='center', col_padding=2, header=True, headerchar='-'): """Print a table from a list of lists representing the rows of a table. Parameters table : list list of lists, e.g. a table with 3 columns and 2 rows could be [ ['0,0', '0,1', '0,2'], ['1,0', '1,1', '1,2'] ] title : string Printed centered above the table delim : string character to delimit columns centering : {'left', 'right', 'center'} chooses justification for columns col_padding : int number of blank spaces to add to each column header : {True, False} Does the first entry of table contain column headers? headerchar : {string} character to separate column headers from rest of table Returns ------- string representing table that's ready to be printed Notes ----- The string for the table will have correctly justified columns with extra padding added into each column entry to ensure columns align. The characters to delimit the columns can be user defined. This should be useful for printing convergence data from tests. Examples -------- """
table_str = '\n' # sometimes, the table will be passed in as (title, table) if isinstance(table, tuple): title = table[0] table = table[1] # Calculate each column's width colwidths = [] for i in range(len(table)): # extend colwidths for row i for k in range(len(table[i]) - len(colwidths)): colwidths.append(-1) # Update colwidths if table[i][j] is wider than colwidth[j] for j in range(len(table[i])): if len(table[i][j]) > colwidths[j]: colwidths[j] = len(table[i][j]) # Factor in extra column padding for i in range(len(colwidths)): colwidths[i] += col_padding # Total table width ttwidth = sum(colwidths) + len(delim)*(len(colwidths)-1) # Print Title if len(title) > 0: title = title.split("\n") for i in range(len(title)): table_str += str.center(title[i], ttwidth) + '\n' table_str += "\n" # Choose centering scheme centering = centering.lower() if centering == 'center': centering = str.center if centering == 'right': centering = str.rjust if centering == 'left': centering = str.ljust if header: # Append Column Headers for elmt, elmtwidth in zip(table[0], colwidths): table_str += centering(str(elmt), elmtwidth) + delim if table[0] != []: table_str = table_str[:-len(delim)] + '\n' # Append Header Separator # Total Column Width Total Col Delimiter Widths if len(headerchar) == 0: headerchar = ' ' table_str += headerchar *\ int(sp.ceil(float(ttwidth)/float(len(headerchar)))) + '\n' table = table[1:] for row in table: for elmt, elmtwidth in zip(row, colwidths): table_str += centering(str(elmt), elmtwidth) + delim if row != []: table_str = table_str[:-len(delim)] + '\n' else: table_str += '\n' return table_str
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def relaxation_as_linear_operator(method, A, b): """Create a linear operator that applies a relaxation method for the given right-hand-side. Parameters methods : {tuple or string} Relaxation descriptor: Each tuple must be of the form ('method','opts') where 'method' is the name of a supported smoother, e.g., gauss_seidel, and 'opts' a dict of keyword arguments to the smoother, e.g., opts = {'sweep':symmetric}. If string, must be that of a supported smoother, e.g., gauss_seidel. Returns ------- linear operator that applies the relaxation method to a vector for a fixed right-hand-side, b. Notes ----- This method is primarily used to improve B during the aggregation setup phase. Here b = 0, and each relaxation call can improve the quality of B, especially near the boundaries. Examples -------- """
from pyamg import relaxation from scipy.sparse.linalg.interface import LinearOperator import pyamg.multilevel def unpack_arg(v): if isinstance(v, tuple): return v[0], v[1] else: return v, {} # setup variables accepted_methods = ['gauss_seidel', 'block_gauss_seidel', 'sor', 'gauss_seidel_ne', 'gauss_seidel_nr', 'jacobi', 'block_jacobi', 'richardson', 'schwarz', 'strength_based_schwarz', 'jacobi_ne'] b = np.array(b, dtype=A.dtype) fn, kwargs = unpack_arg(method) lvl = pyamg.multilevel_solver.level() lvl.A = A # Retrieve setup call from relaxation.smoothing for this relaxation method if not accepted_methods.__contains__(fn): raise NameError("invalid relaxation method: ", fn) try: setup_smoother = getattr(relaxation.smoothing, 'setup_' + fn) except NameError: raise NameError("invalid presmoother method: ", fn) # Get relaxation routine that takes only (A, x, b) as parameters relax = setup_smoother(lvl, **kwargs) # Define matvec def matvec(x): xcopy = x.copy() relax(A, xcopy, b) return xcopy return LinearOperator(A.shape, matvec, dtype=A.dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scale_T(T, P_I, I_F): """Scale T with a block diagonal matrix. Helper function that scales T with a right multiplication by a block diagonal inverse, so that T is the identity at C-node rows. Parameters T : {bsr_matrix} Tentative prolongator, with square blocks in the BSR data structure, and a non-overlapping block-diagonal structure P_I : {bsr_matrix} Interpolation operator that carries out only simple injection from the coarse grid to fine grid Cpts nodes I_F : {bsr_matrix} Identity operator on Fpts, i.e., the action of this matrix zeros out entries in a vector at all Cpts, leaving Fpts untouched Returns ------- T : {bsr_matrix} Tentative prolongator scaled to be identity at C-pt nodes Examples -------- matrix([[ 2. , 0. , 0. ], [ 1. , 0. , 0. ], [ 0. , 1. , 0. ], [ 0. , 0.5, 0. ], [ 0. , 0. , 4. ], [ 0. , 0. , 1. ]]) Notes ----- This routine is primarily used in pyamg.aggregation.smooth.energy_prolongation_smoother, where it is used to generate a suitable initial guess for the energy-minimization process, when root-node style SA is used. This function, scale_T, takes an existing tentative prolongator and ensures that it injects from the coarse-grid to fine-grid root-nodes. When generating initial guesses for root-node style prolongation operators, this function is usually called after pyamg.uti.utils.filter_operator This function assumes that the eventual coarse-grid nullspace vectors equal coarse-grid injection applied to the fine-grid nullspace vectors. """
if not isspmatrix_bsr(T): raise TypeError('Expected BSR matrix T') elif T.blocksize[0] != T.blocksize[1]: raise TypeError('Expected BSR matrix T with square blocks') if not isspmatrix_bsr(P_I): raise TypeError('Expected BSR matrix P_I') elif P_I.blocksize[0] != P_I.blocksize[1]: raise TypeError('Expected BSR matrix P_I with square blocks') if not isspmatrix_bsr(I_F): raise TypeError('Expected BSR matrix I_F') elif I_F.blocksize[0] != I_F.blocksize[1]: raise TypeError('Expected BSR matrix I_F with square blocks') if (I_F.blocksize[0] != P_I.blocksize[0]) or\ (I_F.blocksize[0] != T.blocksize[0]): raise TypeError('Expected identical blocksize in I_F, P_I and T') # Only do if we have a non-trivial coarse-grid if P_I.nnz > 0: # Construct block diagonal inverse D D = P_I.T*T if D.nnz > 0: # changes D in place pinv_array(D.data) # Scale T to be identity at root-nodes T = T*D # Ensure coarse-grid injection T = I_F*T + P_I return T
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_BtBinv(B, C): """Create block inverses. Helper function that creates inv(B_i.T B_i) for each block row i in C, where B_i is B restricted to the sparsity pattern of block row i. Parameters B : {array} (M,k) array, typically near-nullspace modes for coarse grid, i.e., B_c. C : {csr_matrix, bsr_matrix} Sparse NxM matrix, whose sparsity structure (i.e., matrix graph) is used to determine BtBinv. Returns ------- BtBinv : {array} BtBinv[i] = inv(B_i.T B_i), where B_i is B restricted to the nonzero pattern of block row i in C. Examples -------- array([[[ 1. ]], <BLANKLINE> [[ 1. ]], <BLANKLINE> [[ 0.25]], <BLANKLINE> [[ 0.25]]]) Notes ----- The principal calling routines are aggregation.smooth.energy_prolongation_smoother, and util.utils.filter_operator. BtBinv is used in the prolongation smoothing process that incorporates B into the span of prolongation with row-wise projection operators. It is these projection operators that BtBinv is part of. """
# C supplies only the sparsity graph; B supplies the (near-)nullspace modes
if not isspmatrix_bsr(C) and not isspmatrix_csr(C):
    raise TypeError('Expected bsr_matrix or csr_matrix for C')
if C.shape[1] != B.shape[0]:
    raise TypeError('Expected matching dimensions such that C*B')

# Problem parameters: CSR is treated as BSR with 1x1 blocks
if isspmatrix_bsr(C):
    ColsPerBlock = C.blocksize[1]
    RowsPerBlock = C.blocksize[0]
else:
    ColsPerBlock = 1
    RowsPerBlock = 1
Ncoarse = C.shape[1]
Nfine = C.shape[0]
NullDim = B.shape[1]
Nnodes = int(Nfine/RowsPerBlock)

# Construct BtB: one NullDim x NullDim Gram matrix per block row of C.
# Because B_i^H B_i is Hermitian, only the upper-triangular pairwise
# products conj(B[:,i]) * B[:,j], j >= i, are precomputed in Bsq.
BtBinv = np.zeros((Nnodes, NullDim, NullDim), dtype=B.dtype)
BsqCols = sum(range(NullDim+1))
Bsq = np.zeros((Ncoarse, BsqCols), dtype=B.dtype)
counter = 0
for i in range(NullDim):
    for j in range(i, NullDim):
        Bsq[:, counter] = np.conjugate(np.ravel(np.asarray(B[:, i]))) * \
            np.ravel(np.asarray(B[:, j]))
        counter = counter + 1

# This specialized C-routine calculates (B.T B) for each row using Bsq
pyamg.amg_core.calc_BtB(NullDim, Nnodes, ColsPerBlock,
                        np.ravel(np.asarray(Bsq)),
                        BsqCols, np.ravel(np.asarray(BtBinv)),
                        C.indptr, C.indices)

# Invert each block of BtBinv, noting that amg_core.calc_BtB(...) returns
# values in column-major form, thus necessitating the deep transpose
#   This is the old call to a specialized routine, but lacks robustness
#   pyamg.amg_core.pinv_array(np.ravel(BtBinv), Nnodes, NullDim, 'F')
BtBinv = BtBinv.transpose((0, 2, 1)).copy()
pinv_array(BtBinv)

return BtBinv
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eliminate_diag_dom_nodes(A, C, theta=1.02): r"""Eliminate diagonally dominance. Helper function that eliminates diagonally dominant rows and cols from A in the separate matrix C. This is useful because it eliminates nodes in C which we don't want coarsened. These eliminated nodes in C just become the rows and columns of the identity. Parameters A : {csr_matrix, bsr_matrix} Sparse NxN matrix C : {csr_matrix} Sparse MxM matrix, where M is the number of nodes in A. M=N if A is CSR or is BSR with blocksize 1. Otherwise M = N/blocksize. theta : {float} determines diagonal dominance threshhold Returns ------- C : {csr_matrix} C updated such that the rows and columns corresponding to diagonally dominant rows in A have been eliminated and replaced with rows and columns of the identity. Notes ----- Diagonal dominance is defined as :math:`\| (e_i, A) - a_{ii} \|_1 < \\theta a_{ii}` that is, the 1-norm of the off diagonal elements in row i must be less than theta times the diagonal element. Examples -------- matrix([[ 1., 0., 0., 0.], [ 0., 2., -1., 0.], [ 0., -1., 2., 0.], [ 0., 0., 0., 1.]]) """
# Find the diagonally dominant rows in A. A_abs = A.copy() A_abs.data = np.abs(A_abs.data) D_abs = get_diagonal(A_abs, norm_eq=0, inv=False) diag_dom_rows = (D_abs > (theta*(A_abs*np.ones((A_abs.shape[0],), dtype=A_abs) - D_abs))) # Account for BSR matrices and translate diag_dom_rows from dofs to nodes bsize = blocksize(A_abs) if bsize > 1: diag_dom_rows = np.array(diag_dom_rows, dtype=int) diag_dom_rows = diag_dom_rows.reshape(-1, bsize) diag_dom_rows = np.sum(diag_dom_rows, axis=1) diag_dom_rows = (diag_dom_rows == bsize) # Replace these rows/cols in # C with rows/cols of the identity. Id = eye(C.shape[0], C.shape[1], format='csr') Id.data[diag_dom_rows] = 0.0 C = Id * C * Id Id.data[diag_dom_rows] = 1.0 Id.data[np.where(diag_dom_rows == 0)[0]] = 0.0 C = C + Id del A_abs return C
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_diagonal(S): """Remove the diagonal of the matrix S. Parameters S : csr_matrix Square matrix Returns ------- S : csr_matrix Strength matrix with the diagonal removed Notes ----- This is needed by all the splitting routines which operate on matrix graphs with an assumed zero diagonal Examples -------- matrix([[ 0., -1., 0., 0.], [-1., 0., -1., 0.], [ 0., -1., 0., -1.], [ 0., 0., -1., 0.]]) """
if not isspmatrix_csr(S): raise TypeError('expected csr_matrix') if S.shape[0] != S.shape[1]: raise ValueError('expected square matrix, shape=%s' % (S.shape,)) S = coo_matrix(S) mask = S.row != S.col S.row = S.row[mask] S.col = S.col[mask] S.data = S.data[mask] return S.tocsr()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scale_rows_by_largest_entry(S): """Scale each row in S by it's largest in magnitude entry. Parameters S : csr_matrix Returns ------- S : csr_matrix Each row has been scaled by it's largest in magnitude entry Examples -------- matrix([[ 0.4, 1. , 0. , 0. ], [-0.5, 1. , -0.5, 0. ], [ 0. , -0.5, 1. , -0.5], [ 0. , 0. , -0.5, 1. ]]) """
if not isspmatrix_csr(S): raise TypeError('expected csr_matrix') # Scale S by the largest magnitude entry in each row largest_row_entry = np.zeros((S.shape[0],), dtype=S.dtype) pyamg.amg_core.maximum_row_value(S.shape[0], largest_row_entry, S.indptr, S.indices, S.data) largest_row_entry[largest_row_entry != 0] =\ 1.0 / largest_row_entry[largest_row_entry != 0] S = scale_rows(S, largest_row_entry, copy=True) return S
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def levelize_strength_or_aggregation(to_levelize, max_levels, max_coarse): """Turn parameter into a list per level. Helper function to preprocess the strength and aggregation parameters passed to smoothed_aggregation_solver and rootnode_solver. Parameters to_levelize : {string, tuple, list} Parameter to preprocess, i.e., levelize and convert to a level-by-level list such that entry i specifies the parameter at level i max_levels : int Defines the maximum number of levels considered max_coarse : int Defines the maximum coarse grid size allowed Returns ------- (max_levels, max_coarse, to_levelize) : tuple New max_levels and max_coarse values and then the parameter list to_levelize, such that entry i specifies the parameter choice at level i. max_levels and max_coarse are returned, because they may be updated if strength or aggregation set a predefined coarsening and possibly change these values. Notes -------- This routine is needed because the user will pass in a parameter option such as smooth='jacobi', or smooth=['jacobi', None], and this option must be "levelized", or converted to a list of length max_levels such that entry [i] in that list is the parameter choice for level i. The parameter choice in to_levelize can be a string, tuple or list. If it is a string or tuple, then that option is assumed to be the parameter setting at every level. If to_levelize is inititally a list, if the length of the list is less than max_levels, the last entry in the list defines that parameter for all subsequent levels. Examples -------- (4, 10, ['evolution', 'classical', 'classical']) """
# Case 1: a tuple is either ('predefined', {'C': ...}), which fixes a
# single user-supplied coarsening, or one (method, opts) pair to be
# repeated on every level.
if isinstance(to_levelize, tuple):
    if to_levelize[0] == 'predefined':
        to_levelize = [to_levelize]
        # one predefined operator implies exactly two levels; disable the
        # coarse-size stopping criterion so the hierarchy isn't cut short
        max_levels = 2
        max_coarse = 0
    else:
        to_levelize = [to_levelize for i in range(max_levels-1)]
# Case 2: a plain string names the method used on every level
elif isinstance(to_levelize, str):
    if to_levelize == 'predefined':
        raise ValueError('predefined to_levelize requires a user-provided\
                          CSR matrix representing strength or aggregation\
                          i.e., (\'predefined\', {\'C\' : CSR_MAT}).')
    else:
        to_levelize = [to_levelize for i in range(max_levels-1)]
# Case 3: a list gives per-level choices; pad short lists by repeating
# the last entry
elif isinstance(to_levelize, list):
    if isinstance(to_levelize[-1], tuple) and\
       (to_levelize[-1][0] == 'predefined'):
        # to_levelize is a list that ends with a predefined operator
        max_levels = len(to_levelize) + 1
        max_coarse = 0
    else:
        # to_levelize a list that __doesn't__ end with 'predefined'
        if len(to_levelize) < max_levels-1:
            mlz = max_levels - 1 - len(to_levelize)
            toext = [to_levelize[-1] for i in range(mlz)]
            to_levelize.extend(toext)
# Case 4: None disables the operation on every level
elif to_levelize is None:
    to_levelize = [(None, {}) for i in range(max_levels-1)]
else:
    raise ValueError('invalid to_levelize')

return max_levels, max_coarse, to_levelize
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def levelize_smooth_or_improve_candidates(to_levelize, max_levels): """Turn parameter in to a list per level. Helper function to preprocess the smooth and improve_candidates parameters passed to smoothed_aggregation_solver and rootnode_solver. Parameters to_levelize : {string, tuple, list} Parameter to preprocess, i.e., levelize and convert to a level-by-level list such that entry i specifies the parameter at level i max_levels : int Defines the maximum number of levels considered Returns ------- to_levelize : list The parameter list such that entry i specifies the parameter choice at level i. Notes -------- This routine is needed because the user will pass in a parameter option such as smooth='jacobi', or smooth=['jacobi', None], and this option must be "levelized", or converted to a list of length max_levels such that entry [i] in that list is the parameter choice for level i. The parameter choice in to_levelize can be a string, tuple or list. If it is a string or tuple, then that option is assumed to be the parameter setting at every level. If to_levelize is inititally a list, if the length of the list is less than max_levels, the last entry in the list defines that parameter for all subsequent levels. Examples -------- ['gauss_seidel', None, None, None] """
if isinstance(to_levelize, tuple) or isinstance(to_levelize, str): to_levelize = [to_levelize for i in range(max_levels)] elif isinstance(to_levelize, list): if len(to_levelize) < max_levels: mlz = max_levels - len(to_levelize) toext = [to_levelize[-1] for i in range(mlz)] to_levelize.extend(toext) elif to_levelize is None: to_levelize = [(None, {}) for i in range(max_levels)] return to_levelize
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_matrix_columns(A, theta): """Filter each column of A with tol. i.e., drop all entries in column k where abs(A[i,k]) < tol max( abs(A[:,k]) ) Parameters A : sparse_matrix theta : float In range [0,1) and defines drop-tolerance used to filter the columns of A Returns ------- A_filter : sparse_matrix Each column has been filtered by dropping all entries where abs(A[i,k]) < tol max( abs(A[:,k]) ) Examples -------- matrix([[ 0. , 1. , 0. ], [-0.5, 1. , -0.5], [ 0. , 0. , 1. ], [ 0. , 0. , -0.5]]) """
if not isspmatrix(A):
    raise ValueError("Sparse matrix input needed")

# Remember the input format (and blocksize for BSR) so the filtered
# result can be handed back in the caller's format
if isspmatrix_bsr(A):
    blocksize = A.blocksize
Aformat = A.format

if (theta < 0) or (theta >= 1.0):
    raise ValueError("theta must be in [0,1)")

# Apply drop-tolerance to each column of A, which is most easily
# accessed by converting to CSC.  We apply the drop-tolerance with
# amg_core.classical_strength_of_connection(), which ignores
# diagonal entries, thus necessitating the trick where we add
# A.shape[1] to each of the column indices
A = A.copy().tocsc()
A_filter = A.copy()
A.indices += A.shape[1]
A_filter.indices += A.shape[1]
# classical_strength_of_connection takes an absolute value internally
pyamg.amg_core.classical_strength_of_connection_abs(
    A.shape[1], theta, A.indptr, A.indices, A.data,
    A_filter.indptr, A_filter.indices, A_filter.data)
# Undo the index shift; only the first indptr[-1] entries of the kernel's
# output arrays are valid
A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[1]
A_filter = csc_matrix((A_filter.data[:A_filter.indptr[-1]],
                       A_filter.indices[:A_filter.indptr[-1]],
                       A_filter.indptr), shape=A_filter.shape)
del A

# Convert back to the caller's original format
if Aformat == 'bsr':
    A_filter = A_filter.tobsr(blocksize)
else:
    A_filter = A_filter.asformat(Aformat)

return A_filter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_matrix_rows(A, theta): """Filter each row of A with tol. i.e., drop all entries in row k where abs(A[i,k]) < tol max( abs(A[:,k]) ) Parameters A : sparse_matrix theta : float In range [0,1) and defines drop-tolerance used to filter the row of A Returns ------- A_filter : sparse_matrix Each row has been filtered by dropping all entries where abs(A[i,k]) < tol max( abs(A[:,k]) ) Examples -------- matrix([[ 0. , -0.5, 0. , 0. ], [ 1. , 1. , 0. , 0. ], [ 0. , -0.5, 1. , -0.5]]) """
if not isspmatrix(A):
    raise ValueError("Sparse matrix input needed")

# Remember the input format (and blocksize for BSR) so the filtered
# result can be handed back in the caller's format
if isspmatrix_bsr(A):
    blocksize = A.blocksize
Aformat = A.format
# NOTE: for CSR input, tocsr() returns A itself, so the index shift below
# temporarily mutates the caller's matrix; it is restored before returning
A = A.tocsr()
if (theta < 0) or (theta >= 1.0):
    raise ValueError("theta must be in [0,1)")

# Apply drop-tolerance to each row of A. We apply the drop-tolerance with
# amg_core.classical_strength_of_connection(), which ignores diagonal
# entries, thus necessitating the trick where we add A.shape[0] to each of
# the row indices
A_filter = A.copy()
A.indices += A.shape[0]
A_filter.indices += A.shape[0]
# classical_strength_of_connection takes an absolute value internally
pyamg.amg_core.classical_strength_of_connection_abs(
    A.shape[0], theta, A.indptr, A.indices, A.data,
    A_filter.indptr, A_filter.indices, A_filter.data)
# Undo the index shift; only the first indptr[-1] entries of the kernel's
# output arrays are valid
A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[0]
A_filter = csr_matrix((A_filter.data[:A_filter.indptr[-1]],
                       A_filter.indices[:A_filter.indptr[-1]],
                       A_filter.indptr), shape=A_filter.shape)

if Aformat == 'bsr':
    A_filter = A_filter.tobsr(blocksize)
else:
    A_filter = A_filter.asformat(Aformat)

# restore A's indices (A may alias the caller's CSR matrix)
A.indices -= A.shape[0]
return A_filter
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def truncate_rows(A, nz_per_row): """Truncate the rows of A by keeping only the largest in magnitude entries in each row. Parameters A : sparse_matrix nz_per_row : int Determines how many entries in each row to keep Returns ------- A : sparse_matrix Each row has been truncated to at most nz_per_row entries Examples -------- matrix([[-0.24, -0.5 , 0. , 0. ], [ 1. , -1.1 , 0. , 0. ], [ 0. , 0. , 1. , 0.5 ]]) """
if not isspmatrix(A):
    raise ValueError("Sparse matrix input needed")

# Remember the input format (and blocksize for BSR) so the truncated
# result can be handed back in the caller's format
if isspmatrix_bsr(A):
    blocksize = A.blocksize
if isspmatrix_csr(A):
    A = A.copy()  # don't modify A in-place
Aformat = A.format
A = A.tocsr()
nz_per_row = int(nz_per_row)

# Truncate rows of A, and then convert A back to original format
pyamg.amg_core.truncate_rows_csr(A.shape[0], nz_per_row, A.indptr,
                                 A.indices, A.data)

# the kernel zeros dropped entries rather than removing them
A.eliminate_zeros()
if Aformat == 'bsr':
    A = A.tobsr(blocksize)
else:
    A = A.asformat(Aformat)

return A
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def require_auth(function): """ A decorator that wraps the passed in function and raises exception if access token is missing """
@functools.wraps(function) def wrapper(self, *args, **kwargs): if not self.access_token(): raise MissingAccessTokenError return function(self, *args, **kwargs) return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def randomizable(function): """ A decorator which randomizes requests if needed """
@functools.wraps(function) def wrapper(self, *args, **kwargs): if self.randomize: self.randomize_headers() return function(self, *args, **kwargs) return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_profile(self): """ Get my own profile """
r = self._session.get(API_URL + "/logins/me") r.raise_for_status() return r.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_calendar(self, listing_id, starting_month=datetime.datetime.now().month, starting_year=datetime.datetime.now().year, calendar_months=12): """ Get availability calendar for a given listing """
params = { 'year': str(starting_year), 'listing_id': str(listing_id), '_format': 'with_conditions', 'count': str(calendar_months), 'month': str(starting_month) } r = self._session.get(API_URL + "/calendar_months", params=params) r.raise_for_status() return r.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_reviews(self, listing_id, offset=0, limit=20): """ Get reviews for a given listing """
params = { '_order': 'language_country', 'listing_id': str(listing_id), '_offset': str(offset), 'role': 'all', '_limit': str(limit), '_format': 'for_mobile_client', } print(self._session.headers) r = self._session.get(API_URL + "/reviews", params=params) r.raise_for_status() return r.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_listing_calendar(self, listing_id, starting_date=datetime.datetime.now(), calendar_months=6): """ Get host availability calendar for a given listing """
params = { '_format': 'host_calendar_detailed' } starting_date_str = starting_date.strftime("%Y-%m-%d") ending_date_str = ( starting_date + datetime.timedelta(days=30)).strftime("%Y-%m-%d") r = self._session.get(API_URL + "/calendars/{}/{}/{}".format( str(listing_id), starting_date_str, ending_date_str), params=params) r.raise_for_status() return r.json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_gui_and_backend(): """Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib'] # WARNING: this assumes matplotlib 1.1 or newer!! backend = matplotlib.rcParams['backend'] # In this case, we need to find what the appropriate gui selection call # should be for IPython, so we can activate inputhook accordingly gui = backend2gui.get(backend, None) return gui, backend
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_interactive_backend(backend): """ Check if backend is interactive """
matplotlib = sys.modules['matplotlib'] from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport if backend in interactive_bk: return True elif backend in non_interactive_bk: return False else: return matplotlib.is_interactive()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def activate_matplotlib(enable_gui_function): """Set interactive to True for interactive backends. enable_gui_function - Function which enables gui, should be run in the main thread. """
matplotlib = sys.modules['matplotlib']

gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
    # hook the GUI event loop integration on the main thread
    enable_gui_function(gui)
    if not matplotlib.is_interactive():
        sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
    matplotlib.interactive(True)
else:
    if matplotlib.is_interactive():
        sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
    matplotlib.interactive(False)

# Patch matplotlib.use()/is_interactive so that later backend switches
# keep the interactive state and GUI integration in sync
patch_use(enable_gui_function)
patch_is_interactive()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def flag_calls(func): """Wrap a function to detect and flag when it gets called. This is a decorator which takes a function and wraps it in a function with a 'called' attribute. wrapper.called is initialized to False. The wrapper.called attribute is set to False right before each call to the wrapped function, so if the call fails it remains False. After the call completes, wrapper.called is set to True and the output is returned. Testing for truth in wrapper.called allows you to determine if a call to func() was attempted and succeeded."""
# don't wrap twice if hasattr(func, 'called'): return func def wrapper(*args, **kw): wrapper.called = False out = func(*args, **kw) wrapper.called = True return out wrapper.called = False wrapper.__doc__ = func.__doc__ return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_valid_py_file(path):
    '''
    Checks whether the file can be read by the coverage module. This is
    especially needed for .pyx files and .py files with syntax errors.

    :param path: file system path to test
    :return: True only for an existing, non-.pyx file whose contents
        compile as Python source
    '''
    import os

    is_valid = False
    # .pyx (Cython) sources can never be read by coverage
    if os.path.isfile(path) and not os.path.splitext(path)[1] == '.pyx':
        try:
            # a successful compile proves the file is readable and is
            # syntactically valid Python
            with open(path, 'rb') as f:
                compile(f.read(), path, 'exec')
            is_valid = True
        except Exception:
            # BUG FIX: narrowed from a bare `except:` so that
            # KeyboardInterrupt/SystemExit are no longer swallowed
            pass
    return is_valid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __get_hooks_for_dll(self, event): """ Get the requested API hooks for the current DLL. Used by L{__hook_dll} and L{__unhook_dll}. """
result = [] if self.__apiHooks: path = event.get_module().get_filename() if path: lib_name = PathOperations.pathname_to_filename(path).lower() for hook_lib, hook_api_list in compat.iteritems(self.__apiHooks): if hook_lib == lib_name: result.extend(hook_api_list) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def event(self, event): """ Forwards events to the corresponding instance of your event handler for this process. If you subclass L{EventSift} and reimplement this method, no event will be forwarded at all unless you call the superclass implementation. If your filtering is based on the event type, there's a much easier way to do it: just implement a handler for it. """
eventCode = event.get_event_code()
pid = event.get_pid()
# Look up (or lazily create) the per-process handler instance.
handler = self.forward.get(pid, None)
if handler is None:
    handler = self.cls(*self.argv, **self.argd)
    # Don't cache a handler for a process that is already exiting --
    # it would never be used again.
    if eventCode != win32.EXIT_PROCESS_DEBUG_EVENT:
        self.forward[pid] = handler
elif eventCode == win32.EXIT_PROCESS_DEBUG_EVENT:
    # Process is gone: drop the cached handler, but still deliver this
    # final event to it below.
    del self.forward[pid]
return handler(event)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_event_handler(self, eventHandler): """ Set the event handler. @warn: This is normally not needed. Use with care! @type eventHandler: L{EventHandler} @param eventHandler: New event handler object, or C{None}. @rtype: L{EventHandler} @return: Previous event handler object, or C{None}. @raise TypeError: The event handler is of an incorrect type. @note: The L{eventHandler} parameter may be any callable Python object (for example a function, or an instance method). However you'll probably find it more convenient to use an instance of a subclass of L{EventHandler} here. """
if eventHandler is not None and not callable(eventHandler): raise TypeError("Event handler must be a callable object") try: wrong_type = issubclass(eventHandler, EventHandler) except TypeError: wrong_type = False if wrong_type: classname = str(eventHandler) msg = "Event handler must be an instance of class %s" msg += "rather than the %s class itself. (Missing parens?)" msg = msg % (classname, classname) raise TypeError(msg) try: previous = self.__eventHandler except AttributeError: previous = None self.__eventHandler = eventHandler return previous
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def should_skip(filename, config, path='/'): """Returns True if the file should be skipped based on the passed in settings."""
for skip_path in config['skip']: if posixpath.abspath(posixpath.join(path, filename)) == posixpath.abspath(skip_path.replace('\\', '/')): return True position = os.path.split(filename) while position[1]: if position[1] in config['skip']: return True position = os.path.split(position[0]) for glob in config['skip_glob']: if fnmatch.fnmatch(filename, glob): return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ismethod(func): '''this function should return the information gathered on a function @param func: this is the function we want to get info on @return a tuple where: 0 = indicates whether the parameter passed is a method or not 1 = a list of classes 'Info', with the info gathered from the function this is a list because when we have methods from java with the same name and different signatures, we actually have many methods, each with its own set of arguments ''' try: if isinstance(func, core.PyFunction): #ok, this is from python, created by jython #print_ ' PyFunction' def getargs(func_code): """Get information about the arguments accepted by a code object. Three things are returned: (args, varargs, varkw), where 'args' is a list of argument names (possibly containing nested lists), and 'varargs' and 'varkw' are the names of the * and ** arguments or None."""
            # NOTE(review): continuation of ismethod(); base indentation
            # reconstructed -- confirm against the original cell.
            nargs = func_code.co_argcount
            names = func_code.co_varnames
            args = list(names[:nargs])
            step = 0

            # Jython exposes the varargs flags in two different places
            # depending on version; presumably CodeFlag is the newer API
            # -- verify against the target Jython release.
            if not hasattr(func_code, 'CO_VARARGS'):
                from org.python.core import CodeFlag  # @UnresolvedImport
                co_varargs_flag = CodeFlag.CO_VARARGS.flag
                co_varkeywords_flag = CodeFlag.CO_VARKEYWORDS.flag
            else:
                co_varargs_flag = func_code.CO_VARARGS
                co_varkeywords_flag = func_code.CO_VARKEYWORDS

            varargs = None
            if func_code.co_flags & co_varargs_flag:
                varargs = func_code.co_varnames[nargs]
                nargs = nargs + 1
            varkw = None
            if func_code.co_flags & co_varkeywords_flag:
                varkw = func_code.co_varnames[nargs]
            return args, varargs, varkw

        args = getargs(func.func_code)
        return 1, [Info(func.func_name, args=args[0], varargs=args[1], kwargs=args[2], doc=func.func_doc)]

    if isinstance(func, core.PyMethod):
        #this is something from java itself, and jython just wrapped it...

        #things to play in func:
        #['__call__', '__class__', '__cmp__', '__delattr__', '__dir__', '__doc__', '__findattr__', '__name__', '_doget', 'im_class',
        #'im_func', 'im_self', 'toString']
        #print_ '    PyMethod'
        #that's the PyReflectedFunction... keep going to get it
        func = func.im_func

    if isinstance(func, PyReflectedFunction):
        #this is something from java itself, and jython just wrapped it...
        #print_ '    PyReflectedFunction'

        infos = []
        # One Info per overload: java methods with the same name but
        # different signatures each appear as an entry in argslist.
        for i in xrange(len(func.argslist)):
            #things to play in func.argslist[i]:

            #'PyArgsCall', 'PyArgsKeywordsCall', 'REPLACE', 'StandardCall', 'args', 'compare', 'compareTo', 'data', 'declaringClass'
            #'flags', 'isStatic', 'matches', 'precedence']

            #print_ '    ', func.argslist[i].data.__class__

            #func.argslist[i].data.__class__ == java.lang.reflect.Method
            if func.argslist[i]:
                met = func.argslist[i].data
                name = met.getName()
                try:
                    ret = met.getReturnType()
                except AttributeError:
                    ret = ''
                parameterTypes = met.getParameterTypes()

                args = []
                for j in xrange(len(parameterTypes)):
                    paramTypesClass = parameterTypes[j]
                    try:
                        try:
                            paramClassName = paramTypesClass.getName()
                        except:
                            paramClassName = paramTypesClass.getName(paramTypesClass)
                    except AttributeError:
                        try:
                            paramClassName = repr(paramTypesClass)  #should be something like <type 'object'>
                            paramClassName = paramClassName.split('\'')[1]
                        except:
                            paramClassName = repr(paramTypesClass)  #just in case something else happens... it will at least be visible
                    #if the parameter equals [C, it means it it a char array, so, let's change it
                    a = format_param_class_name(paramClassName)
                    #a = a.replace('[]','Array')
                    #a = a.replace('Object', 'obj')
                    #a = a.replace('String', 's')
                    #a = a.replace('Integer', 'i')
                    #a = a.replace('Char', 'c')
                    #a = a.replace('Double', 'd')
                    args.append(a)  #so we don't leave invalid code

                info = Info(name, args=args, ret=ret)
                #print_ info.basic_as_str()
                infos.append(info)

        return 1, infos

except Exception:
    # Any reflection failure is reported as a pseudo-method whose doc
    # carries the traceback, so the caller still gets usable output.
    s = StringIO.StringIO()
    traceback.print_exc(file=s)
    return 1, [Info(str('ERROR'), doc=s.getvalue())]

return 0, None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def format_arg(arg):
    '''formats an argument to be shown'''
    # Keep only the simple name after the last package separator, drop the
    # JNI-style ';' terminator, spell arrays out, and lower-case the first letter.
    simple = str(arg).rpartition('.')[-1]
    simple = simple.replace(';', '').replace('[]', 'Array')
    if not simple:
        return simple
    return simple[0].lower() + simple[1:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def start_server(port):
    ''' binds to a port, waits for the debugger to connect '''
    server_sock = socket(AF_INET, SOCK_STREAM)
    server_sock.settimeout(None)

    # SO_REUSEPORT is not available on every platform; fall back to SO_REUSEADDR.
    try:
        from socket import SO_REUSEPORT
        server_sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
    except ImportError:
        server_sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)

    server_sock.bind(('', port))
    pydev_log.info("Bound to port :%s", port)

    try:
        server_sock.listen(1)
        client_sock, _addr = server_sock.accept()
        pydev_log.info("Connection accepted")
        # closing server socket is not necessary but we don't need it
        server_sock.shutdown(SHUT_RDWR)
        server_sock.close()
        return client_sock
    except:
        pydev_log.exception("Could not bind to port: %s\n", port)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def internal_change_variable(dbg, seq, thread_id, frame_id, scope, attr, value):
    ''' Changes the value of a variable '''
    try:
        frame = dbg.find_frame(thread_id, frame_id)
        if frame is None:
            result = None
        else:
            result = pydevd_vars.change_attr_expression(frame, attr, value, dbg)
        xml = "<xml>" + pydevd_xml.var_to_xml(result, "") + "</xml>"
        dbg.writer.add_command(dbg.cmd_factory.make_variable_changed_message(seq, xml))
    except Exception:
        # Report the failure back to the client instead of letting it escape.
        cmd = dbg.cmd_factory.make_error_message(
            seq,
            "Error changing variable attr:%s expression:%s traceback:%s" % (attr, value, get_exception_traceback_str()))
        dbg.writer.add_command(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def internal_get_next_statement_targets(dbg, seq, thread_id, frame_id):
    ''' gets the valid line numbers for use with set next statement '''
    try:
        frame = dbg.find_frame(thread_id, frame_id)
        if frame is None:
            cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id))
            dbg.writer.add_command(cmd)
            return

        code = frame.f_code
        parts = ["<xml>"]
        if hasattr(code, 'co_lnotab'):
            # Walk the line-number table: every second byte is a line increment.
            lineno = code.co_firstlineno
            lnotab = code.co_lnotab
            for delta in itertools.islice(lnotab, 1, len(lnotab), 2):
                # in python 2 elements in co_lnotab are of type str
                lineno = lineno + (delta if isinstance(delta, int) else ord(delta))
                parts.append("<line>%d</line>" % (lineno,))
        else:
            parts.append("<line>%d</line>" % (frame.f_lineno,))
        del frame
        parts.append("</xml>")
        cmd = dbg.cmd_factory.make_get_next_statement_targets_message(seq, "".join(parts))
        dbg.writer.add_command(cmd)
    except:
        cmd = dbg.cmd_factory.make_error_message(seq, "Error resolving frame: %s from thread: %s" % (frame_id, thread_id))
        dbg.writer.add_command(cmd)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def internal_evaluate_expression(dbg, seq, thread_id, frame_id, expression, is_exec, trim_if_too_big, attr_to_set_result):
    ''' gets the value of a variable '''
    try:
        frame = dbg.find_frame(thread_id, frame_id)
        if frame is None:
            result = None
        else:
            result = pydevd_vars.evaluate_expression(dbg, frame, expression, is_exec)
            if attr_to_set_result != "":
                # Optionally store the evaluation result on the requested attribute.
                pydevd_vars.change_attr_expression(frame, attr_to_set_result, expression, dbg, result)
        xml = "<xml>" + pydevd_xml.var_to_xml(result, expression, trim_if_too_big) + "</xml>"
        dbg.writer.add_command(dbg.cmd_factory.make_evaluate_expression_message(seq, xml))
    except:
        exc = get_exception_traceback_str()
        dbg.writer.add_command(dbg.cmd_factory.make_error_message(seq, "Error evaluating expression " + exc))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def internal_get_description(dbg, seq, thread_id, frame_id, expression):
    ''' Fetch the variable description stub from the debug console '''
    try:
        frame = dbg.find_frame(thread_id, frame_id)
        raw_description = pydevd_console.get_description(frame, thread_id, frame_id, expression)
        # URL-quote then XML-escape so the text survives the wire format.
        safe_description = pydevd_xml.make_valid_xml_value(quote(raw_description, '/>_= \t'))
        description_xml = '<xml><var name="" type="" value="%s"/></xml>' % safe_description
        dbg.writer.add_command(dbg.cmd_factory.make_get_description_message(seq, description_xml))
    except:
        exc = get_exception_traceback_str()
        dbg.writer.add_command(dbg.cmd_factory.make_error_message(seq, "Error in fetching description" + exc))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def internal_get_exception_details_json(dbg, request, thread_id, max_frames, set_additional_thread_info=None, iter_visible_frames_info=None):
    ''' Fetch exception details '''
    try:
        response = build_exception_info_response(
            dbg, thread_id, request.seq, set_additional_thread_info, iter_visible_frames_info, max_frames)
    except:
        # Even on failure a response must be sent so the client is not left waiting.
        traceback_str = get_exception_traceback_str()
        response = pydevd_base_schema.build_response(
            request,
            kwargs={'success': False, 'message': traceback_str, 'body': {}})

    dbg.writer.add_command(NetCommand(CMD_RETURN, 0, response, is_json=True))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _on_run(self):
    ''' just loop and write responses '''
    # Writer-thread main loop: pop commands from self.cmdQueue and send them over
    # self.sock until an exit command arrives or the kill flag is observed.

    try:
        while True:
            try:
                try:
                    # Block for up to 0.1s so the kill flag is re-checked regularly.
                    cmd = self.cmdQueue.get(1, 0.1)
                except _queue.Empty:
                    if self.killReceived:
                        try:
                            self.sock.shutdown(SHUT_WR)
                            self.sock.close()
                        except:
                            pass

                        return  # break if queue is empty and killReceived
                    else:
                        continue
            except:
                # pydev_log.info('Finishing debug communication...(1)')
                # when liberating the thread here, we could have errors because we were shutting down
                # but the thread was still not liberated
                return
            cmd.send(self.sock)

            if cmd.id == CMD_EXIT:
                break
            if time is None:
                break  # interpreter shutdown
            time.sleep(self.timeout)
    except Exception:
        GlobalDebuggerHolder.global_dbg.finish_debugging_session()
        if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 0:
            pydev_log_exception()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def can_be_executed_by(self, thread_id):
    '''By default, it must be in the same thread to be executed
    '''
    # Exact match, or the command targets a frame id of the form '<pid>|<thread_id>'.
    if self.thread_id == thread_id:
        return True
    return self.thread_id.endswith('|' + thread_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def do_it(self, dbg):
    ''' Get completions and write back to the client '''
    try:
        frame = dbg.find_frame(self.thread_id, self.frame_id)
        xml = pydevd_console.get_completions(frame, self.act_tok)
        dbg.writer.add_command(dbg.cmd_factory.make_send_console_message(self.sequence, xml))
    except:
        # Report the failure back to the client instead of letting it escape.
        exc = get_exception_traceback_str()
        dbg.writer.add_command(dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + exc))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def do_it(self, dbg):
    '''Starts a thread that will load values asynchronously'''
    try:
        var_objects = []
        for variable in self.vars:
            variable = variable.strip()
            if len(variable) > 0:
                if '\t' in variable:  # there are attributes beyond scope
                    scope, attrs = variable.split('\t', 1)
                    # NOTE(review): `attrs` is a string here, so attrs[0] is its first
                    # *character*, not the first attribute name — looks suspicious;
                    # confirm intended behavior against the callers before changing.
                    name = attrs[0]
                else:
                    scope, attrs = (variable, None)
                    name = scope
                var_obj = pydevd_vars.getVariable(dbg, self.thread_id, self.frame_id, scope, attrs)
                var_objects.append((var_obj, name))

        # Resolution happens off-thread so the debugger loop is not blocked.
        t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects)
        t.start()
    except:
        exc = get_exception_traceback_str()
        sys.stderr.write('%s\n' % (exc,))
        cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc)
        dbg.writer.add_command(cmd)
# <SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def RaiseIfLastError(result, func = None, arguments = ()):
    """
    Error checking for Win32 API calls with no error-specific return value.

    Regardless of the return value, the function calls GetLastError(). If the
    code is not C{ERROR_SUCCESS} then a C{WindowsError} exception is raised.

    For this to work, the user MUST call SetLastError(ERROR_SUCCESS) prior to
    calling the API. Otherwise an exception may be raised even on success,
    since most API calls don't clear the error status code.
    """
    code = GetLastError()
    if code == ERROR_SUCCESS:
        return result
    raise ctypes.WinError(code)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def close(self):
    """
    Closes the Win32 handle.

    Only closes when this wrapper owns the handle and the handle is valid;
    the stored value is cleared even if the close itself fails.
    """
    if self.bOwnership and self.value not in (None, INVALID_HANDLE_VALUE):
        if Handle.__bLeakDetection:  # XXX DEBUG
            print("CLOSE HANDLE (%d) %r" % (self.value, self))
        try:
            self._close()
        finally:
            # Always drop the raw value so a failed close can't be retried
            # against a possibly-stale handle.
            self._value = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _normalize(value): """ Normalize handle values. """
if hasattr(value, 'value'): value = value.value if value is not None: value = long(value) return value
# <SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def wait(self, dwMilliseconds = None):
    """
    Wait for the Win32 object to be signaled.

    @type  dwMilliseconds: int
    @param dwMilliseconds: (Optional) Timeout value in milliseconds.
        Use C{INFINITE} or C{None} for no timeout.
    """
    if self.value is None:
        raise ValueError("Handle is already closed!")
    timeout = INFINITE if dwMilliseconds is None else dwMilliseconds
    status = WaitForSingleObject(self.value, timeout)
    if status != WAIT_OBJECT_0:
        raise ctypes.WinError(status)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def add(self, pattern, start):
    "Recursively adds a linear pattern to the AC automaton"
    #print("adding pattern", pattern, "to", start)
    if not pattern:
        # Empty pattern: the current node is already a match end point.
        #print("empty pattern")
        return [start]

    head, rest = pattern[0], pattern[1:]
    if isinstance(head, tuple):
        # Alternatives: thread each alternative from `start`, then hang the
        # remainder of the pattern off every end node produced.
        #print("alternatives")
        match_nodes = []
        for alternative in head:
            for end in self.add(alternative, start=start):
                match_nodes.extend(self.add(rest, end))
        return match_nodes

    # Single token: follow the existing transition, or create a fresh node.
    next_node = start.transition_table.get(head)
    if next_node is None:
        next_node = BMNode()
        start.transition_table[head] = next_node
    if rest:
        return self.add(rest, start=next_node)
    return [next_node]
# <SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_globals():
    """Return current Python interpreter globals namespace.

    Prefers a registered callback; otherwise falls back to __main__'s dict,
    and to the IPython shell's user namespace when one is present.
    """
    if _get_globals_callback is not None:
        return _get_globals_callback()
    try:
        from __main__ import __dict__ as namespace
    except ImportError:
        try:
            # The import fails on IronPython
            import __main__
            namespace = __main__.__dict__
        except Exception:
            # BUGFIX: the original had a bare `namespace` expression here, which
            # referenced a still-unbound name (NameError) instead of providing a
            # fallback. Use an empty namespace so callers still get a dict.
            namespace = {}
    shell = namespace.get('__ipythonshell__')
    if shell is not None and hasattr(shell, 'user_ns'):
        # IPython 0.12+ kernel
        return shell.user_ns
    # Plain Python interpreter (the original also had an unreachable duplicate
    # `return namespace` after this branch; removed).
    return namespace
# <SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def run(self, verbose=False):
    """
    Del user modules to force Python to deeply reload them

    Do not del modules which are considered as system modules, i.e.
    modules installed in subdirectories of Python interpreter's binary
    Do not del C modules

    :param verbose: when True, print the list of deleted modules.
    """
    # BUGFIX: removed leftover debug code (`if modname == 'aaaaa': print(...)`)
    # that dumped self.previous_modules for a nonsense module name.
    log = []
    # Iterate over a snapshot because entries are deleted from sys.modules.
    modules_copy = dict(sys.modules)
    for modname, module in modules_copy.items():
        if modname not in self.previous_modules:
            modpath = getattr(module, '__file__', None)

            if modpath is None:
                # *module* is a C module that is statically linked into the
                # interpreter. There is no way to know its path, so we
                # choose to ignore it.
                continue

            if not self.is_module_blacklisted(modname, modpath):
                log.append(modname)
                del sys.modules[modname]
    if verbose and log:
        print("\x1b[4;33m%s\x1b[24m%s\x1b[0m"
              % ("UMD has deleted", ": " + ", ".join(log)))
# <SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_stdlib_path():
    """Returns the path to the standard lib for the current path installation.

    This function can be dropped and "sysconfig.get_paths()" used directly
    once Python 2.6 support is dropped.
    """
    if sys.version_info < (2, 7):
        # Python 2.6: sysconfig is not available, approximate from the prefix.
        return os.path.join(sys.prefix, 'lib')
    import sysconfig
    return sysconfig.get_paths()['stdlib']
# <SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def exists_case_sensitive(path):
    """
    Returns if the given path exists and also matches the case on Windows.

    When finding files that can be imported, it is important for the cases to
    match because while file os.path.exists("module.py") and
    os.path.exists("MODULE.py") both return True on Windows, Python can only
    import using the case of the real file.
    """
    result = os.path.exists(path)
    if sys.platform.startswith('win') and result:
        directory, basename = os.path.split(path)
        # BUGFIX: a bare filename yields directory == '' and os.listdir('')
        # raises FileNotFoundError; fall back to the current directory.
        result = basename in os.listdir(directory or os.curdir)
    return result
# <SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _add_comments(self, comments, original_string=""):
    """
    Returns a string with comments added

    :param comments: iterable of comment strings to append; when empty/falsy,
        the original string is returned unchanged.
    :param original_string: the import line the comments belong to.
    """
    # Idiom fix: replaced the fragile `cond and a or b` pattern with an explicit
    # branch. Behavior is identical because the formatted string always contains
    # " # " and is therefore never falsy.
    if not comments:
        return original_string
    return "{0} # {1}".format(self._strip_comments(original_string)[0],
                              "; ".join(comments))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def settrace(
    host=None,
    stdoutToServer=False,
    stderrToServer=False,
    port=5678,
    suspend=True,
    trace_only_current_thread=False,
    overwrite_prev_trace=False,
    patch_multiprocessing=False,
    stop_at_frame=None,
    ):
    '''Sets the tracing function with the pydev debug function and initializes needed facilities.

    @param host: the user may specify another host, if the debug server is not in the same machine
        (default is the local host)

    @param stdoutToServer: when this is true, the stdout is passed to the debug server

    @param stderrToServer: when this is true, the stderr is passed to the debug server
        so that they are printed in its console and not in this process console.

    @param port: specifies which port to use for communicating with the server (note that the server must be started
        in the same port). @note: currently it's hard-coded at 5678 in the client

    @param suspend: whether a breakpoint should be emulated as soon as this function is called.

    @param trace_only_current_thread: determines if only the current thread will be traced or all current and future
        threads will also have the tracing enabled.

    @param overwrite_prev_trace: deprecated

    @param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
        processes are debugged.

    @param stop_at_frame: if passed it'll stop at the given frame, otherwise it'll stop in the function which
        called this method.
    '''
    # Serialize concurrent settrace calls; note that overwrite_prev_trace is
    # accepted for backward compatibility but intentionally not forwarded.
    with _set_trace_lock:
        _locked_settrace(
            host,
            stdoutToServer,
            stderrToServer,
            port,
            suspend,
            trace_only_current_thread,
            patch_multiprocessing,
            stop_at_frame,
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def settrace_forked():
    '''
    When creating a fork from a process in the debugger, we need to reset the whole
    debugger environment!
    '''
    # Drop the inherited debugger singletons so the child starts clean.
    from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder
    GlobalDebuggerHolder.global_dbg = None
    threading.current_thread().additional_info = None

    from _pydevd_frame_eval.pydevd_frame_eval_main import clear_thread_local_info
    # Ask the parent's setup where to reconnect (host/port).
    host, port = dispatch()

    import pydevd_tracing
    pydevd_tracing.restore_sys_set_trace_func()

    if port is not None:
        # Mark this process as a fresh, not-yet-connected fork before re-tracing.
        global connected
        connected = False

        global forked
        forked = True

        custom_frames_container_init()

        if clear_thread_local_info is not None:
            clear_thread_local_info()

        settrace(
            host,
            port=port,
            suspend=False,
            trace_only_current_thread=False,
            overwrite_prev_trace=True,
            patch_multiprocessing=True,
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def enable_tracing(self, thread_trace_func=None):
    '''
    Enables tracing.

    If in regular mode (tracing), will set the tracing function to the tracing
    function for this thread -- by default it's `PyDB.trace_dispatch`, but after
    `PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
    be the default for the given thread.
    '''
    if self.frame_eval_func is not None:
        # Frame-eval mode: activate it and install the lightweight dummy tracer.
        self.frame_eval_func()
        pydevd_tracing.SetTrace(self.dummy_trace_dispatch)
        return

    if thread_trace_func is not None:
        # Remember the explicit function as this thread's default from now on.
        self._local_thread_trace_func.thread_trace_func = thread_trace_func
    else:
        thread_trace_func = self.get_thread_local_trace_func()
    pydevd_tracing.SetTrace(thread_trace_func)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def on_breakpoints_changed(self, removed=False):
    '''
    When breakpoints change, we have to re-evaluate all the assumptions we've made so far.
    '''
    if not self.ready_to_run:
        return  # Still not running: nothing to invalidate yet.

    self.mtime += 1

    if removed:
        # Removing a breakpoint can keep the current tracing as-is.
        return
    # A breakpoint was added: the tracing for existing functions has to be
    # reset so they are re-evaluated.
    self.set_tracing_for_untraced_contexts()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def apply_files_filter(self, frame, filename, force_check_project_scope):
    '''
    Should only be called if `self.is_files_filter_enabled == True`.

    Note that it covers both the filter by specific paths includes/excludes as well
    as the check which filters out libraries if not in the project scope.

    :param force_check_project_scope:
        Check that the file is in the project scope even if the global setting
        is off.

    :return bool:
        True if it should be excluded when stepping and False if it should be
        included.
    '''
    # Results are memoized per (code location, filename, scope flag); repeated
    # frames for the same function hit the cache on the fast path.
    cache_key = (frame.f_code.co_firstlineno, frame.f_code.co_name, filename, force_check_project_scope)
    try:
        return self._apply_filter_cache[cache_key]
    except KeyError:
        if self.plugin is not None and (self.has_plugin_line_breaks or self.has_plugin_exception_breaks):
            # If it's explicitly needed by some plugin, we can't skip it.
            if not self.plugin.can_skip(self, frame):
                # print('include (include by plugins): %s' % filename)
                self._apply_filter_cache[cache_key] = False
                return False

        if self._exclude_filters_enabled:
            # Explicit include/exclude filters take precedence over the
            # project-scope (libraries) check below.
            exclude_by_filter = self._exclude_by_filter(frame, filename)
            if exclude_by_filter is not None:
                if exclude_by_filter:
                    # ignore files matching stepping filters
                    # print('exclude (filtered out): %s' % filename)
                    self._apply_filter_cache[cache_key] = True
                    return True
                else:
                    # print('include (explicitly included): %s' % filename)
                    self._apply_filter_cache[cache_key] = False
                    return False

        if (self._is_libraries_filter_enabled or force_check_project_scope) and not self.in_project_scope(filename):
            # print('exclude (not on project): %s' % filename)
            # ignore library files while stepping
            self._apply_filter_cache[cache_key] = True
            return True

        # print('include (on project): %s' % filename)
        self._apply_filter_cache[cache_key] = False
        return False
# <SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_internal_queue(self, thread_id):
    """ returns internal command queue for a given thread.
    if new queue is created, notify the RDB about it """
    # Frame ids look like '__frame__:...|<thread_id>'; reduce them to the
    # trailing thread id before the lookup.
    if thread_id.startswith('__frame__'):
        thread_id = thread_id.rsplit('|', 1)[-1]
    return self._cmd_queue[thread_id]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def notify_thread_not_alive(self, thread_id, use_lock=True):
    """ if thread is not alive, cancel trace_dispatch processing """
    if self.writer is None:
        return

    # NULL is a no-op context manager, used when the caller already holds the lock.
    with self._lock_running_thread_ids if use_lock else NULL:
        if not self._enable_thread_notifications:
            return

        thread = self._running_thread_ids.pop(thread_id, None)
        if thread is None:
            # Already removed (or never registered): nothing more to do.
            return

        was_notified = thread.additional_info.pydev_notify_kill
        if not was_notified:
            thread.additional_info.pydev_notify_kill = True

    self.writer.add_command(self.cmd_factory.make_thread_killed_message(thread_id))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def process_internal_commands(self):
    '''This function processes internal commands
    '''
    # Housekeeping pass: refresh the alive/dead thread bookkeeping, then drain
    # the internal command queues that this thread is allowed to execute.
    with self._main_lock:
        self.check_output_redirect()

        program_threads_alive = {}
        all_threads = threadingEnumerate()
        program_threads_dead = []
        with self._lock_running_thread_ids:
            reset_cache = not self._running_thread_ids

            for t in all_threads:
                if getattr(t, 'is_pydev_daemon_thread', False):
                    pass  # I.e.: skip the DummyThreads created from pydev daemon threads
                elif isinstance(t, PyDBDaemonThread):
                    pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.')
                elif is_thread_alive(t):
                    if reset_cache:
                        # Fix multiprocessing debug with breakpoints in both main and child processes
                        # (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main
                        # thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't
                        # get new id with its process number and the debugger loses access to both threads.
                        # Therefore we should update thread_id for every main thread in the new process.
                        clear_cached_thread_id(t)

                    thread_id = get_thread_id(t)
                    program_threads_alive[thread_id] = t

                    self.notify_thread_created(thread_id, t, use_lock=False)

            # Compute and notify about threads which are no longer alive.
            thread_ids = list(self._running_thread_ids.keys())
            for thread_id in thread_ids:
                if thread_id not in program_threads_alive:
                    program_threads_dead.append(thread_id)

            for thread_id in program_threads_dead:
                self.notify_thread_not_alive(thread_id, use_lock=False)

        # Without self._lock_running_thread_ids
        if len(program_threads_alive) == 0:
            # No user threads left: tear down the debugging session and ask the
            # daemon threads to stop themselves.
            self.finish_debugging_session()
            for t in all_threads:
                if hasattr(t, 'do_kill_pydev_thread'):
                    t.do_kill_pydev_thread()
        else:
            # Actually process the commands now (make sure we don't have a lock for _lock_running_thread_ids
            # acquired at this point as it could lead to a deadlock if some command evaluated tried to
            # create a thread and wait for it -- which would try to notify about it getting that lock).
            curr_thread_id = get_current_thread_id(threadingCurrentThread())

            for thread_id in (curr_thread_id, '*'):
                queue = self.get_internal_queue(thread_id)

                # some commands must be processed by the thread itself... if that's the case,
                # we will re-add the commands to the queue after executing.
                cmds_to_add_back = []

                try:
                    while True:
                        int_cmd = queue.get(False)

                        if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
                            # add import hooks for matplotlib patches if only debug console was started
                            try:
                                self.init_matplotlib_in_debug_console()
                                self.mpl_in_use = True
                            except:
                                pydev_log.debug("Matplotlib support in debug console failed", traceback.format_exc())
                            self.mpl_hooks_in_debug_console = True

                        if int_cmd.can_be_executed_by(curr_thread_id):
                            pydev_log.verbose("processing internal command ", int_cmd)
                            int_cmd.do_it(self)
                        else:
                            pydev_log.verbose("NOT processing internal command ", int_cmd)
                            cmds_to_add_back.append(int_cmd)

                except _queue.Empty:  # @UndefinedVariable
                    # this is how we exit
                    for int_cmd in cmds_to_add_back:
                        queue.put(int_cmd)