repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
pyamg/pyamg | pyamg/vis/vtk_writer.py | set_attributes | def set_attributes(d, elm):
"""Set attributes from dictionary of values."""
for key in d:
elm.setAttribute(key, d[key]) | python | def set_attributes(d, elm):
"""Set attributes from dictionary of values."""
for key in d:
elm.setAttribute(key, d[key]) | [
"def",
"set_attributes",
"(",
"d",
",",
"elm",
")",
":",
"for",
"key",
"in",
"d",
":",
"elm",
".",
"setAttribute",
"(",
"key",
",",
"d",
"[",
"key",
"]",
")"
] | Set attributes from dictionary of values. | [
"Set",
"attributes",
"from",
"dictionary",
"of",
"values",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/vis/vtk_writer.py#L478-L481 | train | 209,200 |
pyamg/pyamg | pyamg/aggregation/adaptive.py | eliminate_local_candidates | def eliminate_local_candidates(x, AggOp, A, T, Ca=1.0, **kwargs):
"""Eliminate canidates locally.
Helper function that determines where to eliminate candidates locally
on a per aggregate basis.
Parameters
---------
x : array
n x 1 vector of new candidate
AggOp : CSR or CSC sparse matrix
Aggregation operator for the level that x was generated for
A : sparse matrix
Operator for the level that x was generated for
T : sparse matrix
Tentative prolongation operator for the level that x was generated for
Ca : scalar
Constant threshold parameter to decide when to drop candidates
Returns
-------
Nothing, x is modified in place
"""
if not (isspmatrix_csr(AggOp) or isspmatrix_csc(AggOp)):
raise TypeError('AggOp must be a CSR or CSC matrix')
else:
AggOp = AggOp.tocsc()
ndof = max(x.shape)
nPDEs = int(ndof/AggOp.shape[0])
def aggregate_wise_inner_product(z, AggOp, nPDEs, ndof):
"""Inner products per aggregate.
Helper function that calculates <z, z>_i, i.e., the
inner product of z only over aggregate i
Returns a vector of length num_aggregates where entry i is <z, z>_i
"""
z = np.ravel(z)*np.ravel(z)
innerp = np.zeros((1, AggOp.shape[1]), dtype=z.dtype)
for j in range(nPDEs):
innerp += z[slice(j, ndof, nPDEs)].reshape(1, -1) * AggOp
return innerp.reshape(-1, 1)
def get_aggregate_weights(AggOp, A, z, nPDEs, ndof):
"""Weights per aggregate.
Calculate local aggregate quantities
Return a vector of length num_aggregates where entry i is
(card(agg_i)/A.shape[0]) ( <Az, z>/rho(A) )
"""
rho = approximate_spectral_radius(A)
zAz = np.dot(z.reshape(1, -1), A*z.reshape(-1, 1))
card = nPDEs*(AggOp.indptr[1:]-AggOp.indptr[:-1])
weights = (np.ravel(card)*zAz)/(A.shape[0]*rho)
return weights.reshape(-1, 1)
# Run test 1, which finds where x is small relative to its energy
weights = Ca*get_aggregate_weights(AggOp, A, x, nPDEs, ndof)
mask1 = aggregate_wise_inner_product(x, AggOp, nPDEs, ndof) <= weights
# Run test 2, which finds where x is already approximated
# accurately by the existing T
projected_x = x - T*(T.T*x)
mask2 = aggregate_wise_inner_product(projected_x,
AggOp, nPDEs, ndof) <= weights
# Combine masks and zero out corresponding aggregates in x
mask = np.ravel(mask1 + mask2).nonzero()[0]
if mask.shape[0] > 0:
mask = nPDEs*AggOp[:, mask].indices
for j in range(nPDEs):
x[mask+j] = 0.0 | python | def eliminate_local_candidates(x, AggOp, A, T, Ca=1.0, **kwargs):
"""Eliminate canidates locally.
Helper function that determines where to eliminate candidates locally
on a per aggregate basis.
Parameters
---------
x : array
n x 1 vector of new candidate
AggOp : CSR or CSC sparse matrix
Aggregation operator for the level that x was generated for
A : sparse matrix
Operator for the level that x was generated for
T : sparse matrix
Tentative prolongation operator for the level that x was generated for
Ca : scalar
Constant threshold parameter to decide when to drop candidates
Returns
-------
Nothing, x is modified in place
"""
if not (isspmatrix_csr(AggOp) or isspmatrix_csc(AggOp)):
raise TypeError('AggOp must be a CSR or CSC matrix')
else:
AggOp = AggOp.tocsc()
ndof = max(x.shape)
nPDEs = int(ndof/AggOp.shape[0])
def aggregate_wise_inner_product(z, AggOp, nPDEs, ndof):
"""Inner products per aggregate.
Helper function that calculates <z, z>_i, i.e., the
inner product of z only over aggregate i
Returns a vector of length num_aggregates where entry i is <z, z>_i
"""
z = np.ravel(z)*np.ravel(z)
innerp = np.zeros((1, AggOp.shape[1]), dtype=z.dtype)
for j in range(nPDEs):
innerp += z[slice(j, ndof, nPDEs)].reshape(1, -1) * AggOp
return innerp.reshape(-1, 1)
def get_aggregate_weights(AggOp, A, z, nPDEs, ndof):
"""Weights per aggregate.
Calculate local aggregate quantities
Return a vector of length num_aggregates where entry i is
(card(agg_i)/A.shape[0]) ( <Az, z>/rho(A) )
"""
rho = approximate_spectral_radius(A)
zAz = np.dot(z.reshape(1, -1), A*z.reshape(-1, 1))
card = nPDEs*(AggOp.indptr[1:]-AggOp.indptr[:-1])
weights = (np.ravel(card)*zAz)/(A.shape[0]*rho)
return weights.reshape(-1, 1)
# Run test 1, which finds where x is small relative to its energy
weights = Ca*get_aggregate_weights(AggOp, A, x, nPDEs, ndof)
mask1 = aggregate_wise_inner_product(x, AggOp, nPDEs, ndof) <= weights
# Run test 2, which finds where x is already approximated
# accurately by the existing T
projected_x = x - T*(T.T*x)
mask2 = aggregate_wise_inner_product(projected_x,
AggOp, nPDEs, ndof) <= weights
# Combine masks and zero out corresponding aggregates in x
mask = np.ravel(mask1 + mask2).nonzero()[0]
if mask.shape[0] > 0:
mask = nPDEs*AggOp[:, mask].indices
for j in range(nPDEs):
x[mask+j] = 0.0 | [
"def",
"eliminate_local_candidates",
"(",
"x",
",",
"AggOp",
",",
"A",
",",
"T",
",",
"Ca",
"=",
"1.0",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"(",
"isspmatrix_csr",
"(",
"AggOp",
")",
"or",
"isspmatrix_csc",
"(",
"AggOp",
")",
")",
":",
"... | Eliminate canidates locally.
Helper function that determines where to eliminate candidates locally
on a per aggregate basis.
Parameters
---------
x : array
n x 1 vector of new candidate
AggOp : CSR or CSC sparse matrix
Aggregation operator for the level that x was generated for
A : sparse matrix
Operator for the level that x was generated for
T : sparse matrix
Tentative prolongation operator for the level that x was generated for
Ca : scalar
Constant threshold parameter to decide when to drop candidates
Returns
-------
Nothing, x is modified in place | [
"Eliminate",
"canidates",
"locally",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/aggregation/adaptive.py#L30-L105 | train | 209,201 |
pyamg/pyamg | pyamg/gallery/laplacian.py | poisson | def poisson(grid, spacing=None, dtype=float, format=None, type='FD'):
"""Return a sparse matrix for the N-dimensional Poisson problem.
The matrix represents a finite Difference approximation to the
Poisson problem on a regular n-dimensional grid with unit grid
spacing and Dirichlet boundary conditions.
Parameters
----------
grid : tuple of integers
grid dimensions e.g. (100,100)
Notes
-----
The matrix is symmetric and positive definite (SPD).
Examples
--------
>>> from pyamg.gallery import poisson
>>> # 4 nodes in one dimension
>>> poisson( (4,) ).todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> # rectangular two dimensional grid
>>> poisson( (2,3) ).todense()
matrix([[ 4., -1., 0., -1., 0., 0.],
[-1., 4., -1., 0., -1., 0.],
[ 0., -1., 4., 0., 0., -1.],
[-1., 0., 0., 4., -1., 0.],
[ 0., -1., 0., -1., 4., -1.],
[ 0., 0., -1., 0., -1., 4.]])
"""
grid = tuple(grid)
N = len(grid) # grid dimension
if N < 1 or min(grid) < 1:
raise ValueError('invalid grid shape: %s' % str(grid))
# create N-dimension Laplacian stencil
if type == 'FD':
stencil = np.zeros((3,) * N, dtype=dtype)
for i in range(N):
stencil[(1,)*i + (0,) + (1,)*(N-i-1)] = -1
stencil[(1,)*i + (2,) + (1,)*(N-i-1)] = -1
stencil[(1,)*N] = 2*N
if type == 'FE':
stencil = -np.ones((3,) * N, dtype=dtype)
stencil[(1,)*N] = 3**N - 1
return stencil_grid(stencil, grid, format=format) | python | def poisson(grid, spacing=None, dtype=float, format=None, type='FD'):
"""Return a sparse matrix for the N-dimensional Poisson problem.
The matrix represents a finite Difference approximation to the
Poisson problem on a regular n-dimensional grid with unit grid
spacing and Dirichlet boundary conditions.
Parameters
----------
grid : tuple of integers
grid dimensions e.g. (100,100)
Notes
-----
The matrix is symmetric and positive definite (SPD).
Examples
--------
>>> from pyamg.gallery import poisson
>>> # 4 nodes in one dimension
>>> poisson( (4,) ).todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> # rectangular two dimensional grid
>>> poisson( (2,3) ).todense()
matrix([[ 4., -1., 0., -1., 0., 0.],
[-1., 4., -1., 0., -1., 0.],
[ 0., -1., 4., 0., 0., -1.],
[-1., 0., 0., 4., -1., 0.],
[ 0., -1., 0., -1., 4., -1.],
[ 0., 0., -1., 0., -1., 4.]])
"""
grid = tuple(grid)
N = len(grid) # grid dimension
if N < 1 or min(grid) < 1:
raise ValueError('invalid grid shape: %s' % str(grid))
# create N-dimension Laplacian stencil
if type == 'FD':
stencil = np.zeros((3,) * N, dtype=dtype)
for i in range(N):
stencil[(1,)*i + (0,) + (1,)*(N-i-1)] = -1
stencil[(1,)*i + (2,) + (1,)*(N-i-1)] = -1
stencil[(1,)*N] = 2*N
if type == 'FE':
stencil = -np.ones((3,) * N, dtype=dtype)
stencil[(1,)*N] = 3**N - 1
return stencil_grid(stencil, grid, format=format) | [
"def",
"poisson",
"(",
"grid",
",",
"spacing",
"=",
"None",
",",
"dtype",
"=",
"float",
",",
"format",
"=",
"None",
",",
"type",
"=",
"'FD'",
")",
":",
"grid",
"=",
"tuple",
"(",
"grid",
")",
"N",
"=",
"len",
"(",
"grid",
")",
"# grid dimension",
... | Return a sparse matrix for the N-dimensional Poisson problem.
The matrix represents a finite Difference approximation to the
Poisson problem on a regular n-dimensional grid with unit grid
spacing and Dirichlet boundary conditions.
Parameters
----------
grid : tuple of integers
grid dimensions e.g. (100,100)
Notes
-----
The matrix is symmetric and positive definite (SPD).
Examples
--------
>>> from pyamg.gallery import poisson
>>> # 4 nodes in one dimension
>>> poisson( (4,) ).todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> # rectangular two dimensional grid
>>> poisson( (2,3) ).todense()
matrix([[ 4., -1., 0., -1., 0., 0.],
[-1., 4., -1., 0., -1., 0.],
[ 0., -1., 4., 0., 0., -1.],
[-1., 0., 0., 4., -1., 0.],
[ 0., -1., 0., -1., 4., -1.],
[ 0., 0., -1., 0., -1., 4.]]) | [
"Return",
"a",
"sparse",
"matrix",
"for",
"the",
"N",
"-",
"dimensional",
"Poisson",
"problem",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/laplacian.py#L12-L67 | train | 209,202 |
pyamg/pyamg | pyamg/util/linalg.py | norm | def norm(x, pnorm='2'):
"""2-norm of a vector.
Parameters
----------
x : array_like
Vector of complex or real values
pnorm : string
'2' calculates the 2-norm
'inf' calculates the infinity-norm
Returns
-------
n : float
2-norm of a vector
Notes
-----
- currently 1+ order of magnitude faster than scipy.linalg.norm(x), which
calls sqrt(numpy.sum(real((conjugate(x)*x)),axis=0)) resulting in an
extra copy
- only handles the 2-norm and infinity-norm for vectors
See Also
--------
scipy.linalg.norm : scipy general matrix or vector norm
"""
# TODO check dimensions of x
# TODO speedup complex case
x = np.ravel(x)
if pnorm == '2':
return np.sqrt(np.inner(x.conj(), x).real)
elif pnorm == 'inf':
return np.max(np.abs(x))
else:
raise ValueError('Only the 2-norm and infinity-norm are supported') | python | def norm(x, pnorm='2'):
"""2-norm of a vector.
Parameters
----------
x : array_like
Vector of complex or real values
pnorm : string
'2' calculates the 2-norm
'inf' calculates the infinity-norm
Returns
-------
n : float
2-norm of a vector
Notes
-----
- currently 1+ order of magnitude faster than scipy.linalg.norm(x), which
calls sqrt(numpy.sum(real((conjugate(x)*x)),axis=0)) resulting in an
extra copy
- only handles the 2-norm and infinity-norm for vectors
See Also
--------
scipy.linalg.norm : scipy general matrix or vector norm
"""
# TODO check dimensions of x
# TODO speedup complex case
x = np.ravel(x)
if pnorm == '2':
return np.sqrt(np.inner(x.conj(), x).real)
elif pnorm == 'inf':
return np.max(np.abs(x))
else:
raise ValueError('Only the 2-norm and infinity-norm are supported') | [
"def",
"norm",
"(",
"x",
",",
"pnorm",
"=",
"'2'",
")",
":",
"# TODO check dimensions of x",
"# TODO speedup complex case",
"x",
"=",
"np",
".",
"ravel",
"(",
"x",
")",
"if",
"pnorm",
"==",
"'2'",
":",
"return",
"np",
".",
"sqrt",
"(",
"np",
".",
"inne... | 2-norm of a vector.
Parameters
----------
x : array_like
Vector of complex or real values
pnorm : string
'2' calculates the 2-norm
'inf' calculates the infinity-norm
Returns
-------
n : float
2-norm of a vector
Notes
-----
- currently 1+ order of magnitude faster than scipy.linalg.norm(x), which
calls sqrt(numpy.sum(real((conjugate(x)*x)),axis=0)) resulting in an
extra copy
- only handles the 2-norm and infinity-norm for vectors
See Also
--------
scipy.linalg.norm : scipy general matrix or vector norm | [
"2",
"-",
"norm",
"of",
"a",
"vector",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/linalg.py#L16-L55 | train | 209,203 |
pyamg/pyamg | pyamg/util/linalg.py | approximate_spectral_radius | def approximate_spectral_radius(A, tol=0.01, maxiter=15, restart=5,
symmetric=None, initial_guess=None,
return_vector=False):
"""Approximate the spectral radius of a matrix.
Parameters
----------
A : {dense or sparse matrix}
E.g. csr_matrix, csc_matrix, ndarray, etc.
tol : {scalar}
Relative tolerance of approximation, i.e., the error divided
by the approximate spectral radius is compared to tol.
maxiter : {integer}
Maximum number of iterations to perform
restart : {integer}
Number of restarted Arnoldi processes. For example, a value of 0 will
run Arnoldi once, for maxiter iterations, and a value of 1 will restart
Arnoldi once, using the maximal eigenvector from the first Arnoldi
process as the initial guess.
symmetric : {boolean}
True - if A is symmetric Lanczos iteration is used (more efficient)
False - if A is non-symmetric Arnoldi iteration is used (less efficient)
initial_guess : {array|None}
If n x 1 array, then use as initial guess for Arnoldi/Lanczos.
If None, then use a random initial guess.
return_vector : {boolean}
True - return an approximate dominant eigenvector, in addition to the spectral radius.
False - Do not return the approximate dominant eigenvector
Returns
-------
An approximation to the spectral radius of A, and
if return_vector=True, then also return the approximate dominant
eigenvector
Notes
-----
The spectral radius is approximated by looking at the Ritz eigenvalues.
Arnoldi iteration (or Lanczos) is used to project the matrix A onto a
Krylov subspace: H = Q* A Q. The eigenvalues of H (i.e. the Ritz
eigenvalues) should represent the eigenvalues of A in the sense that the
minimum and maximum values are usually well matched (for the symmetric case
it is true since the eigenvalues are real).
References
----------
.. [1] Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der Vorst,
editors. "Templates for the Solution of Algebraic Eigenvalue Problems:
A Practical Guide", SIAM, Philadelphia, 2000.
Examples
--------
>>> from pyamg.util.linalg import approximate_spectral_radius
>>> import numpy as np
>>> from scipy.linalg import eigvals, norm
>>> A = np.array([[1.,0.],[0.,1.]])
>>> print approximate_spectral_radius(A,maxiter=3)
1.0
>>> print max([norm(x) for x in eigvals(A)])
1.0
"""
if not hasattr(A, 'rho') or return_vector:
# somehow more restart causes a nonsymmetric case to fail...look at
# this what about A.dtype=int? convert somehow?
# The use of the restart vector v0 requires that the full Krylov
# subspace V be stored. So, set symmetric to False.
symmetric = False
if maxiter < 1:
raise ValueError('expected maxiter > 0')
if restart < 0:
raise ValueError('expected restart >= 0')
if A.dtype == int:
raise ValueError('expected A to be float (complex or real)')
if A.shape[0] != A.shape[1]:
raise ValueError('expected square A')
if initial_guess is None:
v0 = sp.rand(A.shape[1], 1)
if A.dtype == complex:
v0 = v0 + 1.0j * sp.rand(A.shape[1], 1)
else:
if initial_guess.shape[0] != A.shape[0]:
raise ValueError('initial_guess and A must have same shape')
if (len(initial_guess.shape) > 1) and (initial_guess.shape[1] > 1):
raise ValueError('initial_guess must be an (n,1) or\
(n,) vector')
v0 = initial_guess.reshape(-1, 1)
v0 = np.array(v0, dtype=A.dtype)
for j in range(restart+1):
[evect, ev, H, V, breakdown_flag] =\
_approximate_eigenvalues(A, tol, maxiter,
symmetric, initial_guess=v0)
# Calculate error in dominant eigenvector
nvecs = ev.shape[0]
max_index = np.abs(ev).argmax()
error = H[nvecs, nvecs-1]*evect[-1, max_index]
# error is a fast way of calculating the following line
# error2 = ( A - ev[max_index]*sp.mat(
# sp.eye(A.shape[0],A.shape[1])) )*\
# ( sp.mat(sp.hstack(V[:-1]))*\
# evect[:,max_index].reshape(-1,1) )
# print str(error) + " " + str(sp.linalg.norm(e2))
if (np.abs(error)/np.abs(ev[max_index]) < tol) or\
breakdown_flag:
# halt if below relative tolerance
v0 = np.dot(np.hstack(V[:-1]),
evect[:, max_index].reshape(-1, 1))
break
else:
v0 = np.dot(np.hstack(V[:-1]),
evect[:, max_index].reshape(-1, 1))
# end j-loop
rho = np.abs(ev[max_index])
if sparse.isspmatrix(A):
A.rho = rho
if return_vector:
return (rho, v0)
else:
return rho
else:
return A.rho | python | def approximate_spectral_radius(A, tol=0.01, maxiter=15, restart=5,
symmetric=None, initial_guess=None,
return_vector=False):
"""Approximate the spectral radius of a matrix.
Parameters
----------
A : {dense or sparse matrix}
E.g. csr_matrix, csc_matrix, ndarray, etc.
tol : {scalar}
Relative tolerance of approximation, i.e., the error divided
by the approximate spectral radius is compared to tol.
maxiter : {integer}
Maximum number of iterations to perform
restart : {integer}
Number of restarted Arnoldi processes. For example, a value of 0 will
run Arnoldi once, for maxiter iterations, and a value of 1 will restart
Arnoldi once, using the maximal eigenvector from the first Arnoldi
process as the initial guess.
symmetric : {boolean}
True - if A is symmetric Lanczos iteration is used (more efficient)
False - if A is non-symmetric Arnoldi iteration is used (less efficient)
initial_guess : {array|None}
If n x 1 array, then use as initial guess for Arnoldi/Lanczos.
If None, then use a random initial guess.
return_vector : {boolean}
True - return an approximate dominant eigenvector, in addition to the spectral radius.
False - Do not return the approximate dominant eigenvector
Returns
-------
An approximation to the spectral radius of A, and
if return_vector=True, then also return the approximate dominant
eigenvector
Notes
-----
The spectral radius is approximated by looking at the Ritz eigenvalues.
Arnoldi iteration (or Lanczos) is used to project the matrix A onto a
Krylov subspace: H = Q* A Q. The eigenvalues of H (i.e. the Ritz
eigenvalues) should represent the eigenvalues of A in the sense that the
minimum and maximum values are usually well matched (for the symmetric case
it is true since the eigenvalues are real).
References
----------
.. [1] Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der Vorst,
editors. "Templates for the Solution of Algebraic Eigenvalue Problems:
A Practical Guide", SIAM, Philadelphia, 2000.
Examples
--------
>>> from pyamg.util.linalg import approximate_spectral_radius
>>> import numpy as np
>>> from scipy.linalg import eigvals, norm
>>> A = np.array([[1.,0.],[0.,1.]])
>>> print approximate_spectral_radius(A,maxiter=3)
1.0
>>> print max([norm(x) for x in eigvals(A)])
1.0
"""
if not hasattr(A, 'rho') or return_vector:
# somehow more restart causes a nonsymmetric case to fail...look at
# this what about A.dtype=int? convert somehow?
# The use of the restart vector v0 requires that the full Krylov
# subspace V be stored. So, set symmetric to False.
symmetric = False
if maxiter < 1:
raise ValueError('expected maxiter > 0')
if restart < 0:
raise ValueError('expected restart >= 0')
if A.dtype == int:
raise ValueError('expected A to be float (complex or real)')
if A.shape[0] != A.shape[1]:
raise ValueError('expected square A')
if initial_guess is None:
v0 = sp.rand(A.shape[1], 1)
if A.dtype == complex:
v0 = v0 + 1.0j * sp.rand(A.shape[1], 1)
else:
if initial_guess.shape[0] != A.shape[0]:
raise ValueError('initial_guess and A must have same shape')
if (len(initial_guess.shape) > 1) and (initial_guess.shape[1] > 1):
raise ValueError('initial_guess must be an (n,1) or\
(n,) vector')
v0 = initial_guess.reshape(-1, 1)
v0 = np.array(v0, dtype=A.dtype)
for j in range(restart+1):
[evect, ev, H, V, breakdown_flag] =\
_approximate_eigenvalues(A, tol, maxiter,
symmetric, initial_guess=v0)
# Calculate error in dominant eigenvector
nvecs = ev.shape[0]
max_index = np.abs(ev).argmax()
error = H[nvecs, nvecs-1]*evect[-1, max_index]
# error is a fast way of calculating the following line
# error2 = ( A - ev[max_index]*sp.mat(
# sp.eye(A.shape[0],A.shape[1])) )*\
# ( sp.mat(sp.hstack(V[:-1]))*\
# evect[:,max_index].reshape(-1,1) )
# print str(error) + " " + str(sp.linalg.norm(e2))
if (np.abs(error)/np.abs(ev[max_index]) < tol) or\
breakdown_flag:
# halt if below relative tolerance
v0 = np.dot(np.hstack(V[:-1]),
evect[:, max_index].reshape(-1, 1))
break
else:
v0 = np.dot(np.hstack(V[:-1]),
evect[:, max_index].reshape(-1, 1))
# end j-loop
rho = np.abs(ev[max_index])
if sparse.isspmatrix(A):
A.rho = rho
if return_vector:
return (rho, v0)
else:
return rho
else:
return A.rho | [
"def",
"approximate_spectral_radius",
"(",
"A",
",",
"tol",
"=",
"0.01",
",",
"maxiter",
"=",
"15",
",",
"restart",
"=",
"5",
",",
"symmetric",
"=",
"None",
",",
"initial_guess",
"=",
"None",
",",
"return_vector",
"=",
"False",
")",
":",
"if",
"not",
"... | Approximate the spectral radius of a matrix.
Parameters
----------
A : {dense or sparse matrix}
E.g. csr_matrix, csc_matrix, ndarray, etc.
tol : {scalar}
Relative tolerance of approximation, i.e., the error divided
by the approximate spectral radius is compared to tol.
maxiter : {integer}
Maximum number of iterations to perform
restart : {integer}
Number of restarted Arnoldi processes. For example, a value of 0 will
run Arnoldi once, for maxiter iterations, and a value of 1 will restart
Arnoldi once, using the maximal eigenvector from the first Arnoldi
process as the initial guess.
symmetric : {boolean}
True - if A is symmetric Lanczos iteration is used (more efficient)
False - if A is non-symmetric Arnoldi iteration is used (less efficient)
initial_guess : {array|None}
If n x 1 array, then use as initial guess for Arnoldi/Lanczos.
If None, then use a random initial guess.
return_vector : {boolean}
True - return an approximate dominant eigenvector, in addition to the spectral radius.
False - Do not return the approximate dominant eigenvector
Returns
-------
An approximation to the spectral radius of A, and
if return_vector=True, then also return the approximate dominant
eigenvector
Notes
-----
The spectral radius is approximated by looking at the Ritz eigenvalues.
Arnoldi iteration (or Lanczos) is used to project the matrix A onto a
Krylov subspace: H = Q* A Q. The eigenvalues of H (i.e. the Ritz
eigenvalues) should represent the eigenvalues of A in the sense that the
minimum and maximum values are usually well matched (for the symmetric case
it is true since the eigenvalues are real).
References
----------
.. [1] Z. Bai, J. Demmel, J. Dongarra, A. Ruhe, and H. van der Vorst,
editors. "Templates for the Solution of Algebraic Eigenvalue Problems:
A Practical Guide", SIAM, Philadelphia, 2000.
Examples
--------
>>> from pyamg.util.linalg import approximate_spectral_radius
>>> import numpy as np
>>> from scipy.linalg import eigvals, norm
>>> A = np.array([[1.,0.],[0.,1.]])
>>> print approximate_spectral_radius(A,maxiter=3)
1.0
>>> print max([norm(x) for x in eigvals(A)])
1.0 | [
"Approximate",
"the",
"spectral",
"radius",
"of",
"a",
"matrix",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/linalg.py#L278-L407 | train | 209,204 |
pyamg/pyamg | pyamg/util/linalg.py | condest | def condest(A, tol=0.1, maxiter=25, symmetric=False):
r"""Estimates the condition number of A.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
tol : {float}
Approximation tolerance, currently not used
maxiter: {int}
Max number of Arnoldi/Lanczos iterations
symmetric : {bool}
If symmetric use the far more efficient Lanczos algorithm,
Else use Arnoldi
Returns
-------
Estimate of cond(A) with \|lambda_max\| / \|lambda_min\|
through the use of Arnoldi or Lanczos iterations, depending on
the symmetric flag
Notes
-----
The condition number measures how large of a change in the
the problems solution is caused by a change in problem's input.
Large condition numbers indicate that small perturbations
and numerical errors are magnified greatly when solving the system.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import condest
>>> c = condest(np.array([[1.,0.],[0.,2.]]))
>>> print c
2.0
"""
[evect, ev, H, V, breakdown_flag] =\
_approximate_eigenvalues(A, tol, maxiter, symmetric)
return np.max([norm(x) for x in ev])/min([norm(x) for x in ev]) | python | def condest(A, tol=0.1, maxiter=25, symmetric=False):
r"""Estimates the condition number of A.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
tol : {float}
Approximation tolerance, currently not used
maxiter: {int}
Max number of Arnoldi/Lanczos iterations
symmetric : {bool}
If symmetric use the far more efficient Lanczos algorithm,
Else use Arnoldi
Returns
-------
Estimate of cond(A) with \|lambda_max\| / \|lambda_min\|
through the use of Arnoldi or Lanczos iterations, depending on
the symmetric flag
Notes
-----
The condition number measures how large of a change in the
the problems solution is caused by a change in problem's input.
Large condition numbers indicate that small perturbations
and numerical errors are magnified greatly when solving the system.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import condest
>>> c = condest(np.array([[1.,0.],[0.,2.]]))
>>> print c
2.0
"""
[evect, ev, H, V, breakdown_flag] =\
_approximate_eigenvalues(A, tol, maxiter, symmetric)
return np.max([norm(x) for x in ev])/min([norm(x) for x in ev]) | [
"def",
"condest",
"(",
"A",
",",
"tol",
"=",
"0.1",
",",
"maxiter",
"=",
"25",
",",
"symmetric",
"=",
"False",
")",
":",
"[",
"evect",
",",
"ev",
",",
"H",
",",
"V",
",",
"breakdown_flag",
"]",
"=",
"_approximate_eigenvalues",
"(",
"A",
",",
"tol",... | r"""Estimates the condition number of A.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
tol : {float}
Approximation tolerance, currently not used
maxiter: {int}
Max number of Arnoldi/Lanczos iterations
symmetric : {bool}
If symmetric use the far more efficient Lanczos algorithm,
Else use Arnoldi
Returns
-------
Estimate of cond(A) with \|lambda_max\| / \|lambda_min\|
through the use of Arnoldi or Lanczos iterations, depending on
the symmetric flag
Notes
-----
The condition number measures how large of a change in the
the problems solution is caused by a change in problem's input.
Large condition numbers indicate that small perturbations
and numerical errors are magnified greatly when solving the system.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import condest
>>> c = condest(np.array([[1.,0.],[0.,2.]]))
>>> print c
2.0 | [
"r",
"Estimates",
"the",
"condition",
"number",
"of",
"A",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/linalg.py#L410-L450 | train | 209,205 |
pyamg/pyamg | pyamg/util/linalg.py | cond | def cond(A):
"""Return condition number of A.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
Returns
-------
2-norm condition number through use of the SVD
Use for small to moderate sized dense matrices.
For large sparse matrices, use condest.
Notes
-----
The condition number measures how large of a change in
the problems solution is caused by a change in problem's input.
Large condition numbers indicate that small perturbations
and numerical errors are magnified greatly when solving the system.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import condest
>>> c = condest(np.array([[1.0,0.],[0.,2.0]]))
>>> print c
2.0
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
if sparse.isspmatrix(A):
A = A.todense()
# 2-Norm Condition Number
from scipy.linalg import svd
U, Sigma, Vh = svd(A)
return np.max(Sigma)/min(Sigma) | python | def cond(A):
"""Return condition number of A.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
Returns
-------
2-norm condition number through use of the SVD
Use for small to moderate sized dense matrices.
For large sparse matrices, use condest.
Notes
-----
The condition number measures how large of a change in
the problems solution is caused by a change in problem's input.
Large condition numbers indicate that small perturbations
and numerical errors are magnified greatly when solving the system.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import condest
>>> c = condest(np.array([[1.0,0.],[0.,2.0]]))
>>> print c
2.0
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix')
if sparse.isspmatrix(A):
A = A.todense()
# 2-Norm Condition Number
from scipy.linalg import svd
U, Sigma, Vh = svd(A)
return np.max(Sigma)/min(Sigma) | [
"def",
"cond",
"(",
"A",
")",
":",
"if",
"A",
".",
"shape",
"[",
"0",
"]",
"!=",
"A",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
"ValueError",
"(",
"'expected square matrix'",
")",
"if",
"sparse",
".",
"isspmatrix",
"(",
"A",
")",
":",
"A",
"=",
... | Return condition number of A.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
Returns
-------
2-norm condition number through use of the SVD
Use for small to moderate sized dense matrices.
For large sparse matrices, use condest.
Notes
-----
The condition number measures how large of a change in
the problems solution is caused by a change in problem's input.
Large condition numbers indicate that small perturbations
and numerical errors are magnified greatly when solving the system.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import condest
>>> c = condest(np.array([[1.0,0.],[0.,2.0]]))
>>> print c
2.0 | [
"Return",
"condition",
"number",
"of",
"A",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/linalg.py#L453-L493 | train | 209,206 |
pyamg/pyamg | pyamg/util/linalg.py | ishermitian | def ishermitian(A, fast_check=True, tol=1e-6, verbose=False):
r"""Return True if A is Hermitian to within tol.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
fast_check : {bool}
If True, use the heuristic < Ax, y> = < x, Ay>
for random vectors x and y to check for conjugate symmetry.
If False, compute A - A.H.
tol : {float}
Symmetry tolerance
verbose: {bool}
prints
max( \|A - A.H\| ) if nonhermitian and fast_check=False
abs( <Ax, y> - <x, Ay> ) if nonhermitian and fast_check=False
Returns
-------
True if hermitian
False if nonhermitian
Notes
-----
This function applies a simple test of conjugate symmetry
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import ishermitian
>>> ishermitian(np.array([[1,2],[1,1]]))
False
>>> from pyamg.gallery import poisson
>>> ishermitian(poisson((10,10)))
True
"""
# convert to matrix type
if not sparse.isspmatrix(A):
A = np.asmatrix(A)
if fast_check:
x = sp.rand(A.shape[0], 1)
y = sp.rand(A.shape[0], 1)
if A.dtype == complex:
x = x + 1.0j*sp.rand(A.shape[0], 1)
y = y + 1.0j*sp.rand(A.shape[0], 1)
xAy = np.dot((A*x).conjugate().T, y)
xAty = np.dot(x.conjugate().T, A*y)
diff = float(np.abs(xAy - xAty) / np.sqrt(np.abs(xAy*xAty)))
else:
# compute the difference, A - A.H
if sparse.isspmatrix(A):
diff = np.ravel((A - A.H).data)
else:
diff = np.ravel(A - A.H)
if np.max(diff.shape) == 0:
diff = 0
else:
diff = np.max(np.abs(diff))
if diff < tol:
diff = 0
return True
else:
if verbose:
print(diff)
return False
return diff | python | def ishermitian(A, fast_check=True, tol=1e-6, verbose=False):
r"""Return True if A is Hermitian to within tol.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
fast_check : {bool}
If True, use the heuristic < Ax, y> = < x, Ay>
for random vectors x and y to check for conjugate symmetry.
If False, compute A - A.H.
tol : {float}
Symmetry tolerance
verbose: {bool}
prints
max( \|A - A.H\| ) if nonhermitian and fast_check=False
abs( <Ax, y> - <x, Ay> ) if nonhermitian and fast_check=False
Returns
-------
True if hermitian
False if nonhermitian
Notes
-----
This function applies a simple test of conjugate symmetry
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import ishermitian
>>> ishermitian(np.array([[1,2],[1,1]]))
False
>>> from pyamg.gallery import poisson
>>> ishermitian(poisson((10,10)))
True
"""
# convert to matrix type
if not sparse.isspmatrix(A):
A = np.asmatrix(A)
if fast_check:
x = sp.rand(A.shape[0], 1)
y = sp.rand(A.shape[0], 1)
if A.dtype == complex:
x = x + 1.0j*sp.rand(A.shape[0], 1)
y = y + 1.0j*sp.rand(A.shape[0], 1)
xAy = np.dot((A*x).conjugate().T, y)
xAty = np.dot(x.conjugate().T, A*y)
diff = float(np.abs(xAy - xAty) / np.sqrt(np.abs(xAy*xAty)))
else:
# compute the difference, A - A.H
if sparse.isspmatrix(A):
diff = np.ravel((A - A.H).data)
else:
diff = np.ravel(A - A.H)
if np.max(diff.shape) == 0:
diff = 0
else:
diff = np.max(np.abs(diff))
if diff < tol:
diff = 0
return True
else:
if verbose:
print(diff)
return False
return diff | [
"def",
"ishermitian",
"(",
"A",
",",
"fast_check",
"=",
"True",
",",
"tol",
"=",
"1e-6",
",",
"verbose",
"=",
"False",
")",
":",
"# convert to matrix type",
"if",
"not",
"sparse",
".",
"isspmatrix",
"(",
"A",
")",
":",
"A",
"=",
"np",
".",
"asmatrix",
... | r"""Return True if A is Hermitian to within tol.
Parameters
----------
A : {dense or sparse matrix}
e.g. array, matrix, csr_matrix, ...
fast_check : {bool}
If True, use the heuristic < Ax, y> = < x, Ay>
for random vectors x and y to check for conjugate symmetry.
If False, compute A - A.H.
tol : {float}
Symmetry tolerance
verbose: {bool}
prints
max( \|A - A.H\| ) if nonhermitian and fast_check=False
abs( <Ax, y> - <x, Ay> ) if nonhermitian and fast_check=False
Returns
-------
True if hermitian
False if nonhermitian
Notes
-----
This function applies a simple test of conjugate symmetry
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import ishermitian
>>> ishermitian(np.array([[1,2],[1,1]]))
False
>>> from pyamg.gallery import poisson
>>> ishermitian(poisson((10,10)))
True | [
"r",
"Return",
"True",
"if",
"A",
"is",
"Hermitian",
"to",
"within",
"tol",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/linalg.py#L496-L570 | train | 209,207 |
pyamg/pyamg | pyamg/util/linalg.py | pinv_array | def pinv_array(a, cond=None):
"""Calculate the Moore-Penrose pseudo inverse of each block of the three dimensional array a.
Parameters
----------
a : {dense array}
Is of size (n, m, m)
cond : {float}
Used by gelss to filter numerically zeros singular values.
If None, a suitable value is chosen for you.
Returns
-------
Nothing, a is modified in place so that a[k] holds the pseudoinverse
of that block.
Notes
-----
By using lapack wrappers, this can be much faster for large n, than
directly calling pinv2
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import pinv_array
>>> a = np.array([[[1.,2.],[1.,1.]], [[1.,1.],[3.,3.]]])
>>> ac = a.copy()
>>> # each block of a is inverted in-place
>>> pinv_array(a)
"""
n = a.shape[0]
m = a.shape[1]
if m == 1:
# Pseudo-inverse of 1 x 1 matrices is trivial
zero_entries = (a == 0.0).nonzero()[0]
a[zero_entries] = 1.0
a[:] = 1.0/a
a[zero_entries] = 0.0
del zero_entries
else:
# The block size is greater than 1
# Create necessary arrays and function pointers for calculating pinv
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(np.ones((1,), dtype=a.dtype)))
RHS = np.eye(m, dtype=a.dtype)
lwork = _compute_lwork(gelss_lwork, m, m, m)
# Choose tolerance for which singular values are zero in *gelss below
if cond is None:
t = a.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
cond = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]
# Invert each block of a
for kk in range(n):
gelssoutput = gelss(a[kk], RHS, cond=cond, lwork=lwork,
overwrite_a=True, overwrite_b=False)
a[kk] = gelssoutput[1] | python | def pinv_array(a, cond=None):
"""Calculate the Moore-Penrose pseudo inverse of each block of the three dimensional array a.
Parameters
----------
a : {dense array}
Is of size (n, m, m)
cond : {float}
Used by gelss to filter numerically zeros singular values.
If None, a suitable value is chosen for you.
Returns
-------
Nothing, a is modified in place so that a[k] holds the pseudoinverse
of that block.
Notes
-----
By using lapack wrappers, this can be much faster for large n, than
directly calling pinv2
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import pinv_array
>>> a = np.array([[[1.,2.],[1.,1.]], [[1.,1.],[3.,3.]]])
>>> ac = a.copy()
>>> # each block of a is inverted in-place
>>> pinv_array(a)
"""
n = a.shape[0]
m = a.shape[1]
if m == 1:
# Pseudo-inverse of 1 x 1 matrices is trivial
zero_entries = (a == 0.0).nonzero()[0]
a[zero_entries] = 1.0
a[:] = 1.0/a
a[zero_entries] = 0.0
del zero_entries
else:
# The block size is greater than 1
# Create necessary arrays and function pointers for calculating pinv
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(np.ones((1,), dtype=a.dtype)))
RHS = np.eye(m, dtype=a.dtype)
lwork = _compute_lwork(gelss_lwork, m, m, m)
# Choose tolerance for which singular values are zero in *gelss below
if cond is None:
t = a.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
cond = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]
# Invert each block of a
for kk in range(n):
gelssoutput = gelss(a[kk], RHS, cond=cond, lwork=lwork,
overwrite_a=True, overwrite_b=False)
a[kk] = gelssoutput[1] | [
"def",
"pinv_array",
"(",
"a",
",",
"cond",
"=",
"None",
")",
":",
"n",
"=",
"a",
".",
"shape",
"[",
"0",
"]",
"m",
"=",
"a",
".",
"shape",
"[",
"1",
"]",
"if",
"m",
"==",
"1",
":",
"# Pseudo-inverse of 1 x 1 matrices is trivial",
"zero_entries",
"="... | Calculate the Moore-Penrose pseudo inverse of each block of the three dimensional array a.
Parameters
----------
a : {dense array}
Is of size (n, m, m)
cond : {float}
Used by gelss to filter numerically zeros singular values.
If None, a suitable value is chosen for you.
Returns
-------
Nothing, a is modified in place so that a[k] holds the pseudoinverse
of that block.
Notes
-----
By using lapack wrappers, this can be much faster for large n, than
directly calling pinv2
Examples
--------
>>> import numpy as np
>>> from pyamg.util.linalg import pinv_array
>>> a = np.array([[[1.,2.],[1.,1.]], [[1.,1.],[3.,3.]]])
>>> ac = a.copy()
>>> # each block of a is inverted in-place
>>> pinv_array(a) | [
"Calculate",
"the",
"Moore",
"-",
"Penrose",
"pseudo",
"inverse",
"of",
"each",
"block",
"of",
"the",
"three",
"dimensional",
"array",
"a",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/linalg.py#L573-L637 | train | 209,208 |
pyamg/pyamg | pyamg/strength.py | distance_strength_of_connection | def distance_strength_of_connection(A, V, theta=2.0, relative_drop=True):
"""Distance based strength-of-connection.
Parameters
----------
A : csr_matrix or bsr_matrix
Square, sparse matrix in CSR or BSR format
V : array
Coordinates of the vertices of the graph of A
relative_drop : bool
If false, then a connection must be within a distance of theta
from a point to be strongly connected.
If true, then the closest connection is always strong, and other points
must be within theta times the smallest distance to be strong
Returns
-------
C : csr_matrix
C(i,j) = distance(point_i, point_j)
Strength of connection matrix where strength values are
distances, i.e. the smaller the value, the stronger the connection.
Sparsity pattern of C is copied from A.
Notes
-----
- theta is a drop tolerance that is applied row-wise
- If a BSR matrix given, then the return matrix is still CSR. The strength
is given between super nodes based on the BSR block size.
Examples
--------
>>> from pyamg.gallery import load_example
>>> from pyamg.strength import distance_strength_of_connection
>>> data = load_example('airfoil')
>>> A = data['A'].tocsr()
>>> S = distance_strength_of_connection(data['A'], data['vertices'])
"""
# Amalgamate for the supernode case
if sparse.isspmatrix_bsr(A):
sn = int(A.shape[0] / A.blocksize[0])
u = np.ones((A.data.shape[0],))
A = sparse.csr_matrix((u, A.indices, A.indptr), shape=(sn, sn))
if not sparse.isspmatrix_csr(A):
warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
A = sparse.csr_matrix(A)
dim = V.shape[1]
# Create two arrays for differencing the different coordinates such
# that C(i,j) = distance(point_i, point_j)
cols = A.indices
rows = np.repeat(np.arange(A.shape[0]), A.indptr[1:] - A.indptr[0:-1])
# Insert difference for each coordinate into C
C = (V[rows, 0] - V[cols, 0])**2
for d in range(1, dim):
C += (V[rows, d] - V[cols, d])**2
C = np.sqrt(C)
C[C < 1e-6] = 1e-6
C = sparse.csr_matrix((C, A.indices.copy(), A.indptr.copy()),
shape=A.shape)
# Apply drop tolerance
if relative_drop is True:
if theta != np.inf:
amg_core.apply_distance_filter(C.shape[0], theta, C.indptr,
C.indices, C.data)
else:
amg_core.apply_absolute_distance_filter(C.shape[0], theta, C.indptr,
C.indices, C.data)
C.eliminate_zeros()
C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')
# Standardized strength values require small values be weak and large
# values be strong. So, we invert the distances.
C.data = 1.0 / C.data
# Scale C by the largest magnitude entry in each row
C = scale_rows_by_largest_entry(C)
return C | python | def distance_strength_of_connection(A, V, theta=2.0, relative_drop=True):
"""Distance based strength-of-connection.
Parameters
----------
A : csr_matrix or bsr_matrix
Square, sparse matrix in CSR or BSR format
V : array
Coordinates of the vertices of the graph of A
relative_drop : bool
If false, then a connection must be within a distance of theta
from a point to be strongly connected.
If true, then the closest connection is always strong, and other points
must be within theta times the smallest distance to be strong
Returns
-------
C : csr_matrix
C(i,j) = distance(point_i, point_j)
Strength of connection matrix where strength values are
distances, i.e. the smaller the value, the stronger the connection.
Sparsity pattern of C is copied from A.
Notes
-----
- theta is a drop tolerance that is applied row-wise
- If a BSR matrix given, then the return matrix is still CSR. The strength
is given between super nodes based on the BSR block size.
Examples
--------
>>> from pyamg.gallery import load_example
>>> from pyamg.strength import distance_strength_of_connection
>>> data = load_example('airfoil')
>>> A = data['A'].tocsr()
>>> S = distance_strength_of_connection(data['A'], data['vertices'])
"""
# Amalgamate for the supernode case
if sparse.isspmatrix_bsr(A):
sn = int(A.shape[0] / A.blocksize[0])
u = np.ones((A.data.shape[0],))
A = sparse.csr_matrix((u, A.indices, A.indptr), shape=(sn, sn))
if not sparse.isspmatrix_csr(A):
warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
A = sparse.csr_matrix(A)
dim = V.shape[1]
# Create two arrays for differencing the different coordinates such
# that C(i,j) = distance(point_i, point_j)
cols = A.indices
rows = np.repeat(np.arange(A.shape[0]), A.indptr[1:] - A.indptr[0:-1])
# Insert difference for each coordinate into C
C = (V[rows, 0] - V[cols, 0])**2
for d in range(1, dim):
C += (V[rows, d] - V[cols, d])**2
C = np.sqrt(C)
C[C < 1e-6] = 1e-6
C = sparse.csr_matrix((C, A.indices.copy(), A.indptr.copy()),
shape=A.shape)
# Apply drop tolerance
if relative_drop is True:
if theta != np.inf:
amg_core.apply_distance_filter(C.shape[0], theta, C.indptr,
C.indices, C.data)
else:
amg_core.apply_absolute_distance_filter(C.shape[0], theta, C.indptr,
C.indices, C.data)
C.eliminate_zeros()
C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')
# Standardized strength values require small values be weak and large
# values be strong. So, we invert the distances.
C.data = 1.0 / C.data
# Scale C by the largest magnitude entry in each row
C = scale_rows_by_largest_entry(C)
return C | [
"def",
"distance_strength_of_connection",
"(",
"A",
",",
"V",
",",
"theta",
"=",
"2.0",
",",
"relative_drop",
"=",
"True",
")",
":",
"# Amalgamate for the supernode case",
"if",
"sparse",
".",
"isspmatrix_bsr",
"(",
"A",
")",
":",
"sn",
"=",
"int",
"(",
"A",... | Distance based strength-of-connection.
Parameters
----------
A : csr_matrix or bsr_matrix
Square, sparse matrix in CSR or BSR format
V : array
Coordinates of the vertices of the graph of A
relative_drop : bool
If false, then a connection must be within a distance of theta
from a point to be strongly connected.
If true, then the closest connection is always strong, and other points
must be within theta times the smallest distance to be strong
Returns
-------
C : csr_matrix
C(i,j) = distance(point_i, point_j)
Strength of connection matrix where strength values are
distances, i.e. the smaller the value, the stronger the connection.
Sparsity pattern of C is copied from A.
Notes
-----
- theta is a drop tolerance that is applied row-wise
- If a BSR matrix given, then the return matrix is still CSR. The strength
is given between super nodes based on the BSR block size.
Examples
--------
>>> from pyamg.gallery import load_example
>>> from pyamg.strength import distance_strength_of_connection
>>> data = load_example('airfoil')
>>> A = data['A'].tocsr()
>>> S = distance_strength_of_connection(data['A'], data['vertices']) | [
"Distance",
"based",
"strength",
"-",
"of",
"-",
"connection",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/strength.py#L32-L116 | train | 209,209 |
pyamg/pyamg | pyamg/strength.py | classical_strength_of_connection | def classical_strength_of_connection(A, theta=0.0, norm='abs'):
"""Classical Strength Measure.
Return a strength of connection matrix using the classical AMG measure
An off-diagonal entry A[i,j] is a strong connection iff::
A[i,j] >= theta * max(|A[i,k]|), where k != i (norm='abs')
-A[i,j] >= theta * max(-A[i,k]), where k != i (norm='min')
Parameters
----------
A : csr_matrix or bsr_matrix
Square, sparse matrix in CSR or BSR format
theta : float
Threshold parameter in [0,1].
norm: 'string'
'abs' : to use the absolute value,
'min' : to use the negative value (see above)
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. S[i,j]=1 if vertex i
is strongly influenced by vertex j.
See Also
--------
symmetric_strength_of_connection : symmetric measure used in SA
evolution_strength_of_connection : relaxation based strength measure
Notes
-----
- A symmetric A does not necessarily yield a symmetric strength matrix S
- Calls C++ function classical_strength_of_connection
- The version as implemented is designed form M-matrices. Trottenberg et
al. use max A[i,k] over all negative entries, which is the same. A
positive edge weight never indicates a strong connection.
- See [2000BrHeMc]_ and [2001bTrOoSc]_
References
----------
.. [2000BrHeMc] Briggs, W. L., Henson, V. E., McCormick, S. F., "A multigrid
tutorial", Second edition. Society for Industrial and Applied
Mathematics (SIAM), Philadelphia, PA, 2000. xii+193 pp.
.. [2001bTrOoSc] Trottenberg, U., Oosterlee, C. W., Schuller, A., "Multigrid",
Academic Press, Inc., San Diego, CA, 2001. xvi+631 pp.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import classical_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = classical_strength_of_connection(A, 0.0)
"""
if sparse.isspmatrix_bsr(A):
blocksize = A.blocksize[0]
else:
blocksize = 1
if not sparse.isspmatrix_csr(A):
warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
A = sparse.csr_matrix(A)
if (theta < 0 or theta > 1):
raise ValueError('expected theta in [0,1]')
Sp = np.empty_like(A.indptr)
Sj = np.empty_like(A.indices)
Sx = np.empty_like(A.data)
if norm == 'abs':
amg_core.classical_strength_of_connection_abs(
A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
elif norm == 'min':
amg_core.classical_strength_of_connection_min(
A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
else:
raise ValueError('Unknown norm')
S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)
if blocksize > 1:
S = amalgamate(S, blocksize)
# Strength represents "distance", so take the magnitude
S.data = np.abs(S.data)
# Scale S by the largest magnitude entry in each row
S = scale_rows_by_largest_entry(S)
return S | python | def classical_strength_of_connection(A, theta=0.0, norm='abs'):
"""Classical Strength Measure.
Return a strength of connection matrix using the classical AMG measure
An off-diagonal entry A[i,j] is a strong connection iff::
A[i,j] >= theta * max(|A[i,k]|), where k != i (norm='abs')
-A[i,j] >= theta * max(-A[i,k]), where k != i (norm='min')
Parameters
----------
A : csr_matrix or bsr_matrix
Square, sparse matrix in CSR or BSR format
theta : float
Threshold parameter in [0,1].
norm: 'string'
'abs' : to use the absolute value,
'min' : to use the negative value (see above)
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. S[i,j]=1 if vertex i
is strongly influenced by vertex j.
See Also
--------
symmetric_strength_of_connection : symmetric measure used in SA
evolution_strength_of_connection : relaxation based strength measure
Notes
-----
- A symmetric A does not necessarily yield a symmetric strength matrix S
- Calls C++ function classical_strength_of_connection
- The version as implemented is designed form M-matrices. Trottenberg et
al. use max A[i,k] over all negative entries, which is the same. A
positive edge weight never indicates a strong connection.
- See [2000BrHeMc]_ and [2001bTrOoSc]_
References
----------
.. [2000BrHeMc] Briggs, W. L., Henson, V. E., McCormick, S. F., "A multigrid
tutorial", Second edition. Society for Industrial and Applied
Mathematics (SIAM), Philadelphia, PA, 2000. xii+193 pp.
.. [2001bTrOoSc] Trottenberg, U., Oosterlee, C. W., Schuller, A., "Multigrid",
Academic Press, Inc., San Diego, CA, 2001. xvi+631 pp.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import classical_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = classical_strength_of_connection(A, 0.0)
"""
if sparse.isspmatrix_bsr(A):
blocksize = A.blocksize[0]
else:
blocksize = 1
if not sparse.isspmatrix_csr(A):
warn("Implicit conversion of A to csr", sparse.SparseEfficiencyWarning)
A = sparse.csr_matrix(A)
if (theta < 0 or theta > 1):
raise ValueError('expected theta in [0,1]')
Sp = np.empty_like(A.indptr)
Sj = np.empty_like(A.indices)
Sx = np.empty_like(A.data)
if norm == 'abs':
amg_core.classical_strength_of_connection_abs(
A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
elif norm == 'min':
amg_core.classical_strength_of_connection_min(
A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
else:
raise ValueError('Unknown norm')
S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)
if blocksize > 1:
S = amalgamate(S, blocksize)
# Strength represents "distance", so take the magnitude
S.data = np.abs(S.data)
# Scale S by the largest magnitude entry in each row
S = scale_rows_by_largest_entry(S)
return S | [
"def",
"classical_strength_of_connection",
"(",
"A",
",",
"theta",
"=",
"0.0",
",",
"norm",
"=",
"'abs'",
")",
":",
"if",
"sparse",
".",
"isspmatrix_bsr",
"(",
"A",
")",
":",
"blocksize",
"=",
"A",
".",
"blocksize",
"[",
"0",
"]",
"else",
":",
"blocksi... | Classical Strength Measure.
Return a strength of connection matrix using the classical AMG measure
An off-diagonal entry A[i,j] is a strong connection iff::
A[i,j] >= theta * max(|A[i,k]|), where k != i (norm='abs')
-A[i,j] >= theta * max(-A[i,k]), where k != i (norm='min')
Parameters
----------
A : csr_matrix or bsr_matrix
Square, sparse matrix in CSR or BSR format
theta : float
Threshold parameter in [0,1].
norm: 'string'
'abs' : to use the absolute value,
'min' : to use the negative value (see above)
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. S[i,j]=1 if vertex i
is strongly influenced by vertex j.
See Also
--------
symmetric_strength_of_connection : symmetric measure used in SA
evolution_strength_of_connection : relaxation based strength measure
Notes
-----
- A symmetric A does not necessarily yield a symmetric strength matrix S
- Calls C++ function classical_strength_of_connection
- The version as implemented is designed form M-matrices. Trottenberg et
al. use max A[i,k] over all negative entries, which is the same. A
positive edge weight never indicates a strong connection.
- See [2000BrHeMc]_ and [2001bTrOoSc]_
References
----------
.. [2000BrHeMc] Briggs, W. L., Henson, V. E., McCormick, S. F., "A multigrid
tutorial", Second edition. Society for Industrial and Applied
Mathematics (SIAM), Philadelphia, PA, 2000. xii+193 pp.
.. [2001bTrOoSc] Trottenberg, U., Oosterlee, C. W., Schuller, A., "Multigrid",
Academic Press, Inc., San Diego, CA, 2001. xvi+631 pp.
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import classical_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = classical_strength_of_connection(A, 0.0) | [
"Classical",
"Strength",
"Measure",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/strength.py#L119-L216 | train | 209,210 |
pyamg/pyamg | pyamg/strength.py | symmetric_strength_of_connection | def symmetric_strength_of_connection(A, theta=0):
"""Symmetric Strength Measure.
Compute strength of connection matrix using the standard symmetric measure
An off-diagonal connection A[i,j] is strong iff::
abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) )
Parameters
----------
A : csr_matrix
Matrix graph defined in sparse format. Entry A[i,j] describes the
strength of edge [i,j]
theta : float
Threshold parameter (positive).
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. S[i,j]=1 if vertex i
is strongly influenced by vertex j.
See Also
--------
symmetric_strength_of_connection : symmetric measure used in SA
evolution_strength_of_connection : relaxation based strength measure
Notes
-----
- For vector problems, standard strength measures may produce
undesirable aggregates. A "block approach" from Vanek et al. is used
to replace vertex comparisons with block-type comparisons. A
connection between nodes i and j in the block case is strong if::
||AB[i,j]|| >= theta * sqrt( ||AB[i,i]||*||AB[j,j]|| ) where AB[k,l]
is the matrix block (degrees of freedom) associated with nodes k and
l and ||.|| is a matrix norm, such a Frobenius.
- See [1996bVaMaBr]_ for more details.
References
----------
.. [1996bVaMaBr] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import symmetric_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = symmetric_strength_of_connection(A, 0.0)
"""
if theta < 0:
raise ValueError('expected a positive theta')
if sparse.isspmatrix_csr(A):
# if theta == 0:
# return A
Sp = np.empty_like(A.indptr)
Sj = np.empty_like(A.indices)
Sx = np.empty_like(A.data)
fn = amg_core.symmetric_strength_of_connection
fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)
elif sparse.isspmatrix_bsr(A):
M, N = A.shape
R, C = A.blocksize
if R != C:
raise ValueError('matrix must have square blocks')
if theta == 0:
data = np.ones(len(A.indices), dtype=A.dtype)
S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()),
shape=(int(M / R), int(N / C)))
else:
# the strength of connection matrix is based on the
# Frobenius norms of the blocks
data = (np.conjugate(A.data) * A.data).reshape(-1, R * C)
data = data.sum(axis=1)
A = sparse.csr_matrix((data, A.indices, A.indptr),
shape=(int(M / R), int(N / C)))
return symmetric_strength_of_connection(A, theta)
else:
raise TypeError('expected csr_matrix or bsr_matrix')
# Strength represents "distance", so take the magnitude
S.data = np.abs(S.data)
# Scale S by the largest magnitude entry in each row
S = scale_rows_by_largest_entry(S)
return S | python | def symmetric_strength_of_connection(A, theta=0):
"""Symmetric Strength Measure.
Compute strength of connection matrix using the standard symmetric measure
An off-diagonal connection A[i,j] is strong iff::
abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) )
Parameters
----------
A : csr_matrix
Matrix graph defined in sparse format. Entry A[i,j] describes the
strength of edge [i,j]
theta : float
Threshold parameter (positive).
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. S[i,j]=1 if vertex i
is strongly influenced by vertex j.
See Also
--------
symmetric_strength_of_connection : symmetric measure used in SA
evolution_strength_of_connection : relaxation based strength measure
Notes
-----
- For vector problems, standard strength measures may produce
undesirable aggregates. A "block approach" from Vanek et al. is used
to replace vertex comparisons with block-type comparisons. A
connection between nodes i and j in the block case is strong if::
||AB[i,j]|| >= theta * sqrt( ||AB[i,i]||*||AB[j,j]|| ) where AB[k,l]
is the matrix block (degrees of freedom) associated with nodes k and
l and ||.|| is a matrix norm, such a Frobenius.
- See [1996bVaMaBr]_ for more details.
References
----------
.. [1996bVaMaBr] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import symmetric_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = symmetric_strength_of_connection(A, 0.0)
"""
if theta < 0:
raise ValueError('expected a positive theta')
if sparse.isspmatrix_csr(A):
# if theta == 0:
# return A
Sp = np.empty_like(A.indptr)
Sj = np.empty_like(A.indices)
Sx = np.empty_like(A.data)
fn = amg_core.symmetric_strength_of_connection
fn(A.shape[0], theta, A.indptr, A.indices, A.data, Sp, Sj, Sx)
S = sparse.csr_matrix((Sx, Sj, Sp), shape=A.shape)
elif sparse.isspmatrix_bsr(A):
M, N = A.shape
R, C = A.blocksize
if R != C:
raise ValueError('matrix must have square blocks')
if theta == 0:
data = np.ones(len(A.indices), dtype=A.dtype)
S = sparse.csr_matrix((data, A.indices.copy(), A.indptr.copy()),
shape=(int(M / R), int(N / C)))
else:
# the strength of connection matrix is based on the
# Frobenius norms of the blocks
data = (np.conjugate(A.data) * A.data).reshape(-1, R * C)
data = data.sum(axis=1)
A = sparse.csr_matrix((data, A.indices, A.indptr),
shape=(int(M / R), int(N / C)))
return symmetric_strength_of_connection(A, theta)
else:
raise TypeError('expected csr_matrix or bsr_matrix')
# Strength represents "distance", so take the magnitude
S.data = np.abs(S.data)
# Scale S by the largest magnitude entry in each row
S = scale_rows_by_largest_entry(S)
return S | [
"def",
"symmetric_strength_of_connection",
"(",
"A",
",",
"theta",
"=",
"0",
")",
":",
"if",
"theta",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"'expected a positive theta'",
")",
"if",
"sparse",
".",
"isspmatrix_csr",
"(",
"A",
")",
":",
"# if theta == 0:",
... | Symmetric Strength Measure.
Compute strength of connection matrix using the standard symmetric measure
An off-diagonal connection A[i,j] is strong iff::
abs(A[i,j]) >= theta * sqrt( abs(A[i,i]) * abs(A[j,j]) )
Parameters
----------
A : csr_matrix
Matrix graph defined in sparse format. Entry A[i,j] describes the
strength of edge [i,j]
theta : float
Threshold parameter (positive).
Returns
-------
S : csr_matrix
Matrix graph defining strong connections. S[i,j]=1 if vertex i
is strongly influenced by vertex j.
See Also
--------
symmetric_strength_of_connection : symmetric measure used in SA
evolution_strength_of_connection : relaxation based strength measure
Notes
-----
- For vector problems, standard strength measures may produce
undesirable aggregates. A "block approach" from Vanek et al. is used
to replace vertex comparisons with block-type comparisons. A
connection between nodes i and j in the block case is strong if::
||AB[i,j]|| >= theta * sqrt( ||AB[i,i]||*||AB[j,j]|| ) where AB[k,l]
is the matrix block (degrees of freedom) associated with nodes k and
l and ||.|| is a matrix norm, such a Frobenius.
- See [1996bVaMaBr]_ for more details.
References
----------
.. [1996bVaMaBr] Vanek, P. and Mandel, J. and Brezina, M.,
"Algebraic Multigrid by Smoothed Aggregation for
Second and Fourth Order Elliptic Problems",
Computing, vol. 56, no. 3, pp. 179--196, 1996.
http://citeseer.ist.psu.edu/vanek96algebraic.html
Examples
--------
>>> import numpy as np
>>> from pyamg.gallery import stencil_grid
>>> from pyamg.strength import symmetric_strength_of_connection
>>> n=3
>>> stencil = np.array([[-1.0,-1.0,-1.0],
... [-1.0, 8.0,-1.0],
... [-1.0,-1.0,-1.0]])
>>> A = stencil_grid(stencil, (n,n), format='csr')
>>> S = symmetric_strength_of_connection(A, 0.0) | [
"Symmetric",
"Strength",
"Measure",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/strength.py#L219-L326 | train | 209,211 |
pyamg/pyamg | pyamg/strength.py | relaxation_vectors | def relaxation_vectors(A, R, k, alpha):
"""Generate test vectors by relaxing on Ax=0 for some random vectors x.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
alpha : scalar
Weight for Jacobi
R : integer
Number of random vectors
k : integer
Number of relaxation passes
Returns
-------
x : array
Dense array N x k array of relaxation vectors
"""
# random n x R block in column ordering
n = A.shape[0]
x = np.random.rand(n * R) - 0.5
x = np.reshape(x, (n, R), order='F')
# for i in range(R):
# x[:,i] = x[:,i] - np.mean(x[:,i])
b = np.zeros((n, 1))
for r in range(0, R):
jacobi(A, x[:, r], b, iterations=k, omega=alpha)
# x[:,r] = x[:,r]/norm(x[:,r])
return x | python | def relaxation_vectors(A, R, k, alpha):
"""Generate test vectors by relaxing on Ax=0 for some random vectors x.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
alpha : scalar
Weight for Jacobi
R : integer
Number of random vectors
k : integer
Number of relaxation passes
Returns
-------
x : array
Dense array N x k array of relaxation vectors
"""
# random n x R block in column ordering
n = A.shape[0]
x = np.random.rand(n * R) - 0.5
x = np.reshape(x, (n, R), order='F')
# for i in range(R):
# x[:,i] = x[:,i] - np.mean(x[:,i])
b = np.zeros((n, 1))
for r in range(0, R):
jacobi(A, x[:, r], b, iterations=k, omega=alpha)
# x[:,r] = x[:,r]/norm(x[:,r])
return x | [
"def",
"relaxation_vectors",
"(",
"A",
",",
"R",
",",
"k",
",",
"alpha",
")",
":",
"# random n x R block in column ordering",
"n",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"x",
"=",
"np",
".",
"random",
".",
"rand",
"(",
"n",
"*",
"R",
")",
"-",
"0.5... | Generate test vectors by relaxing on Ax=0 for some random vectors x.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
alpha : scalar
Weight for Jacobi
R : integer
Number of random vectors
k : integer
Number of relaxation passes
Returns
-------
x : array
Dense array N x k array of relaxation vectors | [
"Generate",
"test",
"vectors",
"by",
"relaxing",
"on",
"Ax",
"=",
"0",
"for",
"some",
"random",
"vectors",
"x",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/strength.py#L836-L868 | train | 209,212 |
pyamg/pyamg | pyamg/strength.py | affinity_distance | def affinity_distance(A, alpha=0.5, R=5, k=20, epsilon=4.0):
"""Affinity Distance Strength Measure.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
alpha : scalar
Weight for Jacobi
R : integer
Number of random vectors
k : integer
Number of relaxation passes
epsilon : scalar
Drop tolerance
Returns
-------
C : csr_matrix
Sparse matrix of strength values
References
----------
.. [LiBr] Oren E. Livne and Achi Brandt, "Lean Algebraic Multigrid
(LAMG): Fast Graph Laplacian Linear Solver"
Notes
-----
No unit testing yet.
Does not handle BSR matrices yet.
See [LiBr]_ for more details.
"""
if not sparse.isspmatrix_csr(A):
A = sparse.csr_matrix(A)
if alpha < 0:
raise ValueError('expected alpha>0')
if R <= 0 or not isinstance(R, int):
raise ValueError('expected integer R>0')
if k <= 0 or not isinstance(k, int):
raise ValueError('expected integer k>0')
if epsilon < 1:
raise ValueError('expected epsilon>1.0')
def distance(x):
(rows, cols) = A.nonzero()
return 1 - np.sum(x[rows] * x[cols], axis=1)**2 / \
(np.sum(x[rows]**2, axis=1) * np.sum(x[cols]**2, axis=1))
return distance_measure_common(A, distance, alpha, R, k, epsilon) | python | def affinity_distance(A, alpha=0.5, R=5, k=20, epsilon=4.0):
"""Affinity Distance Strength Measure.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
alpha : scalar
Weight for Jacobi
R : integer
Number of random vectors
k : integer
Number of relaxation passes
epsilon : scalar
Drop tolerance
Returns
-------
C : csr_matrix
Sparse matrix of strength values
References
----------
.. [LiBr] Oren E. Livne and Achi Brandt, "Lean Algebraic Multigrid
(LAMG): Fast Graph Laplacian Linear Solver"
Notes
-----
No unit testing yet.
Does not handle BSR matrices yet.
See [LiBr]_ for more details.
"""
if not sparse.isspmatrix_csr(A):
A = sparse.csr_matrix(A)
if alpha < 0:
raise ValueError('expected alpha>0')
if R <= 0 or not isinstance(R, int):
raise ValueError('expected integer R>0')
if k <= 0 or not isinstance(k, int):
raise ValueError('expected integer k>0')
if epsilon < 1:
raise ValueError('expected epsilon>1.0')
def distance(x):
(rows, cols) = A.nonzero()
return 1 - np.sum(x[rows] * x[cols], axis=1)**2 / \
(np.sum(x[rows]**2, axis=1) * np.sum(x[cols]**2, axis=1))
return distance_measure_common(A, distance, alpha, R, k, epsilon) | [
"def",
"affinity_distance",
"(",
"A",
",",
"alpha",
"=",
"0.5",
",",
"R",
"=",
"5",
",",
"k",
"=",
"20",
",",
"epsilon",
"=",
"4.0",
")",
":",
"if",
"not",
"sparse",
".",
"isspmatrix_csr",
"(",
"A",
")",
":",
"A",
"=",
"sparse",
".",
"csr_matrix"... | Affinity Distance Strength Measure.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
alpha : scalar
Weight for Jacobi
R : integer
Number of random vectors
k : integer
Number of relaxation passes
epsilon : scalar
Drop tolerance
Returns
-------
C : csr_matrix
Sparse matrix of strength values
References
----------
.. [LiBr] Oren E. Livne and Achi Brandt, "Lean Algebraic Multigrid
(LAMG): Fast Graph Laplacian Linear Solver"
Notes
-----
No unit testing yet.
Does not handle BSR matrices yet.
See [LiBr]_ for more details. | [
"Affinity",
"Distance",
"Strength",
"Measure",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/strength.py#L871-L926 | train | 209,213 |
pyamg/pyamg | pyamg/strength.py | algebraic_distance | def algebraic_distance(A, alpha=0.5, R=5, k=20, epsilon=2.0, p=2):
"""Algebraic Distance Strength Measure.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
alpha : scalar
Weight for Jacobi
R : integer
Number of random vectors
k : integer
Number of relaxation passes
epsilon : scalar
Drop tolerance
p : scalar or inf
p-norm of the measure
Returns
-------
C : csr_matrix
Sparse matrix of strength values
References
----------
.. [SaSaSc] Ilya Safro, Peter Sanders, and Christian Schulz,
"Advanced Coarsening Schemes for Graph Partitioning"
Notes
-----
No unit testing yet.
Does not handle BSR matrices yet.
See [SaSaSc]_ for more details.
"""
if not sparse.isspmatrix_csr(A):
A = sparse.csr_matrix(A)
if alpha < 0:
raise ValueError('expected alpha>0')
if R <= 0 or not isinstance(R, int):
raise ValueError('expected integer R>0')
if k <= 0 or not isinstance(k, int):
raise ValueError('expected integer k>0')
if epsilon < 1:
raise ValueError('expected epsilon>1.0')
if p < 1:
raise ValueError('expected p>1 or equal to numpy.inf')
def distance(x):
(rows, cols) = A.nonzero()
if p != np.inf:
avg = np.sum(np.abs(x[rows] - x[cols])**p, axis=1) / R
return (avg)**(1.0 / p)
else:
return np.abs(x[rows] - x[cols]).max(axis=1)
return distance_measure_common(A, distance, alpha, R, k, epsilon) | python | def algebraic_distance(A, alpha=0.5, R=5, k=20, epsilon=2.0, p=2):
"""Algebraic Distance Strength Measure.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
alpha : scalar
Weight for Jacobi
R : integer
Number of random vectors
k : integer
Number of relaxation passes
epsilon : scalar
Drop tolerance
p : scalar or inf
p-norm of the measure
Returns
-------
C : csr_matrix
Sparse matrix of strength values
References
----------
.. [SaSaSc] Ilya Safro, Peter Sanders, and Christian Schulz,
"Advanced Coarsening Schemes for Graph Partitioning"
Notes
-----
No unit testing yet.
Does not handle BSR matrices yet.
See [SaSaSc]_ for more details.
"""
if not sparse.isspmatrix_csr(A):
A = sparse.csr_matrix(A)
if alpha < 0:
raise ValueError('expected alpha>0')
if R <= 0 or not isinstance(R, int):
raise ValueError('expected integer R>0')
if k <= 0 or not isinstance(k, int):
raise ValueError('expected integer k>0')
if epsilon < 1:
raise ValueError('expected epsilon>1.0')
if p < 1:
raise ValueError('expected p>1 or equal to numpy.inf')
def distance(x):
(rows, cols) = A.nonzero()
if p != np.inf:
avg = np.sum(np.abs(x[rows] - x[cols])**p, axis=1) / R
return (avg)**(1.0 / p)
else:
return np.abs(x[rows] - x[cols]).max(axis=1)
return distance_measure_common(A, distance, alpha, R, k, epsilon) | [
"def",
"algebraic_distance",
"(",
"A",
",",
"alpha",
"=",
"0.5",
",",
"R",
"=",
"5",
",",
"k",
"=",
"20",
",",
"epsilon",
"=",
"2.0",
",",
"p",
"=",
"2",
")",
":",
"if",
"not",
"sparse",
".",
"isspmatrix_csr",
"(",
"A",
")",
":",
"A",
"=",
"s... | Algebraic Distance Strength Measure.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
alpha : scalar
Weight for Jacobi
R : integer
Number of random vectors
k : integer
Number of relaxation passes
epsilon : scalar
Drop tolerance
p : scalar or inf
p-norm of the measure
Returns
-------
C : csr_matrix
Sparse matrix of strength values
References
----------
.. [SaSaSc] Ilya Safro, Peter Sanders, and Christian Schulz,
"Advanced Coarsening Schemes for Graph Partitioning"
Notes
-----
No unit testing yet.
Does not handle BSR matrices yet.
See [SaSaSc]_ for more details. | [
"Algebraic",
"Distance",
"Strength",
"Measure",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/strength.py#L929-L992 | train | 209,214 |
pyamg/pyamg | pyamg/strength.py | distance_measure_common | def distance_measure_common(A, func, alpha, R, k, epsilon):
"""Create strength of connection matrixfrom a function applied to relaxation vectors."""
# create test vectors
x = relaxation_vectors(A, R, k, alpha)
# apply distance measure function to vectors
d = func(x)
# drop distances to self
(rows, cols) = A.nonzero()
weak = np.where(rows == cols)[0]
d[weak] = 0
C = sparse.csr_matrix((d, (rows, cols)), shape=A.shape)
C.eliminate_zeros()
# remove weak connections
# removes entry e from a row if e > theta * min of all entries in the row
amg_core.apply_distance_filter(C.shape[0], epsilon, C.indptr,
C.indices, C.data)
C.eliminate_zeros()
# Standardized strength values require small values be weak and large
# values be strong. So, we invert the distances.
C.data = 1.0 / C.data
# Put an identity on the diagonal
C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')
# Scale C by the largest magnitude entry in each row
C = scale_rows_by_largest_entry(C)
return C | python | def distance_measure_common(A, func, alpha, R, k, epsilon):
"""Create strength of connection matrixfrom a function applied to relaxation vectors."""
# create test vectors
x = relaxation_vectors(A, R, k, alpha)
# apply distance measure function to vectors
d = func(x)
# drop distances to self
(rows, cols) = A.nonzero()
weak = np.where(rows == cols)[0]
d[weak] = 0
C = sparse.csr_matrix((d, (rows, cols)), shape=A.shape)
C.eliminate_zeros()
# remove weak connections
# removes entry e from a row if e > theta * min of all entries in the row
amg_core.apply_distance_filter(C.shape[0], epsilon, C.indptr,
C.indices, C.data)
C.eliminate_zeros()
# Standardized strength values require small values be weak and large
# values be strong. So, we invert the distances.
C.data = 1.0 / C.data
# Put an identity on the diagonal
C = C + sparse.eye(C.shape[0], C.shape[1], format='csr')
# Scale C by the largest magnitude entry in each row
C = scale_rows_by_largest_entry(C)
return C | [
"def",
"distance_measure_common",
"(",
"A",
",",
"func",
",",
"alpha",
",",
"R",
",",
"k",
",",
"epsilon",
")",
":",
"# create test vectors",
"x",
"=",
"relaxation_vectors",
"(",
"A",
",",
"R",
",",
"k",
",",
"alpha",
")",
"# apply distance measure function ... | Create strength of connection matrixfrom a function applied to relaxation vectors. | [
"Create",
"strength",
"of",
"connection",
"matrixfrom",
"a",
"function",
"applied",
"to",
"relaxation",
"vectors",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/strength.py#L995-L1026 | train | 209,215 |
pyamg/pyamg | pyamg/aggregation/smooth.py | jacobi_prolongation_smoother | def jacobi_prolongation_smoother(S, T, C, B, omega=4.0/3.0, degree=1,
filter=False, weighting='diagonal'):
"""Jacobi prolongation smoother.
Parameters
----------
S : csr_matrix, bsr_matrix
Sparse NxN matrix used for smoothing. Typically, A.
T : csr_matrix, bsr_matrix
Tentative prolongator
C : csr_matrix, bsr_matrix
Strength-of-connection matrix
B : array
Near nullspace modes for the coarse grid such that T*B
exactly reproduces the fine grid near nullspace modes
omega : scalar
Damping parameter
filter : boolean
If true, filter S before smoothing T. This option can greatly control
complexity.
weighting : string
'block', 'diagonal' or 'local' weighting for constructing the Jacobi D
'local' Uses a local row-wise weight based on the Gershgorin estimate.
Avoids any potential under-damping due to inaccurate spectral radius
estimates.
'block' uses a block diagonal inverse of A if A is BSR
'diagonal' uses classic Jacobi with D = diagonal(A)
Returns
-------
P : csr_matrix, bsr_matrix
Smoothed (final) prolongator defined by P = (I - omega/rho(K) K) * T
where K = diag(S)^-1 * S and rho(K) is an approximation to the
spectral radius of K.
Notes
-----
If weighting is not 'local', then results using Jacobi prolongation
smoother are not precisely reproducible due to a random initial guess used
for the spectral radius approximation. For precise reproducibility,
set numpy.random.seed(..) to the same value before each test.
Examples
--------
>>> from pyamg.aggregation import jacobi_prolongation_smoother
>>> from pyamg.gallery import poisson
>>> from scipy.sparse import coo_matrix
>>> import numpy as np
>>> data = np.ones((6,))
>>> row = np.arange(0,6)
>>> col = np.kron([0,1],np.ones((3,)))
>>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
>>> T.todense()
matrix([[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])
>>> A = poisson((6,),format='csr')
>>> P = jacobi_prolongation_smoother(A,T,A,np.ones((2,1)))
>>> P.todense()
matrix([[ 0.64930164, 0. ],
[ 1. , 0. ],
[ 0.64930164, 0.35069836],
[ 0.35069836, 0.64930164],
[ 0. , 1. ],
[ 0. , 0.64930164]])
"""
# preprocess weighting
if weighting == 'block':
if sparse.isspmatrix_csr(S):
weighting = 'diagonal'
elif sparse.isspmatrix_bsr(S):
if S.blocksize[0] == 1:
weighting = 'diagonal'
if filter:
# Implement filtered prolongation smoothing for the general case by
# utilizing satisfy constraints
if sparse.isspmatrix_bsr(S):
numPDEs = S.blocksize[0]
else:
numPDEs = 1
# Create a filtered S with entries dropped that aren't in C
C = UnAmal(C, numPDEs, numPDEs)
S = S.multiply(C)
S.eliminate_zeros()
if weighting == 'diagonal':
# Use diagonal of S
D_inv = get_diagonal(S, inv=True)
D_inv_S = scale_rows(S, D_inv, copy=True)
D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
elif weighting == 'block':
# Use block diagonal of S
D_inv = get_block_diag(S, blocksize=S.blocksize[0], inv_flag=True)
D_inv = sparse.bsr_matrix((D_inv, np.arange(D_inv.shape[0]),
np.arange(D_inv.shape[0]+1)),
shape=S.shape)
D_inv_S = D_inv*S
D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
elif weighting == 'local':
# Use the Gershgorin estimate as each row's weight, instead of a global
# spectral radius estimate
D = np.abs(S)*np.ones((S.shape[0], 1), dtype=S.dtype)
D_inv = np.zeros_like(D)
D_inv[D != 0] = 1.0 / np.abs(D[D != 0])
D_inv_S = scale_rows(S, D_inv, copy=True)
D_inv_S = omega*D_inv_S
else:
raise ValueError('Incorrect weighting option')
if filter:
# Carry out Jacobi, but after calculating the prolongator update, U,
# apply satisfy constraints so that U*B = 0
P = T
for i in range(degree):
U = (D_inv_S*P).tobsr(blocksize=P.blocksize)
# Enforce U*B = 0 (1) Construct array of inv(Bi'Bi), where Bi is B
# restricted to row i's sparsity pattern in Sparsity Pattern. This
# array is used multiple times in Satisfy_Constraints(...).
BtBinv = compute_BtBinv(B, U)
# (2) Apply satisfy constraints
Satisfy_Constraints(U, B, BtBinv)
# Update P
P = P - U
else:
# Carry out Jacobi as normal
P = T
for i in range(degree):
P = P - (D_inv_S*P)
return P | python | def jacobi_prolongation_smoother(S, T, C, B, omega=4.0/3.0, degree=1,
filter=False, weighting='diagonal'):
"""Jacobi prolongation smoother.
Parameters
----------
S : csr_matrix, bsr_matrix
Sparse NxN matrix used for smoothing. Typically, A.
T : csr_matrix, bsr_matrix
Tentative prolongator
C : csr_matrix, bsr_matrix
Strength-of-connection matrix
B : array
Near nullspace modes for the coarse grid such that T*B
exactly reproduces the fine grid near nullspace modes
omega : scalar
Damping parameter
filter : boolean
If true, filter S before smoothing T. This option can greatly control
complexity.
weighting : string
'block', 'diagonal' or 'local' weighting for constructing the Jacobi D
'local' Uses a local row-wise weight based on the Gershgorin estimate.
Avoids any potential under-damping due to inaccurate spectral radius
estimates.
'block' uses a block diagonal inverse of A if A is BSR
'diagonal' uses classic Jacobi with D = diagonal(A)
Returns
-------
P : csr_matrix, bsr_matrix
Smoothed (final) prolongator defined by P = (I - omega/rho(K) K) * T
where K = diag(S)^-1 * S and rho(K) is an approximation to the
spectral radius of K.
Notes
-----
If weighting is not 'local', then results using Jacobi prolongation
smoother are not precisely reproducible due to a random initial guess used
for the spectral radius approximation. For precise reproducibility,
set numpy.random.seed(..) to the same value before each test.
Examples
--------
>>> from pyamg.aggregation import jacobi_prolongation_smoother
>>> from pyamg.gallery import poisson
>>> from scipy.sparse import coo_matrix
>>> import numpy as np
>>> data = np.ones((6,))
>>> row = np.arange(0,6)
>>> col = np.kron([0,1],np.ones((3,)))
>>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
>>> T.todense()
matrix([[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])
>>> A = poisson((6,),format='csr')
>>> P = jacobi_prolongation_smoother(A,T,A,np.ones((2,1)))
>>> P.todense()
matrix([[ 0.64930164, 0. ],
[ 1. , 0. ],
[ 0.64930164, 0.35069836],
[ 0.35069836, 0.64930164],
[ 0. , 1. ],
[ 0. , 0.64930164]])
"""
# preprocess weighting
if weighting == 'block':
if sparse.isspmatrix_csr(S):
weighting = 'diagonal'
elif sparse.isspmatrix_bsr(S):
if S.blocksize[0] == 1:
weighting = 'diagonal'
if filter:
# Implement filtered prolongation smoothing for the general case by
# utilizing satisfy constraints
if sparse.isspmatrix_bsr(S):
numPDEs = S.blocksize[0]
else:
numPDEs = 1
# Create a filtered S with entries dropped that aren't in C
C = UnAmal(C, numPDEs, numPDEs)
S = S.multiply(C)
S.eliminate_zeros()
if weighting == 'diagonal':
# Use diagonal of S
D_inv = get_diagonal(S, inv=True)
D_inv_S = scale_rows(S, D_inv, copy=True)
D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
elif weighting == 'block':
# Use block diagonal of S
D_inv = get_block_diag(S, blocksize=S.blocksize[0], inv_flag=True)
D_inv = sparse.bsr_matrix((D_inv, np.arange(D_inv.shape[0]),
np.arange(D_inv.shape[0]+1)),
shape=S.shape)
D_inv_S = D_inv*S
D_inv_S = (omega/approximate_spectral_radius(D_inv_S))*D_inv_S
elif weighting == 'local':
# Use the Gershgorin estimate as each row's weight, instead of a global
# spectral radius estimate
D = np.abs(S)*np.ones((S.shape[0], 1), dtype=S.dtype)
D_inv = np.zeros_like(D)
D_inv[D != 0] = 1.0 / np.abs(D[D != 0])
D_inv_S = scale_rows(S, D_inv, copy=True)
D_inv_S = omega*D_inv_S
else:
raise ValueError('Incorrect weighting option')
if filter:
# Carry out Jacobi, but after calculating the prolongator update, U,
# apply satisfy constraints so that U*B = 0
P = T
for i in range(degree):
U = (D_inv_S*P).tobsr(blocksize=P.blocksize)
# Enforce U*B = 0 (1) Construct array of inv(Bi'Bi), where Bi is B
# restricted to row i's sparsity pattern in Sparsity Pattern. This
# array is used multiple times in Satisfy_Constraints(...).
BtBinv = compute_BtBinv(B, U)
# (2) Apply satisfy constraints
Satisfy_Constraints(U, B, BtBinv)
# Update P
P = P - U
else:
# Carry out Jacobi as normal
P = T
for i in range(degree):
P = P - (D_inv_S*P)
return P | [
"def",
"jacobi_prolongation_smoother",
"(",
"S",
",",
"T",
",",
"C",
",",
"B",
",",
"omega",
"=",
"4.0",
"/",
"3.0",
",",
"degree",
"=",
"1",
",",
"filter",
"=",
"False",
",",
"weighting",
"=",
"'diagonal'",
")",
":",
"# preprocess weighting",
"if",
"w... | Jacobi prolongation smoother.
Parameters
----------
S : csr_matrix, bsr_matrix
Sparse NxN matrix used for smoothing. Typically, A.
T : csr_matrix, bsr_matrix
Tentative prolongator
C : csr_matrix, bsr_matrix
Strength-of-connection matrix
B : array
Near nullspace modes for the coarse grid such that T*B
exactly reproduces the fine grid near nullspace modes
omega : scalar
Damping parameter
filter : boolean
If true, filter S before smoothing T. This option can greatly control
complexity.
weighting : string
'block', 'diagonal' or 'local' weighting for constructing the Jacobi D
'local' Uses a local row-wise weight based on the Gershgorin estimate.
Avoids any potential under-damping due to inaccurate spectral radius
estimates.
'block' uses a block diagonal inverse of A if A is BSR
'diagonal' uses classic Jacobi with D = diagonal(A)
Returns
-------
P : csr_matrix, bsr_matrix
Smoothed (final) prolongator defined by P = (I - omega/rho(K) K) * T
where K = diag(S)^-1 * S and rho(K) is an approximation to the
spectral radius of K.
Notes
-----
If weighting is not 'local', then results using Jacobi prolongation
smoother are not precisely reproducible due to a random initial guess used
for the spectral radius approximation. For precise reproducibility,
set numpy.random.seed(..) to the same value before each test.
Examples
--------
>>> from pyamg.aggregation import jacobi_prolongation_smoother
>>> from pyamg.gallery import poisson
>>> from scipy.sparse import coo_matrix
>>> import numpy as np
>>> data = np.ones((6,))
>>> row = np.arange(0,6)
>>> col = np.kron([0,1],np.ones((3,)))
>>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
>>> T.todense()
matrix([[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])
>>> A = poisson((6,),format='csr')
>>> P = jacobi_prolongation_smoother(A,T,A,np.ones((2,1)))
>>> P.todense()
matrix([[ 0.64930164, 0. ],
[ 1. , 0. ],
[ 0.64930164, 0.35069836],
[ 0.35069836, 0.64930164],
[ 0. , 1. ],
[ 0. , 0.64930164]]) | [
"Jacobi",
"prolongation",
"smoother",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/aggregation/smooth.py#L64-L204 | train | 209,216 |
pyamg/pyamg | pyamg/aggregation/smooth.py | richardson_prolongation_smoother | def richardson_prolongation_smoother(S, T, omega=4.0/3.0, degree=1):
"""Richardson prolongation smoother.
Parameters
----------
S : csr_matrix, bsr_matrix
Sparse NxN matrix used for smoothing. Typically, A or the
"filtered matrix" obtained from A by lumping weak connections
onto the diagonal of A.
T : csr_matrix, bsr_matrix
Tentative prolongator
omega : scalar
Damping parameter
Returns
-------
P : csr_matrix, bsr_matrix
Smoothed (final) prolongator defined by P = (I - omega/rho(S) S) * T
where rho(S) is an approximation to the spectral radius of S.
Notes
-----
Results using Richardson prolongation smoother are not precisely
reproducible due to a random initial guess used for the spectral radius
approximation. For precise reproducibility, set numpy.random.seed(..) to
the same value before each test.
Examples
--------
>>> from pyamg.aggregation import richardson_prolongation_smoother
>>> from pyamg.gallery import poisson
>>> from scipy.sparse import coo_matrix
>>> import numpy as np
>>> data = np.ones((6,))
>>> row = np.arange(0,6)
>>> col = np.kron([0,1],np.ones((3,)))
>>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
>>> T.todense()
matrix([[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])
>>> A = poisson((6,),format='csr')
>>> P = richardson_prolongation_smoother(A,T)
>>> P.todense()
matrix([[ 0.64930164, 0. ],
[ 1. , 0. ],
[ 0.64930164, 0.35069836],
[ 0.35069836, 0.64930164],
[ 0. , 1. ],
[ 0. , 0.64930164]])
"""
weight = omega/approximate_spectral_radius(S)
P = T
for i in range(degree):
P = P - weight*(S*P)
return P | python | def richardson_prolongation_smoother(S, T, omega=4.0/3.0, degree=1):
"""Richardson prolongation smoother.
Parameters
----------
S : csr_matrix, bsr_matrix
Sparse NxN matrix used for smoothing. Typically, A or the
"filtered matrix" obtained from A by lumping weak connections
onto the diagonal of A.
T : csr_matrix, bsr_matrix
Tentative prolongator
omega : scalar
Damping parameter
Returns
-------
P : csr_matrix, bsr_matrix
Smoothed (final) prolongator defined by P = (I - omega/rho(S) S) * T
where rho(S) is an approximation to the spectral radius of S.
Notes
-----
Results using Richardson prolongation smoother are not precisely
reproducible due to a random initial guess used for the spectral radius
approximation. For precise reproducibility, set numpy.random.seed(..) to
the same value before each test.
Examples
--------
>>> from pyamg.aggregation import richardson_prolongation_smoother
>>> from pyamg.gallery import poisson
>>> from scipy.sparse import coo_matrix
>>> import numpy as np
>>> data = np.ones((6,))
>>> row = np.arange(0,6)
>>> col = np.kron([0,1],np.ones((3,)))
>>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
>>> T.todense()
matrix([[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])
>>> A = poisson((6,),format='csr')
>>> P = richardson_prolongation_smoother(A,T)
>>> P.todense()
matrix([[ 0.64930164, 0. ],
[ 1. , 0. ],
[ 0.64930164, 0.35069836],
[ 0.35069836, 0.64930164],
[ 0. , 1. ],
[ 0. , 0.64930164]])
"""
weight = omega/approximate_spectral_radius(S)
P = T
for i in range(degree):
P = P - weight*(S*P)
return P | [
"def",
"richardson_prolongation_smoother",
"(",
"S",
",",
"T",
",",
"omega",
"=",
"4.0",
"/",
"3.0",
",",
"degree",
"=",
"1",
")",
":",
"weight",
"=",
"omega",
"/",
"approximate_spectral_radius",
"(",
"S",
")",
"P",
"=",
"T",
"for",
"i",
"in",
"range",... | Richardson prolongation smoother.
Parameters
----------
S : csr_matrix, bsr_matrix
Sparse NxN matrix used for smoothing. Typically, A or the
"filtered matrix" obtained from A by lumping weak connections
onto the diagonal of A.
T : csr_matrix, bsr_matrix
Tentative prolongator
omega : scalar
Damping parameter
Returns
-------
P : csr_matrix, bsr_matrix
Smoothed (final) prolongator defined by P = (I - omega/rho(S) S) * T
where rho(S) is an approximation to the spectral radius of S.
Notes
-----
Results using Richardson prolongation smoother are not precisely
reproducible due to a random initial guess used for the spectral radius
approximation. For precise reproducibility, set numpy.random.seed(..) to
the same value before each test.
Examples
--------
>>> from pyamg.aggregation import richardson_prolongation_smoother
>>> from pyamg.gallery import poisson
>>> from scipy.sparse import coo_matrix
>>> import numpy as np
>>> data = np.ones((6,))
>>> row = np.arange(0,6)
>>> col = np.kron([0,1],np.ones((3,)))
>>> T = coo_matrix((data,(row,col)),shape=(6,2)).tocsr()
>>> T.todense()
matrix([[ 1., 0.],
[ 1., 0.],
[ 1., 0.],
[ 0., 1.],
[ 0., 1.],
[ 0., 1.]])
>>> A = poisson((6,),format='csr')
>>> P = richardson_prolongation_smoother(A,T)
>>> P.todense()
matrix([[ 0.64930164, 0. ],
[ 1. , 0. ],
[ 0.64930164, 0.35069836],
[ 0.35069836, 0.64930164],
[ 0. , 1. ],
[ 0. , 0.64930164]]) | [
"Richardson",
"prolongation",
"smoother",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/aggregation/smooth.py#L207-L269 | train | 209,217 |
pyamg/pyamg | pyamg/relaxation/smoothing.py | matrix_asformat | def matrix_asformat(lvl, name, format, blocksize=None):
"""Set a matrix to a specific format.
This routine looks for the matrix "name" in the specified format as a
member of the level instance, lvl. For example, if name='A', format='bsr'
and blocksize=(4,4), and if lvl.Absr44 exists with the correct blocksize,
then lvl.Absr is returned. If the matrix doesn't already exist, lvl.name
is converted to the desired format, and made a member of lvl.
Only create such persistent copies of a matrix for routines such as
presmoothing and postsmoothing, where the matrix conversion is done every
cycle.
Calling this function can _dramatically_ increase your memory costs.
Be careful with it's usage.
"""
desired_matrix = name + format
M = getattr(lvl, name)
if format == 'bsr':
desired_matrix += str(blocksize[0])+str(blocksize[1])
if hasattr(lvl, desired_matrix):
# if lvl already contains lvl.name+format
pass
elif M.format == format and format != 'bsr':
# is base_matrix already in the correct format?
setattr(lvl, desired_matrix, M)
elif M.format == format and format == 'bsr':
# convert to bsr with the right blocksize
# tobsr() will not do anything extra if this is uneeded
setattr(lvl, desired_matrix, M.tobsr(blocksize=blocksize))
else:
# convert
newM = getattr(M, 'to' + format)()
setattr(lvl, desired_matrix, newM)
return getattr(lvl, desired_matrix) | python | def matrix_asformat(lvl, name, format, blocksize=None):
"""Set a matrix to a specific format.
This routine looks for the matrix "name" in the specified format as a
member of the level instance, lvl. For example, if name='A', format='bsr'
and blocksize=(4,4), and if lvl.Absr44 exists with the correct blocksize,
then lvl.Absr is returned. If the matrix doesn't already exist, lvl.name
is converted to the desired format, and made a member of lvl.
Only create such persistent copies of a matrix for routines such as
presmoothing and postsmoothing, where the matrix conversion is done every
cycle.
Calling this function can _dramatically_ increase your memory costs.
Be careful with it's usage.
"""
desired_matrix = name + format
M = getattr(lvl, name)
if format == 'bsr':
desired_matrix += str(blocksize[0])+str(blocksize[1])
if hasattr(lvl, desired_matrix):
# if lvl already contains lvl.name+format
pass
elif M.format == format and format != 'bsr':
# is base_matrix already in the correct format?
setattr(lvl, desired_matrix, M)
elif M.format == format and format == 'bsr':
# convert to bsr with the right blocksize
# tobsr() will not do anything extra if this is uneeded
setattr(lvl, desired_matrix, M.tobsr(blocksize=blocksize))
else:
# convert
newM = getattr(M, 'to' + format)()
setattr(lvl, desired_matrix, newM)
return getattr(lvl, desired_matrix) | [
"def",
"matrix_asformat",
"(",
"lvl",
",",
"name",
",",
"format",
",",
"blocksize",
"=",
"None",
")",
":",
"desired_matrix",
"=",
"name",
"+",
"format",
"M",
"=",
"getattr",
"(",
"lvl",
",",
"name",
")",
"if",
"format",
"==",
"'bsr'",
":",
"desired_mat... | Set a matrix to a specific format.
This routine looks for the matrix "name" in the specified format as a
member of the level instance, lvl. For example, if name='A', format='bsr'
and blocksize=(4,4), and if lvl.Absr44 exists with the correct blocksize,
then lvl.Absr is returned. If the matrix doesn't already exist, lvl.name
is converted to the desired format, and made a member of lvl.
Only create such persistent copies of a matrix for routines such as
presmoothing and postsmoothing, where the matrix conversion is done every
cycle.
Calling this function can _dramatically_ increase your memory costs.
Be careful with it's usage. | [
"Set",
"a",
"matrix",
"to",
"a",
"specific",
"format",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/smoothing.py#L378-L416 | train | 209,218 |
pyamg/pyamg | pyamg/gallery/mesh.py | regular_triangle_mesh | def regular_triangle_mesh(nx, ny):
"""Construct a regular triangular mesh in the unit square.
Parameters
----------
nx : int
Number of nodes in the x-direction
ny : int
Number of nodes in the y-direction
Returns
-------
Vert : array
nx*ny x 2 vertex list
E2V : array
Nex x 3 element list
Examples
--------
>>> from pyamg.gallery import regular_triangle_mesh
>>> E2V,Vert = regular_triangle_mesh(3, 2)
"""
nx, ny = int(nx), int(ny)
if nx < 2 or ny < 2:
raise ValueError('minimum mesh dimension is 2: %s' % ((nx, ny),))
Vert1 = np.tile(np.arange(0, nx-1), ny - 1) +\
np.repeat(np.arange(0, nx * (ny - 1), nx), nx - 1)
Vert3 = np.tile(np.arange(0, nx-1), ny - 1) +\
np.repeat(np.arange(0, nx * (ny - 1), nx), nx - 1) + nx
Vert2 = Vert3 + 1
Vert4 = Vert1 + 1
Verttmp = np.meshgrid(np.arange(0, nx, dtype='float'),
np.arange(0, ny, dtype='float'))
Verttmp = (Verttmp[0].ravel(), Verttmp[1].ravel())
Vert = np.vstack(Verttmp).transpose()
Vert[:, 0] = (1.0 / (nx - 1)) * Vert[:, 0]
Vert[:, 1] = (1.0 / (ny - 1)) * Vert[:, 1]
E2V1 = np.vstack((Vert1, Vert2, Vert3)).transpose()
E2V2 = np.vstack((Vert1, Vert4, Vert2)).transpose()
E2V = np.vstack((E2V1, E2V2))
return Vert, E2V | python | def regular_triangle_mesh(nx, ny):
"""Construct a regular triangular mesh in the unit square.
Parameters
----------
nx : int
Number of nodes in the x-direction
ny : int
Number of nodes in the y-direction
Returns
-------
Vert : array
nx*ny x 2 vertex list
E2V : array
Nex x 3 element list
Examples
--------
>>> from pyamg.gallery import regular_triangle_mesh
>>> E2V,Vert = regular_triangle_mesh(3, 2)
"""
nx, ny = int(nx), int(ny)
if nx < 2 or ny < 2:
raise ValueError('minimum mesh dimension is 2: %s' % ((nx, ny),))
Vert1 = np.tile(np.arange(0, nx-1), ny - 1) +\
np.repeat(np.arange(0, nx * (ny - 1), nx), nx - 1)
Vert3 = np.tile(np.arange(0, nx-1), ny - 1) +\
np.repeat(np.arange(0, nx * (ny - 1), nx), nx - 1) + nx
Vert2 = Vert3 + 1
Vert4 = Vert1 + 1
Verttmp = np.meshgrid(np.arange(0, nx, dtype='float'),
np.arange(0, ny, dtype='float'))
Verttmp = (Verttmp[0].ravel(), Verttmp[1].ravel())
Vert = np.vstack(Verttmp).transpose()
Vert[:, 0] = (1.0 / (nx - 1)) * Vert[:, 0]
Vert[:, 1] = (1.0 / (ny - 1)) * Vert[:, 1]
E2V1 = np.vstack((Vert1, Vert2, Vert3)).transpose()
E2V2 = np.vstack((Vert1, Vert4, Vert2)).transpose()
E2V = np.vstack((E2V1, E2V2))
return Vert, E2V | [
"def",
"regular_triangle_mesh",
"(",
"nx",
",",
"ny",
")",
":",
"nx",
",",
"ny",
"=",
"int",
"(",
"nx",
")",
",",
"int",
"(",
"ny",
")",
"if",
"nx",
"<",
"2",
"or",
"ny",
"<",
"2",
":",
"raise",
"ValueError",
"(",
"'minimum mesh dimension is 2: %s'",... | Construct a regular triangular mesh in the unit square.
Parameters
----------
nx : int
Number of nodes in the x-direction
ny : int
Number of nodes in the y-direction
Returns
-------
Vert : array
nx*ny x 2 vertex list
E2V : array
Nex x 3 element list
Examples
--------
>>> from pyamg.gallery import regular_triangle_mesh
>>> E2V,Vert = regular_triangle_mesh(3, 2) | [
"Construct",
"a",
"regular",
"triangular",
"mesh",
"in",
"the",
"unit",
"square",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/mesh.py#L9-L55 | train | 209,219 |
pyamg/pyamg | pyamg/vis/vis_coarse.py | check_input | def check_input(Verts=None, E2V=None, Agg=None, A=None, splitting=None,
mesh_type=None):
"""Check input for local functions."""
if Verts is not None:
if not np.issubdtype(Verts.dtype, np.floating):
raise ValueError('Verts should be of type float')
if E2V is not None:
if not np.issubdtype(E2V.dtype, np.integer):
raise ValueError('E2V should be of type integer')
if E2V.min() != 0:
warnings.warn('element indices begin at %d' % E2V.min())
if Agg is not None:
if Agg.shape[1] > Agg.shape[0]:
raise ValueError('Agg should be of size Npts x Nagg')
if A is not None:
if Agg is not None:
if (A.shape[0] != A.shape[1]) or (A.shape[0] != Agg.shape[0]):
raise ValueError('expected square matrix A\
and compatible with Agg')
else:
raise ValueError('problem with check_input')
if splitting is not None:
splitting = splitting.ravel()
if Verts is not None:
if (len(splitting) % Verts.shape[0]) != 0:
raise ValueError('splitting must be a multiple of N')
else:
raise ValueError('problem with check_input')
if mesh_type is not None:
valid_mesh_types = ('vertex', 'tri', 'quad', 'tet', 'hex')
if mesh_type not in valid_mesh_types:
raise ValueError('mesh_type should be %s' %
' or '.join(valid_mesh_types)) | python | def check_input(Verts=None, E2V=None, Agg=None, A=None, splitting=None,
mesh_type=None):
"""Check input for local functions."""
if Verts is not None:
if not np.issubdtype(Verts.dtype, np.floating):
raise ValueError('Verts should be of type float')
if E2V is not None:
if not np.issubdtype(E2V.dtype, np.integer):
raise ValueError('E2V should be of type integer')
if E2V.min() != 0:
warnings.warn('element indices begin at %d' % E2V.min())
if Agg is not None:
if Agg.shape[1] > Agg.shape[0]:
raise ValueError('Agg should be of size Npts x Nagg')
if A is not None:
if Agg is not None:
if (A.shape[0] != A.shape[1]) or (A.shape[0] != Agg.shape[0]):
raise ValueError('expected square matrix A\
and compatible with Agg')
else:
raise ValueError('problem with check_input')
if splitting is not None:
splitting = splitting.ravel()
if Verts is not None:
if (len(splitting) % Verts.shape[0]) != 0:
raise ValueError('splitting must be a multiple of N')
else:
raise ValueError('problem with check_input')
if mesh_type is not None:
valid_mesh_types = ('vertex', 'tri', 'quad', 'tet', 'hex')
if mesh_type not in valid_mesh_types:
raise ValueError('mesh_type should be %s' %
' or '.join(valid_mesh_types)) | [
"def",
"check_input",
"(",
"Verts",
"=",
"None",
",",
"E2V",
"=",
"None",
",",
"Agg",
"=",
"None",
",",
"A",
"=",
"None",
",",
"splitting",
"=",
"None",
",",
"mesh_type",
"=",
"None",
")",
":",
"if",
"Verts",
"is",
"not",
"None",
":",
"if",
"not"... | Check input for local functions. | [
"Check",
"input",
"for",
"local",
"functions",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/vis/vis_coarse.py#L257-L294 | train | 209,220 |
pyamg/pyamg | pyamg/classical/split.py | MIS | def MIS(G, weights, maxiter=None):
"""Compute a maximal independent set of a graph in parallel.
Parameters
----------
G : csr_matrix
Matrix graph, G[i,j] != 0 indicates an edge
weights : ndarray
Array of weights for each vertex in the graph G
maxiter : int
Maximum number of iterations (default: None)
Returns
-------
mis : array
Array of length of G of zeros/ones indicating the independent set
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import MIS
>>> import numpy as np
>>> G = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> w = np.ones((G.shape[0],1)).ravel()
>>> mis = MIS(G,w)
See Also
--------
fn = amg_core.maximal_independent_set_parallel
"""
if not isspmatrix_csr(G):
raise TypeError('expected csr_matrix')
G = remove_diagonal(G)
mis = np.empty(G.shape[0], dtype='intc')
mis[:] = -1
fn = amg_core.maximal_independent_set_parallel
if maxiter is None:
fn(G.shape[0], G.indptr, G.indices, -1, 1, 0, mis, weights, -1)
else:
if maxiter < 0:
raise ValueError('maxiter must be >= 0')
fn(G.shape[0], G.indptr, G.indices, -1, 1, 0, mis, weights, maxiter)
return mis | python | def MIS(G, weights, maxiter=None):
"""Compute a maximal independent set of a graph in parallel.
Parameters
----------
G : csr_matrix
Matrix graph, G[i,j] != 0 indicates an edge
weights : ndarray
Array of weights for each vertex in the graph G
maxiter : int
Maximum number of iterations (default: None)
Returns
-------
mis : array
Array of length of G of zeros/ones indicating the independent set
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import MIS
>>> import numpy as np
>>> G = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> w = np.ones((G.shape[0],1)).ravel()
>>> mis = MIS(G,w)
See Also
--------
fn = amg_core.maximal_independent_set_parallel
"""
if not isspmatrix_csr(G):
raise TypeError('expected csr_matrix')
G = remove_diagonal(G)
mis = np.empty(G.shape[0], dtype='intc')
mis[:] = -1
fn = amg_core.maximal_independent_set_parallel
if maxiter is None:
fn(G.shape[0], G.indptr, G.indices, -1, 1, 0, mis, weights, -1)
else:
if maxiter < 0:
raise ValueError('maxiter must be >= 0')
fn(G.shape[0], G.indptr, G.indices, -1, 1, 0, mis, weights, maxiter)
return mis | [
"def",
"MIS",
"(",
"G",
",",
"weights",
",",
"maxiter",
"=",
"None",
")",
":",
"if",
"not",
"isspmatrix_csr",
"(",
"G",
")",
":",
"raise",
"TypeError",
"(",
"'expected csr_matrix'",
")",
"G",
"=",
"remove_diagonal",
"(",
"G",
")",
"mis",
"=",
"np",
"... | Compute a maximal independent set of a graph in parallel.
Parameters
----------
G : csr_matrix
Matrix graph, G[i,j] != 0 indicates an edge
weights : ndarray
Array of weights for each vertex in the graph G
maxiter : int
Maximum number of iterations (default: None)
Returns
-------
mis : array
Array of length of G of zeros/ones indicating the independent set
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import MIS
>>> import numpy as np
>>> G = poisson((7,), format='csr') # 1D mesh with 7 vertices
>>> w = np.ones((G.shape[0],1)).ravel()
>>> mis = MIS(G,w)
See Also
--------
fn = amg_core.maximal_independent_set_parallel | [
"Compute",
"a",
"maximal",
"independent",
"set",
"of",
"a",
"graph",
"in",
"parallel",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/classical/split.py#L333-L381 | train | 209,221 |
pyamg/pyamg | pyamg/classical/split.py | preprocess | def preprocess(S, coloring_method=None):
"""Preprocess splitting functions.
Parameters
----------
S : csr_matrix
Strength of connection matrix
method : string
Algorithm used to compute the vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
weights: ndarray
Weights from a graph coloring of G
S : csr_matrix
Strength matrix with ones
T : csr_matrix
transpose of S
G : csr_matrix
union of S and T
Notes
-----
Performs the following operations:
- Checks input strength of connection matrix S
- Replaces S.data with ones
- Creates T = S.T in CSR format
- Creates G = S union T in CSR format
- Creates random weights
- Augments weights with graph coloring (if use_color == True)
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
if S.shape[0] != S.shape[1]:
raise ValueError('expected square matrix, shape=%s' % (S.shape,))
N = S.shape[0]
S = csr_matrix((np.ones(S.nnz, dtype='int8'), S.indices, S.indptr),
shape=(N, N))
T = S.T.tocsr() # transpose S for efficient column access
G = S + T # form graph (must be symmetric)
G.data[:] = 1
weights = np.ravel(T.sum(axis=1)) # initial weights
# weights -= T.diagonal() # discount self loops
if coloring_method is None:
weights = weights + sp.rand(len(weights))
else:
coloring = vertex_coloring(G, coloring_method)
num_colors = coloring.max() + 1
weights = weights + (sp.rand(len(weights)) + coloring)/num_colors
return (weights, G, S, T) | python | def preprocess(S, coloring_method=None):
"""Preprocess splitting functions.
Parameters
----------
S : csr_matrix
Strength of connection matrix
method : string
Algorithm used to compute the vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
weights: ndarray
Weights from a graph coloring of G
S : csr_matrix
Strength matrix with ones
T : csr_matrix
transpose of S
G : csr_matrix
union of S and T
Notes
-----
Performs the following operations:
- Checks input strength of connection matrix S
- Replaces S.data with ones
- Creates T = S.T in CSR format
- Creates G = S union T in CSR format
- Creates random weights
- Augments weights with graph coloring (if use_color == True)
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
if S.shape[0] != S.shape[1]:
raise ValueError('expected square matrix, shape=%s' % (S.shape,))
N = S.shape[0]
S = csr_matrix((np.ones(S.nnz, dtype='int8'), S.indices, S.indptr),
shape=(N, N))
T = S.T.tocsr() # transpose S for efficient column access
G = S + T # form graph (must be symmetric)
G.data[:] = 1
weights = np.ravel(T.sum(axis=1)) # initial weights
# weights -= T.diagonal() # discount self loops
if coloring_method is None:
weights = weights + sp.rand(len(weights))
else:
coloring = vertex_coloring(G, coloring_method)
num_colors = coloring.max() + 1
weights = weights + (sp.rand(len(weights)) + coloring)/num_colors
return (weights, G, S, T) | [
"def",
"preprocess",
"(",
"S",
",",
"coloring_method",
"=",
"None",
")",
":",
"if",
"not",
"isspmatrix_csr",
"(",
"S",
")",
":",
"raise",
"TypeError",
"(",
"'expected csr_matrix'",
")",
"if",
"S",
".",
"shape",
"[",
"0",
"]",
"!=",
"S",
".",
"shape",
... | Preprocess splitting functions.
Parameters
----------
S : csr_matrix
Strength of connection matrix
method : string
Algorithm used to compute the vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
weights: ndarray
Weights from a graph coloring of G
S : csr_matrix
Strength matrix with ones
T : csr_matrix
transpose of S
G : csr_matrix
union of S and T
Notes
-----
Performs the following operations:
- Checks input strength of connection matrix S
- Replaces S.data with ones
- Creates T = S.T in CSR format
- Creates G = S union T in CSR format
- Creates random weights
- Augments weights with graph coloring (if use_color == True) | [
"Preprocess",
"splitting",
"functions",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/classical/split.py#L385-L444 | train | 209,222 |
pyamg/pyamg | pyamg/gallery/example.py | load_example | def load_example(name):
"""Load an example problem by name.
Parameters
----------
name : string (e.g. 'airfoil')
Name of the example to load
Notes
-----
Each example is stored in a dictionary with the following keys:
- 'A' : sparse matrix
- 'B' : near-nullspace candidates
- 'vertices' : dense array of nodal coordinates
- 'elements' : dense array of element indices
Current example names are:%s
Examples
--------
>>> from pyamg.gallery import load_example
>>> ex = load_example('knot')
"""
if name not in example_names:
raise ValueError('no example with name (%s)' % name)
else:
return loadmat(os.path.join(example_dir, name + '.mat'),
struct_as_record=True) | python | def load_example(name):
"""Load an example problem by name.
Parameters
----------
name : string (e.g. 'airfoil')
Name of the example to load
Notes
-----
Each example is stored in a dictionary with the following keys:
- 'A' : sparse matrix
- 'B' : near-nullspace candidates
- 'vertices' : dense array of nodal coordinates
- 'elements' : dense array of element indices
Current example names are:%s
Examples
--------
>>> from pyamg.gallery import load_example
>>> ex = load_example('knot')
"""
if name not in example_names:
raise ValueError('no example with name (%s)' % name)
else:
return loadmat(os.path.join(example_dir, name + '.mat'),
struct_as_record=True) | [
"def",
"load_example",
"(",
"name",
")",
":",
"if",
"name",
"not",
"in",
"example_names",
":",
"raise",
"ValueError",
"(",
"'no example with name (%s)'",
"%",
"name",
")",
"else",
":",
"return",
"loadmat",
"(",
"os",
".",
"path",
".",
"join",
"(",
"example... | Load an example problem by name.
Parameters
----------
name : string (e.g. 'airfoil')
Name of the example to load
Notes
-----
Each example is stored in a dictionary with the following keys:
- 'A' : sparse matrix
- 'B' : near-nullspace candidates
- 'vertices' : dense array of nodal coordinates
- 'elements' : dense array of element indices
Current example names are:%s
Examples
--------
>>> from pyamg.gallery import load_example
>>> ex = load_example('knot') | [
"Load",
"an",
"example",
"problem",
"by",
"name",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/example.py#L17-L45 | train | 209,223 |
pyamg/pyamg | pyamg/gallery/stencil.py | stencil_grid | def stencil_grid(S, grid, dtype=None, format=None):
"""Construct a sparse matrix form a local matrix stencil.
Parameters
----------
S : ndarray
matrix stencil stored in N-d array
grid : tuple
tuple containing the N grid dimensions
dtype :
data type of the result
format : string
sparse matrix format to return, e.g. "csr", "coo", etc.
Returns
-------
A : sparse matrix
Sparse matrix which represents the operator given by applying
stencil S at each vertex of a regular grid with given dimensions.
Notes
-----
The grid vertices are enumerated as arange(prod(grid)).reshape(grid).
This implies that the last grid dimension cycles fastest, while the
first dimension cycles slowest. For example, if grid=(2,3) then the
grid vertices are ordered as (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).
This coincides with the ordering used by the NumPy functions
ndenumerate() and mgrid().
Examples
--------
>>> from pyamg.gallery import stencil_grid
>>> stencil = [-1,2,-1] # 1D Poisson stencil
>>> grid = (5,) # 1D grid with 5 vertices
>>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
>>> A.todense()
matrix([[ 2., -1., 0., 0., 0.],
[-1., 2., -1., 0., 0.],
[ 0., -1., 2., -1., 0.],
[ 0., 0., -1., 2., -1.],
[ 0., 0., 0., -1., 2.]])
>>> stencil = [[0,-1,0],[-1,4,-1],[0,-1,0]] # 2D Poisson stencil
>>> grid = (3,3) # 2D grid with shape 3x3
>>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
>>> A.todense()
matrix([[ 4., -1., 0., -1., 0., 0., 0., 0., 0.],
[-1., 4., -1., 0., -1., 0., 0., 0., 0.],
[ 0., -1., 4., 0., 0., -1., 0., 0., 0.],
[-1., 0., 0., 4., -1., 0., -1., 0., 0.],
[ 0., -1., 0., -1., 4., -1., 0., -1., 0.],
[ 0., 0., -1., 0., -1., 4., 0., 0., -1.],
[ 0., 0., 0., -1., 0., 0., 4., -1., 0.],
[ 0., 0., 0., 0., -1., 0., -1., 4., -1.],
[ 0., 0., 0., 0., 0., -1., 0., -1., 4.]])
"""
S = np.asarray(S, dtype=dtype)
grid = tuple(grid)
if not (np.asarray(S.shape) % 2 == 1).all():
raise ValueError('all stencil dimensions must be odd')
if len(grid) != np.ndim(S):
raise ValueError('stencil dimension must equal number of grid\
dimensions')
if min(grid) < 1:
raise ValueError('grid dimensions must be positive')
N_v = np.prod(grid) # number of vertices in the mesh
N_s = (S != 0).sum() # number of nonzero stencil entries
# diagonal offsets
diags = np.zeros(N_s, dtype=int)
# compute index offset of each dof within the stencil
strides = np.cumprod([1] + list(reversed(grid)))[:-1]
indices = tuple(i.copy() for i in S.nonzero())
for i, s in zip(indices, S.shape):
i -= s // 2
# i = (i - s) // 2
# i = i // 2
# i = i - (s // 2)
for stride, coords in zip(strides, reversed(indices)):
diags += stride * coords
data = S[S != 0].repeat(N_v).reshape(N_s, N_v)
indices = np.vstack(indices).T
# zero boundary connections
for index, diag in zip(indices, data):
diag = diag.reshape(grid)
for n, i in enumerate(index):
if i > 0:
s = [slice(None)] * len(grid)
s[n] = slice(0, i)
s = tuple(s)
diag[s] = 0
elif i < 0:
s = [slice(None)]*len(grid)
s[n] = slice(i, None)
s = tuple(s)
diag[s] = 0
# remove diagonals that lie outside matrix
mask = abs(diags) < N_v
if not mask.all():
diags = diags[mask]
data = data[mask]
# sum duplicate diagonals
if len(np.unique(diags)) != len(diags):
new_diags = np.unique(diags)
new_data = np.zeros((len(new_diags), data.shape[1]),
dtype=data.dtype)
for dia, dat in zip(diags, data):
n = np.searchsorted(new_diags, dia)
new_data[n, :] += dat
diags = new_diags
data = new_data
return sparse.dia_matrix((data, diags),
shape=(N_v, N_v)).asformat(format) | python | def stencil_grid(S, grid, dtype=None, format=None):
"""Construct a sparse matrix form a local matrix stencil.
Parameters
----------
S : ndarray
matrix stencil stored in N-d array
grid : tuple
tuple containing the N grid dimensions
dtype :
data type of the result
format : string
sparse matrix format to return, e.g. "csr", "coo", etc.
Returns
-------
A : sparse matrix
Sparse matrix which represents the operator given by applying
stencil S at each vertex of a regular grid with given dimensions.
Notes
-----
The grid vertices are enumerated as arange(prod(grid)).reshape(grid).
This implies that the last grid dimension cycles fastest, while the
first dimension cycles slowest. For example, if grid=(2,3) then the
grid vertices are ordered as (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).
This coincides with the ordering used by the NumPy functions
ndenumerate() and mgrid().
Examples
--------
>>> from pyamg.gallery import stencil_grid
>>> stencil = [-1,2,-1] # 1D Poisson stencil
>>> grid = (5,) # 1D grid with 5 vertices
>>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
>>> A.todense()
matrix([[ 2., -1., 0., 0., 0.],
[-1., 2., -1., 0., 0.],
[ 0., -1., 2., -1., 0.],
[ 0., 0., -1., 2., -1.],
[ 0., 0., 0., -1., 2.]])
>>> stencil = [[0,-1,0],[-1,4,-1],[0,-1,0]] # 2D Poisson stencil
>>> grid = (3,3) # 2D grid with shape 3x3
>>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
>>> A.todense()
matrix([[ 4., -1., 0., -1., 0., 0., 0., 0., 0.],
[-1., 4., -1., 0., -1., 0., 0., 0., 0.],
[ 0., -1., 4., 0., 0., -1., 0., 0., 0.],
[-1., 0., 0., 4., -1., 0., -1., 0., 0.],
[ 0., -1., 0., -1., 4., -1., 0., -1., 0.],
[ 0., 0., -1., 0., -1., 4., 0., 0., -1.],
[ 0., 0., 0., -1., 0., 0., 4., -1., 0.],
[ 0., 0., 0., 0., -1., 0., -1., 4., -1.],
[ 0., 0., 0., 0., 0., -1., 0., -1., 4.]])
"""
S = np.asarray(S, dtype=dtype)
grid = tuple(grid)
if not (np.asarray(S.shape) % 2 == 1).all():
raise ValueError('all stencil dimensions must be odd')
if len(grid) != np.ndim(S):
raise ValueError('stencil dimension must equal number of grid\
dimensions')
if min(grid) < 1:
raise ValueError('grid dimensions must be positive')
N_v = np.prod(grid) # number of vertices in the mesh
N_s = (S != 0).sum() # number of nonzero stencil entries
# diagonal offsets
diags = np.zeros(N_s, dtype=int)
# compute index offset of each dof within the stencil
strides = np.cumprod([1] + list(reversed(grid)))[:-1]
indices = tuple(i.copy() for i in S.nonzero())
for i, s in zip(indices, S.shape):
i -= s // 2
# i = (i - s) // 2
# i = i // 2
# i = i - (s // 2)
for stride, coords in zip(strides, reversed(indices)):
diags += stride * coords
data = S[S != 0].repeat(N_v).reshape(N_s, N_v)
indices = np.vstack(indices).T
# zero boundary connections
for index, diag in zip(indices, data):
diag = diag.reshape(grid)
for n, i in enumerate(index):
if i > 0:
s = [slice(None)] * len(grid)
s[n] = slice(0, i)
s = tuple(s)
diag[s] = 0
elif i < 0:
s = [slice(None)]*len(grid)
s[n] = slice(i, None)
s = tuple(s)
diag[s] = 0
# remove diagonals that lie outside matrix
mask = abs(diags) < N_v
if not mask.all():
diags = diags[mask]
data = data[mask]
# sum duplicate diagonals
if len(np.unique(diags)) != len(diags):
new_diags = np.unique(diags)
new_data = np.zeros((len(new_diags), data.shape[1]),
dtype=data.dtype)
for dia, dat in zip(diags, data):
n = np.searchsorted(new_diags, dia)
new_data[n, :] += dat
diags = new_diags
data = new_data
return sparse.dia_matrix((data, diags),
shape=(N_v, N_v)).asformat(format) | [
"def",
"stencil_grid",
"(",
"S",
",",
"grid",
",",
"dtype",
"=",
"None",
",",
"format",
"=",
"None",
")",
":",
"S",
"=",
"np",
".",
"asarray",
"(",
"S",
",",
"dtype",
"=",
"dtype",
")",
"grid",
"=",
"tuple",
"(",
"grid",
")",
"if",
"not",
"(",
... | Construct a sparse matrix form a local matrix stencil.
Parameters
----------
S : ndarray
matrix stencil stored in N-d array
grid : tuple
tuple containing the N grid dimensions
dtype :
data type of the result
format : string
sparse matrix format to return, e.g. "csr", "coo", etc.
Returns
-------
A : sparse matrix
Sparse matrix which represents the operator given by applying
stencil S at each vertex of a regular grid with given dimensions.
Notes
-----
The grid vertices are enumerated as arange(prod(grid)).reshape(grid).
This implies that the last grid dimension cycles fastest, while the
first dimension cycles slowest. For example, if grid=(2,3) then the
grid vertices are ordered as (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).
This coincides with the ordering used by the NumPy functions
ndenumerate() and mgrid().
Examples
--------
>>> from pyamg.gallery import stencil_grid
>>> stencil = [-1,2,-1] # 1D Poisson stencil
>>> grid = (5,) # 1D grid with 5 vertices
>>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
>>> A.todense()
matrix([[ 2., -1., 0., 0., 0.],
[-1., 2., -1., 0., 0.],
[ 0., -1., 2., -1., 0.],
[ 0., 0., -1., 2., -1.],
[ 0., 0., 0., -1., 2.]])
>>> stencil = [[0,-1,0],[-1,4,-1],[0,-1,0]] # 2D Poisson stencil
>>> grid = (3,3) # 2D grid with shape 3x3
>>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
>>> A.todense()
matrix([[ 4., -1., 0., -1., 0., 0., 0., 0., 0.],
[-1., 4., -1., 0., -1., 0., 0., 0., 0.],
[ 0., -1., 4., 0., 0., -1., 0., 0., 0.],
[-1., 0., 0., 4., -1., 0., -1., 0., 0.],
[ 0., -1., 0., -1., 4., -1., 0., -1., 0.],
[ 0., 0., -1., 0., -1., 4., 0., 0., -1.],
[ 0., 0., 0., -1., 0., 0., 4., -1., 0.],
[ 0., 0., 0., 0., -1., 0., -1., 4., -1.],
[ 0., 0., 0., 0., 0., -1., 0., -1., 4.]]) | [
"Construct",
"a",
"sparse",
"matrix",
"form",
"a",
"local",
"matrix",
"stencil",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/stencil.py#L11-L138 | train | 209,224 |
pyamg/pyamg | pyamg/classical/cr.py | _CRsweep | def _CRsweep(A, B, Findex, Cindex, nu, thetacr, method):
"""Perform CR sweeps on a target vector.
Internal function called by CR. Performs habituated or concurrent
relaxation sweeps on target vector. Stops when either (i) very fast
convergence, CF < 0.1*thetacr, are observed, or at least a given number
of sweeps have been performed and the relative change in CF < 0.1.
Parameters
----------
A : csr_matrix
B : array like
Target near null space mode
Findex : array like
List of F indices in current splitting
Cindex : array like
List of C indices in current splitting
nu : int
minimum number of relaxation sweeps to do
thetacr
Desired convergence factor
Returns
-------
rho : float
Convergence factor of last iteration
e : array like
Smoothed error vector
"""
n = A.shape[0] # problem size
numax = nu
z = np.zeros((n,))
e = deepcopy(B[:, 0])
e[Cindex] = 0.0
enorm = norm(e)
rhok = 1
it = 0
while True:
if method == 'habituated':
gauss_seidel(A, e, z, iterations=1)
e[Cindex] = 0.0
elif method == 'concurrent':
gauss_seidel_indexed(A, e, z, indices=Findex, iterations=1)
else:
raise NotImplementedError('method not recognized: need habituated '
'or concurrent')
enorm_old = enorm
enorm = norm(e)
rhok_old = rhok
rhok = enorm / enorm_old
it += 1
# criteria 1 -- fast convergence
if rhok < 0.1 * thetacr:
break
# criteria 2 -- at least nu iters, small relative change in CF (<0.1)
elif ((abs(rhok - rhok_old) / rhok) < 0.1) and (it >= nu):
break
return rhok, e | python | def _CRsweep(A, B, Findex, Cindex, nu, thetacr, method):
"""Perform CR sweeps on a target vector.
Internal function called by CR. Performs habituated or concurrent
relaxation sweeps on target vector. Stops when either (i) very fast
convergence, CF < 0.1*thetacr, are observed, or at least a given number
of sweeps have been performed and the relative change in CF < 0.1.
Parameters
----------
A : csr_matrix
B : array like
Target near null space mode
Findex : array like
List of F indices in current splitting
Cindex : array like
List of C indices in current splitting
nu : int
minimum number of relaxation sweeps to do
thetacr
Desired convergence factor
Returns
-------
rho : float
Convergence factor of last iteration
e : array like
Smoothed error vector
"""
n = A.shape[0] # problem size
numax = nu
z = np.zeros((n,))
e = deepcopy(B[:, 0])
e[Cindex] = 0.0
enorm = norm(e)
rhok = 1
it = 0
while True:
if method == 'habituated':
gauss_seidel(A, e, z, iterations=1)
e[Cindex] = 0.0
elif method == 'concurrent':
gauss_seidel_indexed(A, e, z, indices=Findex, iterations=1)
else:
raise NotImplementedError('method not recognized: need habituated '
'or concurrent')
enorm_old = enorm
enorm = norm(e)
rhok_old = rhok
rhok = enorm / enorm_old
it += 1
# criteria 1 -- fast convergence
if rhok < 0.1 * thetacr:
break
# criteria 2 -- at least nu iters, small relative change in CF (<0.1)
elif ((abs(rhok - rhok_old) / rhok) < 0.1) and (it >= nu):
break
return rhok, e | [
"def",
"_CRsweep",
"(",
"A",
",",
"B",
",",
"Findex",
",",
"Cindex",
",",
"nu",
",",
"thetacr",
",",
"method",
")",
":",
"n",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"# problem size",
"numax",
"=",
"nu",
"z",
"=",
"np",
".",
"zeros",
"(",
"(",
... | Perform CR sweeps on a target vector.
Internal function called by CR. Performs habituated or concurrent
relaxation sweeps on target vector. Stops when either (i) very fast
convergence, CF < 0.1*thetacr, are observed, or at least a given number
of sweeps have been performed and the relative change in CF < 0.1.
Parameters
----------
A : csr_matrix
B : array like
Target near null space mode
Findex : array like
List of F indices in current splitting
Cindex : array like
List of C indices in current splitting
nu : int
minimum number of relaxation sweeps to do
thetacr
Desired convergence factor
Returns
-------
rho : float
Convergence factor of last iteration
e : array like
Smoothed error vector | [
"Perform",
"CR",
"sweeps",
"on",
"a",
"target",
"vector",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/classical/cr.py#L16-L78 | train | 209,225 |
def binormalize(A, tol=1e-5, maxiter=10):
    """Binormalize matrix A, attempting unit l_1 norm rows via C = DAD.

    Parameters
    ----------
    A : csr_matrix
        sparse matrix (n x n)
    tol : float
        tolerance on the relative spread of the row sums
    maxiter : int
        maximum number of sweeps over the scaling vector

    Returns
    -------
    C : csr_matrix
        diagonally scaled A, C = DAD

    Notes
    -----
    - Goal: scale A so that the l_1 norm of each row equals 1:
        - B = DAD
        - want the row sums of B to equal 1
        - easily done with tol=0 if B=DA, but that result is not symmetric
        - algorithm is O(N log (1.0/tol))

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical import binormalize
    >>> A = poisson((10,),format='csr')
    >>> C = binormalize(A)

    References
    ----------
    .. [1] Livne, Golub, "Scaling by Binormalization"
       Tech Report SCCM-03-12, SCCM, Stanford, 2003
       http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.1679

    """
    if not isspmatrix(A):
        raise TypeError('expecting sparse matrix A')
    if A.dtype == complex:
        raise NotImplementedError('complex A not implemented')

    n = A.shape[0]
    sweep = 0
    v = np.ones((n, 1)).ravel()

    # 1. entrywise square of A (power(A,2) is inconsistent in numpy/scipy.sparse)
    B = A.multiply(A).tocsc()
    dB = B.diagonal().ravel()

    # 2. initial row sums and their relative spread
    s = B * v
    sbar = (1.0/n) * np.dot(v, s)
    spread = rowsum_stdev(v, s)

    # 3. Gauss-Seidel-like sweeps over the scaling vector
    while spread > tol and sweep < maxiter:
        for row in range(0, n):
            # solve for v_row with every other entry held fixed
            # see equation (12)
            c2 = (n-1)*dB[row]
            c1 = (n-2)*(s[row] - dB[row]*v[row])
            c0 = -dB[row]*v[row]*v[row] + 2*s[row]*v[row] - n*sbar
            if (-c0 < 1e-14):
                print('warning: A nearly un-binormalizable...')
                return A
            # see equation (12)
            vnew = (2*c0)/(-c1 - np.sqrt(c1*c1 - 4*c0*c2))
            dv = vnew - v[row]
            # a row of B is used instead of a column, which assumes the
            # input matrix is symmetric
            lo = B.indptr[row]
            hi = B.indptr[row+1]
            bdot = np.dot(v[B.indices[lo:hi]], B.data[lo:hi])
            sbar = sbar + (1.0/n)*dv*(bdot + s[row] + dB[row]*dv)
            s[B.indices[lo:hi]] += dv*B.data[lo:hi]
            v[row] = vnew
        spread = rowsum_stdev(v, s)
        sweep += 1

    # rescale so the mean squared row 2-norm of C is one
    root = np.sqrt(v)
    D = spdiags(root.ravel(), [0], n, n)
    C = D * A * D
    C = C.tocsr()
    s = C.multiply(C).sum(axis=1)
    scale = np.sqrt((1.0/n) * np.sum(s))
    return (1/scale)*C
"""Binormalize matrix A. Attempt to create unit l_1 norm rows.
Parameters
----------
A : csr_matrix
sparse matrix (n x n)
tol : float
tolerance
x : array
guess at the diagonal
maxiter : int
maximum number of iterations to try
Returns
-------
C : csr_matrix
diagonally scaled A, C=DAD
Notes
-----
- Goal: Scale A so that l_1 norm of the rows are equal to 1:
- B = DAD
- want row sum of B = 1
- easily done with tol=0 if B=DA, but this is not symmetric
- algorithm is O(N log (1.0/tol))
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import binormalize
>>> A = poisson((10,),format='csr')
>>> C = binormalize(A)
References
----------
.. [1] Livne, Golub, "Scaling by Binormalization"
Tech Report SCCM-03-12, SCCM, Stanford, 2003
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.1679
"""
if not isspmatrix(A):
raise TypeError('expecting sparse matrix A')
if A.dtype == complex:
raise NotImplementedError('complex A not implemented')
n = A.shape[0]
it = 0
x = np.ones((n, 1)).ravel()
# 1.
B = A.multiply(A).tocsc() # power(A,2) inconsistent in numpy, scipy.sparse
d = B.diagonal().ravel()
# 2.
beta = B * x
betabar = (1.0/n) * np.dot(x, beta)
stdev = rowsum_stdev(x, beta)
# 3
while stdev > tol and it < maxiter:
for i in range(0, n):
# solve equation x_i, keeping x_j's fixed
# see equation (12)
c2 = (n-1)*d[i]
c1 = (n-2)*(beta[i] - d[i]*x[i])
c0 = -d[i]*x[i]*x[i] + 2*beta[i]*x[i] - n*betabar
if (-c0 < 1e-14):
print('warning: A nearly un-binormalizable...')
return A
else:
# see equation (12)
xnew = (2*c0)/(-c1 - np.sqrt(c1*c1 - 4*c0*c2))
dx = xnew - x[i]
# here we assume input matrix is symmetric since we grab a row of B
# instead of a column
ii = B.indptr[i]
iii = B.indptr[i+1]
dot_Bcol = np.dot(x[B.indices[ii:iii]], B.data[ii:iii])
betabar = betabar + (1.0/n)*dx*(dot_Bcol + beta[i] + d[i]*dx)
beta[B.indices[ii:iii]] += dx*B.data[ii:iii]
x[i] = xnew
stdev = rowsum_stdev(x, beta)
it += 1
# rescale for unit 2-norm
d = np.sqrt(x)
D = spdiags(d.ravel(), [0], n, n)
C = D * A * D
C = C.tocsr()
beta = C.multiply(C).sum(axis=1)
scale = np.sqrt((1.0/n) * np.sum(beta))
return (1/scale)*C | [
"def",
"binormalize",
"(",
"A",
",",
"tol",
"=",
"1e-5",
",",
"maxiter",
"=",
"10",
")",
":",
"if",
"not",
"isspmatrix",
"(",
"A",
")",
":",
"raise",
"TypeError",
"(",
"'expecting sparse matrix A'",
")",
"if",
"A",
".",
"dtype",
"==",
"complex",
":",
... | Binormalize matrix A. Attempt to create unit l_1 norm rows.
Parameters
----------
A : csr_matrix
sparse matrix (n x n)
tol : float
tolerance
x : array
guess at the diagonal
maxiter : int
maximum number of iterations to try
Returns
-------
C : csr_matrix
diagonally scaled A, C=DAD
Notes
-----
- Goal: Scale A so that l_1 norm of the rows are equal to 1:
- B = DAD
- want row sum of B = 1
- easily done with tol=0 if B=DA, but this is not symmetric
- algorithm is O(N log (1.0/tol))
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import binormalize
>>> A = poisson((10,),format='csr')
>>> C = binormalize(A)
References
----------
.. [1] Livne, Golub, "Scaling by Binormalization"
Tech Report SCCM-03-12, SCCM, Stanford, 2003
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.3.1679 | [
"Binormalize",
"matrix",
"A",
".",
"Attempt",
"to",
"create",
"unit",
"l_1",
"norm",
"rows",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/classical/cr.py#L220-L317 | train | 209,226 |
def rowsum_stdev(x, beta):
    r"""Compute the relative standard deviation of the row sums.

    For an approximation x, return

        s(x) = ( 1/n \sum_k (x_k beta_k - betabar)^2 )^(1/2) / betabar

    where betabar = 1/n dot(beta, x).

    Parameters
    ----------
    x : array
    beta : array

    Returns
    -------
    s(x)/betabar : float

    Notes
    -----
    equation (7) in Livne/Golub

    """
    n = x.size
    mean_rowsum = (1.0/n) * np.dot(x, beta)
    deviation = np.multiply(x, beta) - mean_rowsum
    spread = np.sqrt((1.0/n) * np.sum(np.power(deviation, 2)))
    return spread / mean_rowsum
r"""Compute row sum standard deviation.
Compute for approximation x, the std dev of the row sums
s(x) = ( 1/n \sum_k (x_k beta_k - betabar)^2 )^(1/2)
with betabar = 1/n dot(beta,x)
Parameters
----------
x : array
beta : array
Returns
-------
s(x)/betabar : float
Notes
-----
equation (7) in Livne/Golub
"""
n = x.size
betabar = (1.0/n) * np.dot(x, beta)
stdev = np.sqrt((1.0/n) *
np.sum(np.power(np.multiply(x, beta) - betabar, 2)))
return stdev/betabar | [
"def",
"rowsum_stdev",
"(",
"x",
",",
"beta",
")",
":",
"n",
"=",
"x",
".",
"size",
"betabar",
"=",
"(",
"1.0",
"/",
"n",
")",
"*",
"np",
".",
"dot",
"(",
"x",
",",
"beta",
")",
"stdev",
"=",
"np",
".",
"sqrt",
"(",
"(",
"1.0",
"/",
"n",
... | r"""Compute row sum standard deviation.
Compute for approximation x, the std dev of the row sums
s(x) = ( 1/n \sum_k (x_k beta_k - betabar)^2 )^(1/2)
with betabar = 1/n dot(beta,x)
Parameters
----------
x : array
beta : array
Returns
-------
s(x)/betabar : float
Notes
-----
equation (7) in Livne/Golub | [
"r",
"Compute",
"row",
"sum",
"standard",
"deviation",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/classical/cr.py#L320-L345 | train | 209,227 |
def mls_polynomial_coefficients(rho, degree):
    """Determine the coefficients for a MLS polynomial smoother.

    Parameters
    ----------
    rho : float
        Spectral radius of the matrix in question
    degree : int
        Degree of polynomial coefficients to generate

    Returns
    -------
    Tuple of arrays (coeffs, roots) containing the coefficients for the
    (symmetric) polynomial smoother and the roots of the polynomial
    prolongation smoother.

    The coefficients of the polynomial are in descending order.

    References
    ----------
    .. [1] Parallel multigrid smoothing: polynomial versus Gauss--Seidel
       M. F. Adams, M. Brezina, J. J. Hu, and R. S. Tuminaro
       J. Comp. Phys., 188 (2003), pp. 593--610

    Examples
    --------
    >>> from pyamg.relaxation.chebyshev import mls_polynomial_coefficients
    >>> mls = mls_polynomial_coefficients(2.0, 2)
    >>> print(mls[0])  # coefficients
    [   6.4  -48.   144.  -220.   180.   -75.8   14.5]
    >>> print(mls[1])  # roots
    [ 1.4472136  0.5527864]

    """
    # roots of the prolongation smoother, scaled into (0, rho]
    k = np.arange(degree, dtype='float64') + 1
    roots = rho/2.0 * (1.0 - np.cos(2*np.pi*k/(2.0*degree+1.0)))
    roots = 1.0/roots

    # monomial coefficients of the error propagator S
    S = np.poly(roots)[::-1]

    # upper bound on the spectral radius of S^2 A
    SSA_max = rho/((2.0*degree+1.0)**2)

    # monomial coefficients of the \hat{S} propagator
    S_hat = np.polymul(S, S)
    S_hat = np.hstack(((-1.0/SSA_max)*S_hat, [1]))

    # coefficients of the combined error propagator \hat{S} S
    combined = np.polymul(S_hat, S)

    # coefficients of the smoother itself
    coeffs = -combined[:-1]
    return (coeffs, roots)
"""Determine the coefficients for a MLS polynomial smoother.
Parameters
----------
rho : float
Spectral radius of the matrix in question
degree : int
Degree of polynomial coefficients to generate
Returns
-------
Tuple of arrays (coeffs,roots) containing the
coefficients for the (symmetric) polynomial smoother and
the roots of polynomial prolongation smoother.
The coefficients of the polynomial are in descending order
References
----------
.. [1] Parallel multigrid smoothing: polynomial versus Gauss--Seidel
M. F. Adams, M. Brezina, J. J. Hu, and R. S. Tuminaro
J. Comp. Phys., 188 (2003), pp. 593--610
Examples
--------
>>> from pyamg.relaxation.chebyshev import mls_polynomial_coefficients
>>> mls = mls_polynomial_coefficients(2.0, 2)
>>> print mls[0] # coefficients
[ 6.4 -48. 144. -220. 180. -75.8 14.5]
>>> print mls[1] # roots
[ 1.4472136 0.5527864]
"""
# std_roots = np.cos(np.pi * (np.arange(degree) + 0.5)/ degree)
# print std_roots
roots = rho/2.0 * \
(1.0 - np.cos(2*np.pi*(np.arange(degree, dtype='float64') + 1)/(2.0*degree+1.0)))
# print roots
roots = 1.0/roots
# S_coeffs = list(-np.poly(roots)[1:][::-1])
S = np.poly(roots)[::-1] # monomial coefficients of S error propagator
SSA_max = rho/((2.0*degree+1.0)**2) # upper bound spectral radius of S^2A
S_hat = np.polymul(S, S) # monomial coefficients of \hat{S} propagator
S_hat = np.hstack(((-1.0/SSA_max)*S_hat, [1]))
# coeff for combined error propagator \hat{S}S
coeffs = np.polymul(S_hat, S)
coeffs = -coeffs[:-1] # coeff for smoother
return (coeffs, roots) | [
"def",
"mls_polynomial_coefficients",
"(",
"rho",
",",
"degree",
")",
":",
"# std_roots = np.cos(np.pi * (np.arange(degree) + 0.5)/ degree)",
"# print std_roots",
"roots",
"=",
"rho",
"/",
"2.0",
"*",
"(",
"1.0",
"-",
"np",
".",
"cos",
"(",
"2",
"*",
"np",
".",
... | Determine the coefficients for a MLS polynomial smoother.
Parameters
----------
rho : float
Spectral radius of the matrix in question
degree : int
Degree of polynomial coefficients to generate
Returns
-------
Tuple of arrays (coeffs,roots) containing the
coefficients for the (symmetric) polynomial smoother and
the roots of polynomial prolongation smoother.
The coefficients of the polynomial are in descending order
References
----------
.. [1] Parallel multigrid smoothing: polynomial versus Gauss--Seidel
M. F. Adams, M. Brezina, J. J. Hu, and R. S. Tuminaro
J. Comp. Phys., 188 (2003), pp. 593--610
Examples
--------
>>> from pyamg.relaxation.chebyshev import mls_polynomial_coefficients
>>> mls = mls_polynomial_coefficients(2.0, 2)
>>> print mls[0] # coefficients
[ 6.4 -48. 144. -220. 180. -75.8 14.5]
>>> print mls[1] # roots
[ 1.4472136 0.5527864] | [
"Determine",
"the",
"coefficients",
"for",
"a",
"MLS",
"polynomial",
"smoother",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/chebyshev.py#L56-L110 | train | 209,228 |
def steepest_descent(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
                     callback=None, residuals=None):
    """Steepest descent algorithm.

    Solves the linear system Ax = b. Left preconditioning is supported.

    Parameters
    ----------
    A : array, matrix, sparse matrix, LinearOperator
        n x n, linear system to solve
    b : array, matrix
        right hand side, shape is (n,) or (n,1)
    x0 : array, matrix
        initial guess, default is a vector of zeros
    tol : float
        relative convergence tolerance, i.e. tol is scaled by the
        preconditioner norm of r_0, or ||r_0||_M.
    maxiter : int
        maximum number of allowed iterations
    xtype : type
        dtype for the solution, default is automatic type detection
        (unused; retained for backward compatibility)
    M : array, matrix, sparse matrix, LinearOperator
        n x n, inverted preconditioner, i.e. solve M A x = M b.
    callback : function
        User-supplied function is called after each iteration as
        callback(xk), where xk is the current solution vector
    residuals : list
        residuals contains the residual norm history,
        including the initial residual. The preconditioner norm
        is used, instead of the Euclidean norm.

    Returns
    -------
    (xNew, info)
    xNew : an updated guess to the solution of Ax = b
    info : halting status of steepest descent

            ==  =======================================
            0   successful exit
            >0  convergence to tolerance not achieved,
                return iteration count instead.
            <0  numerical breakdown, or illegal input
            ==  =======================================

    Notes
    -----
    The LinearOperator class is in scipy.sparse.linalg.interface.
    Use this class if you prefer to define A or M as a mat-vec routine
    as opposed to explicitly constructing the matrix. A.psolve(..) is
    still supported as a legacy.

    The residual in the preconditioner norm is both used for halting and
    returned in the residuals list.

    Examples
    --------
    >>> from pyamg.krylov import steepest_descent
    >>> from pyamg.util.linalg import norm
    >>> import numpy as np
    >>> from pyamg.gallery import poisson
    >>> A = poisson((10,10))
    >>> b = np.ones((A.shape[0],))
    >>> (x,flag) = steepest_descent(A,b, maxiter=2, tol=1e-8)
    >>> print(norm(b - A*x))
    7.89436429704

    References
    ----------
    .. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
       Second Edition", SIAM, pp. 137--142, 2003
       http://www-users.cs.umn.edu/~saad/books.html

    """
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    # Ensure that warnings are always reissued from this function.
    # Raw string: the '\.' are regex escapes, not Python string escapes.
    import warnings
    warnings.filterwarnings('always',
                            module=r'pyamg\.krylov\._steepest_descent')

    # determine maxiter
    if maxiter is None:
        maxiter = int(len(b))
    elif maxiter < 1:
        raise ValueError('Number of iterations must be positive')

    # setup method
    r = b - A*x
    z = M*r
    rz = np.inner(r.conjugate(), z)

    # use preconditioner norm, ||r||_M
    normr = np.sqrt(rz)

    if residuals is not None:
        residuals[:] = [normr]  # initial residual

    # Check initial guess ( scaling by b, if b != 0,
    # must account for case when norm(b) is very small)
    normb = norm(b)
    if normb == 0.0:
        normb = 1.0
    if normr < tol*normb:
        return (postprocess(x), 0)

    # Scale tol by ||r_0||_M
    if normr != 0.0:
        tol = tol*normr

    # The true residual is recomputed from scratch every recompute_r
    # iterations; in between, the cheap update r <- r - alpha*q is used.
    recompute_r = 50

    it = 0
    while True:
        it = it + 1

        q = A*z
        zAz = np.inner(z.conjugate(), q)    # check curvature of A
        if zAz < 0.0:
            warn("\nIndefinite matrix detected in steepest descent, "
                 "aborting\n")
            return (postprocess(x), -1)

        alpha = rz / zAz                    # step size
        x = x + alpha*z

        # BUG FIX: previously the condition was inverted -- the expensive
        # recomputation r = b - A*x ran on every iteration *except*
        # multiples of recompute_r, costing an extra matvec per iteration.
        if it % recompute_r == 0:
            r = b - A*x
        else:
            r = r - alpha*q

        z = M*r
        rz = np.inner(r.conjugate(), z)
        if rz < 0.0:                        # check curvature of M
            warn("\nIndefinite preconditioner detected in steepest descent, "
                 "aborting\n")
            return (postprocess(x), -1)

        normr = np.sqrt(rz)                 # use preconditioner norm
        if residuals is not None:
            residuals.append(normr)

        if callback is not None:
            callback(x)

        if normr < tol:
            return (postprocess(x), 0)
        elif rz == 0.0:
            # important to test after testing normr < tol. rz == 0.0 is an
            # indicator of convergence when r = 0.0
            warn("\nSingular preconditioner detected in steepest descent, "
                 "ceasing iterations\n")
            return (postprocess(x), -1)

        if it == maxiter:
            return (postprocess(x), it)
callback=None, residuals=None):
"""Steepest descent algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cg
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The residual in the preconditioner norm is both used for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov import steepest_descent
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = steepest_descent(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
7.89436429704
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 137--142, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always',
module='pyamg\.krylov\._steepest_descent')
# determine maxiter
if maxiter is None:
maxiter = int(len(b))
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# setup method
r = b - A*x
z = M*r
rz = np.inner(r.conjugate(), z)
# use preconditioner norm
normr = np.sqrt(rz)
if residuals is not None:
residuals[:] = [normr] # initial residual
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_M
if normr != 0.0:
tol = tol*normr
# How often should r be recomputed
recompute_r = 50
iter = 0
while True:
iter = iter+1
q = A*z
zAz = np.inner(z.conjugate(), q) # check curvature of A
if zAz < 0.0:
warn("\nIndefinite matrix detected in steepest descent,\
aborting\n")
return (postprocess(x), -1)
alpha = rz / zAz # step size
x = x + alpha*z
if np.mod(iter, recompute_r) and iter > 0:
r = b - A*x
else:
r = r - alpha*q
z = M*r
rz = np.inner(r.conjugate(), z)
if rz < 0.0: # check curvature of M
warn("\nIndefinite preconditioner detected in steepest descent,\
aborting\n")
return (postprocess(x), -1)
normr = np.sqrt(rz) # use preconditioner norm
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
elif rz == 0.0:
# important to test after testing normr < tol. rz == 0.0 is an
# indicator of convergence when r = 0.0
warn("\nSingular preconditioner detected in steepest descent,\
ceasing iterations\n")
return (postprocess(x), -1)
if iter == maxiter:
return (postprocess(x), iter) | [
"def",
"steepest_descent",
"(",
"A",
",",
"b",
",",
"x0",
"=",
"None",
",",
"tol",
"=",
"1e-5",
",",
"maxiter",
"=",
"None",
",",
"xtype",
"=",
"None",
",",
"M",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"residuals",
"=",
"None",
")",
":",
... | Steepest descent algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cg
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The residual in the preconditioner norm is both used for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov import steepest_descent
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = steepest_descent(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
7.89436429704
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 137--142, 2003
http://www-users.cs.umn.edu/~saad/books.html | [
"Steepest",
"descent",
"algorithm",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/krylov/_steepest_descent.py#L10-L168 | train | 209,229 |
def demo():
    """Outline basic demo."""
    # 2D FD Poisson problem with a random right-hand side; no near-null
    # space guesses (B) are passed to SA.
    A = poisson((100, 100), format='csr')
    B = None
    b = sp.rand(A.shape[0], 1)

    # Build a Smoothed Aggregation (SA) multilevel solver and show its info
    mls = smoothed_aggregation_solver(A, B=B)
    print(mls)

    # Solve Ax=b twice: standalone, then with CG acceleration
    res_sa = []
    x = mls.solve(b, tol=1e-10, accel=None, residuals=res_sa)
    res_cg = []
    x = mls.solve(b, tol=1e-10, accel='cg', residuals=res_cg)
    del x

    # Normalize each residual history by its initial residual
    res_sa = np.array(res_sa) / res_sa[0]
    res_cg = np.array(res_cg) / res_cg[0]

    # Geometric-mean convergence factors
    cf_sa = res_sa[-1]**(1.0/len(res_sa))
    cf_cg = res_cg[-1]**(1.0/len(res_cg))
    print(" MG convergence factor: %g" % (cf_sa))
    print("MG with CG acceleration convergence factor: %g" % (cf_cg))

    # Plot the convergence histories when matplotlib is available
    try:
        import matplotlib.pyplot as plt
        plt.figure()
        plt.title('Convergence History')
        plt.xlabel('Iteration')
        plt.ylabel('Relative Residual')
        plt.semilogy(res_sa, label='Standalone',
                     linestyle='-', marker='o')
        plt.semilogy(res_cg, label='Accelerated',
                     linestyle='-', marker='s')
        plt.legend()
        plt.show()
    except ImportError:
        print("\n\nNote: pylab not available on your system.")
"""Outline basic demo."""
A = poisson((100, 100), format='csr') # 2D FD Poisson problem
B = None # no near-null spaces guesses for SA
b = sp.rand(A.shape[0], 1) # a random right-hand side
# use AMG based on Smoothed Aggregation (SA) and display info
mls = smoothed_aggregation_solver(A, B=B)
print(mls)
# Solve Ax=b with no acceleration ('standalone' solver)
standalone_residuals = []
x = mls.solve(b, tol=1e-10, accel=None, residuals=standalone_residuals)
# Solve Ax=b with Conjugate Gradient (AMG as a preconditioner to CG)
accelerated_residuals = []
x = mls.solve(b, tol=1e-10, accel='cg', residuals=accelerated_residuals)
del x
# Compute relative residuals
standalone_residuals = \
np.array(standalone_residuals) / standalone_residuals[0]
accelerated_residuals = \
np.array(accelerated_residuals) / accelerated_residuals[0]
# Compute (geometric) convergence factors
factor1 = standalone_residuals[-1]**(1.0/len(standalone_residuals))
factor2 = accelerated_residuals[-1]**(1.0/len(accelerated_residuals))
print(" MG convergence factor: %g" % (factor1))
print("MG with CG acceleration convergence factor: %g" % (factor2))
# Plot convergence history
try:
import matplotlib.pyplot as plt
plt.figure()
plt.title('Convergence History')
plt.xlabel('Iteration')
plt.ylabel('Relative Residual')
plt.semilogy(standalone_residuals, label='Standalone',
linestyle='-', marker='o')
plt.semilogy(accelerated_residuals, label='Accelerated',
linestyle='-', marker='s')
plt.legend()
plt.show()
except ImportError:
print("\n\nNote: pylab not available on your system.") | [
"def",
"demo",
"(",
")",
":",
"A",
"=",
"poisson",
"(",
"(",
"100",
",",
"100",
")",
",",
"format",
"=",
"'csr'",
")",
"# 2D FD Poisson problem",
"B",
"=",
"None",
"# no near-null spaces guesses for SA",
"b",
"=",
"sp",
".",
"rand",
"(",
"A",
".",
"sha... | Outline basic demo. | [
"Outline",
"basic",
"demo",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/demo.py#L12-L58 | train | 209,230 |
def lloyd_aggregation(C, ratio=0.03, distance='unit', maxiter=10):
    """Aggregate nodes using Lloyd Clustering.

    Parameters
    ----------
    C : csr_matrix
        strength of connection matrix
    ratio : scalar
        Fraction of the nodes which will be seeds.
    distance : ['unit','abs','inv','same','min']
        Distance assigned to each edge of the graph G used in Lloyd clustering

        For each nonzero value C[i,j]:

        ======  ===========================
        'unit'  G[i,j] = 1
        'abs'   G[i,j] = abs(C[i,j])
        'inv'   G[i,j] = 1.0/abs(C[i,j])
        'same'  G[i,j] = C[i,j]
        'min'   G[i,j] = C[i,j] - min(C)
        ======  ===========================

    maxiter : int
        Maximum number of iterations to perform

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator
    seeds : array
        array of Cpts, i.e., Cpts[i] = root node of aggregate i

    Raises
    ------
    ValueError
        If ratio is outside (0, 1] or distance is not one of the
        recognized options.
    TypeError
        If C is neither CSR nor CSC.

    See Also
    --------
    amg_core.standard_aggregation

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation.aggregate import lloyd_aggregation
    >>> A = poisson((4,), format='csr')   # 1D mesh with 4 vertices
    >>> A.todense()
    matrix([[ 2., -1.,  0.,  0.],
            [-1.,  2., -1.,  0.],
            [ 0., -1.,  2., -1.],
            [ 0.,  0., -1.,  2.]])
    >>> lloyd_aggregation(A)[0].todense() # one aggregate
    matrix([[1],
            [1],
            [1],
            [1]], dtype=int8)
    >>> # more seeding for two aggregates
    >>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense()

    """
    if ratio <= 0 or ratio > 1:
        raise ValueError('ratio must be > 0.0 and <= 1.0')

    if not (isspmatrix_csr(C) or isspmatrix_csc(C)):
        raise TypeError('expected csr_matrix or csc_matrix')

    # BUG FIX: the 'same' and 'min' branches previously compared strings
    # with 'is' (identity), which only worked through CPython interning.
    if distance == 'unit':
        data = np.ones_like(C.data).astype(float)
    elif distance == 'abs':
        data = abs(C.data)
    elif distance == 'inv':
        data = 1.0/abs(C.data)
    elif distance == 'same':
        data = C.data
    elif distance == 'min':
        data = C.data - C.data.min()
    else:
        raise ValueError('unrecognized value distance=%s' % distance)

    if C.dtype == complex:
        data = np.real(data)

    # Lloyd clustering requires nonnegative edge weights
    assert(data.min() >= 0)

    # graph with the chosen edge weights, same sparsity pattern as C
    G = C.__class__((data, C.indices, C.indptr), shape=C.shape)

    # clamp the seed count to [1, n]
    num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0]))

    distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter)

    # build the aggregation operator: row i belongs to aggregate clusters[i]
    row = (clusters >= 0).nonzero()[0]
    col = clusters[row]
    data = np.ones(len(row), dtype='int8')
    AggOp = coo_matrix((data, (row, col)),
                       shape=(G.shape[0], num_seeds)).tocsr()
    return AggOp, seeds
"""Aggregate nodes using Lloyd Clustering.
Parameters
----------
C : csr_matrix
strength of connection matrix
ratio : scalar
Fraction of the nodes which will be seeds.
distance : ['unit','abs','inv',None]
Distance assigned to each edge of the graph G used in Lloyd clustering
For each nonzero value C[i,j]:
======= ===========================
'unit' G[i,j] = 1
'abs' G[i,j] = abs(C[i,j])
'inv' G[i,j] = 1.0/abs(C[i,j])
'same' G[i,j] = C[i,j]
'sub' G[i,j] = C[i,j] - min(C)
======= ===========================
maxiter : int
Maximum number of iterations to perform
Returns
-------
AggOp : csr_matrix
aggregation operator which determines the sparsity pattern
of the tentative prolongator
seeds : array
array of Cpts, i.e., Cpts[i] = root node of aggregate i
See Also
--------
amg_core.standard_aggregation
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pyamg.gallery import poisson
>>> from pyamg.aggregation.aggregate import lloyd_aggregation
>>> A = poisson((4,), format='csr') # 1D mesh with 4 vertices
>>> A.todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> lloyd_aggregation(A)[0].todense() # one aggregate
matrix([[1],
[1],
[1],
[1]], dtype=int8)
>>> # more seeding for two aggregates
>>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense()
"""
if ratio <= 0 or ratio > 1:
raise ValueError('ratio must be > 0.0 and <= 1.0')
if not (isspmatrix_csr(C) or isspmatrix_csc(C)):
raise TypeError('expected csr_matrix or csc_matrix')
if distance == 'unit':
data = np.ones_like(C.data).astype(float)
elif distance == 'abs':
data = abs(C.data)
elif distance == 'inv':
data = 1.0/abs(C.data)
elif distance is 'same':
data = C.data
elif distance is 'min':
data = C.data - C.data.min()
else:
raise ValueError('unrecognized value distance=%s' % distance)
if C.dtype == complex:
data = np.real(data)
assert(data.min() >= 0)
G = C.__class__((data, C.indices, C.indptr), shape=C.shape)
num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0]))
distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter)
row = (clusters >= 0).nonzero()[0]
col = clusters[row]
data = np.ones(len(row), dtype='int8')
AggOp = coo_matrix((data, (row, col)),
shape=(G.shape[0], num_seeds)).tocsr()
return AggOp, seeds | [
"def",
"lloyd_aggregation",
"(",
"C",
",",
"ratio",
"=",
"0.03",
",",
"distance",
"=",
"'unit'",
",",
"maxiter",
"=",
"10",
")",
":",
"if",
"ratio",
"<=",
"0",
"or",
"ratio",
">",
"1",
":",
"raise",
"ValueError",
"(",
"'ratio must be > 0.0 and <= 1.0'",
... | Aggregate nodes using Lloyd Clustering.
Parameters
----------
C : csr_matrix
strength of connection matrix
ratio : scalar
Fraction of the nodes which will be seeds.
distance : ['unit','abs','inv',None]
Distance assigned to each edge of the graph G used in Lloyd clustering
For each nonzero value C[i,j]:
======= ===========================
'unit' G[i,j] = 1
'abs' G[i,j] = abs(C[i,j])
'inv' G[i,j] = 1.0/abs(C[i,j])
'same' G[i,j] = C[i,j]
'sub' G[i,j] = C[i,j] - min(C)
======= ===========================
maxiter : int
Maximum number of iterations to perform
Returns
-------
AggOp : csr_matrix
aggregation operator which determines the sparsity pattern
of the tentative prolongator
seeds : array
array of Cpts, i.e., Cpts[i] = root node of aggregate i
See Also
--------
amg_core.standard_aggregation
Examples
--------
>>> from scipy.sparse import csr_matrix
>>> from pyamg.gallery import poisson
>>> from pyamg.aggregation.aggregate import lloyd_aggregation
>>> A = poisson((4,), format='csr') # 1D mesh with 4 vertices
>>> A.todense()
matrix([[ 2., -1., 0., 0.],
[-1., 2., -1., 0.],
[ 0., -1., 2., -1.],
[ 0., 0., -1., 2.]])
>>> lloyd_aggregation(A)[0].todense() # one aggregate
matrix([[1],
[1],
[1],
[1]], dtype=int8)
>>> # more seeding for two aggregates
>>> Agg = lloyd_aggregation(A,ratio=0.5)[0].todense() | [
"Aggregate",
"nodes",
"using",
"Lloyd",
"Clustering",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/aggregation/aggregate.py#L180-L272 | train | 209,231 |
pyamg/pyamg | pyamg/krylov/_cr.py | cr | def cr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Conjugate Residual algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
The matrix A must be Hermitian symmetric (but not necessarily definite).
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cr
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The 2-norm of the preconditioned residual is used both for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov.cr import cr
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = cr(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
10.9370700187
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 262-67, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
# n = len(b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._cr')
# determine maxiter
if maxiter is None:
maxiter = int(1.3*len(b)) + 2
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# choose tolerance for numerically zero values
# t = A.dtype.char
# eps = np.finfo(np.float).eps
# feps = np.finfo(np.single).eps
# geps = np.finfo(np.longfloat).eps
# _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
# numerically_zero = {0: feps*1e3, 1: eps*1e6,
# 2: geps*1e6}[_array_precision[t]]
# setup method
r = b - A*x
z = M*r
p = z.copy()
zz = np.inner(z.conjugate(), z)
# use preconditioner norm
normr = np.sqrt(zz)
if residuals is not None:
residuals[:] = [normr] # initial residual
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_M
if normr != 0.0:
tol = tol*normr
# How often should r be recomputed
recompute_r = 8
iter = 0
Az = A*z
rAz = np.inner(r.conjugate(), Az)
Ap = A*p
while True:
rAz_old = rAz
alpha = rAz / np.inner(Ap.conjugate(), Ap) # 3
x += alpha * p # 4
if np.mod(iter, recompute_r) and iter > 0: # 5
r -= alpha * Ap
else:
r = b - A*x
z = M*r
Az = A*z
rAz = np.inner(r.conjugate(), Az)
beta = rAz/rAz_old # 6
p *= beta # 7
p += z
Ap *= beta # 8
Ap += Az
iter += 1
zz = np.inner(z.conjugate(), z)
normr = np.sqrt(zz) # use preconditioner norm
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
elif zz == 0.0:
# important to test after testing normr < tol. rz == 0.0 is an
# indicator of convergence when r = 0.0
warn("\nSingular preconditioner detected in CR, ceasing \
iterations\n")
return (postprocess(x), -1)
if iter == maxiter:
return (postprocess(x), iter) | python | def cr(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Conjugate Residual algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
The matrix A must be Hermitian symmetric (but not necessarily definite).
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cr
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The 2-norm of the preconditioned residual is used both for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov.cr import cr
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = cr(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
10.9370700187
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 262-67, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
# n = len(b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._cr')
# determine maxiter
if maxiter is None:
maxiter = int(1.3*len(b)) + 2
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# choose tolerance for numerically zero values
# t = A.dtype.char
# eps = np.finfo(np.float).eps
# feps = np.finfo(np.single).eps
# geps = np.finfo(np.longfloat).eps
# _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
# numerically_zero = {0: feps*1e3, 1: eps*1e6,
# 2: geps*1e6}[_array_precision[t]]
# setup method
r = b - A*x
z = M*r
p = z.copy()
zz = np.inner(z.conjugate(), z)
# use preconditioner norm
normr = np.sqrt(zz)
if residuals is not None:
residuals[:] = [normr] # initial residual
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_M
if normr != 0.0:
tol = tol*normr
# How often should r be recomputed
recompute_r = 8
iter = 0
Az = A*z
rAz = np.inner(r.conjugate(), Az)
Ap = A*p
while True:
rAz_old = rAz
alpha = rAz / np.inner(Ap.conjugate(), Ap) # 3
x += alpha * p # 4
if np.mod(iter, recompute_r) and iter > 0: # 5
r -= alpha * Ap
else:
r = b - A*x
z = M*r
Az = A*z
rAz = np.inner(r.conjugate(), Az)
beta = rAz/rAz_old # 6
p *= beta # 7
p += z
Ap *= beta # 8
Ap += Az
iter += 1
zz = np.inner(z.conjugate(), z)
normr = np.sqrt(zz) # use preconditioner norm
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
elif zz == 0.0:
# important to test after testing normr < tol. rz == 0.0 is an
# indicator of convergence when r = 0.0
warn("\nSingular preconditioner detected in CR, ceasing \
iterations\n")
return (postprocess(x), -1)
if iter == maxiter:
return (postprocess(x), iter) | [
"def",
"cr",
"(",
"A",
",",
"b",
",",
"x0",
"=",
"None",
",",
"tol",
"=",
"1e-5",
",",
"maxiter",
"=",
"None",
",",
"xtype",
"=",
"None",
",",
"M",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"residuals",
"=",
"None",
")",
":",
"A",
",",
... | Conjugate Residual algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
The matrix A must be Hermitian symmetric (but not necessarily definite).
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cr
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The 2-norm of the preconditioned residual is used both for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov.cr import cr
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = cr(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
10.9370700187
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 262-67, 2003
http://www-users.cs.umn.edu/~saad/books.html | [
"Conjugate",
"Residual",
"algorithm",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/krylov/_cr.py#L11-L185 | train | 209,232 |
pyamg/pyamg | pyamg/util/BSR_utils.py | BSR_Get_Row | def BSR_Get_Row(A, i):
"""Return row i in BSR matrix A.
Only nonzero entries are returned
Parameters
----------
A : bsr_matrix
Input matrix
i : int
Row number
Returns
-------
z : array
Actual nonzero values for row i colindx Array of column indices for the
nonzeros of row i
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.BSR_utils import BSR_Get_Row
>>> indptr = array([0,2,3,6])
>>> indices = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
>>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
>>> Brow = BSR_Get_Row(B,2)
>>> print Brow[1]
[4 5]
"""
blocksize = A.blocksize[0]
BlockIndx = int(i/blocksize)
rowstart = A.indptr[BlockIndx]
rowend = A.indptr[BlockIndx+1]
localRowIndx = i % blocksize
# Get z
indys = A.data[rowstart:rowend, localRowIndx, :].nonzero()
z = A.data[rowstart:rowend, localRowIndx, :][indys[0], indys[1]]
colindx = np.zeros((1, z.__len__()), dtype=np.int32)
counter = 0
for j in range(rowstart, rowend):
coloffset = blocksize*A.indices[j]
indys = A.data[j, localRowIndx, :].nonzero()[0]
increment = indys.shape[0]
colindx[0, counter:(counter+increment)] = coloffset + indys
counter += increment
return np.mat(z).T, colindx[0, :] | python | def BSR_Get_Row(A, i):
"""Return row i in BSR matrix A.
Only nonzero entries are returned
Parameters
----------
A : bsr_matrix
Input matrix
i : int
Row number
Returns
-------
z : array
Actual nonzero values for row i colindx Array of column indices for the
nonzeros of row i
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.BSR_utils import BSR_Get_Row
>>> indptr = array([0,2,3,6])
>>> indices = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
>>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
>>> Brow = BSR_Get_Row(B,2)
>>> print Brow[1]
[4 5]
"""
blocksize = A.blocksize[0]
BlockIndx = int(i/blocksize)
rowstart = A.indptr[BlockIndx]
rowend = A.indptr[BlockIndx+1]
localRowIndx = i % blocksize
# Get z
indys = A.data[rowstart:rowend, localRowIndx, :].nonzero()
z = A.data[rowstart:rowend, localRowIndx, :][indys[0], indys[1]]
colindx = np.zeros((1, z.__len__()), dtype=np.int32)
counter = 0
for j in range(rowstart, rowend):
coloffset = blocksize*A.indices[j]
indys = A.data[j, localRowIndx, :].nonzero()[0]
increment = indys.shape[0]
colindx[0, counter:(counter+increment)] = coloffset + indys
counter += increment
return np.mat(z).T, colindx[0, :] | [
"def",
"BSR_Get_Row",
"(",
"A",
",",
"i",
")",
":",
"blocksize",
"=",
"A",
".",
"blocksize",
"[",
"0",
"]",
"BlockIndx",
"=",
"int",
"(",
"i",
"/",
"blocksize",
")",
"rowstart",
"=",
"A",
".",
"indptr",
"[",
"BlockIndx",
"]",
"rowend",
"=",
"A",
... | Return row i in BSR matrix A.
Only nonzero entries are returned
Parameters
----------
A : bsr_matrix
Input matrix
i : int
Row number
Returns
-------
z : array
Actual nonzero values for row i colindx Array of column indices for the
nonzeros of row i
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.BSR_utils import BSR_Get_Row
>>> indptr = array([0,2,3,6])
>>> indices = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
>>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
>>> Brow = BSR_Get_Row(B,2)
>>> print Brow[1]
[4 5] | [
"Return",
"row",
"i",
"in",
"BSR",
"matrix",
"A",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/BSR_utils.py#L9-L61 | train | 209,233 |
pyamg/pyamg | pyamg/util/BSR_utils.py | BSR_Row_WriteScalar | def BSR_Row_WriteScalar(A, i, x):
"""Write a scalar at each nonzero location in row i of BSR matrix A.
Parameters
----------
A : bsr_matrix
Input matrix
i : int
Row number
x : float
Scalar to overwrite nonzeros of row i in A
Returns
-------
A : bsr_matrix
All nonzeros in row i of A have been overwritten with x.
If x is a vector, the first length(x) nonzeros in row i
of A have been overwritten with entries from x
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.BSR_utils import BSR_Row_WriteScalar
>>> indptr = array([0,2,3,6])
>>> indices = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
>>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
>>> BSR_Row_WriteScalar(B,5,22)
"""
blocksize = A.blocksize[0]
BlockIndx = int(i/blocksize)
rowstart = A.indptr[BlockIndx]
rowend = A.indptr[BlockIndx+1]
localRowIndx = i % blocksize
# for j in range(rowstart, rowend):
# indys = A.data[j,localRowIndx,:].nonzero()[0]
# increment = indys.shape[0]
# A.data[j,localRowIndx,indys] = x
indys = A.data[rowstart:rowend, localRowIndx, :].nonzero()
A.data[rowstart:rowend, localRowIndx, :][indys[0], indys[1]] = x | python | def BSR_Row_WriteScalar(A, i, x):
"""Write a scalar at each nonzero location in row i of BSR matrix A.
Parameters
----------
A : bsr_matrix
Input matrix
i : int
Row number
x : float
Scalar to overwrite nonzeros of row i in A
Returns
-------
A : bsr_matrix
All nonzeros in row i of A have been overwritten with x.
If x is a vector, the first length(x) nonzeros in row i
of A have been overwritten with entries from x
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.BSR_utils import BSR_Row_WriteScalar
>>> indptr = array([0,2,3,6])
>>> indices = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
>>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
>>> BSR_Row_WriteScalar(B,5,22)
"""
blocksize = A.blocksize[0]
BlockIndx = int(i/blocksize)
rowstart = A.indptr[BlockIndx]
rowend = A.indptr[BlockIndx+1]
localRowIndx = i % blocksize
# for j in range(rowstart, rowend):
# indys = A.data[j,localRowIndx,:].nonzero()[0]
# increment = indys.shape[0]
# A.data[j,localRowIndx,indys] = x
indys = A.data[rowstart:rowend, localRowIndx, :].nonzero()
A.data[rowstart:rowend, localRowIndx, :][indys[0], indys[1]] = x | [
"def",
"BSR_Row_WriteScalar",
"(",
"A",
",",
"i",
",",
"x",
")",
":",
"blocksize",
"=",
"A",
".",
"blocksize",
"[",
"0",
"]",
"BlockIndx",
"=",
"int",
"(",
"i",
"/",
"blocksize",
")",
"rowstart",
"=",
"A",
".",
"indptr",
"[",
"BlockIndx",
"]",
"row... | Write a scalar at each nonzero location in row i of BSR matrix A.
Parameters
----------
A : bsr_matrix
Input matrix
i : int
Row number
x : float
Scalar to overwrite nonzeros of row i in A
Returns
-------
A : bsr_matrix
All nonzeros in row i of A have been overwritten with x.
If x is a vector, the first length(x) nonzeros in row i
of A have been overwritten with entries from x
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.BSR_utils import BSR_Row_WriteScalar
>>> indptr = array([0,2,3,6])
>>> indices = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
>>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
>>> BSR_Row_WriteScalar(B,5,22) | [
"Write",
"a",
"scalar",
"at",
"each",
"nonzero",
"location",
"in",
"row",
"i",
"of",
"BSR",
"matrix",
"A",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/BSR_utils.py#L64-L107 | train | 209,234 |
pyamg/pyamg | pyamg/util/BSR_utils.py | BSR_Row_WriteVect | def BSR_Row_WriteVect(A, i, x):
"""Overwrite the nonzeros in row i of BSR matrix A with the vector x.
length(x) and nnz(A[i,:]) must be equivalent
Parameters
----------
A : bsr_matrix
Matrix assumed to be in BSR format
i : int
Row number
x : array
Array of values to overwrite nonzeros in row i of A
Returns
-------
A : bsr_matrix
The nonzeros in row i of A have been
overwritten with entries from x. x must be same
length as nonzeros of row i. This is guaranteed
when this routine is used with vectors derived form
Get_BSR_Row
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.BSR_utils import BSR_Row_WriteVect
>>> indptr = array([0,2,3,6])
>>> indices = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
>>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
>>> BSR_Row_WriteVect(B,5,array([11,22,33,44,55,66]))
"""
blocksize = A.blocksize[0]
BlockIndx = int(i/blocksize)
rowstart = A.indptr[BlockIndx]
rowend = A.indptr[BlockIndx+1]
localRowIndx = i % blocksize
# like matlab slicing:
x = x.__array__().reshape((max(x.shape),))
# counter = 0
# for j in range(rowstart, rowend):
# indys = A.data[j,localRowIndx,:].nonzero()[0]
# increment = min(indys.shape[0], blocksize)
# A.data[j,localRowIndx,indys] = x[counter:(counter+increment), 0]
# counter += increment
indys = A.data[rowstart:rowend, localRowIndx, :].nonzero()
A.data[rowstart:rowend, localRowIndx, :][indys[0], indys[1]] = x | python | def BSR_Row_WriteVect(A, i, x):
"""Overwrite the nonzeros in row i of BSR matrix A with the vector x.
length(x) and nnz(A[i,:]) must be equivalent
Parameters
----------
A : bsr_matrix
Matrix assumed to be in BSR format
i : int
Row number
x : array
Array of values to overwrite nonzeros in row i of A
Returns
-------
A : bsr_matrix
The nonzeros in row i of A have been
overwritten with entries from x. x must be same
length as nonzeros of row i. This is guaranteed
when this routine is used with vectors derived form
Get_BSR_Row
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.BSR_utils import BSR_Row_WriteVect
>>> indptr = array([0,2,3,6])
>>> indices = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
>>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
>>> BSR_Row_WriteVect(B,5,array([11,22,33,44,55,66]))
"""
blocksize = A.blocksize[0]
BlockIndx = int(i/blocksize)
rowstart = A.indptr[BlockIndx]
rowend = A.indptr[BlockIndx+1]
localRowIndx = i % blocksize
# like matlab slicing:
x = x.__array__().reshape((max(x.shape),))
# counter = 0
# for j in range(rowstart, rowend):
# indys = A.data[j,localRowIndx,:].nonzero()[0]
# increment = min(indys.shape[0], blocksize)
# A.data[j,localRowIndx,indys] = x[counter:(counter+increment), 0]
# counter += increment
indys = A.data[rowstart:rowend, localRowIndx, :].nonzero()
A.data[rowstart:rowend, localRowIndx, :][indys[0], indys[1]] = x | [
"def",
"BSR_Row_WriteVect",
"(",
"A",
",",
"i",
",",
"x",
")",
":",
"blocksize",
"=",
"A",
".",
"blocksize",
"[",
"0",
"]",
"BlockIndx",
"=",
"int",
"(",
"i",
"/",
"blocksize",
")",
"rowstart",
"=",
"A",
".",
"indptr",
"[",
"BlockIndx",
"]",
"rowen... | Overwrite the nonzeros in row i of BSR matrix A with the vector x.
length(x) and nnz(A[i,:]) must be equivalent
Parameters
----------
A : bsr_matrix
Matrix assumed to be in BSR format
i : int
Row number
x : array
Array of values to overwrite nonzeros in row i of A
Returns
-------
A : bsr_matrix
The nonzeros in row i of A have been
overwritten with entries from x. x must be same
length as nonzeros of row i. This is guaranteed
when this routine is used with vectors derived form
Get_BSR_Row
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.BSR_utils import BSR_Row_WriteVect
>>> indptr = array([0,2,3,6])
>>> indices = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6]).repeat(4).reshape(6,2,2)
>>> B = bsr_matrix( (data,indices,indptr), shape=(6,6) )
>>> BSR_Row_WriteVect(B,5,array([11,22,33,44,55,66])) | [
"Overwrite",
"the",
"nonzeros",
"in",
"row",
"i",
"of",
"BSR",
"matrix",
"A",
"with",
"the",
"vector",
"x",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/BSR_utils.py#L110-L162 | train | 209,235 |
pyamg/pyamg | pyamg/classical/interpolate.py | direct_interpolation | def direct_interpolation(A, C, splitting):
"""Create prolongator using direct interpolation.
Parameters
----------
A : csr_matrix
NxN matrix in CSR format
C : csr_matrix
Strength-of-Connection matrix
Must have zero diagonal
splitting : array
C/F splitting stored in an array of length N
Returns
-------
P : csr_matrix
Prolongator using direct interpolation
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import direct_interpolation
>>> import numpy as np
>>> A = poisson((5,),format='csr')
>>> splitting = np.array([1,0,1,0,1], dtype='intc')
>>> P = direct_interpolation(A, A, splitting)
>>> print P.todense()
[[ 1. 0. 0. ]
[ 0.5 0.5 0. ]
[ 0. 1. 0. ]
[ 0. 0.5 0.5]
[ 0. 0. 1. ]]
"""
if not isspmatrix_csr(A):
raise TypeError('expected csr_matrix for A')
if not isspmatrix_csr(C):
raise TypeError('expected csr_matrix for C')
# Interpolation weights are computed based on entries in A, but subject to
# the sparsity pattern of C. So, copy the entries of A into the
# sparsity pattern of C.
C = C.copy()
C.data[:] = 1.0
C = C.multiply(A)
Pp = np.empty_like(A.indptr)
amg_core.rs_direct_interpolation_pass1(A.shape[0],
C.indptr, C.indices, splitting, Pp)
nnz = Pp[-1]
Pj = np.empty(nnz, dtype=Pp.dtype)
Px = np.empty(nnz, dtype=A.dtype)
amg_core.rs_direct_interpolation_pass2(A.shape[0],
A.indptr, A.indices, A.data,
C.indptr, C.indices, C.data,
splitting,
Pp, Pj, Px)
return csr_matrix((Px, Pj, Pp)) | python | def direct_interpolation(A, C, splitting):
"""Create prolongator using direct interpolation.
Parameters
----------
A : csr_matrix
NxN matrix in CSR format
C : csr_matrix
Strength-of-Connection matrix
Must have zero diagonal
splitting : array
C/F splitting stored in an array of length N
Returns
-------
P : csr_matrix
Prolongator using direct interpolation
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import direct_interpolation
>>> import numpy as np
>>> A = poisson((5,),format='csr')
>>> splitting = np.array([1,0,1,0,1], dtype='intc')
>>> P = direct_interpolation(A, A, splitting)
>>> print P.todense()
[[ 1. 0. 0. ]
[ 0.5 0.5 0. ]
[ 0. 1. 0. ]
[ 0. 0.5 0.5]
[ 0. 0. 1. ]]
"""
if not isspmatrix_csr(A):
raise TypeError('expected csr_matrix for A')
if not isspmatrix_csr(C):
raise TypeError('expected csr_matrix for C')
# Interpolation weights are computed based on entries in A, but subject to
# the sparsity pattern of C. So, copy the entries of A into the
# sparsity pattern of C.
C = C.copy()
C.data[:] = 1.0
C = C.multiply(A)
Pp = np.empty_like(A.indptr)
amg_core.rs_direct_interpolation_pass1(A.shape[0],
C.indptr, C.indices, splitting, Pp)
nnz = Pp[-1]
Pj = np.empty(nnz, dtype=Pp.dtype)
Px = np.empty(nnz, dtype=A.dtype)
amg_core.rs_direct_interpolation_pass2(A.shape[0],
A.indptr, A.indices, A.data,
C.indptr, C.indices, C.data,
splitting,
Pp, Pj, Px)
return csr_matrix((Px, Pj, Pp)) | [
"def",
"direct_interpolation",
"(",
"A",
",",
"C",
",",
"splitting",
")",
":",
"if",
"not",
"isspmatrix_csr",
"(",
"A",
")",
":",
"raise",
"TypeError",
"(",
"'expected csr_matrix for A'",
")",
"if",
"not",
"isspmatrix_csr",
"(",
"C",
")",
":",
"raise",
"Ty... | Create prolongator using direct interpolation.
Parameters
----------
A : csr_matrix
NxN matrix in CSR format
C : csr_matrix
Strength-of-Connection matrix
Must have zero diagonal
splitting : array
C/F splitting stored in an array of length N
Returns
-------
P : csr_matrix
Prolongator using direct interpolation
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.classical import direct_interpolation
>>> import numpy as np
>>> A = poisson((5,),format='csr')
>>> splitting = np.array([1,0,1,0,1], dtype='intc')
>>> P = direct_interpolation(A, A, splitting)
>>> print P.todense()
[[ 1. 0. 0. ]
[ 0.5 0.5 0. ]
[ 0. 1. 0. ]
[ 0. 0.5 0.5]
[ 0. 0. 1. ]] | [
"Create",
"prolongator",
"using",
"direct",
"interpolation",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/classical/interpolate.py#L11-L73 | train | 209,236 |
pyamg/pyamg | pyamg/krylov/_gmres_mgs.py | apply_givens | def apply_givens(Q, v, k):
"""Apply the first k Givens rotations in Q to v.
Parameters
----------
Q : list
list of consecutive 2x2 Givens rotations
v : array
vector to apply the rotations to
k : int
number of rotations to apply.
Returns
-------
v is changed in place
Notes
-----
This routine is specialized for GMRES. It assumes that the first Givens
rotation is for dofs 0 and 1, the second Givens rotation is for
dofs 1 and 2, and so on.
"""
for j in range(k):
Qloc = Q[j]
v[j:j+2] = np.dot(Qloc, v[j:j+2]) | python | def apply_givens(Q, v, k):
"""Apply the first k Givens rotations in Q to v.
Parameters
----------
Q : list
list of consecutive 2x2 Givens rotations
v : array
vector to apply the rotations to
k : int
number of rotations to apply.
Returns
-------
v is changed in place
Notes
-----
This routine is specialized for GMRES. It assumes that the first Givens
rotation is for dofs 0 and 1, the second Givens rotation is for
dofs 1 and 2, and so on.
"""
for j in range(k):
Qloc = Q[j]
v[j:j+2] = np.dot(Qloc, v[j:j+2]) | [
"def",
"apply_givens",
"(",
"Q",
",",
"v",
",",
"k",
")",
":",
"for",
"j",
"in",
"range",
"(",
"k",
")",
":",
"Qloc",
"=",
"Q",
"[",
"j",
"]",
"v",
"[",
"j",
":",
"j",
"+",
"2",
"]",
"=",
"np",
".",
"dot",
"(",
"Qloc",
",",
"v",
"[",
... | Apply the first k Givens rotations in Q to v.
Parameters
----------
Q : list
list of consecutive 2x2 Givens rotations
v : array
vector to apply the rotations to
k : int
number of rotations to apply.
Returns
-------
v is changed in place
Notes
-----
This routine is specialized for GMRES. It assumes that the first Givens
rotation is for dofs 0 and 1, the second Givens rotation is for
dofs 1 and 2, and so on. | [
"Apply",
"the",
"first",
"k",
"Givens",
"rotations",
"in",
"Q",
"to",
"v",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/krylov/_gmres_mgs.py#L13-L38 | train | 209,237 |
pyamg/pyamg | pyamg/gallery/diffusion.py | diffusion_stencil_2d | def diffusion_stencil_2d(epsilon=1.0, theta=0.0, type='FE'):
"""Rotated Anisotropic diffusion in 2d of the form.
-div Q A Q^T grad u
Q = [cos(theta) -sin(theta)]
[sin(theta) cos(theta)]
A = [1 0 ]
[0 eps ]
Parameters
----------
epsilon : float, optional
Anisotropic diffusion coefficient: -div A grad u,
where A = [1 0; 0 epsilon]. The default is isotropic, epsilon=1.0
theta : float, optional
Rotation angle `theta` in radians defines -div Q A Q^T grad,
where Q = [cos(`theta`) -sin(`theta`); sin(`theta`) cos(`theta`)].
type : {'FE','FD'}
Specifies the discretization as Q1 finite element (FE) or 2nd order
finite difference (FD)
The default is `theta` = 0.0
Returns
-------
stencil : numpy array
A 3x3 diffusion stencil
See Also
--------
stencil_grid, poisson
Notes
-----
Not all combinations are supported.
Examples
--------
>>> import scipy as sp
>>> from pyamg.gallery.diffusion import diffusion_stencil_2d
>>> sten = diffusion_stencil_2d(epsilon=0.0001,theta=sp.pi/6,type='FD')
>>> print sten
[[-0.2164847 -0.750025 0.2164847]
[-0.250075 2.0002 -0.250075 ]
[ 0.2164847 -0.750025 -0.2164847]]
"""
eps = float(epsilon) # for brevity
theta = float(theta)
C = np.cos(theta)
S = np.sin(theta)
CS = C*S
CC = C**2
SS = S**2
if(type == 'FE'):
"""FE approximation to::
- (eps c^2 + s^2) u_xx +
-2(eps - 1) c s u_xy +
- ( c^2 + eps s^2) u_yy
[ -c^2*eps-s^2+3*c*s*(eps-1)-c^2-s^2*eps,
2*c^2*eps+2*s^2-4*c^2-4*s^2*eps,
-c^2*eps-s^2-3*c*s*(eps-1)-c^2-s^2*eps]
[-4*c^2*eps-4*s^2+2*c^2+2*s^2*eps,
8*c^2*eps+8*s^2+8*c^2+8*s^2*eps,
-4*c^2*eps-4*s^2+2*c^2+2*s^2*eps]
[-c^2*eps-s^2-3*c*s*(eps-1)-c^2-s^2*eps,
2*c^2*eps+2*s^2-4*c^2-4*s^2*eps,
-c^2*eps-s^2+3*c*s*(eps-1)-c^2-s^2*eps]
c = cos(theta)
s = sin(theta)
"""
a = (-1*eps - 1)*CC + (-1*eps - 1)*SS + (3*eps - 3)*CS
b = (2*eps - 4)*CC + (-4*eps + 2)*SS
c = (-1*eps - 1)*CC + (-1*eps - 1)*SS + (-3*eps + 3)*CS
d = (-4*eps + 2)*CC + (2*eps - 4)*SS
e = (8*eps + 8)*CC + (8*eps + 8)*SS
stencil = np.array([[a, b, c],
[d, e, d],
[c, b, a]]) / 6.0
elif type == 'FD':
"""FD approximation to:
- (eps c^2 + s^2) u_xx +
-2(eps - 1) c s u_xy +
- ( c^2 + eps s^2) u_yy
c = cos(theta)
s = sin(theta)
A = [ 1/2(eps - 1) c s -(c^2 + eps s^2) -1/2(eps - 1) c s ]
[ ]
[ -(eps c^2 + s^2) 2 (eps + 1) -(eps c^2 + s^2) ]
[ ]
[ -1/2(eps - 1) c s -(c^2 + eps s^2) 1/2(eps - 1) c s ]
"""
a = 0.5*(eps - 1)*CS
b = -(eps*SS + CC)
c = -a
d = -(eps*CC + SS)
e = 2.0*(eps + 1)
stencil = np.array([[a, b, c],
[d, e, d],
[c, b, a]])
return stencil | python | def diffusion_stencil_2d(epsilon=1.0, theta=0.0, type='FE'):
"""Rotated Anisotropic diffusion in 2d of the form.
-div Q A Q^T grad u
Q = [cos(theta) -sin(theta)]
[sin(theta) cos(theta)]
A = [1 0 ]
[0 eps ]
Parameters
----------
epsilon : float, optional
Anisotropic diffusion coefficient: -div A grad u,
where A = [1 0; 0 epsilon]. The default is isotropic, epsilon=1.0
theta : float, optional
Rotation angle `theta` in radians defines -div Q A Q^T grad,
where Q = [cos(`theta`) -sin(`theta`); sin(`theta`) cos(`theta`)].
type : {'FE','FD'}
Specifies the discretization as Q1 finite element (FE) or 2nd order
finite difference (FD)
The default is `theta` = 0.0
Returns
-------
stencil : numpy array
A 3x3 diffusion stencil
See Also
--------
stencil_grid, poisson
Notes
-----
Not all combinations are supported.
Examples
--------
>>> import scipy as sp
>>> from pyamg.gallery.diffusion import diffusion_stencil_2d
>>> sten = diffusion_stencil_2d(epsilon=0.0001,theta=sp.pi/6,type='FD')
>>> print sten
[[-0.2164847 -0.750025 0.2164847]
[-0.250075 2.0002 -0.250075 ]
[ 0.2164847 -0.750025 -0.2164847]]
"""
eps = float(epsilon) # for brevity
theta = float(theta)
C = np.cos(theta)
S = np.sin(theta)
CS = C*S
CC = C**2
SS = S**2
if(type == 'FE'):
"""FE approximation to::
- (eps c^2 + s^2) u_xx +
-2(eps - 1) c s u_xy +
- ( c^2 + eps s^2) u_yy
[ -c^2*eps-s^2+3*c*s*(eps-1)-c^2-s^2*eps,
2*c^2*eps+2*s^2-4*c^2-4*s^2*eps,
-c^2*eps-s^2-3*c*s*(eps-1)-c^2-s^2*eps]
[-4*c^2*eps-4*s^2+2*c^2+2*s^2*eps,
8*c^2*eps+8*s^2+8*c^2+8*s^2*eps,
-4*c^2*eps-4*s^2+2*c^2+2*s^2*eps]
[-c^2*eps-s^2-3*c*s*(eps-1)-c^2-s^2*eps,
2*c^2*eps+2*s^2-4*c^2-4*s^2*eps,
-c^2*eps-s^2+3*c*s*(eps-1)-c^2-s^2*eps]
c = cos(theta)
s = sin(theta)
"""
a = (-1*eps - 1)*CC + (-1*eps - 1)*SS + (3*eps - 3)*CS
b = (2*eps - 4)*CC + (-4*eps + 2)*SS
c = (-1*eps - 1)*CC + (-1*eps - 1)*SS + (-3*eps + 3)*CS
d = (-4*eps + 2)*CC + (2*eps - 4)*SS
e = (8*eps + 8)*CC + (8*eps + 8)*SS
stencil = np.array([[a, b, c],
[d, e, d],
[c, b, a]]) / 6.0
elif type == 'FD':
"""FD approximation to:
- (eps c^2 + s^2) u_xx +
-2(eps - 1) c s u_xy +
- ( c^2 + eps s^2) u_yy
c = cos(theta)
s = sin(theta)
A = [ 1/2(eps - 1) c s -(c^2 + eps s^2) -1/2(eps - 1) c s ]
[ ]
[ -(eps c^2 + s^2) 2 (eps + 1) -(eps c^2 + s^2) ]
[ ]
[ -1/2(eps - 1) c s -(c^2 + eps s^2) 1/2(eps - 1) c s ]
"""
a = 0.5*(eps - 1)*CS
b = -(eps*SS + CC)
c = -a
d = -(eps*CC + SS)
e = 2.0*(eps + 1)
stencil = np.array([[a, b, c],
[d, e, d],
[c, b, a]])
return stencil | [
"def",
"diffusion_stencil_2d",
"(",
"epsilon",
"=",
"1.0",
",",
"theta",
"=",
"0.0",
",",
"type",
"=",
"'FE'",
")",
":",
"eps",
"=",
"float",
"(",
"epsilon",
")",
"# for brevity",
"theta",
"=",
"float",
"(",
"theta",
")",
"C",
"=",
"np",
".",
"cos",
... | Rotated Anisotropic diffusion in 2d of the form.
-div Q A Q^T grad u
Q = [cos(theta) -sin(theta)]
[sin(theta) cos(theta)]
A = [1 0 ]
[0 eps ]
Parameters
----------
epsilon : float, optional
Anisotropic diffusion coefficient: -div A grad u,
where A = [1 0; 0 epsilon]. The default is isotropic, epsilon=1.0
theta : float, optional
Rotation angle `theta` in radians defines -div Q A Q^T grad,
where Q = [cos(`theta`) -sin(`theta`); sin(`theta`) cos(`theta`)].
type : {'FE','FD'}
Specifies the discretization as Q1 finite element (FE) or 2nd order
finite difference (FD)
The default is `theta` = 0.0
Returns
-------
stencil : numpy array
A 3x3 diffusion stencil
See Also
--------
stencil_grid, poisson
Notes
-----
Not all combinations are supported.
Examples
--------
>>> import scipy as sp
>>> from pyamg.gallery.diffusion import diffusion_stencil_2d
>>> sten = diffusion_stencil_2d(epsilon=0.0001,theta=sp.pi/6,type='FD')
>>> print sten
[[-0.2164847 -0.750025 0.2164847]
[-0.250075 2.0002 -0.250075 ]
[ 0.2164847 -0.750025 -0.2164847]] | [
"Rotated",
"Anisotropic",
"diffusion",
"in",
"2d",
"of",
"the",
"form",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/diffusion.py#L18-L135 | train | 209,238 |
pyamg/pyamg | pyamg/gallery/diffusion.py | _symbolic_rotation_helper | def _symbolic_rotation_helper():
"""Use SymPy to generate the 3D rotation matrix and products for diffusion_stencil_3d."""
from sympy import symbols, Matrix
cpsi, spsi = symbols('cpsi, spsi')
cth, sth = symbols('cth, sth')
cphi, sphi = symbols('cphi, sphi')
Rpsi = Matrix([[cpsi, spsi, 0], [-spsi, cpsi, 0], [0, 0, 1]])
Rth = Matrix([[1, 0, 0], [0, cth, sth], [0, -sth, cth]])
Rphi = Matrix([[cphi, sphi, 0], [-sphi, cphi, 0], [0, 0, 1]])
Q = Rpsi * Rth * Rphi
epsy, epsz = symbols('epsy, epsz')
A = Matrix([[1, 0, 0], [0, epsy, 0], [0, 0, epsz]])
D = Q * A * Q.T
for i in range(3):
for j in range(3):
print('D[%d, %d] = %s' % (i, j, D[i, j])) | python | def _symbolic_rotation_helper():
"""Use SymPy to generate the 3D rotation matrix and products for diffusion_stencil_3d."""
from sympy import symbols, Matrix
cpsi, spsi = symbols('cpsi, spsi')
cth, sth = symbols('cth, sth')
cphi, sphi = symbols('cphi, sphi')
Rpsi = Matrix([[cpsi, spsi, 0], [-spsi, cpsi, 0], [0, 0, 1]])
Rth = Matrix([[1, 0, 0], [0, cth, sth], [0, -sth, cth]])
Rphi = Matrix([[cphi, sphi, 0], [-sphi, cphi, 0], [0, 0, 1]])
Q = Rpsi * Rth * Rphi
epsy, epsz = symbols('epsy, epsz')
A = Matrix([[1, 0, 0], [0, epsy, 0], [0, 0, epsz]])
D = Q * A * Q.T
for i in range(3):
for j in range(3):
print('D[%d, %d] = %s' % (i, j, D[i, j])) | [
"def",
"_symbolic_rotation_helper",
"(",
")",
":",
"from",
"sympy",
"import",
"symbols",
",",
"Matrix",
"cpsi",
",",
"spsi",
"=",
"symbols",
"(",
"'cpsi, spsi'",
")",
"cth",
",",
"sth",
"=",
"symbols",
"(",
"'cth, sth'",
")",
"cphi",
",",
"sphi",
"=",
"s... | Use SymPy to generate the 3D rotation matrix and products for diffusion_stencil_3d. | [
"Use",
"SymPy",
"to",
"generate",
"the",
"3D",
"rotation",
"matrix",
"and",
"products",
"for",
"diffusion_stencil_3d",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/diffusion.py#L138-L158 | train | 209,239 |
pyamg/pyamg | pyamg/gallery/diffusion.py | _symbolic_product_helper | def _symbolic_product_helper():
"""Use SymPy to generate the 3D products for diffusion_stencil_3d."""
from sympy import symbols, Matrix
D11, D12, D13, D21, D22, D23, D31, D32, D33 =\
symbols('D11, D12, D13, D21, D22, D23, D31, D32, D33')
D = Matrix([[D11, D12, D13], [D21, D22, D23], [D31, D32, D33]])
grad = Matrix([['dx', 'dy', 'dz']]).T
div = grad.T
a = div * D * grad
print(a[0]) | python | def _symbolic_product_helper():
"""Use SymPy to generate the 3D products for diffusion_stencil_3d."""
from sympy import symbols, Matrix
D11, D12, D13, D21, D22, D23, D31, D32, D33 =\
symbols('D11, D12, D13, D21, D22, D23, D31, D32, D33')
D = Matrix([[D11, D12, D13], [D21, D22, D23], [D31, D32, D33]])
grad = Matrix([['dx', 'dy', 'dz']]).T
div = grad.T
a = div * D * grad
print(a[0]) | [
"def",
"_symbolic_product_helper",
"(",
")",
":",
"from",
"sympy",
"import",
"symbols",
",",
"Matrix",
"D11",
",",
"D12",
",",
"D13",
",",
"D21",
",",
"D22",
",",
"D23",
",",
"D31",
",",
"D32",
",",
"D33",
"=",
"symbols",
"(",
"'D11, D12, D13, D21, D22, ... | Use SymPy to generate the 3D products for diffusion_stencil_3d. | [
"Use",
"SymPy",
"to",
"generate",
"the",
"3D",
"products",
"for",
"diffusion_stencil_3d",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/gallery/diffusion.py#L161-L174 | train | 209,240 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | make_system | def make_system(A, x, b, formats=None):
"""Return A,x,b suitable for relaxation or raise an exception.
Parameters
----------
A : sparse-matrix
n x n system
x : array
n-vector, initial guess
b : array
n-vector, right-hand side
formats: {'csr', 'csc', 'bsr', 'lil', 'dok',...}
desired sparse matrix format
default is no change to A's format
Returns
-------
(A,x,b), where A is in the desired sparse-matrix format
and x and b are "raveled", i.e. (n,) vectors.
Notes
-----
Does some rudimentary error checking on the system,
such as checking for compatible dimensions and checking
for compatible type, i.e. float or complex.
Examples
--------
>>> from pyamg.relaxation.relaxation import make_system
>>> from pyamg.gallery import poisson
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> (A,x,b) = make_system(A,x,b,formats=['csc'])
>>> print str(x.shape)
(100,)
>>> print str(b.shape)
(100,)
>>> print A.format
csc
"""
if formats is None:
pass
elif formats == ['csr']:
if sparse.isspmatrix_csr(A):
pass
elif sparse.isspmatrix_bsr(A):
A = A.tocsr()
else:
warn('implicit conversion to CSR', sparse.SparseEfficiencyWarning)
A = sparse.csr_matrix(A)
else:
if sparse.isspmatrix(A) and A.format in formats:
pass
else:
A = sparse.csr_matrix(A).asformat(formats[0])
if not isinstance(x, np.ndarray):
raise ValueError('expected numpy array for argument x')
if not isinstance(b, np.ndarray):
raise ValueError('expected numpy array for argument b')
M, N = A.shape
if M != N:
raise ValueError('expected square matrix')
if x.shape not in [(M,), (M, 1)]:
raise ValueError('x has invalid dimensions')
if b.shape not in [(M,), (M, 1)]:
raise ValueError('b has invalid dimensions')
if A.dtype != x.dtype or A.dtype != b.dtype:
raise TypeError('arguments A, x, and b must have the same dtype')
if not x.flags.carray:
raise ValueError('x must be contiguous in memory')
x = np.ravel(x)
b = np.ravel(b)
return A, x, b | python | def make_system(A, x, b, formats=None):
"""Return A,x,b suitable for relaxation or raise an exception.
Parameters
----------
A : sparse-matrix
n x n system
x : array
n-vector, initial guess
b : array
n-vector, right-hand side
formats: {'csr', 'csc', 'bsr', 'lil', 'dok',...}
desired sparse matrix format
default is no change to A's format
Returns
-------
(A,x,b), where A is in the desired sparse-matrix format
and x and b are "raveled", i.e. (n,) vectors.
Notes
-----
Does some rudimentary error checking on the system,
such as checking for compatible dimensions and checking
for compatible type, i.e. float or complex.
Examples
--------
>>> from pyamg.relaxation.relaxation import make_system
>>> from pyamg.gallery import poisson
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> (A,x,b) = make_system(A,x,b,formats=['csc'])
>>> print str(x.shape)
(100,)
>>> print str(b.shape)
(100,)
>>> print A.format
csc
"""
if formats is None:
pass
elif formats == ['csr']:
if sparse.isspmatrix_csr(A):
pass
elif sparse.isspmatrix_bsr(A):
A = A.tocsr()
else:
warn('implicit conversion to CSR', sparse.SparseEfficiencyWarning)
A = sparse.csr_matrix(A)
else:
if sparse.isspmatrix(A) and A.format in formats:
pass
else:
A = sparse.csr_matrix(A).asformat(formats[0])
if not isinstance(x, np.ndarray):
raise ValueError('expected numpy array for argument x')
if not isinstance(b, np.ndarray):
raise ValueError('expected numpy array for argument b')
M, N = A.shape
if M != N:
raise ValueError('expected square matrix')
if x.shape not in [(M,), (M, 1)]:
raise ValueError('x has invalid dimensions')
if b.shape not in [(M,), (M, 1)]:
raise ValueError('b has invalid dimensions')
if A.dtype != x.dtype or A.dtype != b.dtype:
raise TypeError('arguments A, x, and b must have the same dtype')
if not x.flags.carray:
raise ValueError('x must be contiguous in memory')
x = np.ravel(x)
b = np.ravel(b)
return A, x, b | [
"def",
"make_system",
"(",
"A",
",",
"x",
",",
"b",
",",
"formats",
"=",
"None",
")",
":",
"if",
"formats",
"is",
"None",
":",
"pass",
"elif",
"formats",
"==",
"[",
"'csr'",
"]",
":",
"if",
"sparse",
".",
"isspmatrix_csr",
"(",
"A",
")",
":",
"pa... | Return A,x,b suitable for relaxation or raise an exception.
Parameters
----------
A : sparse-matrix
n x n system
x : array
n-vector, initial guess
b : array
n-vector, right-hand side
formats: {'csr', 'csc', 'bsr', 'lil', 'dok',...}
desired sparse matrix format
default is no change to A's format
Returns
-------
(A,x,b), where A is in the desired sparse-matrix format
and x and b are "raveled", i.e. (n,) vectors.
Notes
-----
Does some rudimentary error checking on the system,
such as checking for compatible dimensions and checking
for compatible type, i.e. float or complex.
Examples
--------
>>> from pyamg.relaxation.relaxation import make_system
>>> from pyamg.gallery import poisson
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> (A,x,b) = make_system(A,x,b,formats=['csc'])
>>> print str(x.shape)
(100,)
>>> print str(b.shape)
(100,)
>>> print A.format
csc | [
"Return",
"A",
"x",
"b",
"suitable",
"for",
"relaxation",
"or",
"raise",
"an",
"exception",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L20-L103 | train | 209,241 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | sor | def sor(A, x, b, omega, iterations=1, sweep='forward'):
"""Perform SOR iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
omega : scalar
Damping parameter
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Notes
-----
When omega=1.0, SOR is equivalent to Gauss-Seidel.
Examples
--------
>>> # Use SOR as stand-along solver
>>> from pyamg.relaxation.relaxation import sor
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> sor(A, x0, b, 1.33, iterations=10)
>>> print norm(b-A*x0)
3.03888724811
>>> #
>>> # Use SOR as the multigrid smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('sor', {'sweep':'symmetric', 'omega' : 1.33}),
... postsmoother=('sor', {'sweep':'symmetric', 'omega' : 1.33}))
>>> x0 = np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
x_old = np.empty_like(x)
for i in range(iterations):
x_old[:] = x
gauss_seidel(A, x, b, iterations=1, sweep=sweep)
x *= omega
x_old *= (1-omega)
x += x_old | python | def sor(A, x, b, omega, iterations=1, sweep='forward'):
"""Perform SOR iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
omega : scalar
Damping parameter
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Notes
-----
When omega=1.0, SOR is equivalent to Gauss-Seidel.
Examples
--------
>>> # Use SOR as stand-along solver
>>> from pyamg.relaxation.relaxation import sor
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> sor(A, x0, b, 1.33, iterations=10)
>>> print norm(b-A*x0)
3.03888724811
>>> #
>>> # Use SOR as the multigrid smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('sor', {'sweep':'symmetric', 'omega' : 1.33}),
... postsmoother=('sor', {'sweep':'symmetric', 'omega' : 1.33}))
>>> x0 = np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
x_old = np.empty_like(x)
for i in range(iterations):
x_old[:] = x
gauss_seidel(A, x, b, iterations=1, sweep=sweep)
x *= omega
x_old *= (1-omega)
x += x_old | [
"def",
"sor",
"(",
"A",
",",
"x",
",",
"b",
",",
"omega",
",",
"iterations",
"=",
"1",
",",
"sweep",
"=",
"'forward'",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
",",
"b",
",",
"formats",
"=",
"[",
"'csr'",
"... | Perform SOR iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
omega : scalar
Damping parameter
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Notes
-----
When omega=1.0, SOR is equivalent to Gauss-Seidel.
Examples
--------
>>> # Use SOR as stand-along solver
>>> from pyamg.relaxation.relaxation import sor
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> sor(A, x0, b, 1.33, iterations=10)
>>> print norm(b-A*x0)
3.03888724811
>>> #
>>> # Use SOR as the multigrid smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('sor', {'sweep':'symmetric', 'omega' : 1.33}),
... postsmoother=('sor', {'sweep':'symmetric', 'omega' : 1.33}))
>>> x0 = np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"SOR",
"iteration",
"on",
"the",
"linear",
"system",
"Ax",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L106-L168 | train | 209,242 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | schwarz | def schwarz(A, x, b, iterations=1, subdomain=None, subdomain_ptr=None,
inv_subblock=None, inv_subblock_ptr=None, sweep='forward'):
"""Perform Overlapping multiplicative Schwarz on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
subdomain : int array
Linear array containing each subdomain's elements
subdomain_ptr : int array
Pointer in subdomain, such that
subdomain[subdomain_ptr[i]:subdomain_ptr[i+1]]]
contains the _sorted_ indices in subdomain i
inv_subblock : int_array
Linear array containing each subdomain's
inverted diagonal block of A
inv_subblock_ptr : int array
Pointer in inv_subblock, such that
inv_subblock[inv_subblock_ptr[i]:inv_subblock_ptr[i+1]]]
contains the inverted diagonal block of A for the
i-th subdomain in _row_ major order
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Notes
-----
If subdomains is None, then a point-wise iteration takes place,
with the overlapping region defined by each degree-of-freedom's
neighbors in the matrix graph.
If subdomains is not None, but subblocks is, then the subblocks
are formed internally.
Currently only supports CSR matrices
Examples
--------
>>> # Use Overlapping Schwarz as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import schwarz
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> schwarz(A, x0, b, iterations=10)
>>> print norm(b-A*x0)
0.126326160522
>>> #
>>> # Schwarz as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother='schwarz',
... postsmoother='schwarz')
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr'])
A.sort_indices()
if subdomain is None and inv_subblock is not None:
raise ValueError("inv_subblock must be None if subdomain is None")
# If no subdomains are defined, default is to use the sparsity pattern of A
# to define the overlapping regions
(subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr) = \
schwarz_parameters(A, subdomain, subdomain_ptr,
inv_subblock, inv_subblock_ptr)
if sweep == 'forward':
row_start, row_stop, row_step = 0, subdomain_ptr.shape[0]-1, 1
elif sweep == 'backward':
row_start, row_stop, row_step = subdomain_ptr.shape[0]-2, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
schwarz(A, x, b, iterations=1, subdomain=subdomain,
subdomain_ptr=subdomain_ptr, inv_subblock=inv_subblock,
inv_subblock_ptr=inv_subblock_ptr, sweep='forward')
schwarz(A, x, b, iterations=1, subdomain=subdomain,
subdomain_ptr=subdomain_ptr, inv_subblock=inv_subblock,
inv_subblock_ptr=inv_subblock_ptr, sweep='backward')
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
# Call C code, need to make sure that subdomains are sorted and unique
for iter in range(iterations):
amg_core.overlapping_schwarz_csr(A.indptr, A.indices, A.data,
x, b, inv_subblock, inv_subblock_ptr,
subdomain, subdomain_ptr,
subdomain_ptr.shape[0]-1, A.shape[0],
row_start, row_stop, row_step) | python | def schwarz(A, x, b, iterations=1, subdomain=None, subdomain_ptr=None,
inv_subblock=None, inv_subblock_ptr=None, sweep='forward'):
"""Perform Overlapping multiplicative Schwarz on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
subdomain : int array
Linear array containing each subdomain's elements
subdomain_ptr : int array
Pointer in subdomain, such that
subdomain[subdomain_ptr[i]:subdomain_ptr[i+1]]]
contains the _sorted_ indices in subdomain i
inv_subblock : int_array
Linear array containing each subdomain's
inverted diagonal block of A
inv_subblock_ptr : int array
Pointer in inv_subblock, such that
inv_subblock[inv_subblock_ptr[i]:inv_subblock_ptr[i+1]]]
contains the inverted diagonal block of A for the
i-th subdomain in _row_ major order
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Notes
-----
If subdomains is None, then a point-wise iteration takes place,
with the overlapping region defined by each degree-of-freedom's
neighbors in the matrix graph.
If subdomains is not None, but subblocks is, then the subblocks
are formed internally.
Currently only supports CSR matrices
Examples
--------
>>> # Use Overlapping Schwarz as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import schwarz
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> schwarz(A, x0, b, iterations=10)
>>> print norm(b-A*x0)
0.126326160522
>>> #
>>> # Schwarz as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother='schwarz',
... postsmoother='schwarz')
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr'])
A.sort_indices()
if subdomain is None and inv_subblock is not None:
raise ValueError("inv_subblock must be None if subdomain is None")
# If no subdomains are defined, default is to use the sparsity pattern of A
# to define the overlapping regions
(subdomain, subdomain_ptr, inv_subblock, inv_subblock_ptr) = \
schwarz_parameters(A, subdomain, subdomain_ptr,
inv_subblock, inv_subblock_ptr)
if sweep == 'forward':
row_start, row_stop, row_step = 0, subdomain_ptr.shape[0]-1, 1
elif sweep == 'backward':
row_start, row_stop, row_step = subdomain_ptr.shape[0]-2, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
schwarz(A, x, b, iterations=1, subdomain=subdomain,
subdomain_ptr=subdomain_ptr, inv_subblock=inv_subblock,
inv_subblock_ptr=inv_subblock_ptr, sweep='forward')
schwarz(A, x, b, iterations=1, subdomain=subdomain,
subdomain_ptr=subdomain_ptr, inv_subblock=inv_subblock,
inv_subblock_ptr=inv_subblock_ptr, sweep='backward')
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
# Call C code, need to make sure that subdomains are sorted and unique
for iter in range(iterations):
amg_core.overlapping_schwarz_csr(A.indptr, A.indices, A.data,
x, b, inv_subblock, inv_subblock_ptr,
subdomain, subdomain_ptr,
subdomain_ptr.shape[0]-1, A.shape[0],
row_start, row_stop, row_step) | [
"def",
"schwarz",
"(",
"A",
",",
"x",
",",
"b",
",",
"iterations",
"=",
"1",
",",
"subdomain",
"=",
"None",
",",
"subdomain_ptr",
"=",
"None",
",",
"inv_subblock",
"=",
"None",
",",
"inv_subblock_ptr",
"=",
"None",
",",
"sweep",
"=",
"'forward'",
")",
... | Perform Overlapping multiplicative Schwarz on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
subdomain : int array
Linear array containing each subdomain's elements
subdomain_ptr : int array
Pointer in subdomain, such that
subdomain[subdomain_ptr[i]:subdomain_ptr[i+1]]]
contains the _sorted_ indices in subdomain i
inv_subblock : int_array
Linear array containing each subdomain's
inverted diagonal block of A
inv_subblock_ptr : int array
Pointer in inv_subblock, such that
inv_subblock[inv_subblock_ptr[i]:inv_subblock_ptr[i+1]]]
contains the inverted diagonal block of A for the
i-th subdomain in _row_ major order
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Notes
-----
If subdomains is None, then a point-wise iteration takes place,
with the overlapping region defined by each degree-of-freedom's
neighbors in the matrix graph.
If subdomains is not None, but subblocks is, then the subblocks
are formed internally.
Currently only supports CSR matrices
Examples
--------
>>> # Use Overlapping Schwarz as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import schwarz
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> schwarz(A, x0, b, iterations=10)
>>> print norm(b-A*x0)
0.126326160522
>>> #
>>> # Schwarz as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother='schwarz',
... postsmoother='schwarz')
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"Overlapping",
"multiplicative",
"Schwarz",
"on",
"the",
"linear",
"system",
"Ax",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L171-L277 | train | 209,243 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | gauss_seidel | def gauss_seidel(A, x, b, iterations=1, sweep='forward'):
"""Perform Gauss-Seidel iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel(A, x0, b, iterations=10)
>>> print norm(b-A*x0)
4.00733716236
>>> #
>>> # Use Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('gauss_seidel', {'sweep':'symmetric'}),
... postsmoother=('gauss_seidel', {'sweep':'symmetric'}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
if sparse.isspmatrix_csr(A):
blocksize = 1
else:
R, C = A.blocksize
if R != C:
raise ValueError('BSR blocks must be square')
blocksize = R
if sweep == 'forward':
row_start, row_stop, row_step = 0, int(len(x)/blocksize), 1
elif sweep == 'backward':
row_start, row_stop, row_step = int(len(x)/blocksize)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
gauss_seidel(A, x, b, iterations=1, sweep='forward')
gauss_seidel(A, x, b, iterations=1, sweep='backward')
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
if sparse.isspmatrix_csr(A):
for iter in range(iterations):
amg_core.gauss_seidel(A.indptr, A.indices, A.data, x, b,
row_start, row_stop, row_step)
else:
for iter in range(iterations):
amg_core.bsr_gauss_seidel(A.indptr, A.indices, np.ravel(A.data),
x, b, row_start, row_stop, row_step, R) | python | def gauss_seidel(A, x, b, iterations=1, sweep='forward'):
"""Perform Gauss-Seidel iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel(A, x0, b, iterations=10)
>>> print norm(b-A*x0)
4.00733716236
>>> #
>>> # Use Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('gauss_seidel', {'sweep':'symmetric'}),
... postsmoother=('gauss_seidel', {'sweep':'symmetric'}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
if sparse.isspmatrix_csr(A):
blocksize = 1
else:
R, C = A.blocksize
if R != C:
raise ValueError('BSR blocks must be square')
blocksize = R
if sweep == 'forward':
row_start, row_stop, row_step = 0, int(len(x)/blocksize), 1
elif sweep == 'backward':
row_start, row_stop, row_step = int(len(x)/blocksize)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
gauss_seidel(A, x, b, iterations=1, sweep='forward')
gauss_seidel(A, x, b, iterations=1, sweep='backward')
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
if sparse.isspmatrix_csr(A):
for iter in range(iterations):
amg_core.gauss_seidel(A.indptr, A.indices, A.data, x, b,
row_start, row_stop, row_step)
else:
for iter in range(iterations):
amg_core.bsr_gauss_seidel(A.indptr, A.indices, np.ravel(A.data),
x, b, row_start, row_stop, row_step, R) | [
"def",
"gauss_seidel",
"(",
"A",
",",
"x",
",",
"b",
",",
"iterations",
"=",
"1",
",",
"sweep",
"=",
"'forward'",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
",",
"b",
",",
"formats",
"=",
"[",
"'csr'",
",",
"'b... | Perform Gauss-Seidel iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel(A, x0, b, iterations=10)
>>> print norm(b-A*x0)
4.00733716236
>>> #
>>> # Use Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('gauss_seidel', {'sweep':'symmetric'}),
... postsmoother=('gauss_seidel', {'sweep':'symmetric'}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"Gauss",
"-",
"Seidel",
"iteration",
"on",
"the",
"linear",
"system",
"Ax",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L280-L355 | train | 209,244 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | jacobi | def jacobi(A, x, b, iterations=1, omega=1.0):
"""Perform Jacobi iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation.relaxation import jacobi
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> jacobi(A, x0, b, iterations=10, omega=1.0)
>>> print norm(b-A*x0)
5.83475132751
>>> #
>>> # Use Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('jacobi', {'omega': 4.0/3.0, 'iterations' : 2}),
... postsmoother=('jacobi', {'omega': 4.0/3.0, 'iterations' : 2}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
sweep = slice(None)
(row_start, row_stop, row_step) = sweep.indices(A.shape[0])
if (row_stop - row_start) * row_step <= 0: # no work to do
return
temp = np.empty_like(x)
# Create uniform type, convert possibly complex scalars to length 1 arrays
[omega] = type_prep(A.dtype, [omega])
if sparse.isspmatrix_csr(A):
for iter in range(iterations):
amg_core.jacobi(A.indptr, A.indices, A.data, x, b, temp,
row_start, row_stop, row_step, omega)
else:
R, C = A.blocksize
if R != C:
raise ValueError('BSR blocks must be square')
row_start = int(row_start / R)
row_stop = int(row_stop / R)
for iter in range(iterations):
amg_core.bsr_jacobi(A.indptr, A.indices, np.ravel(A.data),
x, b, temp, row_start, row_stop,
row_step, R, omega) | python | def jacobi(A, x, b, iterations=1, omega=1.0):
"""Perform Jacobi iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation.relaxation import jacobi
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> jacobi(A, x0, b, iterations=10, omega=1.0)
>>> print norm(b-A*x0)
5.83475132751
>>> #
>>> # Use Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('jacobi', {'omega': 4.0/3.0, 'iterations' : 2}),
... postsmoother=('jacobi', {'omega': 4.0/3.0, 'iterations' : 2}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
sweep = slice(None)
(row_start, row_stop, row_step) = sweep.indices(A.shape[0])
if (row_stop - row_start) * row_step <= 0: # no work to do
return
temp = np.empty_like(x)
# Create uniform type, convert possibly complex scalars to length 1 arrays
[omega] = type_prep(A.dtype, [omega])
if sparse.isspmatrix_csr(A):
for iter in range(iterations):
amg_core.jacobi(A.indptr, A.indices, A.data, x, b, temp,
row_start, row_stop, row_step, omega)
else:
R, C = A.blocksize
if R != C:
raise ValueError('BSR blocks must be square')
row_start = int(row_start / R)
row_stop = int(row_stop / R)
for iter in range(iterations):
amg_core.bsr_jacobi(A.indptr, A.indices, np.ravel(A.data),
x, b, temp, row_start, row_stop,
row_step, R, omega) | [
"def",
"jacobi",
"(",
"A",
",",
"x",
",",
"b",
",",
"iterations",
"=",
"1",
",",
"omega",
"=",
"1.0",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
",",
"b",
",",
"formats",
"=",
"[",
"'csr'",
",",
"'bsr'",
"]",... | Perform Jacobi iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation.relaxation import jacobi
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> jacobi(A, x0, b, iterations=10, omega=1.0)
>>> print norm(b-A*x0)
5.83475132751
>>> #
>>> # Use Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('jacobi', {'omega': 4.0/3.0, 'iterations' : 2}),
... postsmoother=('jacobi', {'omega': 4.0/3.0, 'iterations' : 2}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"Jacobi",
"iteration",
"on",
"the",
"linear",
"system",
"Ax",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L358-L429 | train | 209,245 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | block_jacobi | def block_jacobi(A, x, b, Dinv=None, blocksize=1, iterations=1, omega=1.0):
"""Perform block Jacobi iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix or bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
Dinv : array
Array holding block diagonal inverses of A
size (N/blocksize, blocksize, blocksize)
blocksize : int
Desired dimension of blocks
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use block Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import block_jacobi
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> block_jacobi(A, x0, b, blocksize=4, iterations=10, omega=1.0)
>>> print norm(b-A*x0)
4.66474230129
>>> #
>>> # Use block Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'omega': 4.0/3.0, 'iterations' : 2, 'blocksize' : 4}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('block_jacobi', opts),
... postsmoother=('block_jacobi', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
A = A.tobsr(blocksize=(blocksize, blocksize))
if Dinv is None:
Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
elif Dinv.shape[0] != int(A.shape[0]/blocksize):
raise ValueError('Dinv and A have incompatible dimensions')
elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
raise ValueError('Dinv and blocksize are incompatible')
sweep = slice(None)
(row_start, row_stop, row_step) = sweep.indices(int(A.shape[0]/blocksize))
if (row_stop - row_start) * row_step <= 0: # no work to do
return
temp = np.empty_like(x)
# Create uniform type, convert possibly complex scalars to length 1 arrays
[omega] = type_prep(A.dtype, [omega])
for iter in range(iterations):
amg_core.block_jacobi(A.indptr, A.indices, np.ravel(A.data),
x, b, np.ravel(Dinv), temp,
row_start, row_stop, row_step,
omega, blocksize) | python | def block_jacobi(A, x, b, Dinv=None, blocksize=1, iterations=1, omega=1.0):
"""Perform block Jacobi iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix or bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
Dinv : array
Array holding block diagonal inverses of A
size (N/blocksize, blocksize, blocksize)
blocksize : int
Desired dimension of blocks
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use block Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import block_jacobi
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> block_jacobi(A, x0, b, blocksize=4, iterations=10, omega=1.0)
>>> print norm(b-A*x0)
4.66474230129
>>> #
>>> # Use block Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'omega': 4.0/3.0, 'iterations' : 2, 'blocksize' : 4}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('block_jacobi', opts),
... postsmoother=('block_jacobi', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
A = A.tobsr(blocksize=(blocksize, blocksize))
if Dinv is None:
Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
elif Dinv.shape[0] != int(A.shape[0]/blocksize):
raise ValueError('Dinv and A have incompatible dimensions')
elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
raise ValueError('Dinv and blocksize are incompatible')
sweep = slice(None)
(row_start, row_stop, row_step) = sweep.indices(int(A.shape[0]/blocksize))
if (row_stop - row_start) * row_step <= 0: # no work to do
return
temp = np.empty_like(x)
# Create uniform type, convert possibly complex scalars to length 1 arrays
[omega] = type_prep(A.dtype, [omega])
for iter in range(iterations):
amg_core.block_jacobi(A.indptr, A.indices, np.ravel(A.data),
x, b, np.ravel(Dinv), temp,
row_start, row_stop, row_step,
omega, blocksize) | [
"def",
"block_jacobi",
"(",
"A",
",",
"x",
",",
"b",
",",
"Dinv",
"=",
"None",
",",
"blocksize",
"=",
"1",
",",
"iterations",
"=",
"1",
",",
"omega",
"=",
"1.0",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
",",
... | Perform block Jacobi iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix or bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
Dinv : array
Array holding block diagonal inverses of A
size (N/blocksize, blocksize, blocksize)
blocksize : int
Desired dimension of blocks
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use block Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import block_jacobi
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> block_jacobi(A, x0, b, blocksize=4, iterations=10, omega=1.0)
>>> print norm(b-A*x0)
4.66474230129
>>> #
>>> # Use block Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'omega': 4.0/3.0, 'iterations' : 2, 'blocksize' : 4}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('block_jacobi', opts),
... postsmoother=('block_jacobi', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"block",
"Jacobi",
"iteration",
"on",
"the",
"linear",
"system",
"Ax",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L432-L508 | train | 209,246 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | block_gauss_seidel | def block_gauss_seidel(A, x, b, iterations=1, sweep='forward', blocksize=1,
Dinv=None):
"""Perform block Gauss-Seidel iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Dinv : array
Array holding block diagonal inverses of A
size (N/blocksize, blocksize, blocksize)
blocksize : int
Desired dimension of blocks
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import block_gauss_seidel
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> block_gauss_seidel(A, x0, b, iterations=10, blocksize=4,
sweep='symmetric')
>>> print norm(b-A*x0)
0.958333817624
>>> #
>>> # Use Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'sweep':'symmetric', 'blocksize' : 4}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('block_gauss_seidel', opts),
... postsmoother=('block_gauss_seidel', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
A = A.tobsr(blocksize=(blocksize, blocksize))
if Dinv is None:
Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
elif Dinv.shape[0] != int(A.shape[0]/blocksize):
raise ValueError('Dinv and A have incompatible dimensions')
elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
raise ValueError('Dinv and blocksize are incompatible')
if sweep == 'forward':
row_start, row_stop, row_step = 0, int(len(x)/blocksize), 1
elif sweep == 'backward':
row_start, row_stop, row_step = int(len(x)/blocksize)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
block_gauss_seidel(A, x, b, iterations=1, sweep='forward',
blocksize=blocksize, Dinv=Dinv)
block_gauss_seidel(A, x, b, iterations=1, sweep='backward',
blocksize=blocksize, Dinv=Dinv)
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
for iter in range(iterations):
amg_core.block_gauss_seidel(A.indptr, A.indices, np.ravel(A.data),
x, b, np.ravel(Dinv),
row_start, row_stop, row_step, blocksize) | python | def block_gauss_seidel(A, x, b, iterations=1, sweep='forward', blocksize=1,
Dinv=None):
"""Perform block Gauss-Seidel iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Dinv : array
Array holding block diagonal inverses of A
size (N/blocksize, blocksize, blocksize)
blocksize : int
Desired dimension of blocks
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import block_gauss_seidel
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> block_gauss_seidel(A, x0, b, iterations=10, blocksize=4,
sweep='symmetric')
>>> print norm(b-A*x0)
0.958333817624
>>> #
>>> # Use Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'sweep':'symmetric', 'blocksize' : 4}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('block_gauss_seidel', opts),
... postsmoother=('block_gauss_seidel', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr', 'bsr'])
A = A.tobsr(blocksize=(blocksize, blocksize))
if Dinv is None:
Dinv = get_block_diag(A, blocksize=blocksize, inv_flag=True)
elif Dinv.shape[0] != int(A.shape[0]/blocksize):
raise ValueError('Dinv and A have incompatible dimensions')
elif (Dinv.shape[1] != blocksize) or (Dinv.shape[2] != blocksize):
raise ValueError('Dinv and blocksize are incompatible')
if sweep == 'forward':
row_start, row_stop, row_step = 0, int(len(x)/blocksize), 1
elif sweep == 'backward':
row_start, row_stop, row_step = int(len(x)/blocksize)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
block_gauss_seidel(A, x, b, iterations=1, sweep='forward',
blocksize=blocksize, Dinv=Dinv)
block_gauss_seidel(A, x, b, iterations=1, sweep='backward',
blocksize=blocksize, Dinv=Dinv)
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
for iter in range(iterations):
amg_core.block_gauss_seidel(A.indptr, A.indices, np.ravel(A.data),
x, b, np.ravel(Dinv),
row_start, row_stop, row_step, blocksize) | [
"def",
"block_gauss_seidel",
"(",
"A",
",",
"x",
",",
"b",
",",
"iterations",
"=",
"1",
",",
"sweep",
"=",
"'forward'",
",",
"blocksize",
"=",
"1",
",",
"Dinv",
"=",
"None",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
... | Perform block Gauss-Seidel iteration on the linear system Ax=b.
Parameters
----------
A : csr_matrix, bsr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Dinv : array
Array holding block diagonal inverses of A
size (N/blocksize, blocksize, blocksize)
blocksize : int
Desired dimension of blocks
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> # Use Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import block_gauss_seidel
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> block_gauss_seidel(A, x0, b, iterations=10, blocksize=4,
sweep='symmetric')
>>> print norm(b-A*x0)
0.958333817624
>>> #
>>> # Use Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'sweep':'symmetric', 'blocksize' : 4}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('block_gauss_seidel', opts),
... postsmoother=('block_gauss_seidel', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"block",
"Gauss",
"-",
"Seidel",
"iteration",
"on",
"the",
"linear",
"system",
"Ax",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L511-L593 | train | 209,247 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | polynomial | def polynomial(A, x, b, coefficients, iterations=1):
"""Apply a polynomial smoother to the system Ax=b.
Parameters
----------
A : sparse matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
coefficients : array_like
Coefficients of the polynomial. See Notes section for details.
iterations : int
Number of iterations to perform
Returns
-------
Nothing, x will be modified in place.
Notes
-----
The smoother has the form x[:] = x + p(A) (b - A*x) where p(A) is a
polynomial in A whose scalar coefficients are specified (in descending
order) by argument 'coefficients'.
- Richardson iteration p(A) = c_0:
polynomial_smoother(A, x, b, [c_0])
- Linear smoother p(A) = c_1*A + c_0:
polynomial_smoother(A, x, b, [c_1, c_0])
- Quadratic smoother p(A) = c_2*A^2 + c_1*A + c_0:
polynomial_smoother(A, x, b, [c_2, c_1, c_0])
Here, Horner's Rule is applied to avoid computing A^k directly.
For efficience, the method detects the case x = 0 one matrix-vector
product is avoided (since (b - A*x) is b).
Examples
--------
>>> # The polynomial smoother is not currently used directly
>>> # in PyAMG. It is only used by the chebyshev smoothing option,
>>> # which automatically calculates the correct coefficients.
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.aggregation import smoothed_aggregation_solver
>>> A = poisson((10,10), format='csr')
>>> b = np.ones((A.shape[0],1))
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('chebyshev', {'degree':3, 'iterations':1}),
... postsmoother=('chebyshev', {'degree':3, 'iterations':1}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=None)
for i in range(iterations):
from pyamg.util.linalg import norm
if norm(x) == 0:
residual = b
else:
residual = (b - A*x)
h = coefficients[0]*residual
for c in coefficients[1:]:
h = c*residual + A*h
x += h | python | def polynomial(A, x, b, coefficients, iterations=1):
"""Apply a polynomial smoother to the system Ax=b.
Parameters
----------
A : sparse matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
coefficients : array_like
Coefficients of the polynomial. See Notes section for details.
iterations : int
Number of iterations to perform
Returns
-------
Nothing, x will be modified in place.
Notes
-----
The smoother has the form x[:] = x + p(A) (b - A*x) where p(A) is a
polynomial in A whose scalar coefficients are specified (in descending
order) by argument 'coefficients'.
- Richardson iteration p(A) = c_0:
polynomial_smoother(A, x, b, [c_0])
- Linear smoother p(A) = c_1*A + c_0:
polynomial_smoother(A, x, b, [c_1, c_0])
- Quadratic smoother p(A) = c_2*A^2 + c_1*A + c_0:
polynomial_smoother(A, x, b, [c_2, c_1, c_0])
Here, Horner's Rule is applied to avoid computing A^k directly.
For efficience, the method detects the case x = 0 one matrix-vector
product is avoided (since (b - A*x) is b).
Examples
--------
>>> # The polynomial smoother is not currently used directly
>>> # in PyAMG. It is only used by the chebyshev smoothing option,
>>> # which automatically calculates the correct coefficients.
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.aggregation import smoothed_aggregation_solver
>>> A = poisson((10,10), format='csr')
>>> b = np.ones((A.shape[0],1))
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('chebyshev', {'degree':3, 'iterations':1}),
... postsmoother=('chebyshev', {'degree':3, 'iterations':1}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=None)
for i in range(iterations):
from pyamg.util.linalg import norm
if norm(x) == 0:
residual = b
else:
residual = (b - A*x)
h = coefficients[0]*residual
for c in coefficients[1:]:
h = c*residual + A*h
x += h | [
"def",
"polynomial",
"(",
"A",
",",
"x",
",",
"b",
",",
"coefficients",
",",
"iterations",
"=",
"1",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
",",
"b",
",",
"formats",
"=",
"None",
")",
"for",
"i",
"in",
"ran... | Apply a polynomial smoother to the system Ax=b.
Parameters
----------
A : sparse matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
coefficients : array_like
Coefficients of the polynomial. See Notes section for details.
iterations : int
Number of iterations to perform
Returns
-------
Nothing, x will be modified in place.
Notes
-----
The smoother has the form x[:] = x + p(A) (b - A*x) where p(A) is a
polynomial in A whose scalar coefficients are specified (in descending
order) by argument 'coefficients'.
- Richardson iteration p(A) = c_0:
polynomial_smoother(A, x, b, [c_0])
- Linear smoother p(A) = c_1*A + c_0:
polynomial_smoother(A, x, b, [c_1, c_0])
- Quadratic smoother p(A) = c_2*A^2 + c_1*A + c_0:
polynomial_smoother(A, x, b, [c_2, c_1, c_0])
Here, Horner's Rule is applied to avoid computing A^k directly.
For efficience, the method detects the case x = 0 one matrix-vector
product is avoided (since (b - A*x) is b).
Examples
--------
>>> # The polynomial smoother is not currently used directly
>>> # in PyAMG. It is only used by the chebyshev smoothing option,
>>> # which automatically calculates the correct coefficients.
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.aggregation import smoothed_aggregation_solver
>>> A = poisson((10,10), format='csr')
>>> b = np.ones((A.shape[0],1))
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('chebyshev', {'degree':3, 'iterations':1}),
... postsmoother=('chebyshev', {'degree':3, 'iterations':1}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Apply",
"a",
"polynomial",
"smoother",
"to",
"the",
"system",
"Ax",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L596-L671 | train | 209,248 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | gauss_seidel_indexed | def gauss_seidel_indexed(A, x, b, indices, iterations=1, sweep='forward'):
"""Perform indexed Gauss-Seidel iteration on the linear system Ax=b.
In indexed Gauss-Seidel, the sequence in which unknowns are relaxed is
specified explicitly. In contrast, the standard Gauss-Seidel method
always performs complete sweeps of all variables in increasing or
decreasing order. The indexed method may be used to implement
specialized smoothers, like F-smoothing in Classical AMG.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
indices : ndarray
Row indices to relax.
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.relaxation.relaxation import gauss_seidel_indexed
>>> import numpy as np
>>> A = poisson((4,), format='csr')
>>> x = np.array([0.0, 0.0, 0.0, 0.0])
>>> b = np.array([0.0, 1.0, 2.0, 3.0])
>>> gauss_seidel_indexed(A, x, b, [0,1,2,3]) # relax all rows in order
>>> gauss_seidel_indexed(A, x, b, [0,1]) # relax first two rows
>>> gauss_seidel_indexed(A, x, b, [2,0]) # relax row 2, then row 0
>>> gauss_seidel_indexed(A, x, b, [2,3], sweep='backward') # 3, then 2
>>> gauss_seidel_indexed(A, x, b, [2,0,2]) # relax row 2, 0, 2
"""
A, x, b = make_system(A, x, b, formats=['csr'])
indices = np.asarray(indices, dtype='intc')
# if indices.min() < 0:
# raise ValueError('row index (%d) is invalid' % indices.min())
# if indices.max() >= A.shape[0]
# raise ValueError('row index (%d) is invalid' % indices.max())
if sweep == 'forward':
row_start, row_stop, row_step = 0, len(indices), 1
elif sweep == 'backward':
row_start, row_stop, row_step = len(indices)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
gauss_seidel_indexed(A, x, b, indices, iterations=1,
sweep='forward')
gauss_seidel_indexed(A, x, b, indices, iterations=1,
sweep='backward')
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
for iter in range(iterations):
amg_core.gauss_seidel_indexed(A.indptr, A.indices, A.data,
x, b, indices,
row_start, row_stop, row_step) | python | def gauss_seidel_indexed(A, x, b, indices, iterations=1, sweep='forward'):
"""Perform indexed Gauss-Seidel iteration on the linear system Ax=b.
In indexed Gauss-Seidel, the sequence in which unknowns are relaxed is
specified explicitly. In contrast, the standard Gauss-Seidel method
always performs complete sweeps of all variables in increasing or
decreasing order. The indexed method may be used to implement
specialized smoothers, like F-smoothing in Classical AMG.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
indices : ndarray
Row indices to relax.
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.relaxation.relaxation import gauss_seidel_indexed
>>> import numpy as np
>>> A = poisson((4,), format='csr')
>>> x = np.array([0.0, 0.0, 0.0, 0.0])
>>> b = np.array([0.0, 1.0, 2.0, 3.0])
>>> gauss_seidel_indexed(A, x, b, [0,1,2,3]) # relax all rows in order
>>> gauss_seidel_indexed(A, x, b, [0,1]) # relax first two rows
>>> gauss_seidel_indexed(A, x, b, [2,0]) # relax row 2, then row 0
>>> gauss_seidel_indexed(A, x, b, [2,3], sweep='backward') # 3, then 2
>>> gauss_seidel_indexed(A, x, b, [2,0,2]) # relax row 2, 0, 2
"""
A, x, b = make_system(A, x, b, formats=['csr'])
indices = np.asarray(indices, dtype='intc')
# if indices.min() < 0:
# raise ValueError('row index (%d) is invalid' % indices.min())
# if indices.max() >= A.shape[0]
# raise ValueError('row index (%d) is invalid' % indices.max())
if sweep == 'forward':
row_start, row_stop, row_step = 0, len(indices), 1
elif sweep == 'backward':
row_start, row_stop, row_step = len(indices)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
gauss_seidel_indexed(A, x, b, indices, iterations=1,
sweep='forward')
gauss_seidel_indexed(A, x, b, indices, iterations=1,
sweep='backward')
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
for iter in range(iterations):
amg_core.gauss_seidel_indexed(A.indptr, A.indices, A.data,
x, b, indices,
row_start, row_stop, row_step) | [
"def",
"gauss_seidel_indexed",
"(",
"A",
",",
"x",
",",
"b",
",",
"indices",
",",
"iterations",
"=",
"1",
",",
"sweep",
"=",
"'forward'",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
",",
"b",
",",
"formats",
"=",
... | Perform indexed Gauss-Seidel iteration on the linear system Ax=b.
In indexed Gauss-Seidel, the sequence in which unknowns are relaxed is
specified explicitly. In contrast, the standard Gauss-Seidel method
always performs complete sweeps of all variables in increasing or
decreasing order. The indexed method may be used to implement
specialized smoothers, like F-smoothing in Classical AMG.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
indices : ndarray
Row indices to relax.
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
Returns
-------
Nothing, x will be modified in place.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.relaxation.relaxation import gauss_seidel_indexed
>>> import numpy as np
>>> A = poisson((4,), format='csr')
>>> x = np.array([0.0, 0.0, 0.0, 0.0])
>>> b = np.array([0.0, 1.0, 2.0, 3.0])
>>> gauss_seidel_indexed(A, x, b, [0,1,2,3]) # relax all rows in order
>>> gauss_seidel_indexed(A, x, b, [0,1]) # relax first two rows
>>> gauss_seidel_indexed(A, x, b, [2,0]) # relax row 2, then row 0
>>> gauss_seidel_indexed(A, x, b, [2,3], sweep='backward') # 3, then 2
>>> gauss_seidel_indexed(A, x, b, [2,0,2]) # relax row 2, 0, 2 | [
"Perform",
"indexed",
"Gauss",
"-",
"Seidel",
"iteration",
"on",
"the",
"linear",
"system",
"Ax",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L674-L744 | train | 209,249 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | jacobi_ne | def jacobi_ne(A, x, b, iterations=1, omega=1.0):
"""Perform Jacobi iterations on the linear system A A.H x = A.H b.
Also known as Cimmino relaxation
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Brandt, Ta'asan.
"Multigrid Method For Nearly Singular And Slightly Indefinite Problems."
1985. NASA Technical Report Numbers: ICASE-85-57; NAS 1.26:178026;
NASA-CR-178026;
.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937
.. [3] Cimmino. La ricerca scientifica ser. II 1.
Pubbliz. dell'Inst. pre le Appl. del Calculo 34, 326-333, 1938.
Examples
--------
>>> # Use NE Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import jacobi_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((50,50), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> jacobi_ne(A, x0, b, iterations=10, omega=2.0/3.0)
>>> print norm(b-A*x0)
49.3886046066
>>> #
>>> # Use NE Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'iterations' : 2, 'omega' : 4.0/3.0}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('jacobi_ne', opts),
... postsmoother=('jacobi_ne', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr'])
sweep = slice(None)
(row_start, row_stop, row_step) = sweep.indices(A.shape[0])
temp = np.zeros_like(x)
# Dinv for A*A.H
Dinv = get_diagonal(A, norm_eq=2, inv=True)
# Create uniform type, convert possibly complex scalars to length 1 arrays
[omega] = type_prep(A.dtype, [omega])
for i in range(iterations):
delta = (np.ravel(b - A*x)*np.ravel(Dinv)).astype(A.dtype)
amg_core.jacobi_ne(A.indptr, A.indices, A.data,
x, b, delta, temp, row_start,
row_stop, row_step, omega) | python | def jacobi_ne(A, x, b, iterations=1, omega=1.0):
"""Perform Jacobi iterations on the linear system A A.H x = A.H b.
Also known as Cimmino relaxation
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Brandt, Ta'asan.
"Multigrid Method For Nearly Singular And Slightly Indefinite Problems."
1985. NASA Technical Report Numbers: ICASE-85-57; NAS 1.26:178026;
NASA-CR-178026;
.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937
.. [3] Cimmino. La ricerca scientifica ser. II 1.
Pubbliz. dell'Inst. pre le Appl. del Calculo 34, 326-333, 1938.
Examples
--------
>>> # Use NE Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import jacobi_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((50,50), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> jacobi_ne(A, x0, b, iterations=10, omega=2.0/3.0)
>>> print norm(b-A*x0)
49.3886046066
>>> #
>>> # Use NE Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'iterations' : 2, 'omega' : 4.0/3.0}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('jacobi_ne', opts),
... postsmoother=('jacobi_ne', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr'])
sweep = slice(None)
(row_start, row_stop, row_step) = sweep.indices(A.shape[0])
temp = np.zeros_like(x)
# Dinv for A*A.H
Dinv = get_diagonal(A, norm_eq=2, inv=True)
# Create uniform type, convert possibly complex scalars to length 1 arrays
[omega] = type_prep(A.dtype, [omega])
for i in range(iterations):
delta = (np.ravel(b - A*x)*np.ravel(Dinv)).astype(A.dtype)
amg_core.jacobi_ne(A.indptr, A.indices, A.data,
x, b, delta, temp, row_start,
row_stop, row_step, omega) | [
"def",
"jacobi_ne",
"(",
"A",
",",
"x",
",",
"b",
",",
"iterations",
"=",
"1",
",",
"omega",
"=",
"1.0",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
",",
"b",
",",
"formats",
"=",
"[",
"'csr'",
"]",
")",
"swee... | Perform Jacobi iterations on the linear system A A.H x = A.H b.
Also known as Cimmino relaxation
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
omega : scalar
Damping parameter
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Brandt, Ta'asan.
"Multigrid Method For Nearly Singular And Slightly Indefinite Problems."
1985. NASA Technical Report Numbers: ICASE-85-57; NAS 1.26:178026;
NASA-CR-178026;
.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937
.. [3] Cimmino. La ricerca scientifica ser. II 1.
Pubbliz. dell'Inst. pre le Appl. del Calculo 34, 326-333, 1938.
Examples
--------
>>> # Use NE Jacobi as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import jacobi_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((50,50), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> jacobi_ne(A, x0, b, iterations=10, omega=2.0/3.0)
>>> print norm(b-A*x0)
49.3886046066
>>> #
>>> # Use NE Jacobi as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> opts = {'iterations' : 2, 'omega' : 4.0/3.0}
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('jacobi_ne', opts),
... postsmoother=('jacobi_ne', opts))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"Jacobi",
"iterations",
"on",
"the",
"linear",
"system",
"A",
"A",
".",
"H",
"x",
"=",
"A",
".",
"H",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L747-L825 | train | 209,250 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | gauss_seidel_ne | def gauss_seidel_ne(A, x, b, iterations=1, sweep='forward', omega=1.0,
Dinv=None):
"""Perform Gauss-Seidel iterations on the linear system A A.H x = b.
Also known as Kaczmarz relaxation
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
omega : float
Relaxation parameter typically in (0, 2)
if omega != 1.0, then algorithm becomes SOR on A A.H
Dinv : ndarray
Inverse of diag(A A.H), (length N)
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Brandt, Ta'asan.
"Multigrid Method For Nearly Singular And Slightly Indefinite Problems."
1985. NASA Technical Report Numbers: ICASE-85-57; NAS 1.26:178026;
NASA-CR-178026;
.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937
Examples
--------
>>> # Use NE Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel_ne(A, x0, b, iterations=10, sweep='symmetric')
>>> print norm(b-A*x0)
8.47576806771
>>> #
>>> # Use NE Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('gauss_seidel_ne', {'sweep' : 'symmetric'}),
... postsmoother=('gauss_seidel_ne', {'sweep' : 'symmetric'}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr'])
# Dinv for A*A.H
if Dinv is None:
Dinv = np.ravel(get_diagonal(A, norm_eq=2, inv=True))
if sweep == 'forward':
row_start, row_stop, row_step = 0, len(x), 1
elif sweep == 'backward':
row_start, row_stop, row_step = len(x)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
gauss_seidel_ne(A, x, b, iterations=1, sweep='forward',
omega=omega, Dinv=Dinv)
gauss_seidel_ne(A, x, b, iterations=1, sweep='backward',
omega=omega, Dinv=Dinv)
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
for i in range(iterations):
amg_core.gauss_seidel_ne(A.indptr, A.indices, A.data,
x, b, row_start,
row_stop, row_step, Dinv, omega) | python | def gauss_seidel_ne(A, x, b, iterations=1, sweep='forward', omega=1.0,
Dinv=None):
"""Perform Gauss-Seidel iterations on the linear system A A.H x = b.
Also known as Kaczmarz relaxation
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
omega : float
Relaxation parameter typically in (0, 2)
if omega != 1.0, then algorithm becomes SOR on A A.H
Dinv : ndarray
Inverse of diag(A A.H), (length N)
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Brandt, Ta'asan.
"Multigrid Method For Nearly Singular And Slightly Indefinite Problems."
1985. NASA Technical Report Numbers: ICASE-85-57; NAS 1.26:178026;
NASA-CR-178026;
.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937
Examples
--------
>>> # Use NE Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel_ne(A, x0, b, iterations=10, sweep='symmetric')
>>> print norm(b-A*x0)
8.47576806771
>>> #
>>> # Use NE Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('gauss_seidel_ne', {'sweep' : 'symmetric'}),
... postsmoother=('gauss_seidel_ne', {'sweep' : 'symmetric'}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csr'])
# Dinv for A*A.H
if Dinv is None:
Dinv = np.ravel(get_diagonal(A, norm_eq=2, inv=True))
if sweep == 'forward':
row_start, row_stop, row_step = 0, len(x), 1
elif sweep == 'backward':
row_start, row_stop, row_step = len(x)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
gauss_seidel_ne(A, x, b, iterations=1, sweep='forward',
omega=omega, Dinv=Dinv)
gauss_seidel_ne(A, x, b, iterations=1, sweep='backward',
omega=omega, Dinv=Dinv)
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
for i in range(iterations):
amg_core.gauss_seidel_ne(A.indptr, A.indices, A.data,
x, b, row_start,
row_stop, row_step, Dinv, omega) | [
"def",
"gauss_seidel_ne",
"(",
"A",
",",
"x",
",",
"b",
",",
"iterations",
"=",
"1",
",",
"sweep",
"=",
"'forward'",
",",
"omega",
"=",
"1.0",
",",
"Dinv",
"=",
"None",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
... | Perform Gauss-Seidel iterations on the linear system A A.H x = b.
Also known as Kaczmarz relaxation
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
omega : float
Relaxation parameter typically in (0, 2)
if omega != 1.0, then algorithm becomes SOR on A A.H
Dinv : ndarray
Inverse of diag(A A.H), (length N)
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Brandt, Ta'asan.
"Multigrid Method For Nearly Singular And Slightly Indefinite Problems."
1985. NASA Technical Report Numbers: ICASE-85-57; NAS 1.26:178026;
NASA-CR-178026;
.. [2] Kaczmarz. Angenaeherte Aufloesung von Systemen Linearer
Gleichungen. Bull. Acad. Polon. Sci. Lett. A 35, 355-57. 1937
Examples
--------
>>> # Use NE Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel_ne
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel_ne(A, x0, b, iterations=10, sweep='symmetric')
>>> print norm(b-A*x0)
8.47576806771
>>> #
>>> # Use NE Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('gauss_seidel_ne', {'sweep' : 'symmetric'}),
... postsmoother=('gauss_seidel_ne', {'sweep' : 'symmetric'}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"Gauss",
"-",
"Seidel",
"iterations",
"on",
"the",
"linear",
"system",
"A",
"A",
".",
"H",
"x",
"=",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L828-L915 | train | 209,251 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | gauss_seidel_nr | def gauss_seidel_nr(A, x, b, iterations=1, sweep='forward', omega=1.0,
Dinv=None):
"""Perform Gauss-Seidel iterations on the linear system A.H A x = A.H b.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
omega : float
Relaxation parameter typically in (0, 2)
if omega != 1.0, then algorithm becomes SOR on A.H A
Dinv : ndarray
Inverse of diag(A.H A), (length N)
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 247-9, 2003
http://www-users.cs.umn.edu/~saad/books.html
Examples
--------
>>> # Use NR Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel_nr
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel_nr(A, x0, b, iterations=10, sweep='symmetric')
>>> print norm(b-A*x0)
8.45044864352
>>> #
>>> # Use NR Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('gauss_seidel_nr', {'sweep' : 'symmetric'}),
... postsmoother=('gauss_seidel_nr', {'sweep' : 'symmetric'}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csc'])
# Dinv for A.H*A
if Dinv is None:
Dinv = np.ravel(get_diagonal(A, norm_eq=1, inv=True))
if sweep == 'forward':
col_start, col_stop, col_step = 0, len(x), 1
elif sweep == 'backward':
col_start, col_stop, col_step = len(x)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
gauss_seidel_nr(A, x, b, iterations=1, sweep='forward',
omega=omega, Dinv=Dinv)
gauss_seidel_nr(A, x, b, iterations=1, sweep='backward',
omega=omega, Dinv=Dinv)
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
# Calculate initial residual
r = b - A*x
for i in range(iterations):
amg_core.gauss_seidel_nr(A.indptr, A.indices, A.data,
x, r, col_start,
col_stop, col_step, Dinv, omega) | python | def gauss_seidel_nr(A, x, b, iterations=1, sweep='forward', omega=1.0,
Dinv=None):
"""Perform Gauss-Seidel iterations on the linear system A.H A x = A.H b.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
omega : float
Relaxation parameter typically in (0, 2)
if omega != 1.0, then algorithm becomes SOR on A.H A
Dinv : ndarray
Inverse of diag(A.H A), (length N)
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 247-9, 2003
http://www-users.cs.umn.edu/~saad/books.html
Examples
--------
>>> # Use NR Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel_nr
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel_nr(A, x0, b, iterations=10, sweep='symmetric')
>>> print norm(b-A*x0)
8.45044864352
>>> #
>>> # Use NR Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('gauss_seidel_nr', {'sweep' : 'symmetric'}),
... postsmoother=('gauss_seidel_nr', {'sweep' : 'symmetric'}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals)
"""
A, x, b = make_system(A, x, b, formats=['csc'])
# Dinv for A.H*A
if Dinv is None:
Dinv = np.ravel(get_diagonal(A, norm_eq=1, inv=True))
if sweep == 'forward':
col_start, col_stop, col_step = 0, len(x), 1
elif sweep == 'backward':
col_start, col_stop, col_step = len(x)-1, -1, -1
elif sweep == 'symmetric':
for iter in range(iterations):
gauss_seidel_nr(A, x, b, iterations=1, sweep='forward',
omega=omega, Dinv=Dinv)
gauss_seidel_nr(A, x, b, iterations=1, sweep='backward',
omega=omega, Dinv=Dinv)
return
else:
raise ValueError("valid sweep directions are 'forward',\
'backward', and 'symmetric'")
# Calculate initial residual
r = b - A*x
for i in range(iterations):
amg_core.gauss_seidel_nr(A.indptr, A.indices, A.data,
x, r, col_start,
col_stop, col_step, Dinv, omega) | [
"def",
"gauss_seidel_nr",
"(",
"A",
",",
"x",
",",
"b",
",",
"iterations",
"=",
"1",
",",
"sweep",
"=",
"'forward'",
",",
"omega",
"=",
"1.0",
",",
"Dinv",
"=",
"None",
")",
":",
"A",
",",
"x",
",",
"b",
"=",
"make_system",
"(",
"A",
",",
"x",
... | Perform Gauss-Seidel iterations on the linear system A.H A x = A.H b.
Parameters
----------
A : csr_matrix
Sparse NxN matrix
x : ndarray
Approximate solution (length N)
b : ndarray
Right-hand side (length N)
iterations : int
Number of iterations to perform
sweep : {'forward','backward','symmetric'}
Direction of sweep
omega : float
Relaxation parameter typically in (0, 2)
if omega != 1.0, then algorithm becomes SOR on A.H A
Dinv : ndarray
Inverse of diag(A.H A), (length N)
Returns
-------
Nothing, x will be modified in place.
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 247-9, 2003
http://www-users.cs.umn.edu/~saad/books.html
Examples
--------
>>> # Use NR Gauss-Seidel as a Stand-Alone Solver
>>> from pyamg.relaxation.relaxation import gauss_seidel_nr
>>> from pyamg.gallery import poisson
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> A = poisson((10,10), format='csr')
>>> x0 = np.zeros((A.shape[0],1))
>>> b = np.ones((A.shape[0],1))
>>> gauss_seidel_nr(A, x0, b, iterations=10, sweep='symmetric')
>>> print norm(b-A*x0)
8.45044864352
>>> #
>>> # Use NR Gauss-Seidel as the Multigrid Smoother
>>> from pyamg import smoothed_aggregation_solver
>>> sa = smoothed_aggregation_solver(A, B=np.ones((A.shape[0],1)),
... coarse_solver='pinv2', max_coarse=50,
... presmoother=('gauss_seidel_nr', {'sweep' : 'symmetric'}),
... postsmoother=('gauss_seidel_nr', {'sweep' : 'symmetric'}))
>>> x0=np.zeros((A.shape[0],1))
>>> residuals=[]
>>> x = sa.solve(b, x0=x0, tol=1e-8, residuals=residuals) | [
"Perform",
"Gauss",
"-",
"Seidel",
"iterations",
"on",
"the",
"linear",
"system",
"A",
".",
"H",
"A",
"x",
"=",
"A",
".",
"H",
"b",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L918-L1003 | train | 209,252 |
pyamg/pyamg | pyamg/relaxation/relaxation.py | schwarz_parameters | def schwarz_parameters(A, subdomain=None, subdomain_ptr=None,
inv_subblock=None, inv_subblock_ptr=None):
"""Set Schwarz parameters.
Helper function for setting up Schwarz relaxation. This function avoids
recomputing the subdomains and block inverses manytimes, e.g., it avoids a
costly double computation when setting up pre and post smoothing with
Schwarz.
Parameters
----------
A {csr_matrix}
Returns
-------
A.schwarz_parameters[0] is subdomain
A.schwarz_parameters[1] is subdomain_ptr
A.schwarz_parameters[2] is inv_subblock
A.schwarz_parameters[3] is inv_subblock_ptr
"""
# Check if A has a pre-existing set of Schwarz parameters
if hasattr(A, 'schwarz_parameters'):
if subdomain is not None and subdomain_ptr is not None:
# check that the existing parameters correspond to the same
# subdomains
if np.array(A.schwarz_parameters[0] == subdomain).all() and \
np.array(A.schwarz_parameters[1] == subdomain_ptr).all():
return A.schwarz_parameters
else:
return A.schwarz_parameters
# Default is to use the overlapping regions defined by A's sparsity pattern
if subdomain is None or subdomain_ptr is None:
subdomain_ptr = A.indptr.copy()
subdomain = A.indices.copy()
# Extract each subdomain's block from the matrix
if inv_subblock is None or inv_subblock_ptr is None:
inv_subblock_ptr = np.zeros(subdomain_ptr.shape,
dtype=A.indices.dtype)
blocksize = (subdomain_ptr[1:] - subdomain_ptr[:-1])
inv_subblock_ptr[1:] = np.cumsum(blocksize*blocksize)
# Extract each block column from A
inv_subblock = np.zeros((inv_subblock_ptr[-1],), dtype=A.dtype)
amg_core.extract_subblocks(A.indptr, A.indices, A.data, inv_subblock,
inv_subblock_ptr, subdomain, subdomain_ptr,
int(subdomain_ptr.shape[0]-1), A.shape[0])
# Choose tolerance for which singular values are zero in *gelss below
t = A.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
cond = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]
# Invert each block column
my_pinv, = la.get_lapack_funcs(['gelss'],
(np.ones((1,), dtype=A.dtype)))
for i in range(subdomain_ptr.shape[0]-1):
m = blocksize[i]
rhs = sp.eye(m, m, dtype=A.dtype)
j0 = inv_subblock_ptr[i]
j1 = inv_subblock_ptr[i+1]
gelssoutput = my_pinv(inv_subblock[j0:j1].reshape(m, m),
rhs, cond=cond, overwrite_a=True,
overwrite_b=True)
inv_subblock[j0:j1] = np.ravel(gelssoutput[1])
A.schwarz_parameters = (subdomain, subdomain_ptr, inv_subblock,
inv_subblock_ptr)
return A.schwarz_parameters | python | def schwarz_parameters(A, subdomain=None, subdomain_ptr=None,
inv_subblock=None, inv_subblock_ptr=None):
"""Set Schwarz parameters.
Helper function for setting up Schwarz relaxation. This function avoids
recomputing the subdomains and block inverses manytimes, e.g., it avoids a
costly double computation when setting up pre and post smoothing with
Schwarz.
Parameters
----------
A {csr_matrix}
Returns
-------
A.schwarz_parameters[0] is subdomain
A.schwarz_parameters[1] is subdomain_ptr
A.schwarz_parameters[2] is inv_subblock
A.schwarz_parameters[3] is inv_subblock_ptr
"""
# Check if A has a pre-existing set of Schwarz parameters
if hasattr(A, 'schwarz_parameters'):
if subdomain is not None and subdomain_ptr is not None:
# check that the existing parameters correspond to the same
# subdomains
if np.array(A.schwarz_parameters[0] == subdomain).all() and \
np.array(A.schwarz_parameters[1] == subdomain_ptr).all():
return A.schwarz_parameters
else:
return A.schwarz_parameters
# Default is to use the overlapping regions defined by A's sparsity pattern
if subdomain is None or subdomain_ptr is None:
subdomain_ptr = A.indptr.copy()
subdomain = A.indices.copy()
# Extract each subdomain's block from the matrix
if inv_subblock is None or inv_subblock_ptr is None:
inv_subblock_ptr = np.zeros(subdomain_ptr.shape,
dtype=A.indices.dtype)
blocksize = (subdomain_ptr[1:] - subdomain_ptr[:-1])
inv_subblock_ptr[1:] = np.cumsum(blocksize*blocksize)
# Extract each block column from A
inv_subblock = np.zeros((inv_subblock_ptr[-1],), dtype=A.dtype)
amg_core.extract_subblocks(A.indptr, A.indices, A.data, inv_subblock,
inv_subblock_ptr, subdomain, subdomain_ptr,
int(subdomain_ptr.shape[0]-1), A.shape[0])
# Choose tolerance for which singular values are zero in *gelss below
t = A.dtype.char
eps = np.finfo(np.float).eps
feps = np.finfo(np.single).eps
geps = np.finfo(np.longfloat).eps
_array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
cond = {0: feps*1e3, 1: eps*1e6, 2: geps*1e6}[_array_precision[t]]
# Invert each block column
my_pinv, = la.get_lapack_funcs(['gelss'],
(np.ones((1,), dtype=A.dtype)))
for i in range(subdomain_ptr.shape[0]-1):
m = blocksize[i]
rhs = sp.eye(m, m, dtype=A.dtype)
j0 = inv_subblock_ptr[i]
j1 = inv_subblock_ptr[i+1]
gelssoutput = my_pinv(inv_subblock[j0:j1].reshape(m, m),
rhs, cond=cond, overwrite_a=True,
overwrite_b=True)
inv_subblock[j0:j1] = np.ravel(gelssoutput[1])
A.schwarz_parameters = (subdomain, subdomain_ptr, inv_subblock,
inv_subblock_ptr)
return A.schwarz_parameters | [
"def",
"schwarz_parameters",
"(",
"A",
",",
"subdomain",
"=",
"None",
",",
"subdomain_ptr",
"=",
"None",
",",
"inv_subblock",
"=",
"None",
",",
"inv_subblock_ptr",
"=",
"None",
")",
":",
"# Check if A has a pre-existing set of Schwarz parameters",
"if",
"hasattr",
"... | Set Schwarz parameters.
Helper function for setting up Schwarz relaxation. This function avoids
recomputing the subdomains and block inverses manytimes, e.g., it avoids a
costly double computation when setting up pre and post smoothing with
Schwarz.
Parameters
----------
A {csr_matrix}
Returns
-------
A.schwarz_parameters[0] is subdomain
A.schwarz_parameters[1] is subdomain_ptr
A.schwarz_parameters[2] is inv_subblock
A.schwarz_parameters[3] is inv_subblock_ptr | [
"Set",
"Schwarz",
"parameters",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/relaxation/relaxation.py#L1017-L1089 | train | 209,253 |
pyamg/pyamg | pyamg/krylov/_cg.py | cg | def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Conjugate Gradient algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cg
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The residual in the preconditioner norm is both used for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov.cg import cg
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = cg(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
10.9370700187
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 262-67, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._cg')
# determine maxiter
if maxiter is None:
maxiter = int(1.3*len(b)) + 2
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# choose tolerance for numerically zero values
# t = A.dtype.char
# eps = np.finfo(np.float).eps
# feps = np.finfo(np.single).eps
# geps = np.finfo(np.longfloat).eps
# _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
# numerically_zero = {0: feps*1e3, 1: eps*1e6,
# 2: geps*1e6}[_array_precision[t]]
# setup method
r = b - A*x
z = M*r
p = z.copy()
rz = np.inner(r.conjugate(), z)
# use preconditioner norm
normr = np.sqrt(rz)
if residuals is not None:
residuals[:] = [normr] # initial residual
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_M
if normr != 0.0:
tol = tol*normr
# How often should r be recomputed
recompute_r = 8
iter = 0
while True:
Ap = A*p
rz_old = rz
# Step number in Saad's pseudocode
pAp = np.inner(Ap.conjugate(), p) # check curvature of A
if pAp < 0.0:
warn("\nIndefinite matrix detected in CG, aborting\n")
return (postprocess(x), -1)
alpha = rz/pAp # 3
x += alpha * p # 4
if np.mod(iter, recompute_r) and iter > 0: # 5
r -= alpha * Ap
else:
r = b - A*x
z = M*r # 6
rz = np.inner(r.conjugate(), z)
if rz < 0.0: # check curvature of M
warn("\nIndefinite preconditioner detected in CG, aborting\n")
return (postprocess(x), -1)
beta = rz/rz_old # 7
p *= beta # 8
p += z
iter += 1
normr = np.sqrt(rz) # use preconditioner norm
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
elif rz == 0.0:
# important to test after testing normr < tol. rz == 0.0 is an
# indicator of convergence when r = 0.0
warn("\nSingular preconditioner detected in CG, ceasing \
iterations\n")
return (postprocess(x), -1)
if iter == maxiter:
return (postprocess(x), iter) | python | def cg(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Conjugate Gradient algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cg
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The residual in the preconditioner norm is both used for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov.cg import cg
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = cg(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
10.9370700187
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 262-67, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
A, M, x, b, postprocess = make_system(A, M, x0, b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._cg')
# determine maxiter
if maxiter is None:
maxiter = int(1.3*len(b)) + 2
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# choose tolerance for numerically zero values
# t = A.dtype.char
# eps = np.finfo(np.float).eps
# feps = np.finfo(np.single).eps
# geps = np.finfo(np.longfloat).eps
# _array_precision = {'f': 0, 'd': 1, 'g': 2, 'F': 0, 'D': 1, 'G': 2}
# numerically_zero = {0: feps*1e3, 1: eps*1e6,
# 2: geps*1e6}[_array_precision[t]]
# setup method
r = b - A*x
z = M*r
p = z.copy()
rz = np.inner(r.conjugate(), z)
# use preconditioner norm
normr = np.sqrt(rz)
if residuals is not None:
residuals[:] = [normr] # initial residual
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_M
if normr != 0.0:
tol = tol*normr
# How often should r be recomputed
recompute_r = 8
iter = 0
while True:
Ap = A*p
rz_old = rz
# Step number in Saad's pseudocode
pAp = np.inner(Ap.conjugate(), p) # check curvature of A
if pAp < 0.0:
warn("\nIndefinite matrix detected in CG, aborting\n")
return (postprocess(x), -1)
alpha = rz/pAp # 3
x += alpha * p # 4
if np.mod(iter, recompute_r) and iter > 0: # 5
r -= alpha * Ap
else:
r = b - A*x
z = M*r # 6
rz = np.inner(r.conjugate(), z)
if rz < 0.0: # check curvature of M
warn("\nIndefinite preconditioner detected in CG, aborting\n")
return (postprocess(x), -1)
beta = rz/rz_old # 7
p *= beta # 8
p += z
iter += 1
normr = np.sqrt(rz) # use preconditioner norm
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
elif rz == 0.0:
# important to test after testing normr < tol. rz == 0.0 is an
# indicator of convergence when r = 0.0
warn("\nSingular preconditioner detected in CG, ceasing \
iterations\n")
return (postprocess(x), -1)
if iter == maxiter:
return (postprocess(x), iter) | [
"def",
"cg",
"(",
"A",
",",
"b",
",",
"x0",
"=",
"None",
",",
"tol",
"=",
"1e-5",
",",
"maxiter",
"=",
"None",
",",
"xtype",
"=",
"None",
",",
"M",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"residuals",
"=",
"None",
")",
":",
"A",
",",
... | Conjugate Gradient algorithm.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by the
preconditioner norm of r_0, or ||r_0||_M.
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals contains the residual norm history,
including the initial residual. The preconditioner norm
is used, instead of the Euclidean norm.
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of cg
== =======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== =======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
The residual in the preconditioner norm is both used for halting and
returned in the residuals list.
Examples
--------
>>> from pyamg.krylov.cg import cg
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = cg(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
10.9370700187
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 262-67, 2003
http://www-users.cs.umn.edu/~saad/books.html | [
"Conjugate",
"Gradient",
"algorithm",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/krylov/_cg.py#L10-L182 | train | 209,254 |
pyamg/pyamg | pyamg/krylov/_bicgstab.py | bicgstab | def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Biconjugate Gradient Algorithm with Stabilization.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by ||r_0||_2
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A A.H x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals has the residual norm history,
including the initial residual, appended to it
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of bicgstab
== ======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== ======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
Examples
--------
>>> from pyamg.krylov.bicgstab import bicgstab
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = bicgstab(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
4.68163045309
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 231-234, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
# Convert inputs to linear system, with error checking
A, M, x, b, postprocess = make_system(A, M, x0, b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._bicgstab')
# Check iteration numbers
if maxiter is None:
maxiter = len(x) + 5
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# Prep for method
r = b - A*x
normr = norm(r)
if residuals is not None:
residuals[:] = [normr]
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_2
if normr != 0.0:
tol = tol*normr
# Is this a one dimensional matrix?
if A.shape[0] == 1:
entry = np.ravel(A*np.array([1.0], dtype=xtype))
return (postprocess(b/entry), 0)
rstar = r.copy()
p = r.copy()
rrstarOld = np.inner(rstar.conjugate(), r)
iter = 0
# Begin BiCGStab
while True:
Mp = M*p
AMp = A*Mp
# alpha = (r_j, rstar) / (A*M*p_j, rstar)
alpha = rrstarOld/np.inner(rstar.conjugate(), AMp)
# s_j = r_j - alpha*A*M*p_j
s = r - alpha*AMp
Ms = M*s
AMs = A*Ms
# omega = (A*M*s_j, s_j)/(A*M*s_j, A*M*s_j)
omega = np.inner(AMs.conjugate(), s)/np.inner(AMs.conjugate(), AMs)
# x_{j+1} = x_j + alpha*M*p_j + omega*M*s_j
x = x + alpha*Mp + omega*Ms
# r_{j+1} = s_j - omega*A*M*s
r = s - omega*AMs
# beta_j = (r_{j+1}, rstar)/(r_j, rstar) * (alpha/omega)
rrstarNew = np.inner(rstar.conjugate(), r)
beta = (rrstarNew / rrstarOld) * (alpha / omega)
rrstarOld = rrstarNew
# p_{j+1} = r_{j+1} + beta*(p_j - omega*A*M*p)
p = r + beta*(p - omega*AMp)
iter += 1
normr = norm(r)
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
if iter == maxiter:
return (postprocess(x), iter) | python | def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, xtype=None, M=None,
callback=None, residuals=None):
"""Biconjugate Gradient Algorithm with Stabilization.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by ||r_0||_2
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A A.H x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals has the residual norm history,
including the initial residual, appended to it
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of bicgstab
== ======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== ======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
Examples
--------
>>> from pyamg.krylov.bicgstab import bicgstab
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = bicgstab(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
4.68163045309
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 231-234, 2003
http://www-users.cs.umn.edu/~saad/books.html
"""
# Convert inputs to linear system, with error checking
A, M, x, b, postprocess = make_system(A, M, x0, b)
# Ensure that warnings are always reissued from this function
import warnings
warnings.filterwarnings('always', module='pyamg\.krylov\._bicgstab')
# Check iteration numbers
if maxiter is None:
maxiter = len(x) + 5
elif maxiter < 1:
raise ValueError('Number of iterations must be positive')
# Prep for method
r = b - A*x
normr = norm(r)
if residuals is not None:
residuals[:] = [normr]
# Check initial guess ( scaling by b, if b != 0,
# must account for case when norm(b) is very small)
normb = norm(b)
if normb == 0.0:
normb = 1.0
if normr < tol*normb:
return (postprocess(x), 0)
# Scale tol by ||r_0||_2
if normr != 0.0:
tol = tol*normr
# Is this a one dimensional matrix?
if A.shape[0] == 1:
entry = np.ravel(A*np.array([1.0], dtype=xtype))
return (postprocess(b/entry), 0)
rstar = r.copy()
p = r.copy()
rrstarOld = np.inner(rstar.conjugate(), r)
iter = 0
# Begin BiCGStab
while True:
Mp = M*p
AMp = A*Mp
# alpha = (r_j, rstar) / (A*M*p_j, rstar)
alpha = rrstarOld/np.inner(rstar.conjugate(), AMp)
# s_j = r_j - alpha*A*M*p_j
s = r - alpha*AMp
Ms = M*s
AMs = A*Ms
# omega = (A*M*s_j, s_j)/(A*M*s_j, A*M*s_j)
omega = np.inner(AMs.conjugate(), s)/np.inner(AMs.conjugate(), AMs)
# x_{j+1} = x_j + alpha*M*p_j + omega*M*s_j
x = x + alpha*Mp + omega*Ms
# r_{j+1} = s_j - omega*A*M*s
r = s - omega*AMs
# beta_j = (r_{j+1}, rstar)/(r_j, rstar) * (alpha/omega)
rrstarNew = np.inner(rstar.conjugate(), r)
beta = (rrstarNew / rrstarOld) * (alpha / omega)
rrstarOld = rrstarNew
# p_{j+1} = r_{j+1} + beta*(p_j - omega*A*M*p)
p = r + beta*(p - omega*AMp)
iter += 1
normr = norm(r)
if residuals is not None:
residuals.append(normr)
if callback is not None:
callback(x)
if normr < tol:
return (postprocess(x), 0)
if iter == maxiter:
return (postprocess(x), iter) | [
"def",
"bicgstab",
"(",
"A",
",",
"b",
",",
"x0",
"=",
"None",
",",
"tol",
"=",
"1e-5",
",",
"maxiter",
"=",
"None",
",",
"xtype",
"=",
"None",
",",
"M",
"=",
"None",
",",
"callback",
"=",
"None",
",",
"residuals",
"=",
"None",
")",
":",
"# Con... | Biconjugate Gradient Algorithm with Stabilization.
Solves the linear system Ax = b. Left preconditioning is supported.
Parameters
----------
A : array, matrix, sparse matrix, LinearOperator
n x n, linear system to solve
b : array, matrix
right hand side, shape is (n,) or (n,1)
x0 : array, matrix
initial guess, default is a vector of zeros
tol : float
relative convergence tolerance, i.e. tol is scaled by ||r_0||_2
maxiter : int
maximum number of allowed iterations
xtype : type
dtype for the solution, default is automatic type detection
M : array, matrix, sparse matrix, LinearOperator
n x n, inverted preconditioner, i.e. solve M A A.H x = M b.
callback : function
User-supplied function is called after each iteration as
callback(xk), where xk is the current solution vector
residuals : list
residuals has the residual norm history,
including the initial residual, appended to it
Returns
-------
(xNew, info)
xNew : an updated guess to the solution of Ax = b
info : halting status of bicgstab
== ======================================
0 successful exit
>0 convergence to tolerance not achieved,
return iteration count instead.
<0 numerical breakdown, or illegal input
== ======================================
Notes
-----
The LinearOperator class is in scipy.sparse.linalg.interface.
Use this class if you prefer to define A or M as a mat-vec routine
as opposed to explicitly constructing the matrix. A.psolve(..) is
still supported as a legacy.
Examples
--------
>>> from pyamg.krylov.bicgstab import bicgstab
>>> from pyamg.util.linalg import norm
>>> import numpy as np
>>> from pyamg.gallery import poisson
>>> A = poisson((10,10))
>>> b = np.ones((A.shape[0],))
>>> (x,flag) = bicgstab(A,b, maxiter=2, tol=1e-8)
>>> print norm(b - A*x)
4.68163045309
References
----------
.. [1] Yousef Saad, "Iterative Methods for Sparse Linear Systems,
Second Edition", SIAM, pp. 231-234, 2003
http://www-users.cs.umn.edu/~saad/books.html | [
"Biconjugate",
"Gradient",
"Algorithm",
"with",
"Stabilization",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/krylov/_bicgstab.py#L9-L165 | train | 209,255 |
pyamg/pyamg | pyamg/multilevel.py | multilevel_solver.operator_complexity | def operator_complexity(self):
"""Operator complexity of this multigrid hierarchy.
Defined as:
Number of nonzeros in the matrix on all levels /
Number of nonzeros in the matrix on the finest level
"""
return sum([level.A.nnz for level in self.levels]) /\
float(self.levels[0].A.nnz) | python | def operator_complexity(self):
"""Operator complexity of this multigrid hierarchy.
Defined as:
Number of nonzeros in the matrix on all levels /
Number of nonzeros in the matrix on the finest level
"""
return sum([level.A.nnz for level in self.levels]) /\
float(self.levels[0].A.nnz) | [
"def",
"operator_complexity",
"(",
"self",
")",
":",
"return",
"sum",
"(",
"[",
"level",
".",
"A",
".",
"nnz",
"for",
"level",
"in",
"self",
".",
"levels",
"]",
")",
"/",
"float",
"(",
"self",
".",
"levels",
"[",
"0",
"]",
".",
"A",
".",
"nnz",
... | Operator complexity of this multigrid hierarchy.
Defined as:
Number of nonzeros in the matrix on all levels /
Number of nonzeros in the matrix on the finest level | [
"Operator",
"complexity",
"of",
"this",
"multigrid",
"hierarchy",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/multilevel.py#L249-L258 | train | 209,256 |
pyamg/pyamg | pyamg/multilevel.py | multilevel_solver.grid_complexity | def grid_complexity(self):
"""Grid complexity of this multigrid hierarchy.
Defined as:
Number of unknowns on all levels /
Number of unknowns on the finest level
"""
return sum([level.A.shape[0] for level in self.levels]) /\
float(self.levels[0].A.shape[0]) | python | def grid_complexity(self):
"""Grid complexity of this multigrid hierarchy.
Defined as:
Number of unknowns on all levels /
Number of unknowns on the finest level
"""
return sum([level.A.shape[0] for level in self.levels]) /\
float(self.levels[0].A.shape[0]) | [
"def",
"grid_complexity",
"(",
"self",
")",
":",
"return",
"sum",
"(",
"[",
"level",
".",
"A",
".",
"shape",
"[",
"0",
"]",
"for",
"level",
"in",
"self",
".",
"levels",
"]",
")",
"/",
"float",
"(",
"self",
".",
"levels",
"[",
"0",
"]",
".",
"A"... | Grid complexity of this multigrid hierarchy.
Defined as:
Number of unknowns on all levels /
Number of unknowns on the finest level | [
"Grid",
"complexity",
"of",
"this",
"multigrid",
"hierarchy",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/multilevel.py#L260-L269 | train | 209,257 |
pyamg/pyamg | pyamg/multilevel.py | multilevel_solver.aspreconditioner | def aspreconditioner(self, cycle='V'):
"""Create a preconditioner using this multigrid cycle.
Parameters
----------
cycle : {'V','W','F','AMLI'}
Type of multigrid cycle to perform in each iteration.
Returns
-------
precond : LinearOperator
Preconditioner suitable for the iterative solvers in defined in
the scipy.sparse.linalg module (e.g. cg, gmres) and any other
solver that uses the LinearOperator interface. Refer to the
LinearOperator documentation in scipy.sparse.linalg
See Also
--------
multilevel_solver.solve, scipy.sparse.linalg.LinearOperator
Examples
--------
>>> from pyamg.aggregation import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import scipy as sp
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = sp.rand(A.shape[0]) # random RHS
>>> ml = smoothed_aggregation_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
"""
from scipy.sparse.linalg import LinearOperator
shape = self.levels[0].A.shape
dtype = self.levels[0].A.dtype
def matvec(b):
return self.solve(b, maxiter=1, cycle=cycle, tol=1e-12)
return LinearOperator(shape, matvec, dtype=dtype) | python | def aspreconditioner(self, cycle='V'):
"""Create a preconditioner using this multigrid cycle.
Parameters
----------
cycle : {'V','W','F','AMLI'}
Type of multigrid cycle to perform in each iteration.
Returns
-------
precond : LinearOperator
Preconditioner suitable for the iterative solvers in defined in
the scipy.sparse.linalg module (e.g. cg, gmres) and any other
solver that uses the LinearOperator interface. Refer to the
LinearOperator documentation in scipy.sparse.linalg
See Also
--------
multilevel_solver.solve, scipy.sparse.linalg.LinearOperator
Examples
--------
>>> from pyamg.aggregation import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import scipy as sp
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = sp.rand(A.shape[0]) # random RHS
>>> ml = smoothed_aggregation_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG
"""
from scipy.sparse.linalg import LinearOperator
shape = self.levels[0].A.shape
dtype = self.levels[0].A.dtype
def matvec(b):
return self.solve(b, maxiter=1, cycle=cycle, tol=1e-12)
return LinearOperator(shape, matvec, dtype=dtype) | [
"def",
"aspreconditioner",
"(",
"self",
",",
"cycle",
"=",
"'V'",
")",
":",
"from",
"scipy",
".",
"sparse",
".",
"linalg",
"import",
"LinearOperator",
"shape",
"=",
"self",
".",
"levels",
"[",
"0",
"]",
".",
"A",
".",
"shape",
"dtype",
"=",
"self",
"... | Create a preconditioner using this multigrid cycle.
Parameters
----------
cycle : {'V','W','F','AMLI'}
Type of multigrid cycle to perform in each iteration.
Returns
-------
precond : LinearOperator
Preconditioner suitable for the iterative solvers in defined in
the scipy.sparse.linalg module (e.g. cg, gmres) and any other
solver that uses the LinearOperator interface. Refer to the
LinearOperator documentation in scipy.sparse.linalg
See Also
--------
multilevel_solver.solve, scipy.sparse.linalg.LinearOperator
Examples
--------
>>> from pyamg.aggregation import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from scipy.sparse.linalg import cg
>>> import scipy as sp
>>> A = poisson((100, 100), format='csr') # matrix
>>> b = sp.rand(A.shape[0]) # random RHS
>>> ml = smoothed_aggregation_solver(A) # AMG solver
>>> M = ml.aspreconditioner(cycle='V') # preconditioner
>>> x, info = cg(A, b, tol=1e-8, maxiter=30, M=M) # solve with CG | [
"Create",
"a",
"preconditioner",
"using",
"this",
"multigrid",
"cycle",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/multilevel.py#L275-L316 | train | 209,258 |
pyamg/pyamg | pyamg/multilevel.py | multilevel_solver.__solve | def __solve(self, lvl, x, b, cycle):
"""Multigrid cycling.
Parameters
----------
lvl : int
Solve problem on level `lvl`
x : numpy array
Initial guess `x` and return correction
b : numpy array
Right-hand side for Ax=b
cycle : {'V','W','F','AMLI'}
Recursively called cycling function. The
Defines the cycling used:
cycle = 'V', V-cycle
cycle = 'W', W-cycle
cycle = 'F', F-cycle
cycle = 'AMLI', AMLI-cycle
"""
A = self.levels[lvl].A
self.levels[lvl].presmoother(A, x, b)
residual = b - A * x
coarse_b = self.levels[lvl].R * residual
coarse_x = np.zeros_like(coarse_b)
if lvl == len(self.levels) - 2:
coarse_x[:] = self.coarse_solver(self.levels[-1].A, coarse_b)
else:
if cycle == 'V':
self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
elif cycle == 'W':
self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
elif cycle == 'F':
self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
elif cycle == "AMLI":
# Run nAMLI AMLI cycles, which compute "optimal" corrections by
# orthogonalizing the coarse-grid corrections in the A-norm
nAMLI = 2
Ac = self.levels[lvl + 1].A
p = np.zeros((nAMLI, coarse_b.shape[0]), dtype=coarse_b.dtype)
beta = np.zeros((nAMLI, nAMLI), dtype=coarse_b.dtype)
for k in range(nAMLI):
# New search direction --> M^{-1}*residual
p[k, :] = 1
self.__solve(lvl + 1, p[k, :].reshape(coarse_b.shape),
coarse_b, cycle)
# Orthogonalize new search direction to old directions
for j in range(k): # loops from j = 0...(k-1)
beta[k, j] = np.inner(p[j, :].conj(), Ac * p[k, :]) /\
np.inner(p[j, :].conj(), Ac * p[j, :])
p[k, :] -= beta[k, j] * p[j, :]
# Compute step size
Ap = Ac * p[k, :]
alpha = np.inner(p[k, :].conj(), np.ravel(coarse_b)) /\
np.inner(p[k, :].conj(), Ap)
# Update solution
coarse_x += alpha * p[k, :].reshape(coarse_x.shape)
# Update residual
coarse_b -= alpha * Ap.reshape(coarse_b.shape)
else:
raise TypeError('Unrecognized cycle type (%s)' % cycle)
x += self.levels[lvl].P * coarse_x # coarse grid correction
self.levels[lvl].postsmoother(A, x, b) | python | def __solve(self, lvl, x, b, cycle):
"""Multigrid cycling.
Parameters
----------
lvl : int
Solve problem on level `lvl`
x : numpy array
Initial guess `x` and return correction
b : numpy array
Right-hand side for Ax=b
cycle : {'V','W','F','AMLI'}
Recursively called cycling function. The
Defines the cycling used:
cycle = 'V', V-cycle
cycle = 'W', W-cycle
cycle = 'F', F-cycle
cycle = 'AMLI', AMLI-cycle
"""
A = self.levels[lvl].A
self.levels[lvl].presmoother(A, x, b)
residual = b - A * x
coarse_b = self.levels[lvl].R * residual
coarse_x = np.zeros_like(coarse_b)
if lvl == len(self.levels) - 2:
coarse_x[:] = self.coarse_solver(self.levels[-1].A, coarse_b)
else:
if cycle == 'V':
self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
elif cycle == 'W':
self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
elif cycle == 'F':
self.__solve(lvl + 1, coarse_x, coarse_b, cycle)
self.__solve(lvl + 1, coarse_x, coarse_b, 'V')
elif cycle == "AMLI":
# Run nAMLI AMLI cycles, which compute "optimal" corrections by
# orthogonalizing the coarse-grid corrections in the A-norm
nAMLI = 2
Ac = self.levels[lvl + 1].A
p = np.zeros((nAMLI, coarse_b.shape[0]), dtype=coarse_b.dtype)
beta = np.zeros((nAMLI, nAMLI), dtype=coarse_b.dtype)
for k in range(nAMLI):
# New search direction --> M^{-1}*residual
p[k, :] = 1
self.__solve(lvl + 1, p[k, :].reshape(coarse_b.shape),
coarse_b, cycle)
# Orthogonalize new search direction to old directions
for j in range(k): # loops from j = 0...(k-1)
beta[k, j] = np.inner(p[j, :].conj(), Ac * p[k, :]) /\
np.inner(p[j, :].conj(), Ac * p[j, :])
p[k, :] -= beta[k, j] * p[j, :]
# Compute step size
Ap = Ac * p[k, :]
alpha = np.inner(p[k, :].conj(), np.ravel(coarse_b)) /\
np.inner(p[k, :].conj(), Ap)
# Update solution
coarse_x += alpha * p[k, :].reshape(coarse_x.shape)
# Update residual
coarse_b -= alpha * Ap.reshape(coarse_b.shape)
else:
raise TypeError('Unrecognized cycle type (%s)' % cycle)
x += self.levels[lvl].P * coarse_x # coarse grid correction
self.levels[lvl].postsmoother(A, x, b) | [
"def",
"__solve",
"(",
"self",
",",
"lvl",
",",
"x",
",",
"b",
",",
"cycle",
")",
":",
"A",
"=",
"self",
".",
"levels",
"[",
"lvl",
"]",
".",
"A",
"self",
".",
"levels",
"[",
"lvl",
"]",
".",
"presmoother",
"(",
"A",
",",
"x",
",",
"b",
")"... | Multigrid cycling.
Parameters
----------
lvl : int
Solve problem on level `lvl`
x : numpy array
Initial guess `x` and return correction
b : numpy array
Right-hand side for Ax=b
cycle : {'V','W','F','AMLI'}
Recursively called cycling function. The
Defines the cycling used:
cycle = 'V', V-cycle
cycle = 'W', W-cycle
cycle = 'F', F-cycle
cycle = 'AMLI', AMLI-cycle | [
"Multigrid",
"cycling",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/multilevel.py#L485-L559 | train | 209,259 |
pyamg/pyamg | pyamg/graph.py | maximal_independent_set | def maximal_independent_set(G, algo='serial', k=None):
"""Compute a maximal independent vertex set for a graph.
Parameters
----------
G : sparse matrix
Symmetric matrix, preferably in sparse CSR or CSC format
The nonzeros of G represent the edges of an undirected graph.
algo : {'serial', 'parallel'}
Algorithm used to compute the MIS
* serial : greedy serial algorithm
* parallel : variant of Luby's parallel MIS algorithm
Returns
-------
S : array
S[i] = 1 if vertex i is in the MIS
S[i] = 0 otherwise
Notes
-----
Diagonal entries in the G (self loops) will be ignored.
Luby's algorithm is significantly more expensive than the
greedy serial algorithm.
"""
G = asgraph(G)
N = G.shape[0]
mis = np.empty(N, dtype='intc')
mis[:] = -1
if k is None:
if algo == 'serial':
fn = amg_core.maximal_independent_set_serial
fn(N, G.indptr, G.indices, -1, 1, 0, mis)
elif algo == 'parallel':
fn = amg_core.maximal_independent_set_parallel
fn(N, G.indptr, G.indices, -1, 1, 0, mis, sp.rand(N), -1)
else:
raise ValueError('unknown algorithm (%s)' % algo)
else:
fn = amg_core.maximal_independent_set_k_parallel
fn(N, G.indptr, G.indices, k, mis, sp.rand(N), -1)
return mis | python | def maximal_independent_set(G, algo='serial', k=None):
"""Compute a maximal independent vertex set for a graph.
Parameters
----------
G : sparse matrix
Symmetric matrix, preferably in sparse CSR or CSC format
The nonzeros of G represent the edges of an undirected graph.
algo : {'serial', 'parallel'}
Algorithm used to compute the MIS
* serial : greedy serial algorithm
* parallel : variant of Luby's parallel MIS algorithm
Returns
-------
S : array
S[i] = 1 if vertex i is in the MIS
S[i] = 0 otherwise
Notes
-----
Diagonal entries in the G (self loops) will be ignored.
Luby's algorithm is significantly more expensive than the
greedy serial algorithm.
"""
G = asgraph(G)
N = G.shape[0]
mis = np.empty(N, dtype='intc')
mis[:] = -1
if k is None:
if algo == 'serial':
fn = amg_core.maximal_independent_set_serial
fn(N, G.indptr, G.indices, -1, 1, 0, mis)
elif algo == 'parallel':
fn = amg_core.maximal_independent_set_parallel
fn(N, G.indptr, G.indices, -1, 1, 0, mis, sp.rand(N), -1)
else:
raise ValueError('unknown algorithm (%s)' % algo)
else:
fn = amg_core.maximal_independent_set_k_parallel
fn(N, G.indptr, G.indices, k, mis, sp.rand(N), -1)
return mis | [
"def",
"maximal_independent_set",
"(",
"G",
",",
"algo",
"=",
"'serial'",
",",
"k",
"=",
"None",
")",
":",
"G",
"=",
"asgraph",
"(",
"G",
")",
"N",
"=",
"G",
".",
"shape",
"[",
"0",
"]",
"mis",
"=",
"np",
".",
"empty",
"(",
"N",
",",
"dtype",
... | Compute a maximal independent vertex set for a graph.
Parameters
----------
G : sparse matrix
Symmetric matrix, preferably in sparse CSR or CSC format
The nonzeros of G represent the edges of an undirected graph.
algo : {'serial', 'parallel'}
Algorithm used to compute the MIS
* serial : greedy serial algorithm
* parallel : variant of Luby's parallel MIS algorithm
Returns
-------
S : array
S[i] = 1 if vertex i is in the MIS
S[i] = 0 otherwise
Notes
-----
Diagonal entries in the G (self loops) will be ignored.
Luby's algorithm is significantly more expensive than the
greedy serial algorithm. | [
"Compute",
"a",
"maximal",
"independent",
"vertex",
"set",
"for",
"a",
"graph",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/graph.py#L32-L78 | train | 209,260 |
pyamg/pyamg | pyamg/graph.py | vertex_coloring | def vertex_coloring(G, method='MIS'):
"""Compute a vertex coloring of a graph.
Parameters
----------
G : sparse matrix
Symmetric matrix, preferably in sparse CSR or CSC format
The nonzeros of G represent the edges of an undirected graph.
method : string
Algorithm used to compute the vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
coloring : array
An array of vertex colors (integers beginning at 0)
Notes
-----
Diagonal entries in the G (self loops) will be ignored.
"""
G = asgraph(G)
N = G.shape[0]
coloring = np.empty(N, dtype='intc')
if method == 'MIS':
fn = amg_core.vertex_coloring_mis
fn(N, G.indptr, G.indices, coloring)
elif method == 'JP':
fn = amg_core.vertex_coloring_jones_plassmann
fn(N, G.indptr, G.indices, coloring, sp.rand(N))
elif method == 'LDF':
fn = amg_core.vertex_coloring_LDF
fn(N, G.indptr, G.indices, coloring, sp.rand(N))
else:
raise ValueError('unknown method (%s)' % method)
return coloring | python | def vertex_coloring(G, method='MIS'):
"""Compute a vertex coloring of a graph.
Parameters
----------
G : sparse matrix
Symmetric matrix, preferably in sparse CSR or CSC format
The nonzeros of G represent the edges of an undirected graph.
method : string
Algorithm used to compute the vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
coloring : array
An array of vertex colors (integers beginning at 0)
Notes
-----
Diagonal entries in the G (self loops) will be ignored.
"""
G = asgraph(G)
N = G.shape[0]
coloring = np.empty(N, dtype='intc')
if method == 'MIS':
fn = amg_core.vertex_coloring_mis
fn(N, G.indptr, G.indices, coloring)
elif method == 'JP':
fn = amg_core.vertex_coloring_jones_plassmann
fn(N, G.indptr, G.indices, coloring, sp.rand(N))
elif method == 'LDF':
fn = amg_core.vertex_coloring_LDF
fn(N, G.indptr, G.indices, coloring, sp.rand(N))
else:
raise ValueError('unknown method (%s)' % method)
return coloring | [
"def",
"vertex_coloring",
"(",
"G",
",",
"method",
"=",
"'MIS'",
")",
":",
"G",
"=",
"asgraph",
"(",
"G",
")",
"N",
"=",
"G",
".",
"shape",
"[",
"0",
"]",
"coloring",
"=",
"np",
".",
"empty",
"(",
"N",
",",
"dtype",
"=",
"'intc'",
")",
"if",
... | Compute a vertex coloring of a graph.
Parameters
----------
G : sparse matrix
Symmetric matrix, preferably in sparse CSR or CSC format
The nonzeros of G represent the edges of an undirected graph.
method : string
Algorithm used to compute the vertex coloring:
* 'MIS' - Maximal Independent Set
* 'JP' - Jones-Plassmann (parallel)
* 'LDF' - Largest-Degree-First (parallel)
Returns
-------
coloring : array
An array of vertex colors (integers beginning at 0)
Notes
-----
Diagonal entries in the G (self loops) will be ignored. | [
"Compute",
"a",
"vertex",
"coloring",
"of",
"a",
"graph",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/graph.py#L81-L123 | train | 209,261 |
pyamg/pyamg | pyamg/graph.py | bellman_ford | def bellman_ford(G, seeds, maxiter=None):
"""Bellman-Ford iteration.
Parameters
----------
G : sparse matrix
Returns
-------
distances : array
nearest_seed : array
References
----------
CLR
"""
G = asgraph(G)
N = G.shape[0]
if maxiter is not None and maxiter < 0:
raise ValueError('maxiter must be positive')
if G.dtype == complex:
raise ValueError('Bellman-Ford algorithm only defined for real\
weights')
seeds = np.asarray(seeds, dtype='intc')
distances = np.empty(N, dtype=G.dtype)
distances[:] = max_value(G.dtype)
distances[seeds] = 0
nearest_seed = np.empty(N, dtype='intc')
nearest_seed[:] = -1
nearest_seed[seeds] = seeds
old_distances = np.empty_like(distances)
iter = 0
while maxiter is None or iter < maxiter:
old_distances[:] = distances
amg_core.bellman_ford(N, G.indptr, G.indices, G.data, distances,
nearest_seed)
if (old_distances == distances).all():
break
return (distances, nearest_seed) | python | def bellman_ford(G, seeds, maxiter=None):
"""Bellman-Ford iteration.
Parameters
----------
G : sparse matrix
Returns
-------
distances : array
nearest_seed : array
References
----------
CLR
"""
G = asgraph(G)
N = G.shape[0]
if maxiter is not None and maxiter < 0:
raise ValueError('maxiter must be positive')
if G.dtype == complex:
raise ValueError('Bellman-Ford algorithm only defined for real\
weights')
seeds = np.asarray(seeds, dtype='intc')
distances = np.empty(N, dtype=G.dtype)
distances[:] = max_value(G.dtype)
distances[seeds] = 0
nearest_seed = np.empty(N, dtype='intc')
nearest_seed[:] = -1
nearest_seed[seeds] = seeds
old_distances = np.empty_like(distances)
iter = 0
while maxiter is None or iter < maxiter:
old_distances[:] = distances
amg_core.bellman_ford(N, G.indptr, G.indices, G.data, distances,
nearest_seed)
if (old_distances == distances).all():
break
return (distances, nearest_seed) | [
"def",
"bellman_ford",
"(",
"G",
",",
"seeds",
",",
"maxiter",
"=",
"None",
")",
":",
"G",
"=",
"asgraph",
"(",
"G",
")",
"N",
"=",
"G",
".",
"shape",
"[",
"0",
"]",
"if",
"maxiter",
"is",
"not",
"None",
"and",
"maxiter",
"<",
"0",
":",
"raise"... | Bellman-Ford iteration.
Parameters
----------
G : sparse matrix
Returns
-------
distances : array
nearest_seed : array
References
----------
CLR | [
"Bellman",
"-",
"Ford",
"iteration",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/graph.py#L126-L174 | train | 209,262 |
pyamg/pyamg | pyamg/graph.py | lloyd_cluster | def lloyd_cluster(G, seeds, maxiter=10):
"""Perform Lloyd clustering on graph with weighted edges.
Parameters
----------
G : csr_matrix, csc_matrix
A sparse NxN matrix where each nonzero entry G[i,j] is the distance
between nodes i and j.
seeds : int array
If seeds is an integer, then its value determines the number of
clusters. Otherwise, seeds is an array of unique integers between 0
and N-1 that will be used as the initial seeds for clustering.
maxiter : int
The maximum number of iterations to perform.
Returns
-------
distances : array
final distances
clusters : int array
id of each cluster of points
seeds : int array
index of each seed
Notes
-----
If G has complex values, abs(G) is used instead.
"""
G = asgraph(G)
N = G.shape[0]
if G.dtype.kind == 'c':
# complex dtype
G = np.abs(G)
# interpret seeds argument
if np.isscalar(seeds):
seeds = np.random.permutation(N)[:seeds]
seeds = seeds.astype('intc')
else:
seeds = np.array(seeds, dtype='intc')
if len(seeds) < 1:
raise ValueError('at least one seed is required')
if seeds.min() < 0:
raise ValueError('invalid seed index (%d)' % seeds.min())
if seeds.max() >= N:
raise ValueError('invalid seed index (%d)' % seeds.max())
clusters = np.empty(N, dtype='intc')
distances = np.empty(N, dtype=G.dtype)
for i in range(maxiter):
last_seeds = seeds.copy()
amg_core.lloyd_cluster(N, G.indptr, G.indices, G.data,
len(seeds), distances, clusters, seeds)
if (seeds == last_seeds).all():
break
return (distances, clusters, seeds) | python | def lloyd_cluster(G, seeds, maxiter=10):
"""Perform Lloyd clustering on graph with weighted edges.
Parameters
----------
G : csr_matrix, csc_matrix
A sparse NxN matrix where each nonzero entry G[i,j] is the distance
between nodes i and j.
seeds : int array
If seeds is an integer, then its value determines the number of
clusters. Otherwise, seeds is an array of unique integers between 0
and N-1 that will be used as the initial seeds for clustering.
maxiter : int
The maximum number of iterations to perform.
Returns
-------
distances : array
final distances
clusters : int array
id of each cluster of points
seeds : int array
index of each seed
Notes
-----
If G has complex values, abs(G) is used instead.
"""
G = asgraph(G)
N = G.shape[0]
if G.dtype.kind == 'c':
# complex dtype
G = np.abs(G)
# interpret seeds argument
if np.isscalar(seeds):
seeds = np.random.permutation(N)[:seeds]
seeds = seeds.astype('intc')
else:
seeds = np.array(seeds, dtype='intc')
if len(seeds) < 1:
raise ValueError('at least one seed is required')
if seeds.min() < 0:
raise ValueError('invalid seed index (%d)' % seeds.min())
if seeds.max() >= N:
raise ValueError('invalid seed index (%d)' % seeds.max())
clusters = np.empty(N, dtype='intc')
distances = np.empty(N, dtype=G.dtype)
for i in range(maxiter):
last_seeds = seeds.copy()
amg_core.lloyd_cluster(N, G.indptr, G.indices, G.data,
len(seeds), distances, clusters, seeds)
if (seeds == last_seeds).all():
break
return (distances, clusters, seeds) | [
"def",
"lloyd_cluster",
"(",
"G",
",",
"seeds",
",",
"maxiter",
"=",
"10",
")",
":",
"G",
"=",
"asgraph",
"(",
"G",
")",
"N",
"=",
"G",
".",
"shape",
"[",
"0",
"]",
"if",
"G",
".",
"dtype",
".",
"kind",
"==",
"'c'",
":",
"# complex dtype",
"G",... | Perform Lloyd clustering on graph with weighted edges.
Parameters
----------
G : csr_matrix, csc_matrix
A sparse NxN matrix where each nonzero entry G[i,j] is the distance
between nodes i and j.
seeds : int array
If seeds is an integer, then its value determines the number of
clusters. Otherwise, seeds is an array of unique integers between 0
and N-1 that will be used as the initial seeds for clustering.
maxiter : int
The maximum number of iterations to perform.
Returns
-------
distances : array
final distances
clusters : int array
id of each cluster of points
seeds : int array
index of each seed
Notes
-----
If G has complex values, abs(G) is used instead. | [
"Perform",
"Lloyd",
"clustering",
"on",
"graph",
"with",
"weighted",
"edges",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/graph.py#L177-L240 | train | 209,263 |
pyamg/pyamg | pyamg/graph.py | breadth_first_search | def breadth_first_search(G, seed):
"""Breadth First search of a graph.
Parameters
----------
G : csr_matrix, csc_matrix
A sparse NxN matrix where each nonzero entry G[i,j] is the distance
between nodes i and j.
seed : int
Index of the seed location
Returns
-------
order : int array
Breadth first order
level : int array
Final levels
Examples
--------
0---2
| /
| /
1---4---7---8---9
| /| /
| / | /
3/ 6/
|
|
5
>>> import numpy as np
>>> import pyamg
>>> import scipy.sparse as sparse
>>> edges = np.array([[0,1],[0,2],[1,2],[1,3],[1,4],[3,4],[3,5],
[4,6], [4,7], [6,7], [7,8], [8,9]])
>>> N = np.max(edges.ravel())+1
>>> data = np.ones((edges.shape[0],))
>>> A = sparse.coo_matrix((data, (edges[:,0], edges[:,1])), shape=(N,N))
>>> c, l = pyamg.graph.breadth_first_search(A, 0)
>>> print(l)
>>> print(c)
[0 1 1 2 2 3 3 3 4 5]
[0 1 2 3 4 5 6 7 8 9]
"""
G = asgraph(G)
N = G.shape[0]
order = np.empty(N, G.indptr.dtype)
level = np.empty(N, G.indptr.dtype)
level[:] = -1
BFS = amg_core.breadth_first_search
BFS(G.indptr, G.indices, int(seed), order, level)
return order, level | python | def breadth_first_search(G, seed):
"""Breadth First search of a graph.
Parameters
----------
G : csr_matrix, csc_matrix
A sparse NxN matrix where each nonzero entry G[i,j] is the distance
between nodes i and j.
seed : int
Index of the seed location
Returns
-------
order : int array
Breadth first order
level : int array
Final levels
Examples
--------
0---2
| /
| /
1---4---7---8---9
| /| /
| / | /
3/ 6/
|
|
5
>>> import numpy as np
>>> import pyamg
>>> import scipy.sparse as sparse
>>> edges = np.array([[0,1],[0,2],[1,2],[1,3],[1,4],[3,4],[3,5],
[4,6], [4,7], [6,7], [7,8], [8,9]])
>>> N = np.max(edges.ravel())+1
>>> data = np.ones((edges.shape[0],))
>>> A = sparse.coo_matrix((data, (edges[:,0], edges[:,1])), shape=(N,N))
>>> c, l = pyamg.graph.breadth_first_search(A, 0)
>>> print(l)
>>> print(c)
[0 1 1 2 2 3 3 3 4 5]
[0 1 2 3 4 5 6 7 8 9]
"""
G = asgraph(G)
N = G.shape[0]
order = np.empty(N, G.indptr.dtype)
level = np.empty(N, G.indptr.dtype)
level[:] = -1
BFS = amg_core.breadth_first_search
BFS(G.indptr, G.indices, int(seed), order, level)
return order, level | [
"def",
"breadth_first_search",
"(",
"G",
",",
"seed",
")",
":",
"G",
"=",
"asgraph",
"(",
"G",
")",
"N",
"=",
"G",
".",
"shape",
"[",
"0",
"]",
"order",
"=",
"np",
".",
"empty",
"(",
"N",
",",
"G",
".",
"indptr",
".",
"dtype",
")",
"level",
"... | Breadth First search of a graph.
Parameters
----------
G : csr_matrix, csc_matrix
A sparse NxN matrix where each nonzero entry G[i,j] is the distance
between nodes i and j.
seed : int
Index of the seed location
Returns
-------
order : int array
Breadth first order
level : int array
Final levels
Examples
--------
0---2
| /
| /
1---4---7---8---9
| /| /
| / | /
3/ 6/
|
|
5
>>> import numpy as np
>>> import pyamg
>>> import scipy.sparse as sparse
>>> edges = np.array([[0,1],[0,2],[1,2],[1,3],[1,4],[3,4],[3,5],
[4,6], [4,7], [6,7], [7,8], [8,9]])
>>> N = np.max(edges.ravel())+1
>>> data = np.ones((edges.shape[0],))
>>> A = sparse.coo_matrix((data, (edges[:,0], edges[:,1])), shape=(N,N))
>>> c, l = pyamg.graph.breadth_first_search(A, 0)
>>> print(l)
>>> print(c)
[0 1 1 2 2 3 3 3 4 5]
[0 1 2 3 4 5 6 7 8 9] | [
"Breadth",
"First",
"search",
"of",
"a",
"graph",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/graph.py#L243-L298 | train | 209,264 |
pyamg/pyamg | pyamg/graph.py | connected_components | def connected_components(G):
"""Compute the connected components of a graph.
The connected components of a graph G, which is represented by a
symmetric sparse matrix, are labeled with the integers 0,1,..(K-1) where
K is the number of components.
Parameters
----------
G : symmetric matrix, preferably in sparse CSR or CSC format
The nonzeros of G represent the edges of an undirected graph.
Returns
-------
components : ndarray
An array of component labels for each vertex of the graph.
Notes
-----
If the nonzero structure of G is not symmetric, then the
result is undefined.
Examples
--------
>>> from pyamg.graph import connected_components
>>> print connected_components( [[0,1,0],[1,0,1],[0,1,0]] )
[0 0 0]
>>> print connected_components( [[0,1,0],[1,0,0],[0,0,0]] )
[0 0 1]
>>> print connected_components( [[0,0,0],[0,0,0],[0,0,0]] )
[0 1 2]
>>> print connected_components( [[0,1,0,0],[1,0,0,0],[0,0,0,1],[0,0,1,0]] )
[0 0 1 1]
"""
G = asgraph(G)
N = G.shape[0]
components = np.empty(N, G.indptr.dtype)
fn = amg_core.connected_components
fn(N, G.indptr, G.indices, components)
return components | python | def connected_components(G):
"""Compute the connected components of a graph.
The connected components of a graph G, which is represented by a
symmetric sparse matrix, are labeled with the integers 0,1,..(K-1) where
K is the number of components.
Parameters
----------
G : symmetric matrix, preferably in sparse CSR or CSC format
The nonzeros of G represent the edges of an undirected graph.
Returns
-------
components : ndarray
An array of component labels for each vertex of the graph.
Notes
-----
If the nonzero structure of G is not symmetric, then the
result is undefined.
Examples
--------
>>> from pyamg.graph import connected_components
>>> print connected_components( [[0,1,0],[1,0,1],[0,1,0]] )
[0 0 0]
>>> print connected_components( [[0,1,0],[1,0,0],[0,0,0]] )
[0 0 1]
>>> print connected_components( [[0,0,0],[0,0,0],[0,0,0]] )
[0 1 2]
>>> print connected_components( [[0,1,0,0],[1,0,0,0],[0,0,0,1],[0,0,1,0]] )
[0 0 1 1]
"""
G = asgraph(G)
N = G.shape[0]
components = np.empty(N, G.indptr.dtype)
fn = amg_core.connected_components
fn(N, G.indptr, G.indices, components)
return components | [
"def",
"connected_components",
"(",
"G",
")",
":",
"G",
"=",
"asgraph",
"(",
"G",
")",
"N",
"=",
"G",
".",
"shape",
"[",
"0",
"]",
"components",
"=",
"np",
".",
"empty",
"(",
"N",
",",
"G",
".",
"indptr",
".",
"dtype",
")",
"fn",
"=",
"amg_core... | Compute the connected components of a graph.
The connected components of a graph G, which is represented by a
symmetric sparse matrix, are labeled with the integers 0,1,..(K-1) where
K is the number of components.
Parameters
----------
G : symmetric matrix, preferably in sparse CSR or CSC format
The nonzeros of G represent the edges of an undirected graph.
Returns
-------
components : ndarray
An array of component labels for each vertex of the graph.
Notes
-----
If the nonzero structure of G is not symmetric, then the
result is undefined.
Examples
--------
>>> from pyamg.graph import connected_components
>>> print connected_components( [[0,1,0],[1,0,1],[0,1,0]] )
[0 0 0]
>>> print connected_components( [[0,1,0],[1,0,0],[0,0,0]] )
[0 0 1]
>>> print connected_components( [[0,0,0],[0,0,0],[0,0,0]] )
[0 1 2]
>>> print connected_components( [[0,1,0,0],[1,0,0,0],[0,0,0,1],[0,0,1,0]] )
[0 0 1 1] | [
"Compute",
"the",
"connected",
"components",
"of",
"a",
"graph",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/graph.py#L301-L344 | train | 209,265 |
pyamg/pyamg | pyamg/graph.py | symmetric_rcm | def symmetric_rcm(A):
"""Symmetric Reverse Cutthill-McKee.
Parameters
----------
A : sparse matrix
Sparse matrix
Returns
-------
B : sparse matrix
Permuted matrix with reordering
Notes
-----
Get a pseudo-peripheral node, then call BFS
Examples
--------
>>> from pyamg import gallery
>>> from pyamg.graph import symmetric_rcm
>>> n = 200
>>> density = 1.0/n
>>> A = gallery.sprand(n, n, density, format='csr')
>>> S = A + A.T
>>> # try the visualizations
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(121)
>>> plt.spy(S,marker='.')
>>> plt.subplot(122)
>>> plt.spy(symmetric_rcm(S),marker='.')
See Also
--------
pseudo_peripheral_node
"""
n = A.shape[0]
root, order, level = pseudo_peripheral_node(A)
Perm = sparse.identity(n, format='csr')
p = level.argsort()
Perm = Perm[p, :]
return Perm * A * Perm.T | python | def symmetric_rcm(A):
"""Symmetric Reverse Cutthill-McKee.
Parameters
----------
A : sparse matrix
Sparse matrix
Returns
-------
B : sparse matrix
Permuted matrix with reordering
Notes
-----
Get a pseudo-peripheral node, then call BFS
Examples
--------
>>> from pyamg import gallery
>>> from pyamg.graph import symmetric_rcm
>>> n = 200
>>> density = 1.0/n
>>> A = gallery.sprand(n, n, density, format='csr')
>>> S = A + A.T
>>> # try the visualizations
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(121)
>>> plt.spy(S,marker='.')
>>> plt.subplot(122)
>>> plt.spy(symmetric_rcm(S),marker='.')
See Also
--------
pseudo_peripheral_node
"""
n = A.shape[0]
root, order, level = pseudo_peripheral_node(A)
Perm = sparse.identity(n, format='csr')
p = level.argsort()
Perm = Perm[p, :]
return Perm * A * Perm.T | [
"def",
"symmetric_rcm",
"(",
"A",
")",
":",
"n",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"root",
",",
"order",
",",
"level",
"=",
"pseudo_peripheral_node",
"(",
"A",
")",
"Perm",
"=",
"sparse",
".",
"identity",
"(",
"n",
",",
"format",
"=",
"'csr'",... | Symmetric Reverse Cutthill-McKee.
Parameters
----------
A : sparse matrix
Sparse matrix
Returns
-------
B : sparse matrix
Permuted matrix with reordering
Notes
-----
Get a pseudo-peripheral node, then call BFS
Examples
--------
>>> from pyamg import gallery
>>> from pyamg.graph import symmetric_rcm
>>> n = 200
>>> density = 1.0/n
>>> A = gallery.sprand(n, n, density, format='csr')
>>> S = A + A.T
>>> # try the visualizations
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(121)
>>> plt.spy(S,marker='.')
>>> plt.subplot(122)
>>> plt.spy(symmetric_rcm(S),marker='.')
See Also
--------
pseudo_peripheral_node | [
"Symmetric",
"Reverse",
"Cutthill",
"-",
"McKee",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/graph.py#L347-L393 | train | 209,266 |
pyamg/pyamg | pyamg/graph.py | pseudo_peripheral_node | def pseudo_peripheral_node(A):
"""Find a pseudo peripheral node.
Parameters
----------
A : sparse matrix
Sparse matrix
Returns
-------
x : int
Locaiton of the node
order : array
BFS ordering
level : array
BFS levels
Notes
-----
Algorithm in Saad
"""
from pyamg.graph import breadth_first_search
n = A.shape[0]
valence = np.diff(A.indptr)
# select an initial node x, set delta = 0
x = int(np.random.rand() * n)
delta = 0
while True:
# do a level-set traversal from x
order, level = breadth_first_search(A, x)
# select a node y in the last level with min degree
maxlevel = level.max()
lastnodes = np.where(level == maxlevel)[0]
lastnodesvalence = valence[lastnodes]
minlastnodesvalence = lastnodesvalence.min()
y = np.where(lastnodesvalence == minlastnodesvalence)[0][0]
y = lastnodes[y]
# if d(x,y)>delta, set, and go to bfs above
if level[y] > delta:
x = y
delta = level[y]
else:
return x, order, level | python | def pseudo_peripheral_node(A):
"""Find a pseudo peripheral node.
Parameters
----------
A : sparse matrix
Sparse matrix
Returns
-------
x : int
Locaiton of the node
order : array
BFS ordering
level : array
BFS levels
Notes
-----
Algorithm in Saad
"""
from pyamg.graph import breadth_first_search
n = A.shape[0]
valence = np.diff(A.indptr)
# select an initial node x, set delta = 0
x = int(np.random.rand() * n)
delta = 0
while True:
# do a level-set traversal from x
order, level = breadth_first_search(A, x)
# select a node y in the last level with min degree
maxlevel = level.max()
lastnodes = np.where(level == maxlevel)[0]
lastnodesvalence = valence[lastnodes]
minlastnodesvalence = lastnodesvalence.min()
y = np.where(lastnodesvalence == minlastnodesvalence)[0][0]
y = lastnodes[y]
# if d(x,y)>delta, set, and go to bfs above
if level[y] > delta:
x = y
delta = level[y]
else:
return x, order, level | [
"def",
"pseudo_peripheral_node",
"(",
"A",
")",
":",
"from",
"pyamg",
".",
"graph",
"import",
"breadth_first_search",
"n",
"=",
"A",
".",
"shape",
"[",
"0",
"]",
"valence",
"=",
"np",
".",
"diff",
"(",
"A",
".",
"indptr",
")",
"# select an initial node x, ... | Find a pseudo peripheral node.
Parameters
----------
A : sparse matrix
Sparse matrix
Returns
-------
x : int
Locaiton of the node
order : array
BFS ordering
level : array
BFS levels
Notes
-----
Algorithm in Saad | [
"Find",
"a",
"pseudo",
"peripheral",
"node",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/graph.py#L396-L444 | train | 209,267 |
pyamg/pyamg | pyamg/util/utils.py | profile_solver | def profile_solver(ml, accel=None, **kwargs):
"""Profile a particular multilevel object.
Parameters
----------
ml : multilevel
Fully constructed multilevel object
accel : function pointer
Pointer to a valid Krylov solver (e.g. gmres, cg)
Returns
-------
residuals : array
Array of residuals for each iteration
See Also
--------
multilevel.psolve, multilevel.solve
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n=100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml,accel=cg)
"""
A = ml.levels[0].A
b = A * sp.rand(A.shape[0], 1)
residuals = []
if accel is None:
ml.solve(b, residuals=residuals, **kwargs)
else:
def callback(x):
residuals.append(norm(np.ravel(b) - np.ravel(A*x)))
M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V'))
accel(A, b, M=M, callback=callback, **kwargs)
return np.asarray(residuals) | python | def profile_solver(ml, accel=None, **kwargs):
"""Profile a particular multilevel object.
Parameters
----------
ml : multilevel
Fully constructed multilevel object
accel : function pointer
Pointer to a valid Krylov solver (e.g. gmres, cg)
Returns
-------
residuals : array
Array of residuals for each iteration
See Also
--------
multilevel.psolve, multilevel.solve
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n=100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml,accel=cg)
"""
A = ml.levels[0].A
b = A * sp.rand(A.shape[0], 1)
residuals = []
if accel is None:
ml.solve(b, residuals=residuals, **kwargs)
else:
def callback(x):
residuals.append(norm(np.ravel(b) - np.ravel(A*x)))
M = ml.aspreconditioner(cycle=kwargs.get('cycle', 'V'))
accel(A, b, M=M, callback=callback, **kwargs)
return np.asarray(residuals) | [
"def",
"profile_solver",
"(",
"ml",
",",
"accel",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"A",
"=",
"ml",
".",
"levels",
"[",
"0",
"]",
".",
"A",
"b",
"=",
"A",
"*",
"sp",
".",
"rand",
"(",
"A",
".",
"shape",
"[",
"0",
"]",
",",
"... | Profile a particular multilevel object.
Parameters
----------
ml : multilevel
Fully constructed multilevel object
accel : function pointer
Pointer to a valid Krylov solver (e.g. gmres, cg)
Returns
-------
residuals : array
Array of residuals for each iteration
See Also
--------
multilevel.psolve, multilevel.solve
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags, csr_matrix
>>> from scipy.sparse.linalg import cg
>>> from pyamg.classical import ruge_stuben_solver
>>> from pyamg.util.utils import profile_solver
>>> n=100
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = csr_matrix(spdiags(data,[-1,0,1],n,n))
>>> b = A*np.ones(A.shape[0])
>>> ml = ruge_stuben_solver(A, max_coarse=10)
>>> res = profile_solver(ml,accel=cg) | [
"Profile",
"a",
"particular",
"multilevel",
"object",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L42-L89 | train | 209,268 |
pyamg/pyamg | pyamg/util/utils.py | diag_sparse | def diag_sparse(A):
"""Return a diagonal.
If A is a sparse matrix (e.g. csr_matrix or csc_matrix)
- return the diagonal of A as an array
Otherwise
- return a csr_matrix with A on the diagonal
Parameters
----------
A : sparse matrix or 1d array
General sparse matrix or array of diagonal entries
Returns
-------
B : array or sparse matrix
Diagonal sparse is returned as csr if A is dense otherwise return an
array of the diagonal
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import diag_sparse
>>> d = 2.0*np.ones((3,)).ravel()
>>> print diag_sparse(d).todense()
[[ 2. 0. 0.]
[ 0. 2. 0.]
[ 0. 0. 2.]]
"""
if isspmatrix(A):
return A.diagonal()
else:
if(np.ndim(A) != 1):
raise ValueError('input diagonal array expected to be 1d')
return csr_matrix((np.asarray(A), np.arange(len(A)),
np.arange(len(A)+1)), (len(A), len(A))) | python | def diag_sparse(A):
"""Return a diagonal.
If A is a sparse matrix (e.g. csr_matrix or csc_matrix)
- return the diagonal of A as an array
Otherwise
- return a csr_matrix with A on the diagonal
Parameters
----------
A : sparse matrix or 1d array
General sparse matrix or array of diagonal entries
Returns
-------
B : array or sparse matrix
Diagonal sparse is returned as csr if A is dense otherwise return an
array of the diagonal
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import diag_sparse
>>> d = 2.0*np.ones((3,)).ravel()
>>> print diag_sparse(d).todense()
[[ 2. 0. 0.]
[ 0. 2. 0.]
[ 0. 0. 2.]]
"""
if isspmatrix(A):
return A.diagonal()
else:
if(np.ndim(A) != 1):
raise ValueError('input diagonal array expected to be 1d')
return csr_matrix((np.asarray(A), np.arange(len(A)),
np.arange(len(A)+1)), (len(A), len(A))) | [
"def",
"diag_sparse",
"(",
"A",
")",
":",
"if",
"isspmatrix",
"(",
"A",
")",
":",
"return",
"A",
".",
"diagonal",
"(",
")",
"else",
":",
"if",
"(",
"np",
".",
"ndim",
"(",
"A",
")",
"!=",
"1",
")",
":",
"raise",
"ValueError",
"(",
"'input diagona... | Return a diagonal.
If A is a sparse matrix (e.g. csr_matrix or csc_matrix)
- return the diagonal of A as an array
Otherwise
- return a csr_matrix with A on the diagonal
Parameters
----------
A : sparse matrix or 1d array
General sparse matrix or array of diagonal entries
Returns
-------
B : array or sparse matrix
Diagonal sparse is returned as csr if A is dense otherwise return an
array of the diagonal
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import diag_sparse
>>> d = 2.0*np.ones((3,)).ravel()
>>> print diag_sparse(d).todense()
[[ 2. 0. 0.]
[ 0. 2. 0.]
[ 0. 0. 2.]] | [
"Return",
"a",
"diagonal",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L92-L129 | train | 209,269 |
pyamg/pyamg | pyamg/util/utils.py | scale_rows | def scale_rows(A, v, copy=True):
"""Scale the sparse rows of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with M rows
v : array_like
Array of M scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_rows(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_rows(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_rows, scale_columns
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_columns
- if A is not csr, csc, or bsr, it is converted to csr and sent
to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_rows
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> B = scale_rows(A,5*np.ones((A.shape[0],1)))
"""
v = np.ravel(v)
M, N = A.shape
if not isspmatrix(A):
raise ValueError('scale rows needs a sparse matrix')
if M != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_rows(M, N, A.indptr, A.indices, A.data, v)
elif isspmatrix_bsr(A):
R, C = A.blocksize
bsr_scale_rows(int(M/R), int(N/C), R, C, A.indptr, A.indices,
np.ravel(A.data), v)
elif isspmatrix_csc(A):
pyamg.amg_core.csc_scale_rows(M, N, A.indptr, A.indices, A.data, v)
else:
fmt = A.format
A = scale_rows(csr_matrix(A), v).asformat(fmt)
return A | python | def scale_rows(A, v, copy=True):
"""Scale the sparse rows of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with M rows
v : array_like
Array of M scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_rows(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_rows(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_rows, scale_columns
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_columns
- if A is not csr, csc, or bsr, it is converted to csr and sent
to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_rows
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> B = scale_rows(A,5*np.ones((A.shape[0],1)))
"""
v = np.ravel(v)
M, N = A.shape
if not isspmatrix(A):
raise ValueError('scale rows needs a sparse matrix')
if M != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_rows(M, N, A.indptr, A.indices, A.data, v)
elif isspmatrix_bsr(A):
R, C = A.blocksize
bsr_scale_rows(int(M/R), int(N/C), R, C, A.indptr, A.indices,
np.ravel(A.data), v)
elif isspmatrix_csc(A):
pyamg.amg_core.csc_scale_rows(M, N, A.indptr, A.indices, A.data, v)
else:
fmt = A.format
A = scale_rows(csr_matrix(A), v).asformat(fmt)
return A | [
"def",
"scale_rows",
"(",
"A",
",",
"v",
",",
"copy",
"=",
"True",
")",
":",
"v",
"=",
"np",
".",
"ravel",
"(",
"v",
")",
"M",
",",
"N",
"=",
"A",
".",
"shape",
"if",
"not",
"isspmatrix",
"(",
"A",
")",
":",
"raise",
"ValueError",
"(",
"'scal... | Scale the sparse rows of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with M rows
v : array_like
Array of M scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_rows(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_rows(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_rows, scale_columns
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_columns
- if A is not csr, csc, or bsr, it is converted to csr and sent
to scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_rows
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> B = scale_rows(A,5*np.ones((A.shape[0],1))) | [
"Scale",
"the",
"sparse",
"rows",
"of",
"a",
"matrix",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L132-L202 | train | 209,270 |
pyamg/pyamg | pyamg/util/utils.py | scale_columns | def scale_columns(A, v, copy=True):
"""Scale the sparse columns of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with N rows
v : array_like
Array of N scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_columns(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_columns(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_columns, scale_rows
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_rows
- if A is not csr, csc, or bsr, it is converted to csr and sent to
scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_columns
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> print scale_columns(A,5*np.ones((A.shape[1],1))).todense()
[[ 10. -5. 0. 0.]
[ -5. 10. -5. 0.]
[ 0. -5. 10. -5.]
[ 0. 0. -5. 10.]
[ 0. 0. 0. -5.]]
"""
v = np.ravel(v)
M, N = A.shape
if not isspmatrix(A):
raise ValueError('scale columns needs a sparse matrix')
if N != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_columns(M, N, A.indptr, A.indices, A.data, v)
elif isspmatrix_bsr(A):
R, C = A.blocksize
bsr_scale_columns(int(M/R), int(N/C), R, C, A.indptr, A.indices,
np.ravel(A.data), v)
elif isspmatrix_csc(A):
pyamg.amg_core.csc_scale_columns(M, N, A.indptr, A.indices, A.data, v)
else:
fmt = A.format
A = scale_columns(csr_matrix(A), v).asformat(fmt)
return A | python | def scale_columns(A, v, copy=True):
"""Scale the sparse columns of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with N rows
v : array_like
Array of N scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_columns(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_columns(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_columns, scale_rows
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_rows
- if A is not csr, csc, or bsr, it is converted to csr and sent to
scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_columns
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> print scale_columns(A,5*np.ones((A.shape[1],1))).todense()
[[ 10. -5. 0. 0.]
[ -5. 10. -5. 0.]
[ 0. -5. 10. -5.]
[ 0. 0. -5. 10.]
[ 0. 0. 0. -5.]]
"""
v = np.ravel(v)
M, N = A.shape
if not isspmatrix(A):
raise ValueError('scale columns needs a sparse matrix')
if N != len(v):
raise ValueError('scale vector has incompatible shape')
if copy:
A = A.copy()
A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype))
else:
v = np.asarray(v, dtype=A.dtype)
if isspmatrix_csr(A):
csr_scale_columns(M, N, A.indptr, A.indices, A.data, v)
elif isspmatrix_bsr(A):
R, C = A.blocksize
bsr_scale_columns(int(M/R), int(N/C), R, C, A.indptr, A.indices,
np.ravel(A.data), v)
elif isspmatrix_csc(A):
pyamg.amg_core.csc_scale_columns(M, N, A.indptr, A.indices, A.data, v)
else:
fmt = A.format
A = scale_columns(csr_matrix(A), v).asformat(fmt)
return A | [
"def",
"scale_columns",
"(",
"A",
",",
"v",
",",
"copy",
"=",
"True",
")",
":",
"v",
"=",
"np",
".",
"ravel",
"(",
"v",
")",
"M",
",",
"N",
"=",
"A",
".",
"shape",
"if",
"not",
"isspmatrix",
"(",
"A",
")",
":",
"raise",
"ValueError",
"(",
"'s... | Scale the sparse columns of a matrix.
Parameters
----------
A : sparse matrix
Sparse matrix with N rows
v : array_like
Array of N scales
copy : {True,False}
- If copy=True, then the matrix is copied to a new and different return
matrix (e.g. B=scale_columns(A,v))
- If copy=False, then the matrix is overwritten deeply (e.g.
scale_columns(A,v,copy=False) overwrites A)
Returns
-------
A : sparse matrix
Scaled sparse matrix in original format
See Also
--------
scipy.sparse._sparsetools.csr_scale_columns, scale_rows
Notes
-----
- if A is a csc_matrix, the transpose A.T is passed to scale_rows
- if A is not csr, csc, or bsr, it is converted to csr and sent to
scale_rows
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import spdiags
>>> from pyamg.util.utils import scale_columns
>>> n=5
>>> e = np.ones((n,1)).ravel()
>>> data = [ -1*e, 2*e, -1*e ]
>>> A = spdiags(data,[-1,0,1],n,n-1).tocsr()
>>> print scale_columns(A,5*np.ones((A.shape[1],1))).todense()
[[ 10. -5. 0. 0.]
[ -5. 10. -5. 0.]
[ 0. -5. 10. -5.]
[ 0. 0. -5. 10.]
[ 0. 0. 0. -5.]] | [
"Scale",
"the",
"sparse",
"columns",
"of",
"a",
"matrix",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L205-L280 | train | 209,271 |
pyamg/pyamg | pyamg/util/utils.py | type_prep | def type_prep(upcast_type, varlist):
"""Upcast variables to a type.
Loop over all elements of varlist and convert them to upcasttype
The only difference with pyamg.util.utils.to_type(...), is that scalars
are wrapped into (1,0) arrays. This is desirable when passing
the numpy complex data type to C routines and complex scalars aren't
handled correctly
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import type_prep
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> z = 2.3
>>> varlist = type_prep(upcast(x.dtype, y.dtype), [x, y, z])
"""
varlist = to_type(upcast_type, varlist)
for i in range(len(varlist)):
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]])
return varlist | python | def type_prep(upcast_type, varlist):
"""Upcast variables to a type.
Loop over all elements of varlist and convert them to upcasttype
The only difference with pyamg.util.utils.to_type(...), is that scalars
are wrapped into (1,0) arrays. This is desirable when passing
the numpy complex data type to C routines and complex scalars aren't
handled correctly
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import type_prep
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> z = 2.3
>>> varlist = type_prep(upcast(x.dtype, y.dtype), [x, y, z])
"""
varlist = to_type(upcast_type, varlist)
for i in range(len(varlist)):
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]])
return varlist | [
"def",
"type_prep",
"(",
"upcast_type",
",",
"varlist",
")",
":",
"varlist",
"=",
"to_type",
"(",
"upcast_type",
",",
"varlist",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"varlist",
")",
")",
":",
"if",
"np",
".",
"isscalar",
"(",
"varlist",
"... | Upcast variables to a type.
Loop over all elements of varlist and convert them to upcasttype
The only difference with pyamg.util.utils.to_type(...), is that scalars
are wrapped into (1,0) arrays. This is desirable when passing
the numpy complex data type to C routines and complex scalars aren't
handled correctly
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import type_prep
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> z = 2.3
>>> varlist = type_prep(upcast(x.dtype, y.dtype), [x, y, z]) | [
"Upcast",
"variables",
"to",
"a",
"type",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L433-L475 | train | 209,272 |
pyamg/pyamg | pyamg/util/utils.py | to_type | def to_type(upcast_type, varlist):
"""Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
"""
# convert_type = type(np.array([0], upcast_type)[0])
for i in range(len(varlist)):
# convert scalars to complex
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]], upcast_type)[0]
else:
# convert sparse and dense mats to complex
try:
if varlist[i].dtype != upcast_type:
varlist[i] = varlist[i].astype(upcast_type)
except AttributeError:
warn('Failed to cast in to_type')
pass
return varlist | python | def to_type(upcast_type, varlist):
"""Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y])
"""
# convert_type = type(np.array([0], upcast_type)[0])
for i in range(len(varlist)):
# convert scalars to complex
if np.isscalar(varlist[i]):
varlist[i] = np.array([varlist[i]], upcast_type)[0]
else:
# convert sparse and dense mats to complex
try:
if varlist[i].dtype != upcast_type:
varlist[i] = varlist[i].astype(upcast_type)
except AttributeError:
warn('Failed to cast in to_type')
pass
return varlist | [
"def",
"to_type",
"(",
"upcast_type",
",",
"varlist",
")",
":",
"# convert_type = type(np.array([0], upcast_type)[0])",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"varlist",
")",
")",
":",
"# convert scalars to complex",
"if",
"np",
".",
"isscalar",
"(",
"varlist... | Loop over all elements of varlist and convert them to upcasttype.
Parameters
----------
upcast_type : data type
e.g. complex, float64 or complex128
varlist : list
list may contain arrays, mat's, sparse matrices, or scalars
the elements may be float, int or complex
Returns
-------
Returns upcast-ed varlist to upcast_type
Notes
-----
Useful when harmonizing the types of variables, such as
if A and b are complex, but x,y and z are not.
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import to_type
>>> from scipy.sparse.sputils import upcast
>>> x = np.ones((5,1))
>>> y = 2.0j*np.ones((5,1))
>>> varlist = to_type(upcast(x.dtype, y.dtype), [x, y]) | [
"Loop",
"over",
"all",
"elements",
"of",
"varlist",
"and",
"convert",
"them",
"to",
"upcasttype",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L478-L524 | train | 209,273 |
pyamg/pyamg | pyamg/util/utils.py | get_block_diag | def get_block_diag(A, blocksize, inv_flag=True):
"""Return the block diagonal of A, in array form.
Parameters
----------
A : csr_matrix
assumed to be square
blocksize : int
square block size for the diagonal
inv_flag : bool
if True, return the inverse of the block diagonal
Returns
-------
block_diag : array
block diagonal of A in array form,
array size is (A.shape[0]/blocksize, blocksize, blocksize)
Examples
--------
>>> from scipy import arange
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util import get_block_diag
>>> A = csr_matrix(arange(36).reshape(6,6))
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=False)
>>> print block_diag_inv
[[[ 0. 1.]
[ 6. 7.]]
<BLANKLINE>
[[ 14. 15.]
[ 20. 21.]]
<BLANKLINE>
[[ 28. 29.]
[ 34. 35.]]]
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=True)
"""
if not isspmatrix(A):
raise TypeError('Expected sparse matrix')
if A.shape[0] != A.shape[1]:
raise ValueError("Expected square matrix")
if sp.mod(A.shape[0], blocksize) != 0:
raise ValueError("blocksize and A.shape must be compatible")
# If the block diagonal of A already exists, return that
if hasattr(A, 'block_D_inv') and inv_flag:
if (A.block_D_inv.shape[1] == blocksize) and\
(A.block_D_inv.shape[2] == blocksize) and \
(A.block_D_inv.shape[0] == int(A.shape[0]/blocksize)):
return A.block_D_inv
elif hasattr(A, 'block_D') and (not inv_flag):
if (A.block_D.shape[1] == blocksize) and\
(A.block_D.shape[2] == blocksize) and \
(A.block_D.shape[0] == int(A.shape[0]/blocksize)):
return A.block_D
# Convert to BSR
if not isspmatrix_bsr(A):
A = bsr_matrix(A, blocksize=(blocksize, blocksize))
if A.blocksize != (blocksize, blocksize):
A = A.tobsr(blocksize=(blocksize, blocksize))
# Peel off block diagonal by extracting block entries from the now BSR
# matrix A
A = A.asfptype()
block_diag = sp.zeros((int(A.shape[0]/blocksize), blocksize, blocksize),
dtype=A.dtype)
AAIJ = (sp.arange(1, A.indices.shape[0]+1), A.indices, A.indptr)
shape = (int(A.shape[0]/blocksize), int(A.shape[0]/blocksize))
diag_entries = csr_matrix(AAIJ, shape=shape).diagonal()
diag_entries -= 1
nonzero_mask = (diag_entries != -1)
diag_entries = diag_entries[nonzero_mask]
if diag_entries.shape != (0,):
block_diag[nonzero_mask, :, :] = A.data[diag_entries, :, :]
if inv_flag:
# Invert each block
if block_diag.shape[1] < 7:
# This specialized routine lacks robustness for large matrices
pyamg.amg_core.pinv_array(block_diag.ravel(), block_diag.shape[0],
block_diag.shape[1], 'T')
else:
pinv_array(block_diag)
A.block_D_inv = block_diag
else:
A.block_D = block_diag
return block_diag | python | def get_block_diag(A, blocksize, inv_flag=True):
"""Return the block diagonal of A, in array form.
Parameters
----------
A : csr_matrix
assumed to be square
blocksize : int
square block size for the diagonal
inv_flag : bool
if True, return the inverse of the block diagonal
Returns
-------
block_diag : array
block diagonal of A in array form,
array size is (A.shape[0]/blocksize, blocksize, blocksize)
Examples
--------
>>> from scipy import arange
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util import get_block_diag
>>> A = csr_matrix(arange(36).reshape(6,6))
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=False)
>>> print block_diag_inv
[[[ 0. 1.]
[ 6. 7.]]
<BLANKLINE>
[[ 14. 15.]
[ 20. 21.]]
<BLANKLINE>
[[ 28. 29.]
[ 34. 35.]]]
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=True)
"""
if not isspmatrix(A):
raise TypeError('Expected sparse matrix')
if A.shape[0] != A.shape[1]:
raise ValueError("Expected square matrix")
if sp.mod(A.shape[0], blocksize) != 0:
raise ValueError("blocksize and A.shape must be compatible")
# If the block diagonal of A already exists, return that
if hasattr(A, 'block_D_inv') and inv_flag:
if (A.block_D_inv.shape[1] == blocksize) and\
(A.block_D_inv.shape[2] == blocksize) and \
(A.block_D_inv.shape[0] == int(A.shape[0]/blocksize)):
return A.block_D_inv
elif hasattr(A, 'block_D') and (not inv_flag):
if (A.block_D.shape[1] == blocksize) and\
(A.block_D.shape[2] == blocksize) and \
(A.block_D.shape[0] == int(A.shape[0]/blocksize)):
return A.block_D
# Convert to BSR
if not isspmatrix_bsr(A):
A = bsr_matrix(A, blocksize=(blocksize, blocksize))
if A.blocksize != (blocksize, blocksize):
A = A.tobsr(blocksize=(blocksize, blocksize))
# Peel off block diagonal by extracting block entries from the now BSR
# matrix A
A = A.asfptype()
block_diag = sp.zeros((int(A.shape[0]/blocksize), blocksize, blocksize),
dtype=A.dtype)
AAIJ = (sp.arange(1, A.indices.shape[0]+1), A.indices, A.indptr)
shape = (int(A.shape[0]/blocksize), int(A.shape[0]/blocksize))
diag_entries = csr_matrix(AAIJ, shape=shape).diagonal()
diag_entries -= 1
nonzero_mask = (diag_entries != -1)
diag_entries = diag_entries[nonzero_mask]
if diag_entries.shape != (0,):
block_diag[nonzero_mask, :, :] = A.data[diag_entries, :, :]
if inv_flag:
# Invert each block
if block_diag.shape[1] < 7:
# This specialized routine lacks robustness for large matrices
pyamg.amg_core.pinv_array(block_diag.ravel(), block_diag.shape[0],
block_diag.shape[1], 'T')
else:
pinv_array(block_diag)
A.block_D_inv = block_diag
else:
A.block_D = block_diag
return block_diag | [
"def",
"get_block_diag",
"(",
"A",
",",
"blocksize",
",",
"inv_flag",
"=",
"True",
")",
":",
"if",
"not",
"isspmatrix",
"(",
"A",
")",
":",
"raise",
"TypeError",
"(",
"'Expected sparse matrix'",
")",
"if",
"A",
".",
"shape",
"[",
"0",
"]",
"!=",
"A",
... | Return the block diagonal of A, in array form.
Parameters
----------
A : csr_matrix
assumed to be square
blocksize : int
square block size for the diagonal
inv_flag : bool
if True, return the inverse of the block diagonal
Returns
-------
block_diag : array
block diagonal of A in array form,
array size is (A.shape[0]/blocksize, blocksize, blocksize)
Examples
--------
>>> from scipy import arange
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util import get_block_diag
>>> A = csr_matrix(arange(36).reshape(6,6))
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=False)
>>> print block_diag_inv
[[[ 0. 1.]
[ 6. 7.]]
<BLANKLINE>
[[ 14. 15.]
[ 20. 21.]]
<BLANKLINE>
[[ 28. 29.]
[ 34. 35.]]]
>>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=True) | [
"Return",
"the",
"block",
"diagonal",
"of",
"A",
"in",
"array",
"form",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L590-L679 | train | 209,274 |
pyamg/pyamg | pyamg/util/utils.py | amalgamate | def amalgamate(A, blocksize):
"""Amalgamate matrix A.
Parameters
----------
A : csr_matrix
Matrix to amalgamate
blocksize : int
blocksize to use while amalgamating
Returns
-------
A_amal : csr_matrix
Amalgamated matrix A, first, convert A to BSR with square blocksize
and then return a CSR matrix of ones using the resulting BSR indptr and
indices
Notes
-----
inverse operation of UnAmal for square matrices
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import amalgamate
>>> row = array([0,0,1])
>>> col = array([0,2,1])
>>> data = array([1,2,3])
>>> A = csr_matrix( (data,(row,col)), shape=(4,4) )
>>> A.todense()
matrix([[1, 0, 2, 0],
[0, 3, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
>>> amalgamate(A,2).todense()
matrix([[ 1., 1.],
[ 0., 0.]])
"""
if blocksize == 1:
return A
elif sp.mod(A.shape[0], blocksize) != 0:
raise ValueError("Incompatible blocksize")
A = A.tobsr(blocksize=(blocksize, blocksize))
A.sort_indices()
subI = (np.ones(A.indices.shape), A.indices, A.indptr)
shape = (int(A.shape[0]/A.blocksize[0]),
int(A.shape[1]/A.blocksize[1]))
return csr_matrix(subI, shape=shape) | python | def amalgamate(A, blocksize):
"""Amalgamate matrix A.
Parameters
----------
A : csr_matrix
Matrix to amalgamate
blocksize : int
blocksize to use while amalgamating
Returns
-------
A_amal : csr_matrix
Amalgamated matrix A, first, convert A to BSR with square blocksize
and then return a CSR matrix of ones using the resulting BSR indptr and
indices
Notes
-----
inverse operation of UnAmal for square matrices
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import amalgamate
>>> row = array([0,0,1])
>>> col = array([0,2,1])
>>> data = array([1,2,3])
>>> A = csr_matrix( (data,(row,col)), shape=(4,4) )
>>> A.todense()
matrix([[1, 0, 2, 0],
[0, 3, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
>>> amalgamate(A,2).todense()
matrix([[ 1., 1.],
[ 0., 0.]])
"""
if blocksize == 1:
return A
elif sp.mod(A.shape[0], blocksize) != 0:
raise ValueError("Incompatible blocksize")
A = A.tobsr(blocksize=(blocksize, blocksize))
A.sort_indices()
subI = (np.ones(A.indices.shape), A.indices, A.indptr)
shape = (int(A.shape[0]/A.blocksize[0]),
int(A.shape[1]/A.blocksize[1]))
return csr_matrix(subI, shape=shape) | [
"def",
"amalgamate",
"(",
"A",
",",
"blocksize",
")",
":",
"if",
"blocksize",
"==",
"1",
":",
"return",
"A",
"elif",
"sp",
".",
"mod",
"(",
"A",
".",
"shape",
"[",
"0",
"]",
",",
"blocksize",
")",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"In... | Amalgamate matrix A.
Parameters
----------
A : csr_matrix
Matrix to amalgamate
blocksize : int
blocksize to use while amalgamating
Returns
-------
A_amal : csr_matrix
Amalgamated matrix A, first, convert A to BSR with square blocksize
and then return a CSR matrix of ones using the resulting BSR indptr and
indices
Notes
-----
inverse operation of UnAmal for square matrices
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import amalgamate
>>> row = array([0,0,1])
>>> col = array([0,2,1])
>>> data = array([1,2,3])
>>> A = csr_matrix( (data,(row,col)), shape=(4,4) )
>>> A.todense()
matrix([[1, 0, 2, 0],
[0, 3, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]])
>>> amalgamate(A,2).todense()
matrix([[ 1., 1.],
[ 0., 0.]]) | [
"Amalgamate",
"matrix",
"A",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L682-L732 | train | 209,275 |
pyamg/pyamg | pyamg/util/utils.py | UnAmal | def UnAmal(A, RowsPerBlock, ColsPerBlock):
"""Unamalgamate a CSR A with blocks of 1's.
This operation is equivalent to
replacing each entry of A with ones(RowsPerBlock, ColsPerBlock), i.e., this
is equivalent to setting all of A's nonzeros to 1 and then doing a
Kronecker product between A and ones(RowsPerBlock, ColsPerBlock).
Parameters
----------
A : csr_matrix
Amalgamted matrix
RowsPerBlock : int
Give A blocks of size (RowsPerBlock, ColsPerBlock)
ColsPerBlock : int
Give A blocks of size (RowsPerBlock, ColsPerBlock)
Returns
-------
A : bsr_matrix
Returns A.data[:] = 1, followed by a Kronecker product of A and
ones(RowsPerBlock, ColsPerBlock)
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import UnAmal
>>> row = array([0,0,1,2,2,2])
>>> col = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6])
>>> A = csr_matrix( (data,(row,col)), shape=(3,3) )
>>> A.todense()
matrix([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> UnAmal(A,2,2).todense()
matrix([[ 1., 1., 0., 0., 1., 1.],
[ 1., 1., 0., 0., 1., 1.],
[ 0., 0., 0., 0., 1., 1.],
[ 0., 0., 0., 0., 1., 1.],
[ 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1.]])
"""
data = np.ones((A.indices.shape[0], RowsPerBlock, ColsPerBlock))
blockI = (data, A.indices, A.indptr)
shape = (RowsPerBlock*A.shape[0], ColsPerBlock*A.shape[1])
return bsr_matrix(blockI, shape=shape) | python | def UnAmal(A, RowsPerBlock, ColsPerBlock):
"""Unamalgamate a CSR A with blocks of 1's.
This operation is equivalent to
replacing each entry of A with ones(RowsPerBlock, ColsPerBlock), i.e., this
is equivalent to setting all of A's nonzeros to 1 and then doing a
Kronecker product between A and ones(RowsPerBlock, ColsPerBlock).
Parameters
----------
A : csr_matrix
Amalgamted matrix
RowsPerBlock : int
Give A blocks of size (RowsPerBlock, ColsPerBlock)
ColsPerBlock : int
Give A blocks of size (RowsPerBlock, ColsPerBlock)
Returns
-------
A : bsr_matrix
Returns A.data[:] = 1, followed by a Kronecker product of A and
ones(RowsPerBlock, ColsPerBlock)
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import UnAmal
>>> row = array([0,0,1,2,2,2])
>>> col = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6])
>>> A = csr_matrix( (data,(row,col)), shape=(3,3) )
>>> A.todense()
matrix([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> UnAmal(A,2,2).todense()
matrix([[ 1., 1., 0., 0., 1., 1.],
[ 1., 1., 0., 0., 1., 1.],
[ 0., 0., 0., 0., 1., 1.],
[ 0., 0., 0., 0., 1., 1.],
[ 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1.]])
"""
data = np.ones((A.indices.shape[0], RowsPerBlock, ColsPerBlock))
blockI = (data, A.indices, A.indptr)
shape = (RowsPerBlock*A.shape[0], ColsPerBlock*A.shape[1])
return bsr_matrix(blockI, shape=shape) | [
"def",
"UnAmal",
"(",
"A",
",",
"RowsPerBlock",
",",
"ColsPerBlock",
")",
":",
"data",
"=",
"np",
".",
"ones",
"(",
"(",
"A",
".",
"indices",
".",
"shape",
"[",
"0",
"]",
",",
"RowsPerBlock",
",",
"ColsPerBlock",
")",
")",
"blockI",
"=",
"(",
"data... | Unamalgamate a CSR A with blocks of 1's.
This operation is equivalent to
replacing each entry of A with ones(RowsPerBlock, ColsPerBlock), i.e., this
is equivalent to setting all of A's nonzeros to 1 and then doing a
Kronecker product between A and ones(RowsPerBlock, ColsPerBlock).
Parameters
----------
A : csr_matrix
Amalgamted matrix
RowsPerBlock : int
Give A blocks of size (RowsPerBlock, ColsPerBlock)
ColsPerBlock : int
Give A blocks of size (RowsPerBlock, ColsPerBlock)
Returns
-------
A : bsr_matrix
Returns A.data[:] = 1, followed by a Kronecker product of A and
ones(RowsPerBlock, ColsPerBlock)
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import csr_matrix
>>> from pyamg.util.utils import UnAmal
>>> row = array([0,0,1,2,2,2])
>>> col = array([0,2,2,0,1,2])
>>> data = array([1,2,3,4,5,6])
>>> A = csr_matrix( (data,(row,col)), shape=(3,3) )
>>> A.todense()
matrix([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> UnAmal(A,2,2).todense()
matrix([[ 1., 1., 0., 0., 1., 1.],
[ 1., 1., 0., 0., 1., 1.],
[ 0., 0., 0., 0., 1., 1.],
[ 0., 0., 0., 0., 1., 1.],
[ 1., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 1.]]) | [
"Unamalgamate",
"a",
"CSR",
"A",
"with",
"blocks",
"of",
"1",
"s",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L735-L783 | train | 209,276 |
pyamg/pyamg | pyamg/util/utils.py | print_table | def print_table(table, title='', delim='|', centering='center', col_padding=2,
header=True, headerchar='-'):
"""Print a table from a list of lists representing the rows of a table.
Parameters
----------
table : list
list of lists, e.g. a table with 3 columns and 2 rows could be
[ ['0,0', '0,1', '0,2'], ['1,0', '1,1', '1,2'] ]
title : string
Printed centered above the table
delim : string
character to delimit columns
centering : {'left', 'right', 'center'}
chooses justification for columns
col_padding : int
number of blank spaces to add to each column
header : {True, False}
Does the first entry of table contain column headers?
headerchar : {string}
character to separate column headers from rest of table
Returns
-------
string representing table that's ready to be printed
Notes
-----
The string for the table will have correctly justified columns
with extra padding added into each column entry to ensure columns align.
The characters to delimit the columns can be user defined. This
should be useful for printing convergence data from tests.
Examples
--------
>>> from pyamg.util.utils import print_table
>>> table = [ ['cos(0)', 'cos(pi/2)', 'cos(pi)'], ['0.0', '1.0', '0.0'] ]
>>> table1 = print_table(table) # string to print
>>> table2 = print_table(table, delim='||')
>>> table3 = print_table(table, headerchar='*')
>>> table4 = print_table(table, col_padding=6, centering='left')
"""
table_str = '\n'
# sometimes, the table will be passed in as (title, table)
if isinstance(table, tuple):
title = table[0]
table = table[1]
# Calculate each column's width
colwidths = []
for i in range(len(table)):
# extend colwidths for row i
for k in range(len(table[i]) - len(colwidths)):
colwidths.append(-1)
# Update colwidths if table[i][j] is wider than colwidth[j]
for j in range(len(table[i])):
if len(table[i][j]) > colwidths[j]:
colwidths[j] = len(table[i][j])
# Factor in extra column padding
for i in range(len(colwidths)):
colwidths[i] += col_padding
# Total table width
ttwidth = sum(colwidths) + len(delim)*(len(colwidths)-1)
# Print Title
if len(title) > 0:
title = title.split("\n")
for i in range(len(title)):
table_str += str.center(title[i], ttwidth) + '\n'
table_str += "\n"
# Choose centering scheme
centering = centering.lower()
if centering == 'center':
centering = str.center
if centering == 'right':
centering = str.rjust
if centering == 'left':
centering = str.ljust
if header:
# Append Column Headers
for elmt, elmtwidth in zip(table[0], colwidths):
table_str += centering(str(elmt), elmtwidth) + delim
if table[0] != []:
table_str = table_str[:-len(delim)] + '\n'
# Append Header Separator
# Total Column Width Total Col Delimiter Widths
if len(headerchar) == 0:
headerchar = ' '
table_str += headerchar *\
int(sp.ceil(float(ttwidth)/float(len(headerchar)))) + '\n'
table = table[1:]
for row in table:
for elmt, elmtwidth in zip(row, colwidths):
table_str += centering(str(elmt), elmtwidth) + delim
if row != []:
table_str = table_str[:-len(delim)] + '\n'
else:
table_str += '\n'
return table_str | python | def print_table(table, title='', delim='|', centering='center', col_padding=2,
header=True, headerchar='-'):
"""Print a table from a list of lists representing the rows of a table.
Parameters
----------
table : list
list of lists, e.g. a table with 3 columns and 2 rows could be
[ ['0,0', '0,1', '0,2'], ['1,0', '1,1', '1,2'] ]
title : string
Printed centered above the table
delim : string
character to delimit columns
centering : {'left', 'right', 'center'}
chooses justification for columns
col_padding : int
number of blank spaces to add to each column
header : {True, False}
Does the first entry of table contain column headers?
headerchar : {string}
character to separate column headers from rest of table
Returns
-------
string representing table that's ready to be printed
Notes
-----
The string for the table will have correctly justified columns
with extra padding added into each column entry to ensure columns align.
The characters to delimit the columns can be user defined. This
should be useful for printing convergence data from tests.
Examples
--------
>>> from pyamg.util.utils import print_table
>>> table = [ ['cos(0)', 'cos(pi/2)', 'cos(pi)'], ['0.0', '1.0', '0.0'] ]
>>> table1 = print_table(table) # string to print
>>> table2 = print_table(table, delim='||')
>>> table3 = print_table(table, headerchar='*')
>>> table4 = print_table(table, col_padding=6, centering='left')
"""
table_str = '\n'
# sometimes, the table will be passed in as (title, table)
if isinstance(table, tuple):
title = table[0]
table = table[1]
# Calculate each column's width
colwidths = []
for i in range(len(table)):
# extend colwidths for row i
for k in range(len(table[i]) - len(colwidths)):
colwidths.append(-1)
# Update colwidths if table[i][j] is wider than colwidth[j]
for j in range(len(table[i])):
if len(table[i][j]) > colwidths[j]:
colwidths[j] = len(table[i][j])
# Factor in extra column padding
for i in range(len(colwidths)):
colwidths[i] += col_padding
# Total table width
ttwidth = sum(colwidths) + len(delim)*(len(colwidths)-1)
# Print Title
if len(title) > 0:
title = title.split("\n")
for i in range(len(title)):
table_str += str.center(title[i], ttwidth) + '\n'
table_str += "\n"
# Choose centering scheme
centering = centering.lower()
if centering == 'center':
centering = str.center
if centering == 'right':
centering = str.rjust
if centering == 'left':
centering = str.ljust
if header:
# Append Column Headers
for elmt, elmtwidth in zip(table[0], colwidths):
table_str += centering(str(elmt), elmtwidth) + delim
if table[0] != []:
table_str = table_str[:-len(delim)] + '\n'
# Append Header Separator
# Total Column Width Total Col Delimiter Widths
if len(headerchar) == 0:
headerchar = ' '
table_str += headerchar *\
int(sp.ceil(float(ttwidth)/float(len(headerchar)))) + '\n'
table = table[1:]
for row in table:
for elmt, elmtwidth in zip(row, colwidths):
table_str += centering(str(elmt), elmtwidth) + delim
if row != []:
table_str = table_str[:-len(delim)] + '\n'
else:
table_str += '\n'
return table_str | [
"def",
"print_table",
"(",
"table",
",",
"title",
"=",
"''",
",",
"delim",
"=",
"'|'",
",",
"centering",
"=",
"'center'",
",",
"col_padding",
"=",
"2",
",",
"header",
"=",
"True",
",",
"headerchar",
"=",
"'-'",
")",
":",
"table_str",
"=",
"'\\n'",
"#... | Print a table from a list of lists representing the rows of a table.
Parameters
----------
table : list
list of lists, e.g. a table with 3 columns and 2 rows could be
[ ['0,0', '0,1', '0,2'], ['1,0', '1,1', '1,2'] ]
title : string
Printed centered above the table
delim : string
character to delimit columns
centering : {'left', 'right', 'center'}
chooses justification for columns
col_padding : int
number of blank spaces to add to each column
header : {True, False}
Does the first entry of table contain column headers?
headerchar : {string}
character to separate column headers from rest of table
Returns
-------
string representing table that's ready to be printed
Notes
-----
The string for the table will have correctly justified columns
with extra padding added into each column entry to ensure columns align.
The characters to delimit the columns can be user defined. This
should be useful for printing convergence data from tests.
Examples
--------
>>> from pyamg.util.utils import print_table
>>> table = [ ['cos(0)', 'cos(pi/2)', 'cos(pi)'], ['0.0', '1.0', '0.0'] ]
>>> table1 = print_table(table) # string to print
>>> table2 = print_table(table, delim='||')
>>> table3 = print_table(table, headerchar='*')
>>> table4 = print_table(table, col_padding=6, centering='left') | [
"Print",
"a",
"table",
"from",
"a",
"list",
"of",
"lists",
"representing",
"the",
"rows",
"of",
"a",
"table",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L786-L896 | train | 209,277 |
pyamg/pyamg | pyamg/util/utils.py | Coord2RBM | def Coord2RBM(numNodes, numPDEs, x, y, z):
"""Convert 2D or 3D coordinates into Rigid body modes.
For use as near nullspace modes in elasticity AMG solvers.
Parameters
----------
numNodes : int
Number of nodes
numPDEs :
Number of dofs per node
x,y,z : array_like
Coordinate vectors
Returns
-------
rbm : matrix
A matrix of size (numNodes*numPDEs) x (1 | 6) containing the 6 rigid
body modes
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import Coord2RBM
>>> a = np.array([0,1,2])
>>> Coord2RBM(3,6,a,a,a)
matrix([[ 1., 0., 0., 0., 0., -0.],
[ 0., 1., 0., -0., 0., 0.],
[ 0., 0., 1., 0., -0., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.],
[ 1., 0., 0., 0., 1., -1.],
[ 0., 1., 0., -1., 0., 1.],
[ 0., 0., 1., 1., -1., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.],
[ 1., 0., 0., 0., 2., -2.],
[ 0., 1., 0., -2., 0., 2.],
[ 0., 0., 1., 2., -2., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.]])
"""
# check inputs
if(numPDEs == 1):
numcols = 1
elif((numPDEs == 3) or (numPDEs == 6)):
numcols = 6
else:
raise ValueError("Coord2RBM(...) only supports 1, 3 or 6 PDEs per\
spatial location,i.e. numPDEs = [1 | 3 | 6].\
You've entered " + str(numPDEs) + ".")
if((max(x.shape) != numNodes) or
(max(y.shape) != numNodes) or
(max(z.shape) != numNodes)):
raise ValueError("Coord2RBM(...) requires coordinate vectors of equal\
length. Length must be numNodes = " + str(numNodes))
# if( (min(x.shape) != 1) or (min(y.shape) != 1) or (min(z.shape) != 1) ):
# raise ValueError("Coord2RBM(...) requires coordinate vectors that are
# (numNodes x 1) or (1 x numNodes).")
# preallocate rbm
rbm = np.mat(np.zeros((numNodes*numPDEs, numcols)))
for node in range(numNodes):
dof = node*numPDEs
if(numPDEs == 1):
rbm[node] = 1.0
if(numPDEs == 6):
for ii in range(3, 6): # lower half = [ 0 I ]
for jj in range(0, 6):
if(ii == jj):
rbm[dof+ii, jj] = 1.0
else:
rbm[dof+ii, jj] = 0.0
if((numPDEs == 3) or (numPDEs == 6)):
for ii in range(0, 3): # upper left = [ I ]
for jj in range(0, 3):
if(ii == jj):
rbm[dof+ii, jj] = 1.0
else:
rbm[dof+ii, jj] = 0.0
for ii in range(0, 3): # upper right = [ Q ]
for jj in range(3, 6):
if(ii == (jj-3)):
rbm[dof+ii, jj] = 0.0
else:
if((ii+jj) == 4):
rbm[dof+ii, jj] = z[node]
elif((ii+jj) == 5):
rbm[dof+ii, jj] = y[node]
elif((ii+jj) == 6):
rbm[dof+ii, jj] = x[node]
else:
rbm[dof+ii, jj] = 0.0
ii = 0
jj = 5
rbm[dof+ii, jj] *= -1.0
ii = 1
jj = 3
rbm[dof+ii, jj] *= -1.0
ii = 2
jj = 4
rbm[dof+ii, jj] *= -1.0
return rbm | python | def Coord2RBM(numNodes, numPDEs, x, y, z):
"""Convert 2D or 3D coordinates into Rigid body modes.
For use as near nullspace modes in elasticity AMG solvers.
Parameters
----------
numNodes : int
Number of nodes
numPDEs :
Number of dofs per node
x,y,z : array_like
Coordinate vectors
Returns
-------
rbm : matrix
A matrix of size (numNodes*numPDEs) x (1 | 6) containing the 6 rigid
body modes
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import Coord2RBM
>>> a = np.array([0,1,2])
>>> Coord2RBM(3,6,a,a,a)
matrix([[ 1., 0., 0., 0., 0., -0.],
[ 0., 1., 0., -0., 0., 0.],
[ 0., 0., 1., 0., -0., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.],
[ 1., 0., 0., 0., 1., -1.],
[ 0., 1., 0., -1., 0., 1.],
[ 0., 0., 1., 1., -1., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.],
[ 1., 0., 0., 0., 2., -2.],
[ 0., 1., 0., -2., 0., 2.],
[ 0., 0., 1., 2., -2., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.]])
"""
# check inputs
if(numPDEs == 1):
numcols = 1
elif((numPDEs == 3) or (numPDEs == 6)):
numcols = 6
else:
raise ValueError("Coord2RBM(...) only supports 1, 3 or 6 PDEs per\
spatial location,i.e. numPDEs = [1 | 3 | 6].\
You've entered " + str(numPDEs) + ".")
if((max(x.shape) != numNodes) or
(max(y.shape) != numNodes) or
(max(z.shape) != numNodes)):
raise ValueError("Coord2RBM(...) requires coordinate vectors of equal\
length. Length must be numNodes = " + str(numNodes))
# if( (min(x.shape) != 1) or (min(y.shape) != 1) or (min(z.shape) != 1) ):
# raise ValueError("Coord2RBM(...) requires coordinate vectors that are
# (numNodes x 1) or (1 x numNodes).")
# preallocate rbm
rbm = np.mat(np.zeros((numNodes*numPDEs, numcols)))
for node in range(numNodes):
dof = node*numPDEs
if(numPDEs == 1):
rbm[node] = 1.0
if(numPDEs == 6):
for ii in range(3, 6): # lower half = [ 0 I ]
for jj in range(0, 6):
if(ii == jj):
rbm[dof+ii, jj] = 1.0
else:
rbm[dof+ii, jj] = 0.0
if((numPDEs == 3) or (numPDEs == 6)):
for ii in range(0, 3): # upper left = [ I ]
for jj in range(0, 3):
if(ii == jj):
rbm[dof+ii, jj] = 1.0
else:
rbm[dof+ii, jj] = 0.0
for ii in range(0, 3): # upper right = [ Q ]
for jj in range(3, 6):
if(ii == (jj-3)):
rbm[dof+ii, jj] = 0.0
else:
if((ii+jj) == 4):
rbm[dof+ii, jj] = z[node]
elif((ii+jj) == 5):
rbm[dof+ii, jj] = y[node]
elif((ii+jj) == 6):
rbm[dof+ii, jj] = x[node]
else:
rbm[dof+ii, jj] = 0.0
ii = 0
jj = 5
rbm[dof+ii, jj] *= -1.0
ii = 1
jj = 3
rbm[dof+ii, jj] *= -1.0
ii = 2
jj = 4
rbm[dof+ii, jj] *= -1.0
return rbm | [
"def",
"Coord2RBM",
"(",
"numNodes",
",",
"numPDEs",
",",
"x",
",",
"y",
",",
"z",
")",
":",
"# check inputs",
"if",
"(",
"numPDEs",
"==",
"1",
")",
":",
"numcols",
"=",
"1",
"elif",
"(",
"(",
"numPDEs",
"==",
"3",
")",
"or",
"(",
"numPDEs",
"=="... | Convert 2D or 3D coordinates into Rigid body modes.
For use as near nullspace modes in elasticity AMG solvers.
Parameters
----------
numNodes : int
Number of nodes
numPDEs :
Number of dofs per node
x,y,z : array_like
Coordinate vectors
Returns
-------
rbm : matrix
A matrix of size (numNodes*numPDEs) x (1 | 6) containing the 6 rigid
body modes
Examples
--------
>>> import numpy as np
>>> from pyamg.util.utils import Coord2RBM
>>> a = np.array([0,1,2])
>>> Coord2RBM(3,6,a,a,a)
matrix([[ 1., 0., 0., 0., 0., -0.],
[ 0., 1., 0., -0., 0., 0.],
[ 0., 0., 1., 0., -0., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.],
[ 1., 0., 0., 0., 1., -1.],
[ 0., 1., 0., -1., 0., 1.],
[ 0., 0., 1., 1., -1., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.],
[ 1., 0., 0., 0., 2., -2.],
[ 0., 1., 0., -2., 0., 2.],
[ 0., 0., 1., 2., -2., 0.],
[ 0., 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 0., 1.]]) | [
"Convert",
"2D",
"or",
"3D",
"coordinates",
"into",
"Rigid",
"body",
"modes",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L997-L1114 | train | 209,278 |
pyamg/pyamg | pyamg/util/utils.py | relaxation_as_linear_operator | def relaxation_as_linear_operator(method, A, b):
"""Create a linear operator that applies a relaxation method for the given right-hand-side.
Parameters
----------
methods : {tuple or string}
Relaxation descriptor: Each tuple must be of the form ('method','opts')
where 'method' is the name of a supported smoother, e.g., gauss_seidel,
and 'opts' a dict of keyword arguments to the smoother, e.g., opts =
{'sweep':symmetric}. If string, must be that of a supported smoother,
e.g., gauss_seidel.
Returns
-------
linear operator that applies the relaxation method to a vector for a
fixed right-hand-side, b.
Notes
-----
This method is primarily used to improve B during the aggregation setup
phase. Here b = 0, and each relaxation call can improve the quality of B,
especially near the boundaries.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import relaxation_as_linear_operator
>>> import numpy as np
>>> A = poisson((100,100), format='csr') # matrix
>>> B = np.ones((A.shape[0],1)) # Candidate vector
>>> b = np.zeros((A.shape[0])) # RHS
>>> relax = relaxation_as_linear_operator('gauss_seidel', A, b)
>>> B = relax*B
"""
from pyamg import relaxation
from scipy.sparse.linalg.interface import LinearOperator
import pyamg.multilevel
def unpack_arg(v):
if isinstance(v, tuple):
return v[0], v[1]
else:
return v, {}
# setup variables
accepted_methods = ['gauss_seidel', 'block_gauss_seidel', 'sor',
'gauss_seidel_ne', 'gauss_seidel_nr', 'jacobi',
'block_jacobi', 'richardson', 'schwarz',
'strength_based_schwarz', 'jacobi_ne']
b = np.array(b, dtype=A.dtype)
fn, kwargs = unpack_arg(method)
lvl = pyamg.multilevel_solver.level()
lvl.A = A
# Retrieve setup call from relaxation.smoothing for this relaxation method
if not accepted_methods.__contains__(fn):
raise NameError("invalid relaxation method: ", fn)
try:
setup_smoother = getattr(relaxation.smoothing, 'setup_' + fn)
except NameError:
raise NameError("invalid presmoother method: ", fn)
# Get relaxation routine that takes only (A, x, b) as parameters
relax = setup_smoother(lvl, **kwargs)
# Define matvec
def matvec(x):
xcopy = x.copy()
relax(A, xcopy, b)
return xcopy
return LinearOperator(A.shape, matvec, dtype=A.dtype) | python | def relaxation_as_linear_operator(method, A, b):
"""Create a linear operator that applies a relaxation method for the given right-hand-side.
Parameters
----------
methods : {tuple or string}
Relaxation descriptor: Each tuple must be of the form ('method','opts')
where 'method' is the name of a supported smoother, e.g., gauss_seidel,
and 'opts' a dict of keyword arguments to the smoother, e.g., opts =
{'sweep':symmetric}. If string, must be that of a supported smoother,
e.g., gauss_seidel.
Returns
-------
linear operator that applies the relaxation method to a vector for a
fixed right-hand-side, b.
Notes
-----
This method is primarily used to improve B during the aggregation setup
phase. Here b = 0, and each relaxation call can improve the quality of B,
especially near the boundaries.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import relaxation_as_linear_operator
>>> import numpy as np
>>> A = poisson((100,100), format='csr') # matrix
>>> B = np.ones((A.shape[0],1)) # Candidate vector
>>> b = np.zeros((A.shape[0])) # RHS
>>> relax = relaxation_as_linear_operator('gauss_seidel', A, b)
>>> B = relax*B
"""
from pyamg import relaxation
from scipy.sparse.linalg.interface import LinearOperator
import pyamg.multilevel
def unpack_arg(v):
if isinstance(v, tuple):
return v[0], v[1]
else:
return v, {}
# setup variables
accepted_methods = ['gauss_seidel', 'block_gauss_seidel', 'sor',
'gauss_seidel_ne', 'gauss_seidel_nr', 'jacobi',
'block_jacobi', 'richardson', 'schwarz',
'strength_based_schwarz', 'jacobi_ne']
b = np.array(b, dtype=A.dtype)
fn, kwargs = unpack_arg(method)
lvl = pyamg.multilevel_solver.level()
lvl.A = A
# Retrieve setup call from relaxation.smoothing for this relaxation method
if not accepted_methods.__contains__(fn):
raise NameError("invalid relaxation method: ", fn)
try:
setup_smoother = getattr(relaxation.smoothing, 'setup_' + fn)
except NameError:
raise NameError("invalid presmoother method: ", fn)
# Get relaxation routine that takes only (A, x, b) as parameters
relax = setup_smoother(lvl, **kwargs)
# Define matvec
def matvec(x):
xcopy = x.copy()
relax(A, xcopy, b)
return xcopy
return LinearOperator(A.shape, matvec, dtype=A.dtype) | [
"def",
"relaxation_as_linear_operator",
"(",
"method",
",",
"A",
",",
"b",
")",
":",
"from",
"pyamg",
"import",
"relaxation",
"from",
"scipy",
".",
"sparse",
".",
"linalg",
".",
"interface",
"import",
"LinearOperator",
"import",
"pyamg",
".",
"multilevel",
"de... | Create a linear operator that applies a relaxation method for the given right-hand-side.
Parameters
----------
methods : {tuple or string}
Relaxation descriptor: Each tuple must be of the form ('method','opts')
where 'method' is the name of a supported smoother, e.g., gauss_seidel,
and 'opts' a dict of keyword arguments to the smoother, e.g., opts =
{'sweep':symmetric}. If string, must be that of a supported smoother,
e.g., gauss_seidel.
Returns
-------
linear operator that applies the relaxation method to a vector for a
fixed right-hand-side, b.
Notes
-----
This method is primarily used to improve B during the aggregation setup
phase. Here b = 0, and each relaxation call can improve the quality of B,
especially near the boundaries.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import relaxation_as_linear_operator
>>> import numpy as np
>>> A = poisson((100,100), format='csr') # matrix
>>> B = np.ones((A.shape[0],1)) # Candidate vector
>>> b = np.zeros((A.shape[0])) # RHS
>>> relax = relaxation_as_linear_operator('gauss_seidel', A, b)
>>> B = relax*B | [
"Create",
"a",
"linear",
"operator",
"that",
"applies",
"a",
"relaxation",
"method",
"for",
"the",
"given",
"right",
"-",
"hand",
"-",
"side",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L1117-L1189 | train | 209,279 |
pyamg/pyamg | pyamg/util/utils.py | scale_T | def scale_T(T, P_I, I_F):
"""Scale T with a block diagonal matrix.
Helper function that scales T with a right multiplication by a block
diagonal inverse, so that T is the identity at C-node rows.
Parameters
----------
T : {bsr_matrix}
Tentative prolongator, with square blocks in the BSR data structure,
and a non-overlapping block-diagonal structure
P_I : {bsr_matrix}
Interpolation operator that carries out only simple injection from the
coarse grid to fine grid Cpts nodes
I_F : {bsr_matrix}
Identity operator on Fpts, i.e., the action of this matrix zeros
out entries in a vector at all Cpts, leaving Fpts untouched
Returns
-------
T : {bsr_matrix}
Tentative prolongator scaled to be identity at C-pt nodes
Examples
--------
>>> from scipy.sparse import csr_matrix, bsr_matrix
>>> from scipy import matrix, array
>>> from pyamg.util.utils import scale_T
>>> T = matrix([[ 1.0, 0., 0. ],
... [ 0.5, 0., 0. ],
... [ 0. , 1., 0. ],
... [ 0. , 0.5, 0. ],
... [ 0. , 0., 1. ],
... [ 0. , 0., 0.25 ]])
>>> P_I = matrix([[ 0., 0., 0. ],
... [ 1., 0., 0. ],
... [ 0., 1., 0. ],
... [ 0., 0., 0. ],
... [ 0., 0., 0. ],
... [ 0., 0., 1. ]])
>>> I_F = matrix([[ 1., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 1., 0., 0.],
... [ 0., 0., 0., 0., 1., 0.],
... [ 0., 0., 0., 0., 0., 0.]])
>>> scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F)).todense()
matrix([[ 2. , 0. , 0. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0.5, 0. ],
[ 0. , 0. , 4. ],
[ 0. , 0. , 1. ]])
Notes
-----
This routine is primarily used in
pyamg.aggregation.smooth.energy_prolongation_smoother, where it is used to
generate a suitable initial guess for the energy-minimization process, when
root-node style SA is used. This function, scale_T, takes an existing
tentative prolongator and ensures that it injects from the coarse-grid to
fine-grid root-nodes.
When generating initial guesses for root-node style prolongation operators,
this function is usually called after pyamg.uti.utils.filter_operator
This function assumes that the eventual coarse-grid nullspace vectors
equal coarse-grid injection applied to the fine-grid nullspace vectors.
"""
if not isspmatrix_bsr(T):
raise TypeError('Expected BSR matrix T')
elif T.blocksize[0] != T.blocksize[1]:
raise TypeError('Expected BSR matrix T with square blocks')
if not isspmatrix_bsr(P_I):
raise TypeError('Expected BSR matrix P_I')
elif P_I.blocksize[0] != P_I.blocksize[1]:
raise TypeError('Expected BSR matrix P_I with square blocks')
if not isspmatrix_bsr(I_F):
raise TypeError('Expected BSR matrix I_F')
elif I_F.blocksize[0] != I_F.blocksize[1]:
raise TypeError('Expected BSR matrix I_F with square blocks')
if (I_F.blocksize[0] != P_I.blocksize[0]) or\
(I_F.blocksize[0] != T.blocksize[0]):
raise TypeError('Expected identical blocksize in I_F, P_I and T')
# Only do if we have a non-trivial coarse-grid
if P_I.nnz > 0:
# Construct block diagonal inverse D
D = P_I.T*T
if D.nnz > 0:
# changes D in place
pinv_array(D.data)
# Scale T to be identity at root-nodes
T = T*D
# Ensure coarse-grid injection
T = I_F*T + P_I
return T | python | def scale_T(T, P_I, I_F):
"""Scale T with a block diagonal matrix.
Helper function that scales T with a right multiplication by a block
diagonal inverse, so that T is the identity at C-node rows.
Parameters
----------
T : {bsr_matrix}
Tentative prolongator, with square blocks in the BSR data structure,
and a non-overlapping block-diagonal structure
P_I : {bsr_matrix}
Interpolation operator that carries out only simple injection from the
coarse grid to fine grid Cpts nodes
I_F : {bsr_matrix}
Identity operator on Fpts, i.e., the action of this matrix zeros
out entries in a vector at all Cpts, leaving Fpts untouched
Returns
-------
T : {bsr_matrix}
Tentative prolongator scaled to be identity at C-pt nodes
Examples
--------
>>> from scipy.sparse import csr_matrix, bsr_matrix
>>> from scipy import matrix, array
>>> from pyamg.util.utils import scale_T
>>> T = matrix([[ 1.0, 0., 0. ],
... [ 0.5, 0., 0. ],
... [ 0. , 1., 0. ],
... [ 0. , 0.5, 0. ],
... [ 0. , 0., 1. ],
... [ 0. , 0., 0.25 ]])
>>> P_I = matrix([[ 0., 0., 0. ],
... [ 1., 0., 0. ],
... [ 0., 1., 0. ],
... [ 0., 0., 0. ],
... [ 0., 0., 0. ],
... [ 0., 0., 1. ]])
>>> I_F = matrix([[ 1., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 1., 0., 0.],
... [ 0., 0., 0., 0., 1., 0.],
... [ 0., 0., 0., 0., 0., 0.]])
>>> scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F)).todense()
matrix([[ 2. , 0. , 0. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0.5, 0. ],
[ 0. , 0. , 4. ],
[ 0. , 0. , 1. ]])
Notes
-----
This routine is primarily used in
pyamg.aggregation.smooth.energy_prolongation_smoother, where it is used to
generate a suitable initial guess for the energy-minimization process, when
root-node style SA is used. This function, scale_T, takes an existing
tentative prolongator and ensures that it injects from the coarse-grid to
fine-grid root-nodes.
When generating initial guesses for root-node style prolongation operators,
this function is usually called after pyamg.uti.utils.filter_operator
This function assumes that the eventual coarse-grid nullspace vectors
equal coarse-grid injection applied to the fine-grid nullspace vectors.
"""
if not isspmatrix_bsr(T):
raise TypeError('Expected BSR matrix T')
elif T.blocksize[0] != T.blocksize[1]:
raise TypeError('Expected BSR matrix T with square blocks')
if not isspmatrix_bsr(P_I):
raise TypeError('Expected BSR matrix P_I')
elif P_I.blocksize[0] != P_I.blocksize[1]:
raise TypeError('Expected BSR matrix P_I with square blocks')
if not isspmatrix_bsr(I_F):
raise TypeError('Expected BSR matrix I_F')
elif I_F.blocksize[0] != I_F.blocksize[1]:
raise TypeError('Expected BSR matrix I_F with square blocks')
if (I_F.blocksize[0] != P_I.blocksize[0]) or\
(I_F.blocksize[0] != T.blocksize[0]):
raise TypeError('Expected identical blocksize in I_F, P_I and T')
# Only do if we have a non-trivial coarse-grid
if P_I.nnz > 0:
# Construct block diagonal inverse D
D = P_I.T*T
if D.nnz > 0:
# changes D in place
pinv_array(D.data)
# Scale T to be identity at root-nodes
T = T*D
# Ensure coarse-grid injection
T = I_F*T + P_I
return T | [
"def",
"scale_T",
"(",
"T",
",",
"P_I",
",",
"I_F",
")",
":",
"if",
"not",
"isspmatrix_bsr",
"(",
"T",
")",
":",
"raise",
"TypeError",
"(",
"'Expected BSR matrix T'",
")",
"elif",
"T",
".",
"blocksize",
"[",
"0",
"]",
"!=",
"T",
".",
"blocksize",
"["... | Scale T with a block diagonal matrix.
Helper function that scales T with a right multiplication by a block
diagonal inverse, so that T is the identity at C-node rows.
Parameters
----------
T : {bsr_matrix}
Tentative prolongator, with square blocks in the BSR data structure,
and a non-overlapping block-diagonal structure
P_I : {bsr_matrix}
Interpolation operator that carries out only simple injection from the
coarse grid to fine grid Cpts nodes
I_F : {bsr_matrix}
Identity operator on Fpts, i.e., the action of this matrix zeros
out entries in a vector at all Cpts, leaving Fpts untouched
Returns
-------
T : {bsr_matrix}
Tentative prolongator scaled to be identity at C-pt nodes
Examples
--------
>>> from scipy.sparse import csr_matrix, bsr_matrix
>>> from scipy import matrix, array
>>> from pyamg.util.utils import scale_T
>>> T = matrix([[ 1.0, 0., 0. ],
... [ 0.5, 0., 0. ],
... [ 0. , 1., 0. ],
... [ 0. , 0.5, 0. ],
... [ 0. , 0., 1. ],
... [ 0. , 0., 0.25 ]])
>>> P_I = matrix([[ 0., 0., 0. ],
... [ 1., 0., 0. ],
... [ 0., 1., 0. ],
... [ 0., 0., 0. ],
... [ 0., 0., 0. ],
... [ 0., 0., 1. ]])
>>> I_F = matrix([[ 1., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 0., 0., 0.],
... [ 0., 0., 0., 1., 0., 0.],
... [ 0., 0., 0., 0., 1., 0.],
... [ 0., 0., 0., 0., 0., 0.]])
>>> scale_T(bsr_matrix(T), bsr_matrix(P_I), bsr_matrix(I_F)).todense()
matrix([[ 2. , 0. , 0. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , 0. ],
[ 0. , 0.5, 0. ],
[ 0. , 0. , 4. ],
[ 0. , 0. , 1. ]])
Notes
-----
This routine is primarily used in
pyamg.aggregation.smooth.energy_prolongation_smoother, where it is used to
generate a suitable initial guess for the energy-minimization process, when
root-node style SA is used. This function, scale_T, takes an existing
tentative prolongator and ensures that it injects from the coarse-grid to
fine-grid root-nodes.
When generating initial guesses for root-node style prolongation operators,
this function is usually called after pyamg.uti.utils.filter_operator
This function assumes that the eventual coarse-grid nullspace vectors
equal coarse-grid injection applied to the fine-grid nullspace vectors. | [
"Scale",
"T",
"with",
"a",
"block",
"diagonal",
"matrix",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L1347-L1447 | train | 209,280 |
pyamg/pyamg | pyamg/util/utils.py | compute_BtBinv | def compute_BtBinv(B, C):
"""Create block inverses.
Helper function that creates inv(B_i.T B_i) for each block row i in C,
where B_i is B restricted to the sparsity pattern of block row i.
Parameters
----------
B : {array}
(M,k) array, typically near-nullspace modes for coarse grid, i.e., B_c.
C : {csr_matrix, bsr_matrix}
Sparse NxM matrix, whose sparsity structure (i.e., matrix graph)
is used to determine BtBinv.
Returns
-------
BtBinv : {array}
BtBinv[i] = inv(B_i.T B_i), where B_i is B restricted to the nonzero
pattern of block row i in C.
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.utils import compute_BtBinv
>>> T = array([[ 1., 0.],
... [ 1., 0.],
... [ 0., .5],
... [ 0., .25]])
>>> T = bsr_matrix(T)
>>> B = array([[1.],[2.]])
>>> compute_BtBinv(B, T)
array([[[ 1. ]],
<BLANKLINE>
[[ 1. ]],
<BLANKLINE>
[[ 0.25]],
<BLANKLINE>
[[ 0.25]]])
Notes
-----
The principal calling routines are
aggregation.smooth.energy_prolongation_smoother, and
util.utils.filter_operator.
BtBinv is used in the prolongation smoothing process that incorporates B
into the span of prolongation with row-wise projection operators. It is
these projection operators that BtBinv is part of.
"""
if not isspmatrix_bsr(C) and not isspmatrix_csr(C):
raise TypeError('Expected bsr_matrix or csr_matrix for C')
if C.shape[1] != B.shape[0]:
raise TypeError('Expected matching dimensions such that C*B')
# Problem parameters
if isspmatrix_bsr(C):
ColsPerBlock = C.blocksize[1]
RowsPerBlock = C.blocksize[0]
else:
ColsPerBlock = 1
RowsPerBlock = 1
Ncoarse = C.shape[1]
Nfine = C.shape[0]
NullDim = B.shape[1]
Nnodes = int(Nfine/RowsPerBlock)
# Construct BtB
BtBinv = np.zeros((Nnodes, NullDim, NullDim), dtype=B.dtype)
BsqCols = sum(range(NullDim+1))
Bsq = np.zeros((Ncoarse, BsqCols), dtype=B.dtype)
counter = 0
for i in range(NullDim):
for j in range(i, NullDim):
Bsq[:, counter] = np.conjugate(np.ravel(np.asarray(B[:, i]))) * \
np.ravel(np.asarray(B[:, j]))
counter = counter + 1
# This specialized C-routine calculates (B.T B) for each row using Bsq
pyamg.amg_core.calc_BtB(NullDim, Nnodes, ColsPerBlock,
np.ravel(np.asarray(Bsq)),
BsqCols, np.ravel(np.asarray(BtBinv)),
C.indptr, C.indices)
# Invert each block of BtBinv, noting that amg_core.calc_BtB(...) returns
# values in column-major form, thus necessitating the deep transpose
# This is the old call to a specialized routine, but lacks robustness
# pyamg.amg_core.pinv_array(np.ravel(BtBinv), Nnodes, NullDim, 'F')
BtBinv = BtBinv.transpose((0, 2, 1)).copy()
pinv_array(BtBinv)
return BtBinv | python | def compute_BtBinv(B, C):
"""Create block inverses.
Helper function that creates inv(B_i.T B_i) for each block row i in C,
where B_i is B restricted to the sparsity pattern of block row i.
Parameters
----------
B : {array}
(M,k) array, typically near-nullspace modes for coarse grid, i.e., B_c.
C : {csr_matrix, bsr_matrix}
Sparse NxM matrix, whose sparsity structure (i.e., matrix graph)
is used to determine BtBinv.
Returns
-------
BtBinv : {array}
BtBinv[i] = inv(B_i.T B_i), where B_i is B restricted to the nonzero
pattern of block row i in C.
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.utils import compute_BtBinv
>>> T = array([[ 1., 0.],
... [ 1., 0.],
... [ 0., .5],
... [ 0., .25]])
>>> T = bsr_matrix(T)
>>> B = array([[1.],[2.]])
>>> compute_BtBinv(B, T)
array([[[ 1. ]],
<BLANKLINE>
[[ 1. ]],
<BLANKLINE>
[[ 0.25]],
<BLANKLINE>
[[ 0.25]]])
Notes
-----
The principal calling routines are
aggregation.smooth.energy_prolongation_smoother, and
util.utils.filter_operator.
BtBinv is used in the prolongation smoothing process that incorporates B
into the span of prolongation with row-wise projection operators. It is
these projection operators that BtBinv is part of.
"""
if not isspmatrix_bsr(C) and not isspmatrix_csr(C):
raise TypeError('Expected bsr_matrix or csr_matrix for C')
if C.shape[1] != B.shape[0]:
raise TypeError('Expected matching dimensions such that C*B')
# Problem parameters
if isspmatrix_bsr(C):
ColsPerBlock = C.blocksize[1]
RowsPerBlock = C.blocksize[0]
else:
ColsPerBlock = 1
RowsPerBlock = 1
Ncoarse = C.shape[1]
Nfine = C.shape[0]
NullDim = B.shape[1]
Nnodes = int(Nfine/RowsPerBlock)
# Construct BtB
BtBinv = np.zeros((Nnodes, NullDim, NullDim), dtype=B.dtype)
BsqCols = sum(range(NullDim+1))
Bsq = np.zeros((Ncoarse, BsqCols), dtype=B.dtype)
counter = 0
for i in range(NullDim):
for j in range(i, NullDim):
Bsq[:, counter] = np.conjugate(np.ravel(np.asarray(B[:, i]))) * \
np.ravel(np.asarray(B[:, j]))
counter = counter + 1
# This specialized C-routine calculates (B.T B) for each row using Bsq
pyamg.amg_core.calc_BtB(NullDim, Nnodes, ColsPerBlock,
np.ravel(np.asarray(Bsq)),
BsqCols, np.ravel(np.asarray(BtBinv)),
C.indptr, C.indices)
# Invert each block of BtBinv, noting that amg_core.calc_BtB(...) returns
# values in column-major form, thus necessitating the deep transpose
# This is the old call to a specialized routine, but lacks robustness
# pyamg.amg_core.pinv_array(np.ravel(BtBinv), Nnodes, NullDim, 'F')
BtBinv = BtBinv.transpose((0, 2, 1)).copy()
pinv_array(BtBinv)
return BtBinv | [
"def",
"compute_BtBinv",
"(",
"B",
",",
"C",
")",
":",
"if",
"not",
"isspmatrix_bsr",
"(",
"C",
")",
"and",
"not",
"isspmatrix_csr",
"(",
"C",
")",
":",
"raise",
"TypeError",
"(",
"'Expected bsr_matrix or csr_matrix for C'",
")",
"if",
"C",
".",
"shape",
"... | Create block inverses.
Helper function that creates inv(B_i.T B_i) for each block row i in C,
where B_i is B restricted to the sparsity pattern of block row i.
Parameters
----------
B : {array}
(M,k) array, typically near-nullspace modes for coarse grid, i.e., B_c.
C : {csr_matrix, bsr_matrix}
Sparse NxM matrix, whose sparsity structure (i.e., matrix graph)
is used to determine BtBinv.
Returns
-------
BtBinv : {array}
BtBinv[i] = inv(B_i.T B_i), where B_i is B restricted to the nonzero
pattern of block row i in C.
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import bsr_matrix
>>> from pyamg.util.utils import compute_BtBinv
>>> T = array([[ 1., 0.],
... [ 1., 0.],
... [ 0., .5],
... [ 0., .25]])
>>> T = bsr_matrix(T)
>>> B = array([[1.],[2.]])
>>> compute_BtBinv(B, T)
array([[[ 1. ]],
<BLANKLINE>
[[ 1. ]],
<BLANKLINE>
[[ 0.25]],
<BLANKLINE>
[[ 0.25]]])
Notes
-----
The principal calling routines are
aggregation.smooth.energy_prolongation_smoother, and
util.utils.filter_operator.
BtBinv is used in the prolongation smoothing process that incorporates B
into the span of prolongation with row-wise projection operators. It is
these projection operators that BtBinv is part of. | [
"Create",
"block",
"inverses",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L1599-L1690 | train | 209,281 |
pyamg/pyamg | pyamg/util/utils.py | eliminate_diag_dom_nodes | def eliminate_diag_dom_nodes(A, C, theta=1.02):
r"""Eliminate diagonally dominance.
Helper function that eliminates diagonally dominant rows and cols from A
in the separate matrix C. This is useful because it eliminates nodes in C
which we don't want coarsened. These eliminated nodes in C just become
the rows and columns of the identity.
Parameters
----------
A : {csr_matrix, bsr_matrix}
Sparse NxN matrix
C : {csr_matrix}
Sparse MxM matrix, where M is the number of nodes in A. M=N if A
is CSR or is BSR with blocksize 1. Otherwise M = N/blocksize.
theta : {float}
determines diagonal dominance threshhold
Returns
-------
C : {csr_matrix}
C updated such that the rows and columns corresponding to diagonally
dominant rows in A have been eliminated and replaced with rows and
columns of the identity.
Notes
-----
Diagonal dominance is defined as
:math:`\| (e_i, A) - a_{ii} \|_1 < \\theta a_{ii}`
that is, the 1-norm of the off diagonal elements in row i must be less than
theta times the diagonal element.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import eliminate_diag_dom_nodes
>>> A = poisson( (4,), format='csr' )
>>> C = eliminate_diag_dom_nodes(A, A.copy(), 1.1)
>>> C.todense()
matrix([[ 1., 0., 0., 0.],
[ 0., 2., -1., 0.],
[ 0., -1., 2., 0.],
[ 0., 0., 0., 1.]])
"""
# Find the diagonally dominant rows in A.
A_abs = A.copy()
A_abs.data = np.abs(A_abs.data)
D_abs = get_diagonal(A_abs, norm_eq=0, inv=False)
diag_dom_rows = (D_abs > (theta*(A_abs*np.ones((A_abs.shape[0],),
dtype=A_abs) - D_abs)))
# Account for BSR matrices and translate diag_dom_rows from dofs to nodes
bsize = blocksize(A_abs)
if bsize > 1:
diag_dom_rows = np.array(diag_dom_rows, dtype=int)
diag_dom_rows = diag_dom_rows.reshape(-1, bsize)
diag_dom_rows = np.sum(diag_dom_rows, axis=1)
diag_dom_rows = (diag_dom_rows == bsize)
# Replace these rows/cols in # C with rows/cols of the identity.
Id = eye(C.shape[0], C.shape[1], format='csr')
Id.data[diag_dom_rows] = 0.0
C = Id * C * Id
Id.data[diag_dom_rows] = 1.0
Id.data[np.where(diag_dom_rows == 0)[0]] = 0.0
C = C + Id
del A_abs
return C | python | def eliminate_diag_dom_nodes(A, C, theta=1.02):
r"""Eliminate diagonally dominance.
Helper function that eliminates diagonally dominant rows and cols from A
in the separate matrix C. This is useful because it eliminates nodes in C
which we don't want coarsened. These eliminated nodes in C just become
the rows and columns of the identity.
Parameters
----------
A : {csr_matrix, bsr_matrix}
Sparse NxN matrix
C : {csr_matrix}
Sparse MxM matrix, where M is the number of nodes in A. M=N if A
is CSR or is BSR with blocksize 1. Otherwise M = N/blocksize.
theta : {float}
determines diagonal dominance threshhold
Returns
-------
C : {csr_matrix}
C updated such that the rows and columns corresponding to diagonally
dominant rows in A have been eliminated and replaced with rows and
columns of the identity.
Notes
-----
Diagonal dominance is defined as
:math:`\| (e_i, A) - a_{ii} \|_1 < \\theta a_{ii}`
that is, the 1-norm of the off diagonal elements in row i must be less than
theta times the diagonal element.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import eliminate_diag_dom_nodes
>>> A = poisson( (4,), format='csr' )
>>> C = eliminate_diag_dom_nodes(A, A.copy(), 1.1)
>>> C.todense()
matrix([[ 1., 0., 0., 0.],
[ 0., 2., -1., 0.],
[ 0., -1., 2., 0.],
[ 0., 0., 0., 1.]])
"""
# Find the diagonally dominant rows in A.
A_abs = A.copy()
A_abs.data = np.abs(A_abs.data)
D_abs = get_diagonal(A_abs, norm_eq=0, inv=False)
diag_dom_rows = (D_abs > (theta*(A_abs*np.ones((A_abs.shape[0],),
dtype=A_abs) - D_abs)))
# Account for BSR matrices and translate diag_dom_rows from dofs to nodes
bsize = blocksize(A_abs)
if bsize > 1:
diag_dom_rows = np.array(diag_dom_rows, dtype=int)
diag_dom_rows = diag_dom_rows.reshape(-1, bsize)
diag_dom_rows = np.sum(diag_dom_rows, axis=1)
diag_dom_rows = (diag_dom_rows == bsize)
# Replace these rows/cols in # C with rows/cols of the identity.
Id = eye(C.shape[0], C.shape[1], format='csr')
Id.data[diag_dom_rows] = 0.0
C = Id * C * Id
Id.data[diag_dom_rows] = 1.0
Id.data[np.where(diag_dom_rows == 0)[0]] = 0.0
C = C + Id
del A_abs
return C | [
"def",
"eliminate_diag_dom_nodes",
"(",
"A",
",",
"C",
",",
"theta",
"=",
"1.02",
")",
":",
"# Find the diagonally dominant rows in A.",
"A_abs",
"=",
"A",
".",
"copy",
"(",
")",
"A_abs",
".",
"data",
"=",
"np",
".",
"abs",
"(",
"A_abs",
".",
"data",
")"... | r"""Eliminate diagonally dominance.
Helper function that eliminates diagonally dominant rows and cols from A
in the separate matrix C. This is useful because it eliminates nodes in C
which we don't want coarsened. These eliminated nodes in C just become
the rows and columns of the identity.
Parameters
----------
A : {csr_matrix, bsr_matrix}
Sparse NxN matrix
C : {csr_matrix}
Sparse MxM matrix, where M is the number of nodes in A. M=N if A
is CSR or is BSR with blocksize 1. Otherwise M = N/blocksize.
theta : {float}
determines diagonal dominance threshhold
Returns
-------
C : {csr_matrix}
C updated such that the rows and columns corresponding to diagonally
dominant rows in A have been eliminated and replaced with rows and
columns of the identity.
Notes
-----
Diagonal dominance is defined as
:math:`\| (e_i, A) - a_{ii} \|_1 < \\theta a_{ii}`
that is, the 1-norm of the off diagonal elements in row i must be less than
theta times the diagonal element.
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import eliminate_diag_dom_nodes
>>> A = poisson( (4,), format='csr' )
>>> C = eliminate_diag_dom_nodes(A, A.copy(), 1.1)
>>> C.todense()
matrix([[ 1., 0., 0., 0.],
[ 0., 2., -1., 0.],
[ 0., -1., 2., 0.],
[ 0., 0., 0., 1.]]) | [
"r",
"Eliminate",
"diagonally",
"dominance",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L1693-L1762 | train | 209,282 |
pyamg/pyamg | pyamg/util/utils.py | remove_diagonal | def remove_diagonal(S):
"""Remove the diagonal of the matrix S.
Parameters
----------
S : csr_matrix
Square matrix
Returns
-------
S : csr_matrix
Strength matrix with the diagonal removed
Notes
-----
This is needed by all the splitting routines which operate on matrix graphs
with an assumed zero diagonal
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import remove_diagonal
>>> A = poisson( (4,), format='csr' )
>>> C = remove_diagonal(A)
>>> C.todense()
matrix([[ 0., -1., 0., 0.],
[-1., 0., -1., 0.],
[ 0., -1., 0., -1.],
[ 0., 0., -1., 0.]])
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
if S.shape[0] != S.shape[1]:
raise ValueError('expected square matrix, shape=%s' % (S.shape,))
S = coo_matrix(S)
mask = S.row != S.col
S.row = S.row[mask]
S.col = S.col[mask]
S.data = S.data[mask]
return S.tocsr() | python | def remove_diagonal(S):
"""Remove the diagonal of the matrix S.
Parameters
----------
S : csr_matrix
Square matrix
Returns
-------
S : csr_matrix
Strength matrix with the diagonal removed
Notes
-----
This is needed by all the splitting routines which operate on matrix graphs
with an assumed zero diagonal
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import remove_diagonal
>>> A = poisson( (4,), format='csr' )
>>> C = remove_diagonal(A)
>>> C.todense()
matrix([[ 0., -1., 0., 0.],
[-1., 0., -1., 0.],
[ 0., -1., 0., -1.],
[ 0., 0., -1., 0.]])
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
if S.shape[0] != S.shape[1]:
raise ValueError('expected square matrix, shape=%s' % (S.shape,))
S = coo_matrix(S)
mask = S.row != S.col
S.row = S.row[mask]
S.col = S.col[mask]
S.data = S.data[mask]
return S.tocsr() | [
"def",
"remove_diagonal",
"(",
"S",
")",
":",
"if",
"not",
"isspmatrix_csr",
"(",
"S",
")",
":",
"raise",
"TypeError",
"(",
"'expected csr_matrix'",
")",
"if",
"S",
".",
"shape",
"[",
"0",
"]",
"!=",
"S",
".",
"shape",
"[",
"1",
"]",
":",
"raise",
... | Remove the diagonal of the matrix S.
Parameters
----------
S : csr_matrix
Square matrix
Returns
-------
S : csr_matrix
Strength matrix with the diagonal removed
Notes
-----
This is needed by all the splitting routines which operate on matrix graphs
with an assumed zero diagonal
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import remove_diagonal
>>> A = poisson( (4,), format='csr' )
>>> C = remove_diagonal(A)
>>> C.todense()
matrix([[ 0., -1., 0., 0.],
[-1., 0., -1., 0.],
[ 0., -1., 0., -1.],
[ 0., 0., -1., 0.]]) | [
"Remove",
"the",
"diagonal",
"of",
"the",
"matrix",
"S",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L1765-L1809 | train | 209,283 |
pyamg/pyamg | pyamg/util/utils.py | scale_rows_by_largest_entry | def scale_rows_by_largest_entry(S):
"""Scale each row in S by it's largest in magnitude entry.
Parameters
----------
S : csr_matrix
Returns
-------
S : csr_matrix
Each row has been scaled by it's largest in magnitude entry
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import scale_rows_by_largest_entry
>>> A = poisson( (4,), format='csr' )
>>> A.data[1] = 5.0
>>> A = scale_rows_by_largest_entry(A)
>>> A.todense()
matrix([[ 0.4, 1. , 0. , 0. ],
[-0.5, 1. , -0.5, 0. ],
[ 0. , -0.5, 1. , -0.5],
[ 0. , 0. , -0.5, 1. ]])
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
# Scale S by the largest magnitude entry in each row
largest_row_entry = np.zeros((S.shape[0],), dtype=S.dtype)
pyamg.amg_core.maximum_row_value(S.shape[0], largest_row_entry,
S.indptr, S.indices, S.data)
largest_row_entry[largest_row_entry != 0] =\
1.0 / largest_row_entry[largest_row_entry != 0]
S = scale_rows(S, largest_row_entry, copy=True)
return S | python | def scale_rows_by_largest_entry(S):
"""Scale each row in S by it's largest in magnitude entry.
Parameters
----------
S : csr_matrix
Returns
-------
S : csr_matrix
Each row has been scaled by it's largest in magnitude entry
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import scale_rows_by_largest_entry
>>> A = poisson( (4,), format='csr' )
>>> A.data[1] = 5.0
>>> A = scale_rows_by_largest_entry(A)
>>> A.todense()
matrix([[ 0.4, 1. , 0. , 0. ],
[-0.5, 1. , -0.5, 0. ],
[ 0. , -0.5, 1. , -0.5],
[ 0. , 0. , -0.5, 1. ]])
"""
if not isspmatrix_csr(S):
raise TypeError('expected csr_matrix')
# Scale S by the largest magnitude entry in each row
largest_row_entry = np.zeros((S.shape[0],), dtype=S.dtype)
pyamg.amg_core.maximum_row_value(S.shape[0], largest_row_entry,
S.indptr, S.indices, S.data)
largest_row_entry[largest_row_entry != 0] =\
1.0 / largest_row_entry[largest_row_entry != 0]
S = scale_rows(S, largest_row_entry, copy=True)
return S | [
"def",
"scale_rows_by_largest_entry",
"(",
"S",
")",
":",
"if",
"not",
"isspmatrix_csr",
"(",
"S",
")",
":",
"raise",
"TypeError",
"(",
"'expected csr_matrix'",
")",
"# Scale S by the largest magnitude entry in each row",
"largest_row_entry",
"=",
"np",
".",
"zeros",
... | Scale each row in S by it's largest in magnitude entry.
Parameters
----------
S : csr_matrix
Returns
-------
S : csr_matrix
Each row has been scaled by it's largest in magnitude entry
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import scale_rows_by_largest_entry
>>> A = poisson( (4,), format='csr' )
>>> A.data[1] = 5.0
>>> A = scale_rows_by_largest_entry(A)
>>> A.todense()
matrix([[ 0.4, 1. , 0. , 0. ],
[-0.5, 1. , -0.5, 0. ],
[ 0. , -0.5, 1. , -0.5],
[ 0. , 0. , -0.5, 1. ]]) | [
"Scale",
"each",
"row",
"in",
"S",
"by",
"it",
"s",
"largest",
"in",
"magnitude",
"entry",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L1812-L1850 | train | 209,284 |
pyamg/pyamg | pyamg/util/utils.py | levelize_strength_or_aggregation | def levelize_strength_or_aggregation(to_levelize, max_levels, max_coarse):
"""Turn parameter into a list per level.
Helper function to preprocess the strength and aggregation parameters
passed to smoothed_aggregation_solver and rootnode_solver.
Parameters
----------
to_levelize : {string, tuple, list}
Parameter to preprocess, i.e., levelize and convert to a level-by-level
list such that entry i specifies the parameter at level i
max_levels : int
Defines the maximum number of levels considered
max_coarse : int
Defines the maximum coarse grid size allowed
Returns
-------
(max_levels, max_coarse, to_levelize) : tuple
New max_levels and max_coarse values and then the parameter list
to_levelize, such that entry i specifies the parameter choice at level
i. max_levels and max_coarse are returned, because they may be updated
if strength or aggregation set a predefined coarsening and possibly
change these values.
Notes
--------
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that entry
[i] in that list is the parameter choice for level i.
The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
parameter setting at every level. If to_levelize is inititally a list,
if the length of the list is less than max_levels, the last entry in the
list defines that parameter for all subsequent levels.
Examples
--------
>>> from pyamg.util.utils import levelize_strength_or_aggregation
>>> strength = ['evolution', 'classical']
>>> levelize_strength_or_aggregation(strength, 4, 10)
(4, 10, ['evolution', 'classical', 'classical'])
"""
if isinstance(to_levelize, tuple):
if to_levelize[0] == 'predefined':
to_levelize = [to_levelize]
max_levels = 2
max_coarse = 0
else:
to_levelize = [to_levelize for i in range(max_levels-1)]
elif isinstance(to_levelize, str):
if to_levelize == 'predefined':
raise ValueError('predefined to_levelize requires a user-provided\
CSR matrix representing strength or aggregation\
i.e., (\'predefined\', {\'C\' : CSR_MAT}).')
else:
to_levelize = [to_levelize for i in range(max_levels-1)]
elif isinstance(to_levelize, list):
if isinstance(to_levelize[-1], tuple) and\
(to_levelize[-1][0] == 'predefined'):
# to_levelize is a list that ends with a predefined operator
max_levels = len(to_levelize) + 1
max_coarse = 0
else:
# to_levelize a list that __doesn't__ end with 'predefined'
if len(to_levelize) < max_levels-1:
mlz = max_levels - 1 - len(to_levelize)
toext = [to_levelize[-1] for i in range(mlz)]
to_levelize.extend(toext)
elif to_levelize is None:
to_levelize = [(None, {}) for i in range(max_levels-1)]
else:
raise ValueError('invalid to_levelize')
return max_levels, max_coarse, to_levelize | python | def levelize_strength_or_aggregation(to_levelize, max_levels, max_coarse):
"""Turn parameter into a list per level.
Helper function to preprocess the strength and aggregation parameters
passed to smoothed_aggregation_solver and rootnode_solver.
Parameters
----------
to_levelize : {string, tuple, list}
Parameter to preprocess, i.e., levelize and convert to a level-by-level
list such that entry i specifies the parameter at level i
max_levels : int
Defines the maximum number of levels considered
max_coarse : int
Defines the maximum coarse grid size allowed
Returns
-------
(max_levels, max_coarse, to_levelize) : tuple
New max_levels and max_coarse values and then the parameter list
to_levelize, such that entry i specifies the parameter choice at level
i. max_levels and max_coarse are returned, because they may be updated
if strength or aggregation set a predefined coarsening and possibly
change these values.
Notes
--------
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that entry
[i] in that list is the parameter choice for level i.
The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
parameter setting at every level. If to_levelize is inititally a list,
if the length of the list is less than max_levels, the last entry in the
list defines that parameter for all subsequent levels.
Examples
--------
>>> from pyamg.util.utils import levelize_strength_or_aggregation
>>> strength = ['evolution', 'classical']
>>> levelize_strength_or_aggregation(strength, 4, 10)
(4, 10, ['evolution', 'classical', 'classical'])
"""
if isinstance(to_levelize, tuple):
if to_levelize[0] == 'predefined':
to_levelize = [to_levelize]
max_levels = 2
max_coarse = 0
else:
to_levelize = [to_levelize for i in range(max_levels-1)]
elif isinstance(to_levelize, str):
if to_levelize == 'predefined':
raise ValueError('predefined to_levelize requires a user-provided\
CSR matrix representing strength or aggregation\
i.e., (\'predefined\', {\'C\' : CSR_MAT}).')
else:
to_levelize = [to_levelize for i in range(max_levels-1)]
elif isinstance(to_levelize, list):
if isinstance(to_levelize[-1], tuple) and\
(to_levelize[-1][0] == 'predefined'):
# to_levelize is a list that ends with a predefined operator
max_levels = len(to_levelize) + 1
max_coarse = 0
else:
# to_levelize a list that __doesn't__ end with 'predefined'
if len(to_levelize) < max_levels-1:
mlz = max_levels - 1 - len(to_levelize)
toext = [to_levelize[-1] for i in range(mlz)]
to_levelize.extend(toext)
elif to_levelize is None:
to_levelize = [(None, {}) for i in range(max_levels-1)]
else:
raise ValueError('invalid to_levelize')
return max_levels, max_coarse, to_levelize | [
"def",
"levelize_strength_or_aggregation",
"(",
"to_levelize",
",",
"max_levels",
",",
"max_coarse",
")",
":",
"if",
"isinstance",
"(",
"to_levelize",
",",
"tuple",
")",
":",
"if",
"to_levelize",
"[",
"0",
"]",
"==",
"'predefined'",
":",
"to_levelize",
"=",
"[... | Turn parameter into a list per level.
Helper function to preprocess the strength and aggregation parameters
passed to smoothed_aggregation_solver and rootnode_solver.
Parameters
----------
to_levelize : {string, tuple, list}
Parameter to preprocess, i.e., levelize and convert to a level-by-level
list such that entry i specifies the parameter at level i
max_levels : int
Defines the maximum number of levels considered
max_coarse : int
Defines the maximum coarse grid size allowed
Returns
-------
(max_levels, max_coarse, to_levelize) : tuple
New max_levels and max_coarse values and then the parameter list
to_levelize, such that entry i specifies the parameter choice at level
i. max_levels and max_coarse are returned, because they may be updated
if strength or aggregation set a predefined coarsening and possibly
change these values.
Notes
--------
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that entry
[i] in that list is the parameter choice for level i.
The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
parameter setting at every level. If to_levelize is inititally a list,
if the length of the list is less than max_levels, the last entry in the
list defines that parameter for all subsequent levels.
Examples
--------
>>> from pyamg.util.utils import levelize_strength_or_aggregation
>>> strength = ['evolution', 'classical']
>>> levelize_strength_or_aggregation(strength, 4, 10)
(4, 10, ['evolution', 'classical', 'classical']) | [
"Turn",
"parameter",
"into",
"a",
"list",
"per",
"level",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L1853-L1934 | train | 209,285 |
pyamg/pyamg | pyamg/util/utils.py | levelize_smooth_or_improve_candidates | def levelize_smooth_or_improve_candidates(to_levelize, max_levels):
"""Turn parameter in to a list per level.
Helper function to preprocess the smooth and improve_candidates
parameters passed to smoothed_aggregation_solver and rootnode_solver.
Parameters
----------
to_levelize : {string, tuple, list}
Parameter to preprocess, i.e., levelize and convert to a level-by-level
list such that entry i specifies the parameter at level i
max_levels : int
Defines the maximum number of levels considered
Returns
-------
to_levelize : list
The parameter list such that entry i specifies the parameter choice
at level i.
Notes
--------
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that entry
[i] in that list is the parameter choice for level i.
The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
parameter setting at every level. If to_levelize is inititally a list,
if the length of the list is less than max_levels, the last entry in the
list defines that parameter for all subsequent levels.
Examples
--------
>>> from pyamg.util.utils import levelize_smooth_or_improve_candidates
>>> improve_candidates = ['gauss_seidel', None]
>>> levelize_smooth_or_improve_candidates(improve_candidates, 4)
['gauss_seidel', None, None, None]
"""
if isinstance(to_levelize, tuple) or isinstance(to_levelize, str):
to_levelize = [to_levelize for i in range(max_levels)]
elif isinstance(to_levelize, list):
if len(to_levelize) < max_levels:
mlz = max_levels - len(to_levelize)
toext = [to_levelize[-1] for i in range(mlz)]
to_levelize.extend(toext)
elif to_levelize is None:
to_levelize = [(None, {}) for i in range(max_levels)]
return to_levelize | python | def levelize_smooth_or_improve_candidates(to_levelize, max_levels):
"""Turn parameter in to a list per level.
Helper function to preprocess the smooth and improve_candidates
parameters passed to smoothed_aggregation_solver and rootnode_solver.
Parameters
----------
to_levelize : {string, tuple, list}
Parameter to preprocess, i.e., levelize and convert to a level-by-level
list such that entry i specifies the parameter at level i
max_levels : int
Defines the maximum number of levels considered
Returns
-------
to_levelize : list
The parameter list such that entry i specifies the parameter choice
at level i.
Notes
--------
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that entry
[i] in that list is the parameter choice for level i.
The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
parameter setting at every level. If to_levelize is inititally a list,
if the length of the list is less than max_levels, the last entry in the
list defines that parameter for all subsequent levels.
Examples
--------
>>> from pyamg.util.utils import levelize_smooth_or_improve_candidates
>>> improve_candidates = ['gauss_seidel', None]
>>> levelize_smooth_or_improve_candidates(improve_candidates, 4)
['gauss_seidel', None, None, None]
"""
if isinstance(to_levelize, tuple) or isinstance(to_levelize, str):
to_levelize = [to_levelize for i in range(max_levels)]
elif isinstance(to_levelize, list):
if len(to_levelize) < max_levels:
mlz = max_levels - len(to_levelize)
toext = [to_levelize[-1] for i in range(mlz)]
to_levelize.extend(toext)
elif to_levelize is None:
to_levelize = [(None, {}) for i in range(max_levels)]
return to_levelize | [
"def",
"levelize_smooth_or_improve_candidates",
"(",
"to_levelize",
",",
"max_levels",
")",
":",
"if",
"isinstance",
"(",
"to_levelize",
",",
"tuple",
")",
"or",
"isinstance",
"(",
"to_levelize",
",",
"str",
")",
":",
"to_levelize",
"=",
"[",
"to_levelize",
"for... | Turn parameter in to a list per level.
Helper function to preprocess the smooth and improve_candidates
parameters passed to smoothed_aggregation_solver and rootnode_solver.
Parameters
----------
to_levelize : {string, tuple, list}
Parameter to preprocess, i.e., levelize and convert to a level-by-level
list such that entry i specifies the parameter at level i
max_levels : int
Defines the maximum number of levels considered
Returns
-------
to_levelize : list
The parameter list such that entry i specifies the parameter choice
at level i.
Notes
--------
This routine is needed because the user will pass in a parameter option
such as smooth='jacobi', or smooth=['jacobi', None], and this option must
be "levelized", or converted to a list of length max_levels such that entry
[i] in that list is the parameter choice for level i.
The parameter choice in to_levelize can be a string, tuple or list. If
it is a string or tuple, then that option is assumed to be the
parameter setting at every level. If to_levelize is inititally a list,
if the length of the list is less than max_levels, the last entry in the
list defines that parameter for all subsequent levels.
Examples
--------
>>> from pyamg.util.utils import levelize_smooth_or_improve_candidates
>>> improve_candidates = ['gauss_seidel', None]
>>> levelize_smooth_or_improve_candidates(improve_candidates, 4)
['gauss_seidel', None, None, None] | [
"Turn",
"parameter",
"in",
"to",
"a",
"list",
"per",
"level",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L1937-L1988 | train | 209,286 |
pyamg/pyamg | pyamg/util/utils.py | filter_matrix_columns | def filter_matrix_columns(A, theta):
"""Filter each column of A with tol.
i.e., drop all entries in column k where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Parameters
----------
A : sparse_matrix
theta : float
In range [0,1) and defines drop-tolerance used to filter the columns
of A
Returns
-------
A_filter : sparse_matrix
Each column has been filtered by dropping all entries where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import filter_matrix_columns
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[ 0.24, 1. , 0. ],
... [-0.5 , 1. , -0.5 ],
... [ 0. , 0.49, 1. ],
... [ 0. , 0. , -0.5 ]]) )
>>> filter_matrix_columns(A, 0.5).todense()
matrix([[ 0. , 1. , 0. ],
[-0.5, 1. , -0.5],
[ 0. , 0. , 1. ],
[ 0. , 0. , -0.5]])
"""
if not isspmatrix(A):
raise ValueError("Sparse matrix input needed")
if isspmatrix_bsr(A):
blocksize = A.blocksize
Aformat = A.format
if (theta < 0) or (theta >= 1.0):
raise ValueError("theta must be in [0,1)")
# Apply drop-tolerance to each column of A, which is most easily
# accessed by converting to CSC. We apply the drop-tolerance with
# amg_core.classical_strength_of_connection(), which ignores
# diagonal entries, thus necessitating the trick where we add
# A.shape[1] to each of the column indices
A = A.copy().tocsc()
A_filter = A.copy()
A.indices += A.shape[1]
A_filter.indices += A.shape[1]
# classical_strength_of_connection takes an absolute value internally
pyamg.amg_core.classical_strength_of_connection_abs(
A.shape[1],
theta,
A.indptr,
A.indices,
A.data,
A_filter.indptr,
A_filter.indices,
A_filter.data)
A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[1]
A_filter = csc_matrix((A_filter.data[:A_filter.indptr[-1]],
A_filter.indices[:A_filter.indptr[-1]],
A_filter.indptr), shape=A_filter.shape)
del A
if Aformat == 'bsr':
A_filter = A_filter.tobsr(blocksize)
else:
A_filter = A_filter.asformat(Aformat)
return A_filter | python | def filter_matrix_columns(A, theta):
"""Filter each column of A with tol.
i.e., drop all entries in column k where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Parameters
----------
A : sparse_matrix
theta : float
In range [0,1) and defines drop-tolerance used to filter the columns
of A
Returns
-------
A_filter : sparse_matrix
Each column has been filtered by dropping all entries where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import filter_matrix_columns
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[ 0.24, 1. , 0. ],
... [-0.5 , 1. , -0.5 ],
... [ 0. , 0.49, 1. ],
... [ 0. , 0. , -0.5 ]]) )
>>> filter_matrix_columns(A, 0.5).todense()
matrix([[ 0. , 1. , 0. ],
[-0.5, 1. , -0.5],
[ 0. , 0. , 1. ],
[ 0. , 0. , -0.5]])
"""
if not isspmatrix(A):
raise ValueError("Sparse matrix input needed")
if isspmatrix_bsr(A):
blocksize = A.blocksize
Aformat = A.format
if (theta < 0) or (theta >= 1.0):
raise ValueError("theta must be in [0,1)")
# Apply drop-tolerance to each column of A, which is most easily
# accessed by converting to CSC. We apply the drop-tolerance with
# amg_core.classical_strength_of_connection(), which ignores
# diagonal entries, thus necessitating the trick where we add
# A.shape[1] to each of the column indices
A = A.copy().tocsc()
A_filter = A.copy()
A.indices += A.shape[1]
A_filter.indices += A.shape[1]
# classical_strength_of_connection takes an absolute value internally
pyamg.amg_core.classical_strength_of_connection_abs(
A.shape[1],
theta,
A.indptr,
A.indices,
A.data,
A_filter.indptr,
A_filter.indices,
A_filter.data)
A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[1]
A_filter = csc_matrix((A_filter.data[:A_filter.indptr[-1]],
A_filter.indices[:A_filter.indptr[-1]],
A_filter.indptr), shape=A_filter.shape)
del A
if Aformat == 'bsr':
A_filter = A_filter.tobsr(blocksize)
else:
A_filter = A_filter.asformat(Aformat)
return A_filter | [
"def",
"filter_matrix_columns",
"(",
"A",
",",
"theta",
")",
":",
"if",
"not",
"isspmatrix",
"(",
"A",
")",
":",
"raise",
"ValueError",
"(",
"\"Sparse matrix input needed\"",
")",
"if",
"isspmatrix_bsr",
"(",
"A",
")",
":",
"blocksize",
"=",
"A",
".",
"blo... | Filter each column of A with tol.
i.e., drop all entries in column k where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Parameters
----------
A : sparse_matrix
theta : float
In range [0,1) and defines drop-tolerance used to filter the columns
of A
Returns
-------
A_filter : sparse_matrix
Each column has been filtered by dropping all entries where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import filter_matrix_columns
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[ 0.24, 1. , 0. ],
... [-0.5 , 1. , -0.5 ],
... [ 0. , 0.49, 1. ],
... [ 0. , 0. , -0.5 ]]) )
>>> filter_matrix_columns(A, 0.5).todense()
matrix([[ 0. , 1. , 0. ],
[-0.5, 1. , -0.5],
[ 0. , 0. , 1. ],
[ 0. , 0. , -0.5]]) | [
"Filter",
"each",
"column",
"of",
"A",
"with",
"tol",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L1991-L2067 | train | 209,287 |
pyamg/pyamg | pyamg/util/utils.py | filter_matrix_rows | def filter_matrix_rows(A, theta):
"""Filter each row of A with tol.
i.e., drop all entries in row k where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Parameters
----------
A : sparse_matrix
theta : float
In range [0,1) and defines drop-tolerance used to filter the row of A
Returns
-------
A_filter : sparse_matrix
Each row has been filtered by dropping all entries where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import filter_matrix_rows
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[ 0.24, -0.5 , 0. , 0. ],
... [ 1. , 1. , 0.49, 0. ],
... [ 0. , -0.5 , 1. , -0.5 ]]) )
>>> filter_matrix_rows(A, 0.5).todense()
matrix([[ 0. , -0.5, 0. , 0. ],
[ 1. , 1. , 0. , 0. ],
[ 0. , -0.5, 1. , -0.5]])
"""
if not isspmatrix(A):
raise ValueError("Sparse matrix input needed")
if isspmatrix_bsr(A):
blocksize = A.blocksize
Aformat = A.format
A = A.tocsr()
if (theta < 0) or (theta >= 1.0):
raise ValueError("theta must be in [0,1)")
# Apply drop-tolerance to each row of A. We apply the drop-tolerance with
# amg_core.classical_strength_of_connection(), which ignores diagonal
# entries, thus necessitating the trick where we add A.shape[0] to each of
# the row indices
A_filter = A.copy()
A.indices += A.shape[0]
A_filter.indices += A.shape[0]
# classical_strength_of_connection takes an absolute value internally
pyamg.amg_core.classical_strength_of_connection_abs(
A.shape[0],
theta,
A.indptr,
A.indices,
A.data,
A_filter.indptr,
A_filter.indices,
A_filter.data)
A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[0]
A_filter = csr_matrix((A_filter.data[:A_filter.indptr[-1]],
A_filter.indices[:A_filter.indptr[-1]],
A_filter.indptr), shape=A_filter.shape)
if Aformat == 'bsr':
A_filter = A_filter.tobsr(blocksize)
else:
A_filter = A_filter.asformat(Aformat)
A.indices -= A.shape[0]
return A_filter | python | def filter_matrix_rows(A, theta):
"""Filter each row of A with tol.
i.e., drop all entries in row k where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Parameters
----------
A : sparse_matrix
theta : float
In range [0,1) and defines drop-tolerance used to filter the row of A
Returns
-------
A_filter : sparse_matrix
Each row has been filtered by dropping all entries where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import filter_matrix_rows
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[ 0.24, -0.5 , 0. , 0. ],
... [ 1. , 1. , 0.49, 0. ],
... [ 0. , -0.5 , 1. , -0.5 ]]) )
>>> filter_matrix_rows(A, 0.5).todense()
matrix([[ 0. , -0.5, 0. , 0. ],
[ 1. , 1. , 0. , 0. ],
[ 0. , -0.5, 1. , -0.5]])
"""
if not isspmatrix(A):
raise ValueError("Sparse matrix input needed")
if isspmatrix_bsr(A):
blocksize = A.blocksize
Aformat = A.format
A = A.tocsr()
if (theta < 0) or (theta >= 1.0):
raise ValueError("theta must be in [0,1)")
# Apply drop-tolerance to each row of A. We apply the drop-tolerance with
# amg_core.classical_strength_of_connection(), which ignores diagonal
# entries, thus necessitating the trick where we add A.shape[0] to each of
# the row indices
A_filter = A.copy()
A.indices += A.shape[0]
A_filter.indices += A.shape[0]
# classical_strength_of_connection takes an absolute value internally
pyamg.amg_core.classical_strength_of_connection_abs(
A.shape[0],
theta,
A.indptr,
A.indices,
A.data,
A_filter.indptr,
A_filter.indices,
A_filter.data)
A_filter.indices[:A_filter.indptr[-1]] -= A_filter.shape[0]
A_filter = csr_matrix((A_filter.data[:A_filter.indptr[-1]],
A_filter.indices[:A_filter.indptr[-1]],
A_filter.indptr), shape=A_filter.shape)
if Aformat == 'bsr':
A_filter = A_filter.tobsr(blocksize)
else:
A_filter = A_filter.asformat(Aformat)
A.indices -= A.shape[0]
return A_filter | [
"def",
"filter_matrix_rows",
"(",
"A",
",",
"theta",
")",
":",
"if",
"not",
"isspmatrix",
"(",
"A",
")",
":",
"raise",
"ValueError",
"(",
"\"Sparse matrix input needed\"",
")",
"if",
"isspmatrix_bsr",
"(",
"A",
")",
":",
"blocksize",
"=",
"A",
".",
"blocks... | Filter each row of A with tol.
i.e., drop all entries in row k where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Parameters
----------
A : sparse_matrix
theta : float
In range [0,1) and defines drop-tolerance used to filter the row of A
Returns
-------
A_filter : sparse_matrix
Each row has been filtered by dropping all entries where
abs(A[i,k]) < tol max( abs(A[:,k]) )
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import filter_matrix_rows
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[ 0.24, -0.5 , 0. , 0. ],
... [ 1. , 1. , 0.49, 0. ],
... [ 0. , -0.5 , 1. , -0.5 ]]) )
>>> filter_matrix_rows(A, 0.5).todense()
matrix([[ 0. , -0.5, 0. , 0. ],
[ 1. , 1. , 0. , 0. ],
[ 0. , -0.5, 1. , -0.5]]) | [
"Filter",
"each",
"row",
"of",
"A",
"with",
"tol",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L2070-L2142 | train | 209,288 |
pyamg/pyamg | pyamg/util/utils.py | truncate_rows | def truncate_rows(A, nz_per_row):
"""Truncate the rows of A by keeping only the largest in magnitude entries in each row.
Parameters
----------
A : sparse_matrix
nz_per_row : int
Determines how many entries in each row to keep
Returns
-------
A : sparse_matrix
Each row has been truncated to at most nz_per_row entries
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import truncate_rows
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[-0.24, -0.5 , 0. , 0. ],
... [ 1. , -1.1 , 0.49, 0.1 ],
... [ 0. , 0.4 , 1. , 0.5 ]]) )
>>> truncate_rows(A, 2).todense()
matrix([[-0.24, -0.5 , 0. , 0. ],
[ 1. , -1.1 , 0. , 0. ],
[ 0. , 0. , 1. , 0.5 ]])
"""
if not isspmatrix(A):
raise ValueError("Sparse matrix input needed")
if isspmatrix_bsr(A):
blocksize = A.blocksize
if isspmatrix_csr(A):
A = A.copy() # don't modify A in-place
Aformat = A.format
A = A.tocsr()
nz_per_row = int(nz_per_row)
# Truncate rows of A, and then convert A back to original format
pyamg.amg_core.truncate_rows_csr(A.shape[0], nz_per_row, A.indptr,
A.indices, A.data)
A.eliminate_zeros()
if Aformat == 'bsr':
A = A.tobsr(blocksize)
else:
A = A.asformat(Aformat)
return A | python | def truncate_rows(A, nz_per_row):
"""Truncate the rows of A by keeping only the largest in magnitude entries in each row.
Parameters
----------
A : sparse_matrix
nz_per_row : int
Determines how many entries in each row to keep
Returns
-------
A : sparse_matrix
Each row has been truncated to at most nz_per_row entries
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import truncate_rows
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[-0.24, -0.5 , 0. , 0. ],
... [ 1. , -1.1 , 0.49, 0.1 ],
... [ 0. , 0.4 , 1. , 0.5 ]]) )
>>> truncate_rows(A, 2).todense()
matrix([[-0.24, -0.5 , 0. , 0. ],
[ 1. , -1.1 , 0. , 0. ],
[ 0. , 0. , 1. , 0.5 ]])
"""
if not isspmatrix(A):
raise ValueError("Sparse matrix input needed")
if isspmatrix_bsr(A):
blocksize = A.blocksize
if isspmatrix_csr(A):
A = A.copy() # don't modify A in-place
Aformat = A.format
A = A.tocsr()
nz_per_row = int(nz_per_row)
# Truncate rows of A, and then convert A back to original format
pyamg.amg_core.truncate_rows_csr(A.shape[0], nz_per_row, A.indptr,
A.indices, A.data)
A.eliminate_zeros()
if Aformat == 'bsr':
A = A.tobsr(blocksize)
else:
A = A.asformat(Aformat)
return A | [
"def",
"truncate_rows",
"(",
"A",
",",
"nz_per_row",
")",
":",
"if",
"not",
"isspmatrix",
"(",
"A",
")",
":",
"raise",
"ValueError",
"(",
"\"Sparse matrix input needed\"",
")",
"if",
"isspmatrix_bsr",
"(",
"A",
")",
":",
"blocksize",
"=",
"A",
".",
"blocks... | Truncate the rows of A by keeping only the largest in magnitude entries in each row.
Parameters
----------
A : sparse_matrix
nz_per_row : int
Determines how many entries in each row to keep
Returns
-------
A : sparse_matrix
Each row has been truncated to at most nz_per_row entries
Examples
--------
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import truncate_rows
>>> from scipy import array
>>> from scipy.sparse import csr_matrix
>>> A = csr_matrix( array([[-0.24, -0.5 , 0. , 0. ],
... [ 1. , -1.1 , 0.49, 0.1 ],
... [ 0. , 0.4 , 1. , 0.5 ]]) )
>>> truncate_rows(A, 2).todense()
matrix([[-0.24, -0.5 , 0. , 0. ],
[ 1. , -1.1 , 0. , 0. ],
[ 0. , 0. , 1. , 0.5 ]]) | [
"Truncate",
"the",
"rows",
"of",
"A",
"by",
"keeping",
"only",
"the",
"largest",
"in",
"magnitude",
"entries",
"in",
"each",
"row",
"."
] | 89dc54aa27e278f65d2f54bdaf16ab97d7768fa6 | https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L2145-L2195 | train | 209,289 |
nderkach/airbnb-python | airbnb/api.py | require_auth | def require_auth(function):
"""
A decorator that wraps the passed in function and raises exception
if access token is missing
"""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
if not self.access_token():
raise MissingAccessTokenError
return function(self, *args, **kwargs)
return wrapper | python | def require_auth(function):
"""
A decorator that wraps the passed in function and raises exception
if access token is missing
"""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
if not self.access_token():
raise MissingAccessTokenError
return function(self, *args, **kwargs)
return wrapper | [
"def",
"require_auth",
"(",
"function",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"function",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"self",
".",
"access_token",
"(",
")",
":",
"ra... | A decorator that wraps the passed in function and raises exception
if access token is missing | [
"A",
"decorator",
"that",
"wraps",
"the",
"passed",
"in",
"function",
"and",
"raises",
"exception",
"if",
"access",
"token",
"is",
"missing"
] | 0b3ed69518e41383eca93ae11b24247f3cc69a27 | https://github.com/nderkach/airbnb-python/blob/0b3ed69518e41383eca93ae11b24247f3cc69a27/airbnb/api.py#L42-L52 | train | 209,290 |
nderkach/airbnb-python | airbnb/api.py | randomizable | def randomizable(function):
"""
A decorator which randomizes requests if needed
"""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
if self.randomize:
self.randomize_headers()
return function(self, *args, **kwargs)
return wrapper | python | def randomizable(function):
"""
A decorator which randomizes requests if needed
"""
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
if self.randomize:
self.randomize_headers()
return function(self, *args, **kwargs)
return wrapper | [
"def",
"randomizable",
"(",
"function",
")",
":",
"@",
"functools",
".",
"wraps",
"(",
"function",
")",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"randomize",
":",
"self",
".",
"randomize_h... | A decorator which randomizes requests if needed | [
"A",
"decorator",
"which",
"randomizes",
"requests",
"if",
"needed"
] | 0b3ed69518e41383eca93ae11b24247f3cc69a27 | https://github.com/nderkach/airbnb-python/blob/0b3ed69518e41383eca93ae11b24247f3cc69a27/airbnb/api.py#L55-L64 | train | 209,291 |
nderkach/airbnb-python | airbnb/api.py | Api.get_profile | def get_profile(self):
"""
Get my own profile
"""
r = self._session.get(API_URL + "/logins/me")
r.raise_for_status()
return r.json() | python | def get_profile(self):
"""
Get my own profile
"""
r = self._session.get(API_URL + "/logins/me")
r.raise_for_status()
return r.json() | [
"def",
"get_profile",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"_session",
".",
"get",
"(",
"API_URL",
"+",
"\"/logins/me\"",
")",
"r",
".",
"raise_for_status",
"(",
")",
"return",
"r",
".",
"json",
"(",
")"
] | Get my own profile | [
"Get",
"my",
"own",
"profile"
] | 0b3ed69518e41383eca93ae11b24247f3cc69a27 | https://github.com/nderkach/airbnb-python/blob/0b3ed69518e41383eca93ae11b24247f3cc69a27/airbnb/api.py#L176-L183 | train | 209,292 |
nderkach/airbnb-python | airbnb/api.py | Api.get_calendar | def get_calendar(self, listing_id, starting_month=datetime.datetime.now().month, starting_year=datetime.datetime.now().year, calendar_months=12):
"""
Get availability calendar for a given listing
"""
params = {
'year': str(starting_year),
'listing_id': str(listing_id),
'_format': 'with_conditions',
'count': str(calendar_months),
'month': str(starting_month)
}
r = self._session.get(API_URL + "/calendar_months", params=params)
r.raise_for_status()
return r.json() | python | def get_calendar(self, listing_id, starting_month=datetime.datetime.now().month, starting_year=datetime.datetime.now().year, calendar_months=12):
"""
Get availability calendar for a given listing
"""
params = {
'year': str(starting_year),
'listing_id': str(listing_id),
'_format': 'with_conditions',
'count': str(calendar_months),
'month': str(starting_month)
}
r = self._session.get(API_URL + "/calendar_months", params=params)
r.raise_for_status()
return r.json() | [
"def",
"get_calendar",
"(",
"self",
",",
"listing_id",
",",
"starting_month",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"month",
",",
"starting_year",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"year",
",",
"calendar_... | Get availability calendar for a given listing | [
"Get",
"availability",
"calendar",
"for",
"a",
"given",
"listing"
] | 0b3ed69518e41383eca93ae11b24247f3cc69a27 | https://github.com/nderkach/airbnb-python/blob/0b3ed69518e41383eca93ae11b24247f3cc69a27/airbnb/api.py#L186-L201 | train | 209,293 |
nderkach/airbnb-python | airbnb/api.py | Api.get_reviews | def get_reviews(self, listing_id, offset=0, limit=20):
"""
Get reviews for a given listing
"""
params = {
'_order': 'language_country',
'listing_id': str(listing_id),
'_offset': str(offset),
'role': 'all',
'_limit': str(limit),
'_format': 'for_mobile_client',
}
print(self._session.headers)
r = self._session.get(API_URL + "/reviews", params=params)
r.raise_for_status()
return r.json() | python | def get_reviews(self, listing_id, offset=0, limit=20):
"""
Get reviews for a given listing
"""
params = {
'_order': 'language_country',
'listing_id': str(listing_id),
'_offset': str(offset),
'role': 'all',
'_limit': str(limit),
'_format': 'for_mobile_client',
}
print(self._session.headers)
r = self._session.get(API_URL + "/reviews", params=params)
r.raise_for_status()
return r.json() | [
"def",
"get_reviews",
"(",
"self",
",",
"listing_id",
",",
"offset",
"=",
"0",
",",
"limit",
"=",
"20",
")",
":",
"params",
"=",
"{",
"'_order'",
":",
"'language_country'",
",",
"'listing_id'",
":",
"str",
"(",
"listing_id",
")",
",",
"'_offset'",
":",
... | Get reviews for a given listing | [
"Get",
"reviews",
"for",
"a",
"given",
"listing"
] | 0b3ed69518e41383eca93ae11b24247f3cc69a27 | https://github.com/nderkach/airbnb-python/blob/0b3ed69518e41383eca93ae11b24247f3cc69a27/airbnb/api.py#L204-L222 | train | 209,294 |
nderkach/airbnb-python | airbnb/api.py | Api.get_listing_calendar | def get_listing_calendar(self, listing_id, starting_date=datetime.datetime.now(), calendar_months=6):
"""
Get host availability calendar for a given listing
"""
params = {
'_format': 'host_calendar_detailed'
}
starting_date_str = starting_date.strftime("%Y-%m-%d")
ending_date_str = (
starting_date + datetime.timedelta(days=30)).strftime("%Y-%m-%d")
r = self._session.get(API_URL + "/calendars/{}/{}/{}".format(
str(listing_id), starting_date_str, ending_date_str), params=params)
r.raise_for_status()
return r.json() | python | def get_listing_calendar(self, listing_id, starting_date=datetime.datetime.now(), calendar_months=6):
"""
Get host availability calendar for a given listing
"""
params = {
'_format': 'host_calendar_detailed'
}
starting_date_str = starting_date.strftime("%Y-%m-%d")
ending_date_str = (
starting_date + datetime.timedelta(days=30)).strftime("%Y-%m-%d")
r = self._session.get(API_URL + "/calendars/{}/{}/{}".format(
str(listing_id), starting_date_str, ending_date_str), params=params)
r.raise_for_status()
return r.json() | [
"def",
"get_listing_calendar",
"(",
"self",
",",
"listing_id",
",",
"starting_date",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
",",
"calendar_months",
"=",
"6",
")",
":",
"params",
"=",
"{",
"'_format'",
":",
"'host_calendar_detailed'",
"}",
"s... | Get host availability calendar for a given listing | [
"Get",
"host",
"availability",
"calendar",
"for",
"a",
"given",
"listing"
] | 0b3ed69518e41383eca93ae11b24247f3cc69a27 | https://github.com/nderkach/airbnb-python/blob/0b3ed69518e41383eca93ae11b24247f3cc69a27/airbnb/api.py#L228-L244 | train | 209,295 |
fabioz/PyDev.Debugger | pydev_ipython/matplotlibtools.py | find_gui_and_backend | def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend | python | def find_gui_and_backend():
"""Return the gui and mpl backend."""
matplotlib = sys.modules['matplotlib']
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParams['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
return gui, backend | [
"def",
"find_gui_and_backend",
"(",
")",
":",
"matplotlib",
"=",
"sys",
".",
"modules",
"[",
"'matplotlib'",
"]",
"# WARNING: this assumes matplotlib 1.1 or newer!!",
"backend",
"=",
"matplotlib",
".",
"rcParams",
"[",
"'backend'",
"]",
"# In this case, we need to find wh... | Return the gui and mpl backend. | [
"Return",
"the",
"gui",
"and",
"mpl",
"backend",
"."
] | ed9c4307662a5593b8a7f1f3389ecd0e79b8c503 | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydev_ipython/matplotlibtools.py#L43-L51 | train | 209,296 |
fabioz/PyDev.Debugger | pydev_ipython/matplotlibtools.py | is_interactive_backend | def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive() | python | def is_interactive_backend(backend):
""" Check if backend is interactive """
matplotlib = sys.modules['matplotlib']
from matplotlib.rcsetup import interactive_bk, non_interactive_bk # @UnresolvedImport
if backend in interactive_bk:
return True
elif backend in non_interactive_bk:
return False
else:
return matplotlib.is_interactive() | [
"def",
"is_interactive_backend",
"(",
"backend",
")",
":",
"matplotlib",
"=",
"sys",
".",
"modules",
"[",
"'matplotlib'",
"]",
"from",
"matplotlib",
".",
"rcsetup",
"import",
"interactive_bk",
",",
"non_interactive_bk",
"# @UnresolvedImport",
"if",
"backend",
"in",
... | Check if backend is interactive | [
"Check",
"if",
"backend",
"is",
"interactive"
] | ed9c4307662a5593b8a7f1f3389ecd0e79b8c503 | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydev_ipython/matplotlibtools.py#L54-L63 | train | 209,297 |
fabioz/PyDev.Debugger | pydev_ipython/matplotlibtools.py | activate_matplotlib | def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive() | python | def activate_matplotlib(enable_gui_function):
"""Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread.
"""
matplotlib = sys.modules['matplotlib']
gui, backend = find_gui_and_backend()
is_interactive = is_interactive_backend(backend)
if is_interactive:
enable_gui_function(gui)
if not matplotlib.is_interactive():
sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
matplotlib.interactive(True)
else:
if matplotlib.is_interactive():
sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
matplotlib.interactive(False)
patch_use(enable_gui_function)
patch_is_interactive() | [
"def",
"activate_matplotlib",
"(",
"enable_gui_function",
")",
":",
"matplotlib",
"=",
"sys",
".",
"modules",
"[",
"'matplotlib'",
"]",
"gui",
",",
"backend",
"=",
"find_gui_and_backend",
"(",
")",
"is_interactive",
"=",
"is_interactive_backend",
"(",
"backend",
"... | Set interactive to True for interactive backends.
enable_gui_function - Function which enables gui, should be run in the main thread. | [
"Set",
"interactive",
"to",
"True",
"for",
"interactive",
"backends",
".",
"enable_gui_function",
"-",
"Function",
"which",
"enables",
"gui",
"should",
"be",
"run",
"in",
"the",
"main",
"thread",
"."
] | ed9c4307662a5593b8a7f1f3389ecd0e79b8c503 | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydev_ipython/matplotlibtools.py#L90-L107 | train | 209,298 |
fabioz/PyDev.Debugger | pydev_ipython/matplotlibtools.py | flag_calls | def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args, **kw):
wrapper.called = False
out = func(*args, **kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper | python | def flag_calls(func):
"""Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded."""
# don't wrap twice
if hasattr(func, 'called'):
return func
def wrapper(*args, **kw):
wrapper.called = False
out = func(*args, **kw)
wrapper.called = True
return out
wrapper.called = False
wrapper.__doc__ = func.__doc__
return wrapper | [
"def",
"flag_calls",
"(",
"func",
")",
":",
"# don't wrap twice",
"if",
"hasattr",
"(",
"func",
",",
"'called'",
")",
":",
"return",
"func",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kw",
")",
":",
"wrapper",
".",
"called",
"=",
"False",
"o... | Wrap a function to detect and flag when it gets called.
This is a decorator which takes a function and wraps it in a function with
a 'called' attribute. wrapper.called is initialized to False.
The wrapper.called attribute is set to False right before each call to the
wrapped function, so if the call fails it remains False. After the call
completes, wrapper.called is set to True and the output is returned.
Testing for truth in wrapper.called allows you to determine if a call to
func() was attempted and succeeded. | [
"Wrap",
"a",
"function",
"to",
"detect",
"and",
"flag",
"when",
"it",
"gets",
"called",
"."
] | ed9c4307662a5593b8a7f1f3389ecd0e79b8c503 | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydev_ipython/matplotlibtools.py#L110-L135 | train | 209,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.