id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
231,800
odlgroup/odl
odl/tomo/geometry/parallel.py
ParallelBeamGeometry.angles
def angles(self): """All angles of this geometry as an array. If ``motion_params.ndim == 1``, the array has shape ``(N,)``, where ``N`` is the number of angles. Otherwise, the array shape is ``(ndim, N)``, where ``N`` is the total number of angles, and ``ndim`` is ``motion_partition.ndim``. The order of axes is chosen such that ``geometry.angles`` can be used directly as input to any of the other methods of the geometry. """ if self.motion_partition.ndim == 1: return self.motion_grid.coord_vectors[0] else: return self.motion_grid.points().T
python
def angles(self): if self.motion_partition.ndim == 1: return self.motion_grid.coord_vectors[0] else: return self.motion_grid.points().T
[ "def", "angles", "(", "self", ")", ":", "if", "self", ".", "motion_partition", ".", "ndim", "==", "1", ":", "return", "self", ".", "motion_grid", ".", "coord_vectors", "[", "0", "]", "else", ":", "return", "self", ".", "motion_grid", ".", "points", "("...
All angles of this geometry as an array. If ``motion_params.ndim == 1``, the array has shape ``(N,)``, where ``N`` is the number of angles. Otherwise, the array shape is ``(ndim, N)``, where ``N`` is the total number of angles, and ``ndim`` is ``motion_partition.ndim``. The order of axes is chosen such that ``geometry.angles`` can be used directly as input to any of the other methods of the geometry.
[ "All", "angles", "of", "this", "geometry", "as", "an", "array", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L74-L89
231,801
odlgroup/odl
odl/tomo/geometry/parallel.py
ParallelBeamGeometry.det_to_src
def det_to_src(self, angle, dparam): """Direction from a detector location to the source. The direction vector is computed as follows:: dir = rotation_matrix(angle).dot(detector.surface_normal(dparam)) Note that for flat detectors, ``surface_normal`` does not depend on the parameter ``dparam``, hence this function is constant in that variable. Parameters ---------- angle : `array-like` or sequence One or several (Euler) angles in radians at which to evaluate. If ``motion_params.ndim >= 2``, a sequence of that length must be provided. dparam : `array-like` or sequence Detector parameter(s) at which to evaluate. If ``det_params.ndim >= 2``, a sequence of that length must be provided. Returns ------- det_to_src : `numpy.ndarray` Vector(s) pointing from a detector point to the source (at infinity). The shape of the returned array is obtained from the (broadcast) shapes of ``angle`` and ``dparam``, and broadcasting is supported within both parameters and between them. The precise definition of the shape is ``broadcast(bcast_angle, bcast_dparam).shape + (ndim,)``, where ``bcast_angle`` is - ``angle`` if `motion_params` is 1D, - ``broadcast(*angle)`` otherwise, and ``bcast_dparam`` defined analogously. Examples -------- The method works with single parameter values, in which case a single vector is returned: >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) >>> geom = odl.tomo.Parallel2dGeometry(apart, dpart) >>> geom.det_to_src(0, 0) array([ 0., -1.]) >>> geom.det_to_src(0, 1) array([ 0., -1.]) >>> dir = geom.det_to_src(np.pi / 2, 0) >>> np.allclose(dir, [1, 0]) True >>> dir = geom.det_to_src(np.pi / 2, 1) >>> np.allclose(dir, [1, 0]) True Both variables support vectorized calls, i.e., stacks of parameters can be provided. 
The order of axes in the output (left of the ``ndim`` axis for the vector dimension) corresponds to the order of arguments: >>> dirs = geom.det_to_src(0, [-1, 0, 0.5, 1]) >>> dirs array([[ 0., -1.], [ 0., -1.], [ 0., -1.], [ 0., -1.]]) >>> dirs.shape # (num_dparams, ndim) (4, 2) >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], 0) >>> np.allclose(dirs, [[0, -1], ... [1, 0], ... [0, 1]]) True >>> dirs.shape # (num_angles, ndim) (3, 2) >>> # Providing 3 pairs of parameters, resulting in 3 vectors >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], [-1, 0, 1]) >>> dirs[0] # Corresponds to angle = 0, dparam = -1 array([ 0., -1.]) >>> dirs.shape (3, 2) >>> # Pairs of parameters arranged in arrays of same size >>> geom.det_to_src(np.zeros((4, 5)), np.zeros((4, 5))).shape (4, 5, 2) >>> # "Outer product" type evaluation using broadcasting >>> geom.det_to_src(np.zeros((4, 1)), np.zeros((1, 5))).shape (4, 5, 2) """ # Always call the downstream methods with vectorized arguments # to be able to reliably manipulate the final axes of the result if self.motion_params.ndim == 1: squeeze_angle = (np.shape(angle) == ()) angle = np.array(angle, dtype=float, copy=False, ndmin=1) matrix = self.rotation_matrix(angle) # shape (m, ndim, ndim) else: squeeze_angle = (np.broadcast(*angle).shape == ()) angle = tuple(np.array(a, dtype=float, copy=False, ndmin=1) for a in angle) matrix = self.rotation_matrix(angle) # shape (m, ndim, ndim) if self.det_params.ndim == 1: squeeze_dparam = (np.shape(dparam) == ()) dparam = np.array(dparam, dtype=float, copy=False, ndmin=1) else: squeeze_dparam = (np.broadcast(*dparam).shape == ()) dparam = tuple(np.array(p, dtype=float, copy=False, ndmin=1) for p in dparam) normal = self.detector.surface_normal(dparam) # shape (d, ndim) # Perform matrix-vector multiplication along the last axis of both # `matrix` and `normal` while "zipping" all axes that do not # participate in the matrix-vector product. 
In other words, the axes # are labelled # [0, 1, ..., r-1, r, r+1] for `matrix` and # [0, 1, ..., r-1, r+1] for `normal`, and the output axes are set to # [0, 1, ..., r-1, r]. This automatically supports broadcasting # along the axes 0, ..., r-1. matrix_axes = list(range(matrix.ndim)) normal_axes = list(range(matrix.ndim - 2)) + [matrix_axes[-1]] out_axes = list(range(matrix.ndim - 1)) det_to_src = np.einsum(matrix, matrix_axes, normal, normal_axes, out_axes) if squeeze_angle and squeeze_dparam: det_to_src = det_to_src.squeeze() return det_to_src
python
def det_to_src(self, angle, dparam): # Always call the downstream methods with vectorized arguments # to be able to reliably manipulate the final axes of the result if self.motion_params.ndim == 1: squeeze_angle = (np.shape(angle) == ()) angle = np.array(angle, dtype=float, copy=False, ndmin=1) matrix = self.rotation_matrix(angle) # shape (m, ndim, ndim) else: squeeze_angle = (np.broadcast(*angle).shape == ()) angle = tuple(np.array(a, dtype=float, copy=False, ndmin=1) for a in angle) matrix = self.rotation_matrix(angle) # shape (m, ndim, ndim) if self.det_params.ndim == 1: squeeze_dparam = (np.shape(dparam) == ()) dparam = np.array(dparam, dtype=float, copy=False, ndmin=1) else: squeeze_dparam = (np.broadcast(*dparam).shape == ()) dparam = tuple(np.array(p, dtype=float, copy=False, ndmin=1) for p in dparam) normal = self.detector.surface_normal(dparam) # shape (d, ndim) # Perform matrix-vector multiplication along the last axis of both # `matrix` and `normal` while "zipping" all axes that do not # participate in the matrix-vector product. In other words, the axes # are labelled # [0, 1, ..., r-1, r, r+1] for `matrix` and # [0, 1, ..., r-1, r+1] for `normal`, and the output axes are set to # [0, 1, ..., r-1, r]. This automatically supports broadcasting # along the axes 0, ..., r-1. matrix_axes = list(range(matrix.ndim)) normal_axes = list(range(matrix.ndim - 2)) + [matrix_axes[-1]] out_axes = list(range(matrix.ndim - 1)) det_to_src = np.einsum(matrix, matrix_axes, normal, normal_axes, out_axes) if squeeze_angle and squeeze_dparam: det_to_src = det_to_src.squeeze() return det_to_src
[ "def", "det_to_src", "(", "self", ",", "angle", ",", "dparam", ")", ":", "# Always call the downstream methods with vectorized arguments", "# to be able to reliably manipulate the final axes of the result", "if", "self", ".", "motion_params", ".", "ndim", "==", "1", ":", "s...
Direction from a detector location to the source. The direction vector is computed as follows:: dir = rotation_matrix(angle).dot(detector.surface_normal(dparam)) Note that for flat detectors, ``surface_normal`` does not depend on the parameter ``dparam``, hence this function is constant in that variable. Parameters ---------- angle : `array-like` or sequence One or several (Euler) angles in radians at which to evaluate. If ``motion_params.ndim >= 2``, a sequence of that length must be provided. dparam : `array-like` or sequence Detector parameter(s) at which to evaluate. If ``det_params.ndim >= 2``, a sequence of that length must be provided. Returns ------- det_to_src : `numpy.ndarray` Vector(s) pointing from a detector point to the source (at infinity). The shape of the returned array is obtained from the (broadcast) shapes of ``angle`` and ``dparam``, and broadcasting is supported within both parameters and between them. The precise definition of the shape is ``broadcast(bcast_angle, bcast_dparam).shape + (ndim,)``, where ``bcast_angle`` is - ``angle`` if `motion_params` is 1D, - ``broadcast(*angle)`` otherwise, and ``bcast_dparam`` defined analogously. Examples -------- The method works with single parameter values, in which case a single vector is returned: >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) >>> geom = odl.tomo.Parallel2dGeometry(apart, dpart) >>> geom.det_to_src(0, 0) array([ 0., -1.]) >>> geom.det_to_src(0, 1) array([ 0., -1.]) >>> dir = geom.det_to_src(np.pi / 2, 0) >>> np.allclose(dir, [1, 0]) True >>> dir = geom.det_to_src(np.pi / 2, 1) >>> np.allclose(dir, [1, 0]) True Both variables support vectorized calls, i.e., stacks of parameters can be provided. 
The order of axes in the output (left of the ``ndim`` axis for the vector dimension) corresponds to the order of arguments: >>> dirs = geom.det_to_src(0, [-1, 0, 0.5, 1]) >>> dirs array([[ 0., -1.], [ 0., -1.], [ 0., -1.], [ 0., -1.]]) >>> dirs.shape # (num_dparams, ndim) (4, 2) >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], 0) >>> np.allclose(dirs, [[0, -1], ... [1, 0], ... [0, 1]]) True >>> dirs.shape # (num_angles, ndim) (3, 2) >>> # Providing 3 pairs of parameters, resulting in 3 vectors >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], [-1, 0, 1]) >>> dirs[0] # Corresponds to angle = 0, dparam = -1 array([ 0., -1.]) >>> dirs.shape (3, 2) >>> # Pairs of parameters arranged in arrays of same size >>> geom.det_to_src(np.zeros((4, 5)), np.zeros((4, 5))).shape (4, 5, 2) >>> # "Outer product" type evaluation using broadcasting >>> geom.det_to_src(np.zeros((4, 1)), np.zeros((1, 5))).shape (4, 5, 2)
[ "Direction", "from", "a", "detector", "location", "to", "the", "source", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L196-L325
231,802
odlgroup/odl
odl/tomo/geometry/parallel.py
Parallel2dGeometry.frommatrix
def frommatrix(cls, apart, dpart, init_matrix, **kwargs): """Create an instance of `Parallel2dGeometry` using a matrix. This alternative constructor uses a matrix to rotate and translate the default configuration. It is most useful when the transformation to be applied is already given as a matrix. Parameters ---------- apart : 1-dim. `RectPartition` Partition of the angle interval. dpart : 1-dim. `RectPartition` Partition of the detector parameter interval. init_matrix : `array_like`, shape ``(2, 2)`` or ``(2, 3)``, optional Transformation matrix whose left ``(2, 2)`` block is multiplied with the default ``det_pos_init`` and ``det_axis_init`` to determine the new vectors. If present, the third column acts as a translation after the initial transformation. The resulting ``det_axis_init`` will be normalized. kwargs : Further keyword arguments passed to the class constructor. Returns ------- geometry : `Parallel2dGeometry` Examples -------- Mirror the second unit vector, creating a left-handed system: >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) >>> matrix = np.array([[1, 0], ... [0, -1]]) >>> geom = Parallel2dGeometry.frommatrix(apart, dpart, matrix) >>> e_x, e_y = np.eye(2) # standard unit vectors >>> np.allclose(geom.det_pos_init, -e_y) True >>> np.allclose(geom.det_axis_init, e_x) True >>> np.allclose(geom.translation, (0, 0)) True Adding a translation with a third matrix column: >>> matrix = np.array([[1, 0, 1], ... 
[0, -1, 1]]) >>> geom = Parallel2dGeometry.frommatrix(apart, dpart, matrix) >>> np.allclose(geom.translation, (1, 1)) True >>> np.allclose(geom.det_pos_init, -e_y + (1, 1)) True """ # Get transformation and translation parts from `init_matrix` init_matrix = np.asarray(init_matrix, dtype=float) if init_matrix.shape not in ((2, 2), (2, 3)): raise ValueError('`matrix` must have shape (2, 2) or (2, 3), ' 'got array with shape {}' ''.format(init_matrix.shape)) trafo_matrix = init_matrix[:, :2] translation = init_matrix[:, 2:].squeeze() # Transform the default vectors default_det_pos_init = cls._default_config['det_pos_init'] default_det_axis_init = cls._default_config['det_axis_init'] vecs_to_transform = [default_det_axis_init] transformed_vecs = transform_system( default_det_pos_init, None, vecs_to_transform, matrix=trafo_matrix) # Use the standard constructor with these vectors det_pos, det_axis = transformed_vecs if translation.size != 0: kwargs['translation'] = translation return cls(apart, dpart, det_pos, det_axis_init=det_axis, **kwargs)
python
def frommatrix(cls, apart, dpart, init_matrix, **kwargs): # Get transformation and translation parts from `init_matrix` init_matrix = np.asarray(init_matrix, dtype=float) if init_matrix.shape not in ((2, 2), (2, 3)): raise ValueError('`matrix` must have shape (2, 2) or (2, 3), ' 'got array with shape {}' ''.format(init_matrix.shape)) trafo_matrix = init_matrix[:, :2] translation = init_matrix[:, 2:].squeeze() # Transform the default vectors default_det_pos_init = cls._default_config['det_pos_init'] default_det_axis_init = cls._default_config['det_axis_init'] vecs_to_transform = [default_det_axis_init] transformed_vecs = transform_system( default_det_pos_init, None, vecs_to_transform, matrix=trafo_matrix) # Use the standard constructor with these vectors det_pos, det_axis = transformed_vecs if translation.size != 0: kwargs['translation'] = translation return cls(apart, dpart, det_pos, det_axis_init=det_axis, **kwargs)
[ "def", "frommatrix", "(", "cls", ",", "apart", ",", "dpart", ",", "init_matrix", ",", "*", "*", "kwargs", ")", ":", "# Get transformation and translation parts from `init_matrix`", "init_matrix", "=", "np", ".", "asarray", "(", "init_matrix", ",", "dtype", "=", ...
Create an instance of `Parallel2dGeometry` using a matrix. This alternative constructor uses a matrix to rotate and translate the default configuration. It is most useful when the transformation to be applied is already given as a matrix. Parameters ---------- apart : 1-dim. `RectPartition` Partition of the angle interval. dpart : 1-dim. `RectPartition` Partition of the detector parameter interval. init_matrix : `array_like`, shape ``(2, 2)`` or ``(2, 3)``, optional Transformation matrix whose left ``(2, 2)`` block is multiplied with the default ``det_pos_init`` and ``det_axis_init`` to determine the new vectors. If present, the third column acts as a translation after the initial transformation. The resulting ``det_axis_init`` will be normalized. kwargs : Further keyword arguments passed to the class constructor. Returns ------- geometry : `Parallel2dGeometry` Examples -------- Mirror the second unit vector, creating a left-handed system: >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) >>> matrix = np.array([[1, 0], ... [0, -1]]) >>> geom = Parallel2dGeometry.frommatrix(apart, dpart, matrix) >>> e_x, e_y = np.eye(2) # standard unit vectors >>> np.allclose(geom.det_pos_init, -e_y) True >>> np.allclose(geom.det_axis_init, e_x) True >>> np.allclose(geom.translation, (0, 0)) True Adding a translation with a third matrix column: >>> matrix = np.array([[1, 0, 1], ... [0, -1, 1]]) >>> geom = Parallel2dGeometry.frommatrix(apart, dpart, matrix) >>> np.allclose(geom.translation, (1, 1)) True >>> np.allclose(geom.det_pos_init, -e_y + (1, 1)) True
[ "Create", "an", "instance", "of", "Parallel2dGeometry", "using", "a", "matrix", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L486-L561
231,803
odlgroup/odl
odl/tomo/geometry/parallel.py
Parallel3dEulerGeometry.det_axes
def det_axes(self, angles): """Return the detector axes tuple at ``angles``. Parameters ---------- angles : `array-like` or sequence Euler angles in radians describing the rotation of the detector. The length of the provided argument (along the first axis in case of an array) must be equal to the number of Euler angles in this geometry. Returns ------- axes : `numpy.ndarray` Unit vector(s) along which the detector is aligned. If ``angles`` is a single pair (or triplet) of Euler angles, the returned array has shape ``(2, 3)``, otherwise ``broadcast(*angles).shape + (2, 3)``. Notes ----- To get an array that enumerates the detector axes in the first dimension, move the second-to-last axis to the first position: axes = det_axes(angle) axes_enumeration = np.moveaxis(axes, -2, 0) Examples -------- Calling the method with a single set of angles produces a ``(2, 3)`` array of vertically stacked vectors: >>> apart = odl.uniform_partition([0, 0], [np.pi, 2 * np.pi], ... (10, 20)) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20)) >>> geom = Parallel3dEulerGeometry(apart, dpart) >>> geom.det_axes([0, 0]) array([[ 1., 0., 0.], [ 0., 0., 1.]]) >>> np.allclose(geom.det_axes([np.pi / 2, 0]), [[0, 1, 0], ... [0, 0, 1]]) True The method is vectorized, i.e., it can be called with multiple angle parameters at once. Each of the angle arrays can have different shapes and will be broadcast against each other to determine the final shape: >>> # The first axis enumerates the angles >>> np.allclose(geom.det_axes(([0, np.pi / 2], [0, 0])), ... [[[1, 0, 0], ... [0, 0, 1]], ... [[0, 1, 0], ... 
[0, 0, 1]]]) True >>> # Pairs of Euler angles in a (4, 5) array each >>> geom.det_axes((np.zeros((4, 5)), np.zeros((4, 5)))).shape (4, 5, 2, 3) >>> # Using broadcasting for "outer product" type result >>> geom.det_axes((np.zeros((4, 1)), np.zeros((1, 5)))).shape (4, 5, 2, 3) """ # Transpose to take dot along axis 1 axes = self.rotation_matrix(angles).dot(self.det_axes_init.T) # `axes` has shape (a, 3, 2), need to roll the last dimensions # to the second to last place return np.rollaxis(axes, -1, -2)
python
def det_axes(self, angles): # Transpose to take dot along axis 1 axes = self.rotation_matrix(angles).dot(self.det_axes_init.T) # `axes` has shape (a, 3, 2), need to roll the last dimensions # to the second to last place return np.rollaxis(axes, -1, -2)
[ "def", "det_axes", "(", "self", ",", "angles", ")", ":", "# Transpose to take dot along axis 1", "axes", "=", "self", ".", "rotation_matrix", "(", "angles", ")", ".", "dot", "(", "self", ".", "det_axes_init", ".", "T", ")", "# `axes` has shape (a, 3, 2), need to r...
Return the detector axes tuple at ``angles``. Parameters ---------- angles : `array-like` or sequence Euler angles in radians describing the rotation of the detector. The length of the provided argument (along the first axis in case of an array) must be equal to the number of Euler angles in this geometry. Returns ------- axes : `numpy.ndarray` Unit vector(s) along which the detector is aligned. If ``angles`` is a single pair (or triplet) of Euler angles, the returned array has shape ``(2, 3)``, otherwise ``broadcast(*angles).shape + (2, 3)``. Notes ----- To get an array that enumerates the detector axes in the first dimension, move the second-to-last axis to the first position: axes = det_axes(angle) axes_enumeration = np.moveaxis(axes, -2, 0) Examples -------- Calling the method with a single set of angles produces a ``(2, 3)`` array of vertically stacked vectors: >>> apart = odl.uniform_partition([0, 0], [np.pi, 2 * np.pi], ... (10, 20)) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20)) >>> geom = Parallel3dEulerGeometry(apart, dpart) >>> geom.det_axes([0, 0]) array([[ 1., 0., 0.], [ 0., 0., 1.]]) >>> np.allclose(geom.det_axes([np.pi / 2, 0]), [[0, 1, 0], ... [0, 0, 1]]) True The method is vectorized, i.e., it can be called with multiple angle parameters at once. Each of the angle arrays can have different shapes and will be broadcast against each other to determine the final shape: >>> # The first axis enumerates the angles >>> np.allclose(geom.det_axes(([0, np.pi / 2], [0, 0])), ... [[[1, 0, 0], ... [0, 0, 1]], ... [[0, 1, 0], ... [0, 0, 1]]]) True >>> # Pairs of Euler angles in a (4, 5) array each >>> geom.det_axes((np.zeros((4, 5)), np.zeros((4, 5)))).shape (4, 5, 2, 3) >>> # Using broadcasting for "outer product" type result >>> geom.det_axes((np.zeros((4, 1)), np.zeros((1, 5)))).shape (4, 5, 2, 3)
[ "Return", "the", "detector", "axes", "tuple", "at", "angles", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L947-L1013
231,804
odlgroup/odl
odl/tomo/geometry/parallel.py
Parallel3dEulerGeometry.rotation_matrix
def rotation_matrix(self, angles): """Return the rotation matrix to the system state at ``angles``. Parameters ---------- angles : `array-like` or sequence Euler angles in radians describing the rotation of the detector. The length of the provided argument (along the first axis in case of an array) must be equal to the number of Euler angles in this geometry. Returns ------- rot : `numpy.ndarray` Rotation matrix (or matrices) mapping vectors at the initial state to the ones in the state defined by ``angles``. The rotation is extrinsic, i.e., defined in the "world" coordinate system. If ``angles`` is a single pair (or triplet) of Euler angles, an array of shape ``(3, 3)`` representing a single matrix is returned. Otherwise, the shape of the returned array is ``broadcast(*angles).shape + (3, 3)``. """ squeeze_out = (np.broadcast(*angles).shape == ()) angles_in = angles angles = tuple(np.array(angle, dtype=float, copy=False, ndmin=1) for angle in angles) if (self.check_bounds and not is_inside_bounds(angles, self.motion_params)): raise ValueError('`angles` {} not in the valid range ' '{}'.format(angles_in, self.motion_params)) matrix = euler_matrix(*angles) if squeeze_out: matrix = matrix.squeeze() return matrix
python
def rotation_matrix(self, angles): squeeze_out = (np.broadcast(*angles).shape == ()) angles_in = angles angles = tuple(np.array(angle, dtype=float, copy=False, ndmin=1) for angle in angles) if (self.check_bounds and not is_inside_bounds(angles, self.motion_params)): raise ValueError('`angles` {} not in the valid range ' '{}'.format(angles_in, self.motion_params)) matrix = euler_matrix(*angles) if squeeze_out: matrix = matrix.squeeze() return matrix
[ "def", "rotation_matrix", "(", "self", ",", "angles", ")", ":", "squeeze_out", "=", "(", "np", ".", "broadcast", "(", "*", "angles", ")", ".", "shape", "==", "(", ")", ")", "angles_in", "=", "angles", "angles", "=", "tuple", "(", "np", ".", "array", ...
Return the rotation matrix to the system state at ``angles``. Parameters ---------- angles : `array-like` or sequence Euler angles in radians describing the rotation of the detector. The length of the provided argument (along the first axis in case of an array) must be equal to the number of Euler angles in this geometry. Returns ------- rot : `numpy.ndarray` Rotation matrix (or matrices) mapping vectors at the initial state to the ones in the state defined by ``angles``. The rotation is extrinsic, i.e., defined in the "world" coordinate system. If ``angles`` is a single pair (or triplet) of Euler angles, an array of shape ``(3, 3)`` representing a single matrix is returned. Otherwise, the shape of the returned array is ``broadcast(*angles).shape + (3, 3)``.
[ "Return", "the", "rotation", "matrix", "to", "the", "system", "state", "at", "angles", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L1015-L1051
231,805
odlgroup/odl
odl/tomo/geometry/parallel.py
Parallel3dAxisGeometry.frommatrix
def frommatrix(cls, apart, dpart, init_matrix, **kwargs): """Create an instance of `Parallel3dAxisGeometry` using a matrix. This alternative constructor uses a matrix to rotate and translate the default configuration. It is most useful when the transformation to be applied is already given as a matrix. Parameters ---------- apart : 1-dim. `RectPartition` Partition of the parameter interval. dpart : 2-dim. `RectPartition` Partition of the detector parameter set. init_matrix : `array_like`, shape ``(3, 3)`` or ``(3, 4)``, optional Transformation matrix whose left ``(3, 3)`` block is multiplied with the default ``det_pos_init`` and ``det_axes_init`` to determine the new vectors. If present, the fourth column acts as a translation after the initial transformation. The resulting ``det_axes_init`` will be normalized. kwargs : Further keyword arguments passed to the class constructor. Returns ------- geometry : `Parallel3dAxisGeometry` Examples -------- Map unit vectors ``e_y -> e_z`` and ``e_z -> -e_y``, keeping the right-handedness: >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20)) >>> matrix = np.array([[1, 0, 0], ... [0, 0, -1], ... [0, 1, 0]]) >>> geom = Parallel3dAxisGeometry.frommatrix( ... apart, dpart, init_matrix=matrix) >>> geom.axis array([ 0., -1., 0.]) >>> geom.det_pos_init array([ 0., 0., 1.]) >>> geom.det_axes_init array([[ 1., 0., 0.], [ 0., -1., 0.]]) Adding a translation with a fourth matrix column: >>> matrix = np.array([[0, 0, -1, 0], ... [0, 1, 0, 1], ... 
[1, 0, 0, 1]]) >>> geom = Parallel3dAxisGeometry.frommatrix(apart, dpart, matrix) >>> geom.translation array([ 0., 1., 1.]) >>> geom.det_pos_init # (0, 1, 0) + (0, 1, 1) array([ 0., 2., 1.]) """ # Get transformation and translation parts from `init_matrix` init_matrix = np.asarray(init_matrix, dtype=float) if init_matrix.shape not in ((3, 3), (3, 4)): raise ValueError('`matrix` must have shape (3, 3) or (3, 4), ' 'got array with shape {}' ''.format(init_matrix.shape)) trafo_matrix = init_matrix[:, :3] translation = init_matrix[:, 3:].squeeze() # Transform the default vectors default_axis = cls._default_config['axis'] default_det_pos_init = cls._default_config['det_pos_init'] default_det_axes_init = cls._default_config['det_axes_init'] vecs_to_transform = (default_det_pos_init,) + default_det_axes_init transformed_vecs = transform_system( default_axis, None, vecs_to_transform, matrix=trafo_matrix) # Use the standard constructor with these vectors axis, det_pos, det_axis_0, det_axis_1 = transformed_vecs if translation.size != 0: kwargs['translation'] = translation return cls(apart, dpart, axis, det_pos_init=det_pos, det_axes_init=[det_axis_0, det_axis_1], **kwargs)
python
def frommatrix(cls, apart, dpart, init_matrix, **kwargs): # Get transformation and translation parts from `init_matrix` init_matrix = np.asarray(init_matrix, dtype=float) if init_matrix.shape not in ((3, 3), (3, 4)): raise ValueError('`matrix` must have shape (3, 3) or (3, 4), ' 'got array with shape {}' ''.format(init_matrix.shape)) trafo_matrix = init_matrix[:, :3] translation = init_matrix[:, 3:].squeeze() # Transform the default vectors default_axis = cls._default_config['axis'] default_det_pos_init = cls._default_config['det_pos_init'] default_det_axes_init = cls._default_config['det_axes_init'] vecs_to_transform = (default_det_pos_init,) + default_det_axes_init transformed_vecs = transform_system( default_axis, None, vecs_to_transform, matrix=trafo_matrix) # Use the standard constructor with these vectors axis, det_pos, det_axis_0, det_axis_1 = transformed_vecs if translation.size != 0: kwargs['translation'] = translation return cls(apart, dpart, axis, det_pos_init=det_pos, det_axes_init=[det_axis_0, det_axis_1], **kwargs)
[ "def", "frommatrix", "(", "cls", ",", "apart", ",", "dpart", ",", "init_matrix", ",", "*", "*", "kwargs", ")", ":", "# Get transformation and translation parts from `init_matrix`", "init_matrix", "=", "np", ".", "asarray", "(", "init_matrix", ",", "dtype", "=", ...
Create an instance of `Parallel3dAxisGeometry` using a matrix. This alternative constructor uses a matrix to rotate and translate the default configuration. It is most useful when the transformation to be applied is already given as a matrix. Parameters ---------- apart : 1-dim. `RectPartition` Partition of the parameter interval. dpart : 2-dim. `RectPartition` Partition of the detector parameter set. init_matrix : `array_like`, shape ``(3, 3)`` or ``(3, 4)``, optional Transformation matrix whose left ``(3, 3)`` block is multiplied with the default ``det_pos_init`` and ``det_axes_init`` to determine the new vectors. If present, the fourth column acts as a translation after the initial transformation. The resulting ``det_axes_init`` will be normalized. kwargs : Further keyword arguments passed to the class constructor. Returns ------- geometry : `Parallel3dAxisGeometry` Examples -------- Map unit vectors ``e_y -> e_z`` and ``e_z -> -e_y``, keeping the right-handedness: >>> apart = odl.uniform_partition(0, np.pi, 10) >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20)) >>> matrix = np.array([[1, 0, 0], ... [0, 0, -1], ... [0, 1, 0]]) >>> geom = Parallel3dAxisGeometry.frommatrix( ... apart, dpart, init_matrix=matrix) >>> geom.axis array([ 0., -1., 0.]) >>> geom.det_pos_init array([ 0., 0., 1.]) >>> geom.det_axes_init array([[ 1., 0., 0.], [ 0., -1., 0.]]) Adding a translation with a fourth matrix column: >>> matrix = np.array([[0, 0, -1, 0], ... [0, 1, 0, 1], ... [1, 0, 0, 1]]) >>> geom = Parallel3dAxisGeometry.frommatrix(apart, dpart, matrix) >>> geom.translation array([ 0., 1., 1.]) >>> geom.det_pos_init # (0, 1, 0) + (0, 1, 1) array([ 0., 2., 1.])
[ "Create", "an", "instance", "of", "Parallel3dAxisGeometry", "using", "a", "matrix", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L1258-L1340
231,806
odlgroup/odl
odl/util/ufuncs.py
wrap_ufunc_base
def wrap_ufunc_base(name, n_in, n_out, doc): """Return ufunc wrapper for implementation-agnostic ufunc classes.""" ufunc = getattr(np, name) if n_in == 1: if n_out == 1: def wrapper(self, out=None, **kwargs): if out is None or isinstance(out, (type(self.elem), type(self.elem.data))): out = (out,) return self.elem.__array_ufunc__( ufunc, '__call__', self.elem, out=out, **kwargs) elif n_out == 2: def wrapper(self, out=None, **kwargs): if out is None: out = (None, None) return self.elem.__array_ufunc__( ufunc, '__call__', self.elem, out=out, **kwargs) else: raise NotImplementedError elif n_in == 2: if n_out == 1: def wrapper(self, x2, out=None, **kwargs): return self.elem.__array_ufunc__( ufunc, '__call__', self.elem, x2, out=(out,), **kwargs) else: raise NotImplementedError else: raise NotImplementedError wrapper.__name__ = wrapper.__qualname__ = name wrapper.__doc__ = doc return wrapper
python
def wrap_ufunc_base(name, n_in, n_out, doc): ufunc = getattr(np, name) if n_in == 1: if n_out == 1: def wrapper(self, out=None, **kwargs): if out is None or isinstance(out, (type(self.elem), type(self.elem.data))): out = (out,) return self.elem.__array_ufunc__( ufunc, '__call__', self.elem, out=out, **kwargs) elif n_out == 2: def wrapper(self, out=None, **kwargs): if out is None: out = (None, None) return self.elem.__array_ufunc__( ufunc, '__call__', self.elem, out=out, **kwargs) else: raise NotImplementedError elif n_in == 2: if n_out == 1: def wrapper(self, x2, out=None, **kwargs): return self.elem.__array_ufunc__( ufunc, '__call__', self.elem, x2, out=(out,), **kwargs) else: raise NotImplementedError else: raise NotImplementedError wrapper.__name__ = wrapper.__qualname__ = name wrapper.__doc__ = doc return wrapper
[ "def", "wrap_ufunc_base", "(", "name", ",", "n_in", ",", "n_out", ",", "doc", ")", ":", "ufunc", "=", "getattr", "(", "np", ",", "name", ")", "if", "n_in", "==", "1", ":", "if", "n_out", "==", "1", ":", "def", "wrapper", "(", "self", ",", "out", ...
Return ufunc wrapper for implementation-agnostic ufunc classes.
[ "Return", "ufunc", "wrapper", "for", "implementation", "-", "agnostic", "ufunc", "classes", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/ufuncs.py#L75-L112
231,807
odlgroup/odl
odl/util/ufuncs.py
wrap_ufunc_productspace
def wrap_ufunc_productspace(name, n_in, n_out, doc): """Return ufunc wrapper for `ProductSpaceUfuncs`.""" if n_in == 1: if n_out == 1: def wrapper(self, out=None, **kwargs): if out is None: result = [getattr(x.ufuncs, name)(**kwargs) for x in self.elem] return self.elem.space.element(result) else: for x, out_x in zip(self.elem, out): getattr(x.ufuncs, name)(out=out_x, **kwargs) return out elif n_out == 2: def wrapper(self, out1=None, out2=None, **kwargs): if out1 is None: out1 = self.elem.space.element() if out2 is None: out2 = self.elem.space.element() for x, out1_x, out2_x in zip(self.elem, out1, out2): getattr(x.ufuncs, name)(out1=out1_x, out2=out2_x, **kwargs) return out1, out2 else: raise NotImplementedError elif n_in == 2: if n_out == 1: def wrapper(self, x2, out=None, **kwargs): if x2 in self.elem.space: if out is None: result = [getattr(x.ufuncs, name)(x2p, **kwargs) for x, x2p in zip(self.elem, x2)] return self.elem.space.element(result) else: for x, x2p, outp in zip(self.elem, x2, out): getattr(x.ufuncs, name)(x2p, out=outp, **kwargs) return out else: if out is None: result = [getattr(x.ufuncs, name)(x2, **kwargs) for x in self.elem] return self.elem.space.element(result) else: for x, outp in zip(self.elem, out): getattr(x.ufuncs, name)(x2, out=outp, **kwargs) return out else: raise NotImplementedError else: raise NotImplementedError wrapper.__name__ = wrapper.__qualname__ = name wrapper.__doc__ = doc return wrapper
python
def wrap_ufunc_productspace(name, n_in, n_out, doc): if n_in == 1: if n_out == 1: def wrapper(self, out=None, **kwargs): if out is None: result = [getattr(x.ufuncs, name)(**kwargs) for x in self.elem] return self.elem.space.element(result) else: for x, out_x in zip(self.elem, out): getattr(x.ufuncs, name)(out=out_x, **kwargs) return out elif n_out == 2: def wrapper(self, out1=None, out2=None, **kwargs): if out1 is None: out1 = self.elem.space.element() if out2 is None: out2 = self.elem.space.element() for x, out1_x, out2_x in zip(self.elem, out1, out2): getattr(x.ufuncs, name)(out1=out1_x, out2=out2_x, **kwargs) return out1, out2 else: raise NotImplementedError elif n_in == 2: if n_out == 1: def wrapper(self, x2, out=None, **kwargs): if x2 in self.elem.space: if out is None: result = [getattr(x.ufuncs, name)(x2p, **kwargs) for x, x2p in zip(self.elem, x2)] return self.elem.space.element(result) else: for x, x2p, outp in zip(self.elem, x2, out): getattr(x.ufuncs, name)(x2p, out=outp, **kwargs) return out else: if out is None: result = [getattr(x.ufuncs, name)(x2, **kwargs) for x in self.elem] return self.elem.space.element(result) else: for x, outp in zip(self.elem, out): getattr(x.ufuncs, name)(x2, out=outp, **kwargs) return out else: raise NotImplementedError else: raise NotImplementedError wrapper.__name__ = wrapper.__qualname__ = name wrapper.__doc__ = doc return wrapper
[ "def", "wrap_ufunc_productspace", "(", "name", ",", "n_in", ",", "n_out", ",", "doc", ")", ":", "if", "n_in", "==", "1", ":", "if", "n_out", "==", "1", ":", "def", "wrapper", "(", "self", ",", "out", "=", "None", ",", "*", "*", "kwargs", ")", ":"...
Return ufunc wrapper for `ProductSpaceUfuncs`.
[ "Return", "ufunc", "wrapper", "for", "ProductSpaceUfuncs", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/ufuncs.py#L186-L242
231,808
odlgroup/odl
odl/solvers/iterative/iterative.py
landweber
def landweber(op, x, rhs, niter, omega=None, projection=None, callback=None): r"""Optimized implementation of Landweber's method. Solves the inverse problem:: A(x) = rhs Parameters ---------- op : `Operator` Operator in the inverse problem. ``op.derivative(x).adjoint`` must be well-defined for ``x`` in the operator domain. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : ``op.range`` element Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. omega : positive float, optional Relaxation parameter in the iteration. Default: ``1 / op.norm(estimate=True) ** 2`` projection : callable, optional Function that can be used to modify the iterates in each iteration, for example enforcing positivity. The function should take one argument and modify it in-place. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. Notes ----- This method calculates an approximate least-squares solution of the inverse problem of the first kind .. math:: \mathcal{A} (x) = y, for a given :math:`y\in \mathcal{Y}`, i.e. an approximate solution :math:`x^*` to .. math:: \min_{x\in \mathcal{X}} \| \mathcal{A}(x) - y \|_{\mathcal{Y}}^2 for a (Frechet-) differentiable operator :math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method starts from an initial guess :math:`x_0` and uses the iteration .. math:: x_{k+1} = x_k - \omega \ \partial \mathcal{A}(x)^* (\mathcal{A}(x_k) - y), where :math:`\partial \mathcal{A}(x)` is the Frechet derivative of :math:`\mathcal{A}` at :math:`x` and :math:`\omega` is a relaxation parameter. For linear problems, a choice :math:`0 < \omega < 2/\lVert \mathcal{A}^2\rVert` guarantees convergence, where :math:`\lVert\mathcal{A}\rVert` stands for the operator norm of :math:`\mathcal{A}`. 
Users may also optionally provide a projection to project each iterate onto some subset. For example enforcing positivity. This implementation uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is also described in a `Wikipedia article <https://en.wikipedia.org/wiki/Landweber_iteration>`_. """ # TODO: add a book reference if x not in op.domain: raise TypeError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, op.domain)) if omega is None: omega = 1 / op.norm(estimate=True) ** 2 # Reusable temporaries tmp_ran = op.range.element() tmp_dom = op.domain.element() for _ in range(niter): op(x, out=tmp_ran) tmp_ran -= rhs op.derivative(x).adjoint(tmp_ran, out=tmp_dom) x.lincomb(1, x, -omega, tmp_dom) if projection is not None: projection(x) if callback is not None: callback(x)
python
def landweber(op, x, rhs, niter, omega=None, projection=None, callback=None): r"""Optimized implementation of Landweber's method. Solves the inverse problem:: A(x) = rhs Parameters ---------- op : `Operator` Operator in the inverse problem. ``op.derivative(x).adjoint`` must be well-defined for ``x`` in the operator domain. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : ``op.range`` element Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. omega : positive float, optional Relaxation parameter in the iteration. Default: ``1 / op.norm(estimate=True) ** 2`` projection : callable, optional Function that can be used to modify the iterates in each iteration, for example enforcing positivity. The function should take one argument and modify it in-place. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. Notes ----- This method calculates an approximate least-squares solution of the inverse problem of the first kind .. math:: \mathcal{A} (x) = y, for a given :math:`y\in \mathcal{Y}`, i.e. an approximate solution :math:`x^*` to .. math:: \min_{x\in \mathcal{X}} \| \mathcal{A}(x) - y \|_{\mathcal{Y}}^2 for a (Frechet-) differentiable operator :math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method starts from an initial guess :math:`x_0` and uses the iteration .. math:: x_{k+1} = x_k - \omega \ \partial \mathcal{A}(x)^* (\mathcal{A}(x_k) - y), where :math:`\partial \mathcal{A}(x)` is the Frechet derivative of :math:`\mathcal{A}` at :math:`x` and :math:`\omega` is a relaxation parameter. For linear problems, a choice :math:`0 < \omega < 2/\lVert \mathcal{A}^2\rVert` guarantees convergence, where :math:`\lVert\mathcal{A}\rVert` stands for the operator norm of :math:`\mathcal{A}`. 
Users may also optionally provide a projection to project each iterate onto some subset. For example enforcing positivity. This implementation uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is also described in a `Wikipedia article <https://en.wikipedia.org/wiki/Landweber_iteration>`_. """ # TODO: add a book reference if x not in op.domain: raise TypeError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, op.domain)) if omega is None: omega = 1 / op.norm(estimate=True) ** 2 # Reusable temporaries tmp_ran = op.range.element() tmp_dom = op.domain.element() for _ in range(niter): op(x, out=tmp_ran) tmp_ran -= rhs op.derivative(x).adjoint(tmp_ran, out=tmp_dom) x.lincomb(1, x, -omega, tmp_dom) if projection is not None: projection(x) if callback is not None: callback(x)
[ "def", "landweber", "(", "op", ",", "x", ",", "rhs", ",", "niter", ",", "omega", "=", "None", ",", "projection", "=", "None", ",", "callback", "=", "None", ")", ":", "# TODO: add a book reference", "if", "x", "not", "in", "op", ".", "domain", ":", "r...
r"""Optimized implementation of Landweber's method. Solves the inverse problem:: A(x) = rhs Parameters ---------- op : `Operator` Operator in the inverse problem. ``op.derivative(x).adjoint`` must be well-defined for ``x`` in the operator domain. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : ``op.range`` element Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. omega : positive float, optional Relaxation parameter in the iteration. Default: ``1 / op.norm(estimate=True) ** 2`` projection : callable, optional Function that can be used to modify the iterates in each iteration, for example enforcing positivity. The function should take one argument and modify it in-place. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. Notes ----- This method calculates an approximate least-squares solution of the inverse problem of the first kind .. math:: \mathcal{A} (x) = y, for a given :math:`y\in \mathcal{Y}`, i.e. an approximate solution :math:`x^*` to .. math:: \min_{x\in \mathcal{X}} \| \mathcal{A}(x) - y \|_{\mathcal{Y}}^2 for a (Frechet-) differentiable operator :math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method starts from an initial guess :math:`x_0` and uses the iteration .. math:: x_{k+1} = x_k - \omega \ \partial \mathcal{A}(x)^* (\mathcal{A}(x_k) - y), where :math:`\partial \mathcal{A}(x)` is the Frechet derivative of :math:`\mathcal{A}` at :math:`x` and :math:`\omega` is a relaxation parameter. For linear problems, a choice :math:`0 < \omega < 2/\lVert \mathcal{A}^2\rVert` guarantees convergence, where :math:`\lVert\mathcal{A}\rVert` stands for the operator norm of :math:`\mathcal{A}`. 
Users may also optionally provide a projection to project each iterate onto some subset. For example enforcing positivity. This implementation uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is also described in a `Wikipedia article <https://en.wikipedia.org/wiki/Landweber_iteration>`_.
[ "r", "Optimized", "implementation", "of", "Landweber", "s", "method", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/iterative/iterative.py#L26-L120
231,809
odlgroup/odl
odl/solvers/iterative/iterative.py
conjugate_gradient
def conjugate_gradient(op, x, rhs, niter, callback=None): """Optimized implementation of CG for self-adjoint operators. This method solves the inverse problem (of the first kind):: A(x) = y for a linear and self-adjoint `Operator` ``A``. It uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is described (for linear systems) in a `Wikipedia article <https://en.wikipedia.org/wiki/Conjugate_gradient_method>`_. Parameters ---------- op : linear `Operator` Operator in the inverse problem. It must be linear and self-adjoint. This implies in particular that its domain and range are equal. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : ``op.range`` element Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. 
See Also -------- conjugate_gradient_normal : Solver for nonsymmetric matrices """ # TODO: add a book reference # TODO: update doc if op.domain != op.range: raise ValueError('operator needs to be self-adjoint') if x not in op.domain: raise TypeError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, op.domain)) r = op(x) r.lincomb(1, rhs, -1, r) # r = rhs - A x p = r.copy() d = op.domain.element() # Extra storage for storing A x sqnorm_r_old = r.norm() ** 2 # Only recalculate norm after update if sqnorm_r_old == 0: # Return if no step forward return for _ in range(niter): op(p, out=d) # d = A p inner_p_d = p.inner(d) if inner_p_d == 0.0: # Return if step is 0 return alpha = sqnorm_r_old / inner_p_d x.lincomb(1, x, alpha, p) # x = x + alpha*p r.lincomb(1, r, -alpha, d) # r = r - alpha*d sqnorm_r_new = r.norm() ** 2 beta = sqnorm_r_new / sqnorm_r_old sqnorm_r_old = sqnorm_r_new p.lincomb(1, r, beta, p) # p = s + b * p if callback is not None: callback(x)
python
def conjugate_gradient(op, x, rhs, niter, callback=None): # TODO: add a book reference # TODO: update doc if op.domain != op.range: raise ValueError('operator needs to be self-adjoint') if x not in op.domain: raise TypeError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, op.domain)) r = op(x) r.lincomb(1, rhs, -1, r) # r = rhs - A x p = r.copy() d = op.domain.element() # Extra storage for storing A x sqnorm_r_old = r.norm() ** 2 # Only recalculate norm after update if sqnorm_r_old == 0: # Return if no step forward return for _ in range(niter): op(p, out=d) # d = A p inner_p_d = p.inner(d) if inner_p_d == 0.0: # Return if step is 0 return alpha = sqnorm_r_old / inner_p_d x.lincomb(1, x, alpha, p) # x = x + alpha*p r.lincomb(1, r, -alpha, d) # r = r - alpha*d sqnorm_r_new = r.norm() ** 2 beta = sqnorm_r_new / sqnorm_r_old sqnorm_r_old = sqnorm_r_new p.lincomb(1, r, beta, p) # p = s + b * p if callback is not None: callback(x)
[ "def", "conjugate_gradient", "(", "op", ",", "x", ",", "rhs", ",", "niter", ",", "callback", "=", "None", ")", ":", "# TODO: add a book reference", "# TODO: update doc", "if", "op", ".", "domain", "!=", "op", ".", "range", ":", "raise", "ValueError", "(", ...
Optimized implementation of CG for self-adjoint operators. This method solves the inverse problem (of the first kind):: A(x) = y for a linear and self-adjoint `Operator` ``A``. It uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is described (for linear systems) in a `Wikipedia article <https://en.wikipedia.org/wiki/Conjugate_gradient_method>`_. Parameters ---------- op : linear `Operator` Operator in the inverse problem. It must be linear and self-adjoint. This implies in particular that its domain and range are equal. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : ``op.range`` element Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. See Also -------- conjugate_gradient_normal : Solver for nonsymmetric matrices
[ "Optimized", "implementation", "of", "CG", "for", "self", "-", "adjoint", "operators", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/iterative/iterative.py#L123-L201
231,810
odlgroup/odl
odl/solvers/iterative/iterative.py
conjugate_gradient_normal
def conjugate_gradient_normal(op, x, rhs, niter=1, callback=None): """Optimized implementation of CG for the normal equation. This method solves the inverse problem (of the first kind) :: A(x) == rhs with a linear `Operator` ``A`` by looking at the normal equation :: A.adjoint(A(x)) == A.adjoint(rhs) It uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is described (for linear systems) in a `Wikipedia article <https://en.wikipedia.org/wiki/Conjugate_gradient_method#\ Conjugate_gradient_on_the_normal_equations>`_. Parameters ---------- op : `Operator` Operator in the inverse problem. If not linear, it must have an implementation of `Operator.derivative`, which in turn must implement `Operator.adjoint`, i.e. the call ``op.derivative(x).adjoint`` must be valid. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : ``op.range`` element Right-hand side of the equation defining the inverse problem niter : int Number of iterations. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. 
See Also -------- conjugate_gradient : Optimized solver for symmetric matrices odl.solvers.smooth.nonlinear_cg.conjugate_gradient_nonlinear : Equivalent solver for the nonlinear case """ # TODO: add a book reference # TODO: update doc if x not in op.domain: raise TypeError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, op.domain)) d = op(x) d.lincomb(1, rhs, -1, d) # d = rhs - A x p = op.derivative(x).adjoint(d) s = p.copy() q = op.range.element() sqnorm_s_old = s.norm() ** 2 # Only recalculate norm after update for _ in range(niter): op(p, out=q) # q = A p sqnorm_q = q.norm() ** 2 if sqnorm_q == 0.0: # Return if residual is 0 return a = sqnorm_s_old / sqnorm_q x.lincomb(1, x, a, p) # x = x + a*p d.lincomb(1, d, -a, q) # d = d - a*Ap op.derivative(p).adjoint(d, out=s) # s = A^T d sqnorm_s_new = s.norm() ** 2 b = sqnorm_s_new / sqnorm_s_old sqnorm_s_old = sqnorm_s_new p.lincomb(1, s, b, p) # p = s + b * p if callback is not None: callback(x)
python
def conjugate_gradient_normal(op, x, rhs, niter=1, callback=None): # TODO: add a book reference # TODO: update doc if x not in op.domain: raise TypeError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, op.domain)) d = op(x) d.lincomb(1, rhs, -1, d) # d = rhs - A x p = op.derivative(x).adjoint(d) s = p.copy() q = op.range.element() sqnorm_s_old = s.norm() ** 2 # Only recalculate norm after update for _ in range(niter): op(p, out=q) # q = A p sqnorm_q = q.norm() ** 2 if sqnorm_q == 0.0: # Return if residual is 0 return a = sqnorm_s_old / sqnorm_q x.lincomb(1, x, a, p) # x = x + a*p d.lincomb(1, d, -a, q) # d = d - a*Ap op.derivative(p).adjoint(d, out=s) # s = A^T d sqnorm_s_new = s.norm() ** 2 b = sqnorm_s_new / sqnorm_s_old sqnorm_s_old = sqnorm_s_new p.lincomb(1, s, b, p) # p = s + b * p if callback is not None: callback(x)
[ "def", "conjugate_gradient_normal", "(", "op", ",", "x", ",", "rhs", ",", "niter", "=", "1", ",", "callback", "=", "None", ")", ":", "# TODO: add a book reference", "# TODO: update doc", "if", "x", "not", "in", "op", ".", "domain", ":", "raise", "TypeError",...
Optimized implementation of CG for the normal equation. This method solves the inverse problem (of the first kind) :: A(x) == rhs with a linear `Operator` ``A`` by looking at the normal equation :: A.adjoint(A(x)) == A.adjoint(rhs) It uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is described (for linear systems) in a `Wikipedia article <https://en.wikipedia.org/wiki/Conjugate_gradient_method#\ Conjugate_gradient_on_the_normal_equations>`_. Parameters ---------- op : `Operator` Operator in the inverse problem. If not linear, it must have an implementation of `Operator.derivative`, which in turn must implement `Operator.adjoint`, i.e. the call ``op.derivative(x).adjoint`` must be valid. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : ``op.range`` element Right-hand side of the equation defining the inverse problem niter : int Number of iterations. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. See Also -------- conjugate_gradient : Optimized solver for symmetric matrices odl.solvers.smooth.nonlinear_cg.conjugate_gradient_nonlinear : Equivalent solver for the nonlinear case
[ "Optimized", "implementation", "of", "CG", "for", "the", "normal", "equation", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/iterative/iterative.py#L204-L279
231,811
odlgroup/odl
odl/solvers/iterative/iterative.py
gauss_newton
def gauss_newton(op, x, rhs, niter, zero_seq=exp_zero_seq(2.0), callback=None): """Optimized implementation of a Gauss-Newton method. This method solves the inverse problem (of the first kind):: A(x) = y for a (Frechet-) differentiable `Operator` ``A`` using a Gauss-Newton iteration. It uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. A variant of the method applied to a specific problem is described in a `Wikipedia article <https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm>`_. Parameters ---------- op : `Operator` Operator in the inverse problem. If not linear, it must have an implementation of `Operator.derivative`, which in turn must implement `Operator.adjoint`, i.e. the call ``op.derivative(x).adjoint`` must be valid. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : ``op.range`` element Right-hand side of the equation defining the inverse problem niter : int Maximum number of iterations. zero_seq : iterable, optional Zero sequence whose values are used for the regularization of the linearized problem in each Newton step. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. 
""" if x not in op.domain: raise TypeError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, op.domain)) x0 = x.copy() id_op = IdentityOperator(op.domain) dx = op.domain.zero() tmp_dom = op.domain.element() u = op.domain.element() tmp_ran = op.range.element() v = op.range.element() for _ in range(niter): tm = next(zero_seq) deriv = op.derivative(x) deriv_adjoint = deriv.adjoint # v = rhs - op(x) - deriv(x0-x) # u = deriv.T(v) op(x, out=tmp_ran) # eval op(x) v.lincomb(1, rhs, -1, tmp_ran) # assign v = rhs - op(x) tmp_dom.lincomb(1, x0, -1, x) # assign temp tmp_dom = x0 - x deriv(tmp_dom, out=tmp_ran) # eval deriv(x0-x) v -= tmp_ran # assign v = rhs-op(x)-deriv(x0-x) deriv_adjoint(v, out=u) # eval/assign u = deriv.T(v) # Solve equation Tikhonov regularized system # (deriv.T o deriv + tm * id_op)^-1 u = dx tikh_op = OperatorSum(OperatorComp(deriv.adjoint, deriv), tm * id_op, tmp_dom) # TODO: allow user to select other method conjugate_gradient(tikh_op, dx, u, 3) # Update x x.lincomb(1, x0, 1, dx) # x = x0 + dx if callback is not None: callback(x)
python
def gauss_newton(op, x, rhs, niter, zero_seq=exp_zero_seq(2.0), callback=None): if x not in op.domain: raise TypeError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, op.domain)) x0 = x.copy() id_op = IdentityOperator(op.domain) dx = op.domain.zero() tmp_dom = op.domain.element() u = op.domain.element() tmp_ran = op.range.element() v = op.range.element() for _ in range(niter): tm = next(zero_seq) deriv = op.derivative(x) deriv_adjoint = deriv.adjoint # v = rhs - op(x) - deriv(x0-x) # u = deriv.T(v) op(x, out=tmp_ran) # eval op(x) v.lincomb(1, rhs, -1, tmp_ran) # assign v = rhs - op(x) tmp_dom.lincomb(1, x0, -1, x) # assign temp tmp_dom = x0 - x deriv(tmp_dom, out=tmp_ran) # eval deriv(x0-x) v -= tmp_ran # assign v = rhs-op(x)-deriv(x0-x) deriv_adjoint(v, out=u) # eval/assign u = deriv.T(v) # Solve equation Tikhonov regularized system # (deriv.T o deriv + tm * id_op)^-1 u = dx tikh_op = OperatorSum(OperatorComp(deriv.adjoint, deriv), tm * id_op, tmp_dom) # TODO: allow user to select other method conjugate_gradient(tikh_op, dx, u, 3) # Update x x.lincomb(1, x0, 1, dx) # x = x0 + dx if callback is not None: callback(x)
[ "def", "gauss_newton", "(", "op", ",", "x", ",", "rhs", ",", "niter", ",", "zero_seq", "=", "exp_zero_seq", "(", "2.0", ")", ",", "callback", "=", "None", ")", ":", "if", "x", "not", "in", "op", ".", "domain", ":", "raise", "TypeError", "(", "'`x` ...
Optimized implementation of a Gauss-Newton method. This method solves the inverse problem (of the first kind):: A(x) = y for a (Frechet-) differentiable `Operator` ``A`` using a Gauss-Newton iteration. It uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. A variant of the method applied to a specific problem is described in a `Wikipedia article <https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm>`_. Parameters ---------- op : `Operator` Operator in the inverse problem. If not linear, it must have an implementation of `Operator.derivative`, which in turn must implement `Operator.adjoint`, i.e. the call ``op.derivative(x).adjoint`` must be valid. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : ``op.range`` element Right-hand side of the equation defining the inverse problem niter : int Maximum number of iterations. zero_seq : iterable, optional Zero sequence whose values are used for the regularization of the linearized problem in each Newton step. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate.
[ "Optimized", "implementation", "of", "a", "Gauss", "-", "Newton", "method", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/iterative/iterative.py#L310-L389
231,812
odlgroup/odl
odl/solvers/iterative/iterative.py
kaczmarz
def kaczmarz(ops, x, rhs, niter, omega=1, projection=None, random=False, callback=None, callback_loop='outer'): r"""Optimized implementation of Kaczmarz's method. Solves the inverse problem given by the set of equations:: A_n(x) = rhs_n This is also known as the Landweber-Kaczmarz's method, since the method coincides with the Landweber method for a single operator. Parameters ---------- ops : sequence of `Operator`'s Operators in the inverse problem. ``op[i].derivative(x).adjoint`` must be well-defined for ``x`` in the operator domain and for all ``i``. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : sequence of ``ops[i].range`` elements Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. omega : positive float or sequence of positive floats, optional Relaxation parameter in the iteration. If a single float is given the same step is used for all operators, otherwise separate steps are used. projection : callable, optional Function that can be used to modify the iterates in each iteration, for example enforcing positivity. The function should take one argument and modify it in-place. random : bool, optional If `True`, the order of the operators is randomized in each iteration. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. callback_loop : {'inner', 'outer'} Whether the callback should be called in the inner or outer loop. Notes ----- This method calculates an approximate least-squares solution of the inverse problem of the first kind .. math:: \mathcal{A}_i (x) = y_i \quad 1 \leq i \leq n, for a given :math:`y_n \in \mathcal{Y}_n`, i.e. an approximate solution :math:`x^*` to .. 
math:: \min_{x\in \mathcal{X}} \sum_{i=1}^n \| \mathcal{A}_i(x) - y_i \|_{\mathcal{Y}_i}^2 for a (Frechet-) differentiable operator :math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method starts from an initial guess :math:`x_0` and uses the iteration .. math:: x_{k+1} = x_k - \omega_{[k]} \ \partial \mathcal{A}_{[k]}(x_k)^* (\mathcal{A}_{[k]}(x_k) - y_{[k]}), where :math:`\partial \mathcal{A}_{[k]}(x_k)` is the Frechet derivative of :math:`\mathcal{A}_{[k]}` at :math:`x_k`, :math:`\omega_{[k]}` is a relaxation parameter and :math:`[k] := k \text{ mod } n`. For linear problems, a choice :math:`0 < \omega_i < 2/\lVert \mathcal{A}_{i}^2\rVert` guarantees convergence, where :math:`\|\mathcal{A}_{i}\|` stands for the operator norm of :math:`\mathcal{A}_{i}`. This implementation uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is also described in a `Wikipedia article <https://en.wikipedia.org/wiki/Kaczmarz_method>`_. and in Natterer, F. Mathematical Methods in Image Reconstruction, section 5.3.2. 
See Also -------- landweber """ domain = ops[0].domain if any(domain != opi.domain for opi in ops): raise ValueError('domains of `ops` are not all equal') if x not in domain: raise TypeError('`x` {!r} is not in the domain of `ops` {!r}' ''.format(x, domain)) if len(ops) != len(rhs): raise ValueError('`number of `ops` {} does not match number of ' '`rhs` {}'.format(len(ops), len(rhs))) omega = normalized_scalar_param_list(omega, len(ops), param_conv=float) # Reusable elements in the range, one per type of space ranges = [opi.range for opi in ops] unique_ranges = set(ranges) tmp_rans = {ran: ran.element() for ran in unique_ranges} # Single reusable element in the domain tmp_dom = domain.element() # Iteratively find solution for _ in range(niter): if random: rng = np.random.permutation(range(len(ops))) else: rng = range(len(ops)) for i in rng: # Find residual tmp_ran = tmp_rans[ops[i].range] ops[i](x, out=tmp_ran) tmp_ran -= rhs[i] # Update x ops[i].derivative(x).adjoint(tmp_ran, out=tmp_dom) x.lincomb(1, x, -omega[i], tmp_dom) if projection is not None: projection(x) if callback is not None and callback_loop == 'inner': callback(x) if callback is not None and callback_loop == 'outer': callback(x)
python
def kaczmarz(ops, x, rhs, niter, omega=1, projection=None, random=False, callback=None, callback_loop='outer'): r"""Optimized implementation of Kaczmarz's method. Solves the inverse problem given by the set of equations:: A_n(x) = rhs_n This is also known as the Landweber-Kaczmarz's method, since the method coincides with the Landweber method for a single operator. Parameters ---------- ops : sequence of `Operator`'s Operators in the inverse problem. ``op[i].derivative(x).adjoint`` must be well-defined for ``x`` in the operator domain and for all ``i``. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : sequence of ``ops[i].range`` elements Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. omega : positive float or sequence of positive floats, optional Relaxation parameter in the iteration. If a single float is given the same step is used for all operators, otherwise separate steps are used. projection : callable, optional Function that can be used to modify the iterates in each iteration, for example enforcing positivity. The function should take one argument and modify it in-place. random : bool, optional If `True`, the order of the operators is randomized in each iteration. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. callback_loop : {'inner', 'outer'} Whether the callback should be called in the inner or outer loop. Notes ----- This method calculates an approximate least-squares solution of the inverse problem of the first kind .. math:: \mathcal{A}_i (x) = y_i \quad 1 \leq i \leq n, for a given :math:`y_n \in \mathcal{Y}_n`, i.e. an approximate solution :math:`x^*` to .. 
math:: \min_{x\in \mathcal{X}} \sum_{i=1}^n \| \mathcal{A}_i(x) - y_i \|_{\mathcal{Y}_i}^2 for a (Frechet-) differentiable operator :math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method starts from an initial guess :math:`x_0` and uses the iteration .. math:: x_{k+1} = x_k - \omega_{[k]} \ \partial \mathcal{A}_{[k]}(x_k)^* (\mathcal{A}_{[k]}(x_k) - y_{[k]}), where :math:`\partial \mathcal{A}_{[k]}(x_k)` is the Frechet derivative of :math:`\mathcal{A}_{[k]}` at :math:`x_k`, :math:`\omega_{[k]}` is a relaxation parameter and :math:`[k] := k \text{ mod } n`. For linear problems, a choice :math:`0 < \omega_i < 2/\lVert \mathcal{A}_{i}^2\rVert` guarantees convergence, where :math:`\|\mathcal{A}_{i}\|` stands for the operator norm of :math:`\mathcal{A}_{i}`. This implementation uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is also described in a `Wikipedia article <https://en.wikipedia.org/wiki/Kaczmarz_method>`_. and in Natterer, F. Mathematical Methods in Image Reconstruction, section 5.3.2. 
See Also -------- landweber """ domain = ops[0].domain if any(domain != opi.domain for opi in ops): raise ValueError('domains of `ops` are not all equal') if x not in domain: raise TypeError('`x` {!r} is not in the domain of `ops` {!r}' ''.format(x, domain)) if len(ops) != len(rhs): raise ValueError('`number of `ops` {} does not match number of ' '`rhs` {}'.format(len(ops), len(rhs))) omega = normalized_scalar_param_list(omega, len(ops), param_conv=float) # Reusable elements in the range, one per type of space ranges = [opi.range for opi in ops] unique_ranges = set(ranges) tmp_rans = {ran: ran.element() for ran in unique_ranges} # Single reusable element in the domain tmp_dom = domain.element() # Iteratively find solution for _ in range(niter): if random: rng = np.random.permutation(range(len(ops))) else: rng = range(len(ops)) for i in rng: # Find residual tmp_ran = tmp_rans[ops[i].range] ops[i](x, out=tmp_ran) tmp_ran -= rhs[i] # Update x ops[i].derivative(x).adjoint(tmp_ran, out=tmp_dom) x.lincomb(1, x, -omega[i], tmp_dom) if projection is not None: projection(x) if callback is not None and callback_loop == 'inner': callback(x) if callback is not None and callback_loop == 'outer': callback(x)
[ "def", "kaczmarz", "(", "ops", ",", "x", ",", "rhs", ",", "niter", ",", "omega", "=", "1", ",", "projection", "=", "None", ",", "random", "=", "False", ",", "callback", "=", "None", ",", "callback_loop", "=", "'outer'", ")", ":", "domain", "=", "op...
r"""Optimized implementation of Kaczmarz's method. Solves the inverse problem given by the set of equations:: A_n(x) = rhs_n This is also known as the Landweber-Kaczmarz's method, since the method coincides with the Landweber method for a single operator. Parameters ---------- ops : sequence of `Operator`'s Operators in the inverse problem. ``op[i].derivative(x).adjoint`` must be well-defined for ``x`` in the operator domain and for all ``i``. x : ``op.domain`` element Element to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. rhs : sequence of ``ops[i].range`` elements Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. omega : positive float or sequence of positive floats, optional Relaxation parameter in the iteration. If a single float is given the same step is used for all operators, otherwise separate steps are used. projection : callable, optional Function that can be used to modify the iterates in each iteration, for example enforcing positivity. The function should take one argument and modify it in-place. random : bool, optional If `True`, the order of the operators is randomized in each iteration. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. callback_loop : {'inner', 'outer'} Whether the callback should be called in the inner or outer loop. Notes ----- This method calculates an approximate least-squares solution of the inverse problem of the first kind .. math:: \mathcal{A}_i (x) = y_i \quad 1 \leq i \leq n, for a given :math:`y_n \in \mathcal{Y}_n`, i.e. an approximate solution :math:`x^*` to .. math:: \min_{x\in \mathcal{X}} \sum_{i=1}^n \| \mathcal{A}_i(x) - y_i \|_{\mathcal{Y}_i}^2 for a (Frechet-) differentiable operator :math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. 
The method starts from an initial guess :math:`x_0` and uses the iteration .. math:: x_{k+1} = x_k - \omega_{[k]} \ \partial \mathcal{A}_{[k]}(x_k)^* (\mathcal{A}_{[k]}(x_k) - y_{[k]}), where :math:`\partial \mathcal{A}_{[k]}(x_k)` is the Frechet derivative of :math:`\mathcal{A}_{[k]}` at :math:`x_k`, :math:`\omega_{[k]}` is a relaxation parameter and :math:`[k] := k \text{ mod } n`. For linear problems, a choice :math:`0 < \omega_i < 2/\lVert \mathcal{A}_{i}^2\rVert` guarantees convergence, where :math:`\|\mathcal{A}_{i}\|` stands for the operator norm of :math:`\mathcal{A}_{i}`. This implementation uses a minimum amount of memory copies by applying re-usable temporaries and in-place evaluation. The method is also described in a `Wikipedia article <https://en.wikipedia.org/wiki/Kaczmarz_method>`_. and in Natterer, F. Mathematical Methods in Image Reconstruction, section 5.3.2. See Also -------- landweber
[ "r", "Optimized", "implementation", "of", "Kaczmarz", "s", "method", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/iterative/iterative.py#L392-L522
231,813
odlgroup/odl
odl/solvers/smooth/nonlinear_cg.py
conjugate_gradient_nonlinear
def conjugate_gradient_nonlinear(f, x, line_search=1.0, maxiter=1000, nreset=0, tol=1e-16, beta_method='FR', callback=None): r"""Conjugate gradient for nonlinear problems. Parameters ---------- f : `Functional` Functional with ``f.gradient``. x : ``op.domain`` element Vector to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, it is used as a fixed step length. maxiter : int, optional Maximum number of iterations to perform. nreset : int, optional Number of times the solver should be reset. Default: no reset. tol : float, optional Tolerance that should be used to terminating the iteration. beta_method : {'FR', 'PR', 'HS', 'DY'}, optional Method to calculate ``beta`` in the iterates. - ``'FR'`` : Fletcher-Reeves - ``'PR'`` : Polak-Ribiere - ``'HS'`` : Hestenes-Stiefel - ``'DY'`` : Dai-Yuan callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. Notes ----- This is a general and optimized implementation of the nonlinear conjguate gradient method for solving a general unconstrained optimization problem .. math:: \min f(x) for a differentiable functional :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X}. The method is described in a `Wikipedia article <https://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method>`_. 
See Also -------- odl.solvers.smooth.newton.bfgs_method : Quasi-Newton solver for the same problem odl.solvers.iterative.iterative.conjugate_gradient : Optimized solver for least-squares problem with linear and symmetric operator odl.solvers.iterative.iterative.conjugate_gradient_normal : Equivalent solver but for least-squares problem with linear operator """ if x not in f.domain: raise TypeError('`x` {!r} is not in the domain of `f` {!r}' ''.format(x, f.domain)) if not callable(line_search): line_search = ConstantLineSearch(line_search) if beta_method not in ['FR', 'PR', 'HS', 'DY']: raise ValueError('unknown ``beta_method``') for _ in range(nreset + 1): # First iteration is done without beta dx = -f.gradient(x) dir_derivative = -dx.inner(dx) if abs(dir_derivative) < tol: return a = line_search(x, dx, dir_derivative) x.lincomb(1, x, a, dx) # x = x + a * dx s = dx # for 'HS' and 'DY' beta methods for _ in range(maxiter // (nreset + 1)): # Compute dx as -grad f dx, dx_old = -f.gradient(x), dx # Calculate "beta" if beta_method == 'FR': beta = dx.inner(dx) / dx_old.inner(dx_old) elif beta_method == 'PR': beta = dx.inner(dx - dx_old) / dx_old.inner(dx_old) elif beta_method == 'HS': beta = - dx.inner(dx - dx_old) / s.inner(dx - dx_old) elif beta_method == 'DY': beta = - dx.inner(dx) / s.inner(dx - dx_old) else: raise RuntimeError('unknown ``beta_method``') # Reset beta if negative. beta = max(0, beta) # Update search direction s.lincomb(1, dx, beta, s) # s = dx + beta * s # Find optimal step along s dir_derivative = -dx.inner(s) if abs(dir_derivative) <= tol: return a = line_search(x, s, dir_derivative) # Update position x.lincomb(1, x, a, s) # x = x + a * s if callback is not None: callback(x)
python
def conjugate_gradient_nonlinear(f, x, line_search=1.0, maxiter=1000, nreset=0, tol=1e-16, beta_method='FR', callback=None): r"""Conjugate gradient for nonlinear problems. Parameters ---------- f : `Functional` Functional with ``f.gradient``. x : ``op.domain`` element Vector to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, it is used as a fixed step length. maxiter : int, optional Maximum number of iterations to perform. nreset : int, optional Number of times the solver should be reset. Default: no reset. tol : float, optional Tolerance that should be used to terminating the iteration. beta_method : {'FR', 'PR', 'HS', 'DY'}, optional Method to calculate ``beta`` in the iterates. - ``'FR'`` : Fletcher-Reeves - ``'PR'`` : Polak-Ribiere - ``'HS'`` : Hestenes-Stiefel - ``'DY'`` : Dai-Yuan callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. Notes ----- This is a general and optimized implementation of the nonlinear conjguate gradient method for solving a general unconstrained optimization problem .. math:: \min f(x) for a differentiable functional :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X}. The method is described in a `Wikipedia article <https://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method>`_. 
See Also -------- odl.solvers.smooth.newton.bfgs_method : Quasi-Newton solver for the same problem odl.solvers.iterative.iterative.conjugate_gradient : Optimized solver for least-squares problem with linear and symmetric operator odl.solvers.iterative.iterative.conjugate_gradient_normal : Equivalent solver but for least-squares problem with linear operator """ if x not in f.domain: raise TypeError('`x` {!r} is not in the domain of `f` {!r}' ''.format(x, f.domain)) if not callable(line_search): line_search = ConstantLineSearch(line_search) if beta_method not in ['FR', 'PR', 'HS', 'DY']: raise ValueError('unknown ``beta_method``') for _ in range(nreset + 1): # First iteration is done without beta dx = -f.gradient(x) dir_derivative = -dx.inner(dx) if abs(dir_derivative) < tol: return a = line_search(x, dx, dir_derivative) x.lincomb(1, x, a, dx) # x = x + a * dx s = dx # for 'HS' and 'DY' beta methods for _ in range(maxiter // (nreset + 1)): # Compute dx as -grad f dx, dx_old = -f.gradient(x), dx # Calculate "beta" if beta_method == 'FR': beta = dx.inner(dx) / dx_old.inner(dx_old) elif beta_method == 'PR': beta = dx.inner(dx - dx_old) / dx_old.inner(dx_old) elif beta_method == 'HS': beta = - dx.inner(dx - dx_old) / s.inner(dx - dx_old) elif beta_method == 'DY': beta = - dx.inner(dx) / s.inner(dx - dx_old) else: raise RuntimeError('unknown ``beta_method``') # Reset beta if negative. beta = max(0, beta) # Update search direction s.lincomb(1, dx, beta, s) # s = dx + beta * s # Find optimal step along s dir_derivative = -dx.inner(s) if abs(dir_derivative) <= tol: return a = line_search(x, s, dir_derivative) # Update position x.lincomb(1, x, a, s) # x = x + a * s if callback is not None: callback(x)
[ "def", "conjugate_gradient_nonlinear", "(", "f", ",", "x", ",", "line_search", "=", "1.0", ",", "maxiter", "=", "1000", ",", "nreset", "=", "0", ",", "tol", "=", "1e-16", ",", "beta_method", "=", "'FR'", ",", "callback", "=", "None", ")", ":", "if", ...
r"""Conjugate gradient for nonlinear problems. Parameters ---------- f : `Functional` Functional with ``f.gradient``. x : ``op.domain`` element Vector to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, it is used as a fixed step length. maxiter : int, optional Maximum number of iterations to perform. nreset : int, optional Number of times the solver should be reset. Default: no reset. tol : float, optional Tolerance that should be used to terminating the iteration. beta_method : {'FR', 'PR', 'HS', 'DY'}, optional Method to calculate ``beta`` in the iterates. - ``'FR'`` : Fletcher-Reeves - ``'PR'`` : Polak-Ribiere - ``'HS'`` : Hestenes-Stiefel - ``'DY'`` : Dai-Yuan callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. Notes ----- This is a general and optimized implementation of the nonlinear conjguate gradient method for solving a general unconstrained optimization problem .. math:: \min f(x) for a differentiable functional :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X}. The method is described in a `Wikipedia article <https://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method>`_. See Also -------- odl.solvers.smooth.newton.bfgs_method : Quasi-Newton solver for the same problem odl.solvers.iterative.iterative.conjugate_gradient : Optimized solver for least-squares problem with linear and symmetric operator odl.solvers.iterative.iterative.conjugate_gradient_normal : Equivalent solver but for least-squares problem with linear operator
[ "r", "Conjugate", "gradient", "for", "nonlinear", "problems", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/smooth/nonlinear_cg.py#L19-L135
231,814
odlgroup/odl
odl/discr/discretization.py
tspace_type
def tspace_type(space, impl, dtype=None): """Select the correct corresponding tensor space. Parameters ---------- space : `LinearSpace` Template space from which to infer an adequate tensor space. If it has a ``field`` attribute, ``dtype`` must be consistent with it. impl : string Implementation backend for the tensor space. dtype : optional Data type which the space is supposed to use. If ``None`` is given, the space type is purely determined from ``space`` and ``impl``. Otherwise, it must be compatible with the field of ``space``. Returns ------- stype : type Space type selected after the space's field, the backend and the data type. """ field_type = type(getattr(space, 'field', None)) if dtype is None: pass elif is_real_floating_dtype(dtype): if field_type is None or field_type == ComplexNumbers: raise TypeError('real floating data type {!r} requires space ' 'field to be of type RealNumbers, got {}' ''.format(dtype, field_type)) elif is_complex_floating_dtype(dtype): if field_type is None or field_type == RealNumbers: raise TypeError('complex floating data type {!r} requires space ' 'field to be of type ComplexNumbers, got {!r}' ''.format(dtype, field_type)) elif is_numeric_dtype(dtype): if field_type == ComplexNumbers: raise TypeError('non-floating data type {!r} requires space field ' 'to be of type RealNumbers, got {!r}' .format(dtype, field_type)) try: return tensor_space_impl(impl) except ValueError: raise NotImplementedError('no corresponding tensor space available ' 'for space {!r} and implementation {!r}' ''.format(space, impl))
python
def tspace_type(space, impl, dtype=None): field_type = type(getattr(space, 'field', None)) if dtype is None: pass elif is_real_floating_dtype(dtype): if field_type is None or field_type == ComplexNumbers: raise TypeError('real floating data type {!r} requires space ' 'field to be of type RealNumbers, got {}' ''.format(dtype, field_type)) elif is_complex_floating_dtype(dtype): if field_type is None or field_type == RealNumbers: raise TypeError('complex floating data type {!r} requires space ' 'field to be of type ComplexNumbers, got {!r}' ''.format(dtype, field_type)) elif is_numeric_dtype(dtype): if field_type == ComplexNumbers: raise TypeError('non-floating data type {!r} requires space field ' 'to be of type RealNumbers, got {!r}' .format(dtype, field_type)) try: return tensor_space_impl(impl) except ValueError: raise NotImplementedError('no corresponding tensor space available ' 'for space {!r} and implementation {!r}' ''.format(space, impl))
[ "def", "tspace_type", "(", "space", ",", "impl", ",", "dtype", "=", "None", ")", ":", "field_type", "=", "type", "(", "getattr", "(", "space", ",", "'field'", ",", "None", ")", ")", "if", "dtype", "is", "None", ":", "pass", "elif", "is_real_floating_dt...
Select the correct corresponding tensor space. Parameters ---------- space : `LinearSpace` Template space from which to infer an adequate tensor space. If it has a ``field`` attribute, ``dtype`` must be consistent with it. impl : string Implementation backend for the tensor space. dtype : optional Data type which the space is supposed to use. If ``None`` is given, the space type is purely determined from ``space`` and ``impl``. Otherwise, it must be compatible with the field of ``space``. Returns ------- stype : type Space type selected after the space's field, the backend and the data type.
[ "Select", "the", "correct", "corresponding", "tensor", "space", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discretization.py#L483-L530
231,815
odlgroup/odl
odl/discr/discretization.py
DiscretizedSpace._lincomb
def _lincomb(self, a, x1, b, x2, out): """Raw linear combination.""" self.tspace._lincomb(a, x1.tensor, b, x2.tensor, out.tensor)
python
def _lincomb(self, a, x1, b, x2, out): self.tspace._lincomb(a, x1.tensor, b, x2.tensor, out.tensor)
[ "def", "_lincomb", "(", "self", ",", "a", ",", "x1", ",", "b", ",", "x2", ",", "out", ")", ":", "self", ".", "tspace", ".", "_lincomb", "(", "a", ",", "x1", ".", "tensor", ",", "b", ",", "x2", ".", "tensor", ",", "out", ".", "tensor", ")" ]
Raw linear combination.
[ "Raw", "linear", "combination", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discretization.py#L253-L255
231,816
odlgroup/odl
odl/discr/discretization.py
DiscretizedSpace._dist
def _dist(self, x1, x2): """Raw distance between two elements.""" return self.tspace._dist(x1.tensor, x2.tensor)
python
def _dist(self, x1, x2): return self.tspace._dist(x1.tensor, x2.tensor)
[ "def", "_dist", "(", "self", ",", "x1", ",", "x2", ")", ":", "return", "self", ".", "tspace", ".", "_dist", "(", "x1", ".", "tensor", ",", "x2", ".", "tensor", ")" ]
Raw distance between two elements.
[ "Raw", "distance", "between", "two", "elements", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discretization.py#L257-L259
231,817
odlgroup/odl
odl/discr/discretization.py
DiscretizedSpace._inner
def _inner(self, x1, x2): """Raw inner product of two elements.""" return self.tspace._inner(x1.tensor, x2.tensor)
python
def _inner(self, x1, x2): return self.tspace._inner(x1.tensor, x2.tensor)
[ "def", "_inner", "(", "self", ",", "x1", ",", "x2", ")", ":", "return", "self", ".", "tspace", ".", "_inner", "(", "x1", ".", "tensor", ",", "x2", ".", "tensor", ")" ]
Raw inner product of two elements.
[ "Raw", "inner", "product", "of", "two", "elements", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discretization.py#L265-L267
231,818
odlgroup/odl
odl/discr/discretization.py
DiscretizedSpaceElement.sampling
def sampling(self, ufunc, **kwargs): """Sample a continuous function and assign to this element. Parameters ---------- ufunc : ``self.space.fspace`` element The continuous function that should be samplingicted. kwargs : Additional arugments for the sampling operator implementation Examples -------- >>> space = odl.uniform_discr(0, 1, 5) >>> x = space.element() Assign x according to a continuous function: >>> x.sampling(lambda t: t) >>> x # Print values at grid points (which are centered) uniform_discr(0.0, 1.0, 5).element([ 0.1, 0.3, 0.5, 0.7, 0.9]) See Also -------- DiscretizedSpace.sampling : For full description """ self.space.sampling(ufunc, out=self.tensor, **kwargs)
python
def sampling(self, ufunc, **kwargs): self.space.sampling(ufunc, out=self.tensor, **kwargs)
[ "def", "sampling", "(", "self", ",", "ufunc", ",", "*", "*", "kwargs", ")", ":", "self", ".", "space", ".", "sampling", "(", "ufunc", ",", "out", "=", "self", ".", "tensor", ",", "*", "*", "kwargs", ")" ]
Sample a continuous function and assign to this element. Parameters ---------- ufunc : ``self.space.fspace`` element The continuous function that should be samplingicted. kwargs : Additional arugments for the sampling operator implementation Examples -------- >>> space = odl.uniform_discr(0, 1, 5) >>> x = space.element() Assign x according to a continuous function: >>> x.sampling(lambda t: t) >>> x # Print values at grid points (which are centered) uniform_discr(0.0, 1.0, 5).element([ 0.1, 0.3, 0.5, 0.7, 0.9]) See Also -------- DiscretizedSpace.sampling : For full description
[ "Sample", "a", "continuous", "function", "and", "assign", "to", "this", "element", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discretization.py#L414-L439
231,819
odlgroup/odl
odl/operator/tensor_ops.py
_normalize_sampling_points
def _normalize_sampling_points(sampling_points, ndim): """Normalize points to an ndim-long list of linear index arrays. This helper converts sampling indices for `SamplingOperator` from integers or array-like objects to a list of length ``ndim``, where each entry is a `numpy.ndarray` with ``dtype=int``. The function also checks if all arrays have equal lengths, and that they fulfill ``array.ndim=1`` (or ``size=0`` for if ``ndim == 0``). The result of this normalization is intended to be used for indexing an ``ndim``-dimensional array at ``sampling_points`` via NumPy fancy indexing, i.e., ``result = ndim_array[sampling_points]``. """ sampling_points_in = sampling_points if ndim == 0: sampling_points = [np.array(sampling_points, dtype=int, copy=False)] if sampling_points[0].size != 0: raise ValueError('`sampling_points` must be empty for ' '0-dim. `domain`') elif ndim == 1: if isinstance(sampling_points, Integral): sampling_points = (sampling_points,) sampling_points = np.array(sampling_points, dtype=int, copy=False, ndmin=1) # Handle possible list of length one if sampling_points.ndim == 2 and sampling_points.shape[0] == 1: sampling_points = sampling_points[0] sampling_points = [sampling_points] if sampling_points[0].ndim > 1: raise ValueError('expected 1D index (array), got {}' ''.format(sampling_points_in)) else: try: iter(sampling_points) except TypeError: raise TypeError('`sampling_points` must be a sequence ' 'for domain with ndim > 1') else: if np.ndim(sampling_points) == 1: sampling_points = [np.array(p, dtype=int) for p in sampling_points] else: sampling_points = [ np.array(pts, dtype=int, copy=False, ndmin=1) for pts in sampling_points] if any(pts.ndim != 1 for pts in sampling_points): raise ValueError( 'index arrays in `sampling_points` must be 1D, ' 'got {!r}'.format(sampling_points_in)) return sampling_points
python
def _normalize_sampling_points(sampling_points, ndim): sampling_points_in = sampling_points if ndim == 0: sampling_points = [np.array(sampling_points, dtype=int, copy=False)] if sampling_points[0].size != 0: raise ValueError('`sampling_points` must be empty for ' '0-dim. `domain`') elif ndim == 1: if isinstance(sampling_points, Integral): sampling_points = (sampling_points,) sampling_points = np.array(sampling_points, dtype=int, copy=False, ndmin=1) # Handle possible list of length one if sampling_points.ndim == 2 and sampling_points.shape[0] == 1: sampling_points = sampling_points[0] sampling_points = [sampling_points] if sampling_points[0].ndim > 1: raise ValueError('expected 1D index (array), got {}' ''.format(sampling_points_in)) else: try: iter(sampling_points) except TypeError: raise TypeError('`sampling_points` must be a sequence ' 'for domain with ndim > 1') else: if np.ndim(sampling_points) == 1: sampling_points = [np.array(p, dtype=int) for p in sampling_points] else: sampling_points = [ np.array(pts, dtype=int, copy=False, ndmin=1) for pts in sampling_points] if any(pts.ndim != 1 for pts in sampling_points): raise ValueError( 'index arrays in `sampling_points` must be 1D, ' 'got {!r}'.format(sampling_points_in)) return sampling_points
[ "def", "_normalize_sampling_points", "(", "sampling_points", ",", "ndim", ")", ":", "sampling_points_in", "=", "sampling_points", "if", "ndim", "==", "0", ":", "sampling_points", "=", "[", "np", ".", "array", "(", "sampling_points", ",", "dtype", "=", "int", "...
Normalize points to an ndim-long list of linear index arrays. This helper converts sampling indices for `SamplingOperator` from integers or array-like objects to a list of length ``ndim``, where each entry is a `numpy.ndarray` with ``dtype=int``. The function also checks if all arrays have equal lengths, and that they fulfill ``array.ndim=1`` (or ``size=0`` for if ``ndim == 0``). The result of this normalization is intended to be used for indexing an ``ndim``-dimensional array at ``sampling_points`` via NumPy fancy indexing, i.e., ``result = ndim_array[sampling_points]``.
[ "Normalize", "points", "to", "an", "ndim", "-", "long", "list", "of", "linear", "index", "arrays", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L979-L1031
231,820
odlgroup/odl
odl/operator/tensor_ops.py
PointwiseNorm.derivative
def derivative(self, vf): """Derivative of the point-wise norm operator at ``vf``. The derivative at ``F`` of the point-wise norm operator ``N`` with finite exponent ``p`` and weights ``w`` is the pointwise inner product with the vector field :: x --> N(F)(x)^(1-p) * [ F_j(x) * |F_j(x)|^(p-2) ]_j Note that this is not well-defined for ``F = 0``. If ``p < 2``, any zero component will result in a singularity. Parameters ---------- vf : `domain` `element-like` Vector field ``F`` at which to evaluate the derivative. Returns ------- deriv : `PointwiseInner` Derivative operator at the given point ``vf``. Raises ------ NotImplementedError * if the vector field space is complex, since the derivative is not linear in that case * if the exponent is ``inf`` """ if self.domain.field == ComplexNumbers(): raise NotImplementedError('operator not Frechet-differentiable ' 'on a complex space') if self.exponent == float('inf'): raise NotImplementedError('operator not Frechet-differentiable ' 'for exponent = inf') vf = self.domain.element(vf) vf_pwnorm_fac = self(vf) if self.exponent != 2: # optimize away most common case. vf_pwnorm_fac **= (self.exponent - 1) inner_vf = vf.copy() for gi in inner_vf: gi *= gi.ufuncs.absolute().ufuncs.power(self.exponent - 2) if self.exponent >= 2: # Any component that is zero is not divided with nz = (vf_pwnorm_fac.asarray() != 0) gi[nz] /= vf_pwnorm_fac[nz] else: # For exponents < 2 there will be a singularity if any # component is zero. This results in inf or nan. See the # documentation for further details. gi /= vf_pwnorm_fac return PointwiseInner(self.domain, inner_vf, weighting=self.weights)
python
def derivative(self, vf): if self.domain.field == ComplexNumbers(): raise NotImplementedError('operator not Frechet-differentiable ' 'on a complex space') if self.exponent == float('inf'): raise NotImplementedError('operator not Frechet-differentiable ' 'for exponent = inf') vf = self.domain.element(vf) vf_pwnorm_fac = self(vf) if self.exponent != 2: # optimize away most common case. vf_pwnorm_fac **= (self.exponent - 1) inner_vf = vf.copy() for gi in inner_vf: gi *= gi.ufuncs.absolute().ufuncs.power(self.exponent - 2) if self.exponent >= 2: # Any component that is zero is not divided with nz = (vf_pwnorm_fac.asarray() != 0) gi[nz] /= vf_pwnorm_fac[nz] else: # For exponents < 2 there will be a singularity if any # component is zero. This results in inf or nan. See the # documentation for further details. gi /= vf_pwnorm_fac return PointwiseInner(self.domain, inner_vf, weighting=self.weights)
[ "def", "derivative", "(", "self", ",", "vf", ")", ":", "if", "self", ".", "domain", ".", "field", "==", "ComplexNumbers", "(", ")", ":", "raise", "NotImplementedError", "(", "'operator not Frechet-differentiable '", "'on a complex space'", ")", "if", "self", "."...
Derivative of the point-wise norm operator at ``vf``. The derivative at ``F`` of the point-wise norm operator ``N`` with finite exponent ``p`` and weights ``w`` is the pointwise inner product with the vector field :: x --> N(F)(x)^(1-p) * [ F_j(x) * |F_j(x)|^(p-2) ]_j Note that this is not well-defined for ``F = 0``. If ``p < 2``, any zero component will result in a singularity. Parameters ---------- vf : `domain` `element-like` Vector field ``F`` at which to evaluate the derivative. Returns ------- deriv : `PointwiseInner` Derivative operator at the given point ``vf``. Raises ------ NotImplementedError * if the vector field space is complex, since the derivative is not linear in that case * if the exponent is ``inf``
[ "Derivative", "of", "the", "point", "-", "wise", "norm", "operator", "at", "vf", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L302-L358
231,821
odlgroup/odl
odl/operator/tensor_ops.py
MatrixOperator.adjoint
def adjoint(self): """Adjoint operator represented by the adjoint matrix. Returns ------- adjoint : `MatrixOperator` """ return MatrixOperator(self.matrix.conj().T, domain=self.range, range=self.domain, axis=self.axis)
python
def adjoint(self): return MatrixOperator(self.matrix.conj().T, domain=self.range, range=self.domain, axis=self.axis)
[ "def", "adjoint", "(", "self", ")", ":", "return", "MatrixOperator", "(", "self", ".", "matrix", ".", "conj", "(", ")", ".", "T", ",", "domain", "=", "self", ".", "range", ",", "range", "=", "self", ".", "domain", ",", "axis", "=", "self", ".", "...
Adjoint operator represented by the adjoint matrix. Returns ------- adjoint : `MatrixOperator`
[ "Adjoint", "operator", "represented", "by", "the", "adjoint", "matrix", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L875-L884
231,822
odlgroup/odl
odl/operator/tensor_ops.py
MatrixOperator.inverse
def inverse(self): """Inverse operator represented by the inverse matrix. Taking the inverse causes sparse matrices to become dense and is generally very heavy computationally since the matrix is inverted numerically (an O(n^3) operation). It is recommended to instead use one of the solvers available in the ``odl.solvers`` package. Returns ------- inverse : `MatrixOperator` """ # Lazy import to improve `import odl` time import scipy.sparse if scipy.sparse.isspmatrix(self.matrix): dense_matrix = self.matrix.toarray() else: dense_matrix = self.matrix return MatrixOperator(np.linalg.inv(dense_matrix), domain=self.range, range=self.domain, axis=self.axis)
python
def inverse(self): # Lazy import to improve `import odl` time import scipy.sparse if scipy.sparse.isspmatrix(self.matrix): dense_matrix = self.matrix.toarray() else: dense_matrix = self.matrix return MatrixOperator(np.linalg.inv(dense_matrix), domain=self.range, range=self.domain, axis=self.axis)
[ "def", "inverse", "(", "self", ")", ":", "# Lazy import to improve `import odl` time", "import", "scipy", ".", "sparse", "if", "scipy", ".", "sparse", ".", "isspmatrix", "(", "self", ".", "matrix", ")", ":", "dense_matrix", "=", "self", ".", "matrix", ".", "...
Inverse operator represented by the inverse matrix. Taking the inverse causes sparse matrices to become dense and is generally very heavy computationally since the matrix is inverted numerically (an O(n^3) operation). It is recommended to instead use one of the solvers available in the ``odl.solvers`` package. Returns ------- inverse : `MatrixOperator`
[ "Inverse", "operator", "represented", "by", "the", "inverse", "matrix", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L887-L909
231,823
odlgroup/odl
odl/operator/tensor_ops.py
SamplingOperator._call
def _call(self, x): """Return values at indices, possibly weighted.""" out = x.asarray().ravel()[self._indices_flat] if self.variant == 'point_eval': weights = 1.0 elif self.variant == 'integrate': weights = getattr(self.domain, 'cell_volume', 1.0) else: raise RuntimeError('bad variant {!r}'.format(self.variant)) if weights != 1.0: out *= weights return out
python
def _call(self, x): out = x.asarray().ravel()[self._indices_flat] if self.variant == 'point_eval': weights = 1.0 elif self.variant == 'integrate': weights = getattr(self.domain, 'cell_volume', 1.0) else: raise RuntimeError('bad variant {!r}'.format(self.variant)) if weights != 1.0: out *= weights return out
[ "def", "_call", "(", "self", ",", "x", ")", ":", "out", "=", "x", ".", "asarray", "(", ")", ".", "ravel", "(", ")", "[", "self", ".", "_indices_flat", "]", "if", "self", ".", "variant", "==", "'point_eval'", ":", "weights", "=", "1.0", "elif", "s...
Return values at indices, possibly weighted.
[ "Return", "values", "at", "indices", "possibly", "weighted", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L1142-L1156
231,824
odlgroup/odl
odl/operator/tensor_ops.py
SamplingOperator.adjoint
def adjoint(self): """Adjoint of the sampling operator, a `WeightedSumSamplingOperator`. If each sampling point occurs only once, the adjoint consists in inserting the given values into the output at the sampling points. Duplicate sampling points are weighted with their multiplicity. Examples -------- >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3)) >>> sampling_points = [[0, 1, 1, 0], ... [0, 1, 2, 0]] >>> op = odl.SamplingOperator(space, sampling_points) >>> x = space.element([[1, 2, 3], ... [4, 5, 6]]) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True The ``'integrate'`` variant adjoint puts ones at the indices in ``sampling_points``, multiplied by their multiplicity: >>> op = odl.SamplingOperator(space, sampling_points, ... variant='integrate') >>> op.adjoint(op.range.one()) # (0, 0) occurs twice uniform_discr([-1., -1.], [ 1., 1.], (2, 3)).element( [[ 2., 0., 0.], [ 0., 1., 1.]] ) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True """ if self.variant == 'point_eval': variant = 'dirac' elif self.variant == 'integrate': variant = 'char_fun' else: raise RuntimeError('bad variant {!r}'.format(self.variant)) return WeightedSumSamplingOperator(self.domain, self.sampling_points, variant)
python
def adjoint(self): if self.variant == 'point_eval': variant = 'dirac' elif self.variant == 'integrate': variant = 'char_fun' else: raise RuntimeError('bad variant {!r}'.format(self.variant)) return WeightedSumSamplingOperator(self.domain, self.sampling_points, variant)
[ "def", "adjoint", "(", "self", ")", ":", "if", "self", ".", "variant", "==", "'point_eval'", ":", "variant", "=", "'dirac'", "elif", "self", ".", "variant", "==", "'integrate'", ":", "variant", "=", "'char_fun'", "else", ":", "raise", "RuntimeError", "(", ...
Adjoint of the sampling operator, a `WeightedSumSamplingOperator`. If each sampling point occurs only once, the adjoint consists in inserting the given values into the output at the sampling points. Duplicate sampling points are weighted with their multiplicity. Examples -------- >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3)) >>> sampling_points = [[0, 1, 1, 0], ... [0, 1, 2, 0]] >>> op = odl.SamplingOperator(space, sampling_points) >>> x = space.element([[1, 2, 3], ... [4, 5, 6]]) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True The ``'integrate'`` variant adjoint puts ones at the indices in ``sampling_points``, multiplied by their multiplicity: >>> op = odl.SamplingOperator(space, sampling_points, ... variant='integrate') >>> op.adjoint(op.range.one()) # (0, 0) occurs twice uniform_discr([-1., -1.], [ 1., 1.], (2, 3)).element( [[ 2., 0., 0.], [ 0., 1., 1.]] ) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True
[ "Adjoint", "of", "the", "sampling", "operator", "a", "WeightedSumSamplingOperator", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L1159-L1199
231,825
odlgroup/odl
odl/operator/tensor_ops.py
WeightedSumSamplingOperator._call
def _call(self, x): """Sum all values if indices are given multiple times.""" y = np.bincount(self._indices_flat, weights=x, minlength=self.range.size) out = y.reshape(self.range.shape) if self.variant == 'dirac': weights = getattr(self.range, 'cell_volume', 1.0) elif self.variant == 'char_fun': weights = 1.0 else: raise RuntimeError('The variant "{!r}" is not yet supported' ''.format(self.variant)) if weights != 1.0: out /= weights return out
python
def _call(self, x): y = np.bincount(self._indices_flat, weights=x, minlength=self.range.size) out = y.reshape(self.range.shape) if self.variant == 'dirac': weights = getattr(self.range, 'cell_volume', 1.0) elif self.variant == 'char_fun': weights = 1.0 else: raise RuntimeError('The variant "{!r}" is not yet supported' ''.format(self.variant)) if weights != 1.0: out /= weights return out
[ "def", "_call", "(", "self", ",", "x", ")", ":", "y", "=", "np", ".", "bincount", "(", "self", ".", "_indices_flat", ",", "weights", "=", "x", ",", "minlength", "=", "self", ".", "range", ".", "size", ")", "out", "=", "y", ".", "reshape", "(", ...
Sum all values if indices are given multiple times.
[ "Sum", "all", "values", "if", "indices", "are", "given", "multiple", "times", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L1341-L1359
231,826
odlgroup/odl
odl/operator/tensor_ops.py
WeightedSumSamplingOperator.adjoint
def adjoint(self): """Adjoint of this operator, a `SamplingOperator`. The ``'char_fun'`` variant of this operator corresponds to the ``'integrate'`` sampling operator, and ``'dirac'`` corresponds to ``'point_eval'``. Examples -------- >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3)) >>> # Point (0, 0) occurs twice >>> sampling_points = [[0, 1, 1, 0], ... [0, 1, 2, 0]] >>> op = odl.WeightedSumSamplingOperator(space, sampling_points, ... variant='dirac') >>> y = op.range.element([[1, 2, 3], ... [4, 5, 6]]) >>> op.adjoint(y) rn(4).element([ 1., 5., 6., 1.]) >>> x = op.domain.element([1, 2, 3, 4]) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True >>> op = odl.WeightedSumSamplingOperator(space, sampling_points, ... variant='char_fun') >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True """ if self.variant == 'dirac': variant = 'point_eval' elif self.variant == 'char_fun': variant = 'integrate' else: raise RuntimeError('The variant "{!r}" is not yet supported' ''.format(self.variant)) return SamplingOperator(self.range, self.sampling_points, variant)
python
def adjoint(self): if self.variant == 'dirac': variant = 'point_eval' elif self.variant == 'char_fun': variant = 'integrate' else: raise RuntimeError('The variant "{!r}" is not yet supported' ''.format(self.variant)) return SamplingOperator(self.range, self.sampling_points, variant)
[ "def", "adjoint", "(", "self", ")", ":", "if", "self", ".", "variant", "==", "'dirac'", ":", "variant", "=", "'point_eval'", "elif", "self", ".", "variant", "==", "'char_fun'", ":", "variant", "=", "'integrate'", "else", ":", "raise", "RuntimeError", "(", ...
Adjoint of this operator, a `SamplingOperator`. The ``'char_fun'`` variant of this operator corresponds to the ``'integrate'`` sampling operator, and ``'dirac'`` corresponds to ``'point_eval'``. Examples -------- >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3)) >>> # Point (0, 0) occurs twice >>> sampling_points = [[0, 1, 1, 0], ... [0, 1, 2, 0]] >>> op = odl.WeightedSumSamplingOperator(space, sampling_points, ... variant='dirac') >>> y = op.range.element([[1, 2, 3], ... [4, 5, 6]]) >>> op.adjoint(y) rn(4).element([ 1., 5., 6., 1.]) >>> x = op.domain.element([1, 2, 3, 4]) >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True >>> op = odl.WeightedSumSamplingOperator(space, sampling_points, ... variant='char_fun') >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10 True
[ "Adjoint", "of", "this", "operator", "a", "SamplingOperator", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L1362-L1397
231,827
odlgroup/odl
odl/operator/tensor_ops.py
FlatteningOperator.inverse
def inverse(self): """Operator that reshapes to original shape. Examples -------- >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 4)) >>> op = odl.FlatteningOperator(space) >>> y = op.range.element([1, 2, 3, 4, 5, 6, 7, 8]) >>> op.inverse(y) uniform_discr([-1., -1.], [ 1., 1.], (2, 4)).element( [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.]] ) >>> op = odl.FlatteningOperator(space, order='F') >>> op.inverse(y) uniform_discr([-1., -1.], [ 1., 1.], (2, 4)).element( [[ 1., 3., 5., 7.], [ 2., 4., 6., 8.]] ) >>> op(op.inverse(y)) == y True """ op = self scaling = getattr(self.domain, 'cell_volume', 1.0) class FlatteningOperatorInverse(Operator): """Inverse of `FlatteningOperator`. This operator reshapes a flat vector back to original shape:: FlatteningOperatorInverse(x) == reshape(x, orig_shape) """ def __init__(self): """Initialize a new instance.""" super(FlatteningOperatorInverse, self).__init__( op.range, op.domain, linear=True) def _call(self, x): """Reshape ``x`` back to n-dim. shape.""" return np.reshape(x.asarray(), self.range.shape, order=op.order) def adjoint(self): """Adjoint of this operator, a scaled `FlatteningOperator`.""" return scaling * op def inverse(self): """Inverse of this operator.""" return op def __repr__(self): """Return ``repr(self)``.""" return '{!r}.inverse'.format(op) def __str__(self): """Return ``str(self)``.""" return repr(self) return FlatteningOperatorInverse()
python
def inverse(self): op = self scaling = getattr(self.domain, 'cell_volume', 1.0) class FlatteningOperatorInverse(Operator): """Inverse of `FlatteningOperator`. This operator reshapes a flat vector back to original shape:: FlatteningOperatorInverse(x) == reshape(x, orig_shape) """ def __init__(self): """Initialize a new instance.""" super(FlatteningOperatorInverse, self).__init__( op.range, op.domain, linear=True) def _call(self, x): """Reshape ``x`` back to n-dim. shape.""" return np.reshape(x.asarray(), self.range.shape, order=op.order) def adjoint(self): """Adjoint of this operator, a scaled `FlatteningOperator`.""" return scaling * op def inverse(self): """Inverse of this operator.""" return op def __repr__(self): """Return ``repr(self)``.""" return '{!r}.inverse'.format(op) def __str__(self): """Return ``str(self)``.""" return repr(self) return FlatteningOperatorInverse()
[ "def", "inverse", "(", "self", ")", ":", "op", "=", "self", "scaling", "=", "getattr", "(", "self", ".", "domain", ",", "'cell_volume'", ",", "1.0", ")", "class", "FlatteningOperatorInverse", "(", "Operator", ")", ":", "\"\"\"Inverse of `FlatteningOperator`.\n\n...
Operator that reshapes to original shape. Examples -------- >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 4)) >>> op = odl.FlatteningOperator(space) >>> y = op.range.element([1, 2, 3, 4, 5, 6, 7, 8]) >>> op.inverse(y) uniform_discr([-1., -1.], [ 1., 1.], (2, 4)).element( [[ 1., 2., 3., 4.], [ 5., 6., 7., 8.]] ) >>> op = odl.FlatteningOperator(space, order='F') >>> op.inverse(y) uniform_discr([-1., -1.], [ 1., 1.], (2, 4)).element( [[ 1., 3., 5., 7.], [ 2., 4., 6., 8.]] ) >>> op(op.inverse(y)) == y True
[ "Operator", "that", "reshapes", "to", "original", "shape", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/tensor_ops.py#L1495-L1555
231,828
odlgroup/odl
odl/contrib/mrc/mrc.py
MRCHeaderProperties.data_shape
def data_shape(self): """Shape tuple of the whole data block as determined from `header`. If no header is available (i.e., before it has been initialized), or any of the header entries ``'nx', 'ny', 'nz'`` is missing, -1 is returned, which makes reshaping a no-op. Otherwise, the returned shape is ``(nx, ny, nz)``. Note: this is the shape of the data as defined by the header. For a non-trivial axis ordering, the shape of actual data will be different. See Also -------- data_storage_shape data_axis_order """ if not self.header: return -1 try: nx = self.header['nx']['value'] ny = self.header['ny']['value'] nz = self.header['nz']['value'] except KeyError: return -1 else: return tuple(int(n) for n in (nx, ny, nz))
python
def data_shape(self): if not self.header: return -1 try: nx = self.header['nx']['value'] ny = self.header['ny']['value'] nz = self.header['nz']['value'] except KeyError: return -1 else: return tuple(int(n) for n in (nx, ny, nz))
[ "def", "data_shape", "(", "self", ")", ":", "if", "not", "self", ".", "header", ":", "return", "-", "1", "try", ":", "nx", "=", "self", ".", "header", "[", "'nx'", "]", "[", "'value'", "]", "ny", "=", "self", ".", "header", "[", "'ny'", "]", "[...
Shape tuple of the whole data block as determined from `header`. If no header is available (i.e., before it has been initialized), or any of the header entries ``'nx', 'ny', 'nz'`` is missing, -1 is returned, which makes reshaping a no-op. Otherwise, the returned shape is ``(nx, ny, nz)``. Note: this is the shape of the data as defined by the header. For a non-trivial axis ordering, the shape of actual data will be different. See Also -------- data_storage_shape data_axis_order
[ "Shape", "tuple", "of", "the", "whole", "data", "block", "as", "determined", "from", "header", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/mrc.py#L228-L254
231,829
odlgroup/odl
odl/contrib/mrc/mrc.py
MRCHeaderProperties.data_storage_shape
def data_storage_shape(self): """Shape tuple of the data as stored in the file. If no header is available (i.e., before it has been initialized), or any of the header entries ``'nx', 'ny', 'nz'`` is missing, -1 is returned, which makes reshaping a no-op. Otherwise, the returned shape is a permutation of `data_shape`, i.e., ``(nx, ny, nz)``, according to `data_axis_order` in the following way:: data_shape[i] == data_storage_shape[data_axis_order[i]] See Also -------- data_shape data_axis_order """ if self.data_shape == -1: return -1 else: return tuple(self.data_shape[ax] for ax in np.argsort(self.data_axis_order))
python
def data_storage_shape(self): if self.data_shape == -1: return -1 else: return tuple(self.data_shape[ax] for ax in np.argsort(self.data_axis_order))
[ "def", "data_storage_shape", "(", "self", ")", ":", "if", "self", ".", "data_shape", "==", "-", "1", ":", "return", "-", "1", "else", ":", "return", "tuple", "(", "self", ".", "data_shape", "[", "ax", "]", "for", "ax", "in", "np", ".", "argsort", "...
Shape tuple of the data as stored in the file. If no header is available (i.e., before it has been initialized), or any of the header entries ``'nx', 'ny', 'nz'`` is missing, -1 is returned, which makes reshaping a no-op. Otherwise, the returned shape is a permutation of `data_shape`, i.e., ``(nx, ny, nz)``, according to `data_axis_order` in the following way:: data_shape[i] == data_storage_shape[data_axis_order[i]] See Also -------- data_shape data_axis_order
[ "Shape", "tuple", "of", "the", "data", "as", "stored", "in", "the", "file", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/mrc.py#L257-L278
231,830
odlgroup/odl
odl/contrib/mrc/mrc.py
MRCHeaderProperties.data_dtype
def data_dtype(self): """Data type of the data block as determined from `header`. If no header is available (i.e., before it has been initialized), or the header entry ``'mode'`` is missing, the data type gained from the ``dtype`` argument in the initializer is returned. Otherwise, it is determined from ``mode``. """ if not self.header: return self._init_data_dtype try: mode = int(self.header['mode']['value']) except KeyError: return self._init_data_dtype else: try: return MRC_MODE_TO_NPY_DTYPE[mode] except KeyError: raise ValueError('data mode {} not supported'.format(mode))
python
def data_dtype(self): if not self.header: return self._init_data_dtype try: mode = int(self.header['mode']['value']) except KeyError: return self._init_data_dtype else: try: return MRC_MODE_TO_NPY_DTYPE[mode] except KeyError: raise ValueError('data mode {} not supported'.format(mode))
[ "def", "data_dtype", "(", "self", ")", ":", "if", "not", "self", ".", "header", ":", "return", "self", ".", "_init_data_dtype", "try", ":", "mode", "=", "int", "(", "self", ".", "header", "[", "'mode'", "]", "[", "'value'", "]", ")", "except", "KeyEr...
Data type of the data block as determined from `header`. If no header is available (i.e., before it has been initialized), or the header entry ``'mode'`` is missing, the data type gained from the ``dtype`` argument in the initializer is returned. Otherwise, it is determined from ``mode``.
[ "Data", "type", "of", "the", "data", "block", "as", "determined", "from", "header", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/mrc.py#L281-L299
231,831
odlgroup/odl
odl/contrib/mrc/mrc.py
MRCHeaderProperties.cell_sides_angstrom
def cell_sides_angstrom(self): """Array of sizes of a unit cell in Angstroms. The value is determined from the ``'cella'`` entry in `header`. """ return np.asarray( self.header['cella']['value'], dtype=float) / self.data_shape
python
def cell_sides_angstrom(self): return np.asarray( self.header['cella']['value'], dtype=float) / self.data_shape
[ "def", "cell_sides_angstrom", "(", "self", ")", ":", "return", "np", ".", "asarray", "(", "self", ".", "header", "[", "'cella'", "]", "[", "'value'", "]", ",", "dtype", "=", "float", ")", "/", "self", ".", "data_shape" ]
Array of sizes of a unit cell in Angstroms. The value is determined from the ``'cella'`` entry in `header`.
[ "Array", "of", "sizes", "of", "a", "unit", "cell", "in", "Angstroms", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/mrc.py#L359-L365
231,832
odlgroup/odl
odl/contrib/mrc/mrc.py
MRCHeaderProperties.labels
def labels(self): """Return the 10-tuple of text labels from `header`. The value is determined from the header entries ``'nlabl'`` and ``'label'``. """ label_array = self.header['label']['value'] labels = tuple(''.join(row.astype(str)) for row in label_array) try: nlabels = int(self.header['nlabl']['value']) except KeyError: nlabels = len(labels) # Check if there are nontrivial labels after the number given in # the header. If yes, ignore the 'nlabl' information and return # all labels. if any(label.strip() for label in labels[nlabels:]): return labels else: return labels[:nlabels]
python
def labels(self): label_array = self.header['label']['value'] labels = tuple(''.join(row.astype(str)) for row in label_array) try: nlabels = int(self.header['nlabl']['value']) except KeyError: nlabels = len(labels) # Check if there are nontrivial labels after the number given in # the header. If yes, ignore the 'nlabl' information and return # all labels. if any(label.strip() for label in labels[nlabels:]): return labels else: return labels[:nlabels]
[ "def", "labels", "(", "self", ")", ":", "label_array", "=", "self", ".", "header", "[", "'label'", "]", "[", "'value'", "]", "labels", "=", "tuple", "(", "''", ".", "join", "(", "row", ".", "astype", "(", "str", ")", ")", "for", "row", "in", "lab...
Return the 10-tuple of text labels from `header`. The value is determined from the header entries ``'nlabl'`` and ``'label'``.
[ "Return", "the", "10", "-", "tuple", "of", "text", "labels", "from", "header", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/mrc.py#L404-L424
231,833
odlgroup/odl
odl/contrib/mrc/mrc.py
FileReaderMRC.read_extended_header
def read_extended_header(self, groupby='field', force_type=''): """Read the extended header according to `extended_header_type`. Currently, only the FEI extended header format is supported. See `print_fei_ext_header_spec` or `this homepage`_ for the format specification. The extended header usually has one header section per image (slice), in case of the FEI header 128 bytes each, with a total of 1024 sections. Parameters ---------- groupby : {'field', 'section'}, optional How to group the values in the extended header sections. ``'field'`` : make an array per section field, e.g.:: 'defocus': [dval1, dval2, ..., dval1024], 'exp_time': [tval1, tval2, ..., tval1024], ... ``'section'`` : make a dictionary for each section, e.g.:: {'defocus': dval1, 'exp_time': tval1}, {'defocus': dval2, 'exp_time': tval2}, ... If the number of images is smaller than 1024, the last values are all set to zero. force_type : string, optional If given, this value overrides the `extended_header_type` from `header`. Currently supported: ``'FEI1'`` Returns ------- ext_header: `OrderedDict` or tuple For ``groupby == 'field'``, a dictionary with the field names as keys, like in the example. For ``groupby == 'section'``, a tuple of dictionaries as shown above. The returned data structures store no offsets, in contrast to the regular header. See Also -------- References ---------- .. 
_this homepage: http://www.2dx.unibas.ch/documentation/mrc-software/fei-\ extended-mrc-format-not-used-by-2dx """ ext_header_type = str(force_type).upper() or self.extended_header_type if ext_header_type != 'FEI1': raise ValueError("extended header type '{}' not supported" "".format(self.extended_header_type)) groupby, groupby_in = str(groupby).lower(), groupby ext_header_len = int(self.header['nsymbt']['value']) if ext_header_len % MRC_FEI_SECTION_SIZE: raise ValueError('extended header length {} from header is ' 'not divisible by extended header section size ' '{}'.format(ext_header_len, MRC_FEI_SECTION_SIZE)) num_sections = ext_header_len // MRC_FEI_SECTION_SIZE if num_sections != MRC_FEI_NUM_SECTIONS: raise ValueError('calculated number of sections ({}) not equal to ' 'expected number of sections ({})' ''.format(num_sections, MRC_FEI_NUM_SECTIONS)) section_fields = header_fields_from_table( MRC_FEI_EXT_HEADER_SECTION, keys=MRC_SPEC_KEYS, dtype_map=MRC_DTYPE_TO_NPY_DTYPE) # Make a list for each field and append the values for that # field. Then create an array from that list and store it # under the field name. ext_header = OrderedDict() for field in section_fields: value_list = [] field_offset = field['offset'] field_dtype = field['dtype'] field_dshape = field['dshape'] # Compute some parameters num_items = int(np.prod(field_dshape)) size_bytes = num_items * field_dtype.itemsize fmt = '{}{}'.format(num_items, field_dtype.char) for section in range(num_sections): # Get the bytestring from the right position in the file, # unpack it and append the value to the list. start = section * MRC_FEI_SECTION_SIZE + field_offset self.file.seek(start) packed_value = self.file.read(size_bytes) value_list.append(struct.unpack(fmt, packed_value)) ext_header[field['name']] = np.array(value_list, dtype=field_dtype) if groupby == 'field': return ext_header elif groupby == 'section': # Transpose the data and return as tuple. 
return tuple({key: ext_header[key][i] for key in ext_header} for i in range(num_sections)) else: raise ValueError("`groupby` '{}' not understood" "".format(groupby_in))
python
def read_extended_header(self, groupby='field', force_type=''): ext_header_type = str(force_type).upper() or self.extended_header_type if ext_header_type != 'FEI1': raise ValueError("extended header type '{}' not supported" "".format(self.extended_header_type)) groupby, groupby_in = str(groupby).lower(), groupby ext_header_len = int(self.header['nsymbt']['value']) if ext_header_len % MRC_FEI_SECTION_SIZE: raise ValueError('extended header length {} from header is ' 'not divisible by extended header section size ' '{}'.format(ext_header_len, MRC_FEI_SECTION_SIZE)) num_sections = ext_header_len // MRC_FEI_SECTION_SIZE if num_sections != MRC_FEI_NUM_SECTIONS: raise ValueError('calculated number of sections ({}) not equal to ' 'expected number of sections ({})' ''.format(num_sections, MRC_FEI_NUM_SECTIONS)) section_fields = header_fields_from_table( MRC_FEI_EXT_HEADER_SECTION, keys=MRC_SPEC_KEYS, dtype_map=MRC_DTYPE_TO_NPY_DTYPE) # Make a list for each field and append the values for that # field. Then create an array from that list and store it # under the field name. ext_header = OrderedDict() for field in section_fields: value_list = [] field_offset = field['offset'] field_dtype = field['dtype'] field_dshape = field['dshape'] # Compute some parameters num_items = int(np.prod(field_dshape)) size_bytes = num_items * field_dtype.itemsize fmt = '{}{}'.format(num_items, field_dtype.char) for section in range(num_sections): # Get the bytestring from the right position in the file, # unpack it and append the value to the list. start = section * MRC_FEI_SECTION_SIZE + field_offset self.file.seek(start) packed_value = self.file.read(size_bytes) value_list.append(struct.unpack(fmt, packed_value)) ext_header[field['name']] = np.array(value_list, dtype=field_dtype) if groupby == 'field': return ext_header elif groupby == 'section': # Transpose the data and return as tuple. 
return tuple({key: ext_header[key][i] for key in ext_header} for i in range(num_sections)) else: raise ValueError("`groupby` '{}' not understood" "".format(groupby_in))
[ "def", "read_extended_header", "(", "self", ",", "groupby", "=", "'field'", ",", "force_type", "=", "''", ")", ":", "ext_header_type", "=", "str", "(", "force_type", ")", ".", "upper", "(", ")", "or", "self", ".", "extended_header_type", "if", "ext_header_ty...
Read the extended header according to `extended_header_type`. Currently, only the FEI extended header format is supported. See `print_fei_ext_header_spec` or `this homepage`_ for the format specification. The extended header usually has one header section per image (slice), in case of the FEI header 128 bytes each, with a total of 1024 sections. Parameters ---------- groupby : {'field', 'section'}, optional How to group the values in the extended header sections. ``'field'`` : make an array per section field, e.g.:: 'defocus': [dval1, dval2, ..., dval1024], 'exp_time': [tval1, tval2, ..., tval1024], ... ``'section'`` : make a dictionary for each section, e.g.:: {'defocus': dval1, 'exp_time': tval1}, {'defocus': dval2, 'exp_time': tval2}, ... If the number of images is smaller than 1024, the last values are all set to zero. force_type : string, optional If given, this value overrides the `extended_header_type` from `header`. Currently supported: ``'FEI1'`` Returns ------- ext_header: `OrderedDict` or tuple For ``groupby == 'field'``, a dictionary with the field names as keys, like in the example. For ``groupby == 'section'``, a tuple of dictionaries as shown above. The returned data structures store no offsets, in contrast to the regular header. See Also -------- References ---------- .. _this homepage: http://www.2dx.unibas.ch/documentation/mrc-software/fei-\ extended-mrc-format-not-used-by-2dx
[ "Read", "the", "extended", "header", "according", "to", "extended_header_type", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/mrc.py#L477-L589
231,834
odlgroup/odl
odl/contrib/mrc/mrc.py
FileReaderMRC.read_data
def read_data(self, dstart=None, dend=None, swap_axes=True): """Read the data from `file` and return it as Numpy array. Parameters ---------- dstart : int, optional Offset in bytes of the data field. By default, it is equal to ``header_size``. Backwards indexing with negative values is also supported. Use a value larger than the header size to extract a data subset. dend : int, optional End position in bytes until which data is read (exclusive). Backwards indexing with negative values is also supported. Use a value different from the file size to extract a data subset. swap_axes : bool, optional If ``True``, use `data_axis_order` to swap the axes in the returned array. In that case, the shape of the array may no longer agree with `data_storage_shape`. Returns ------- data : `numpy.ndarray` The data read from `file`. """ data = super(FileReaderMRC, self).read_data(dstart, dend) data = data.reshape(self.data_shape, order='F') if swap_axes: data = np.transpose(data, axes=self.data_axis_order) assert data.shape == self.data_shape return data
python
def read_data(self, dstart=None, dend=None, swap_axes=True): data = super(FileReaderMRC, self).read_data(dstart, dend) data = data.reshape(self.data_shape, order='F') if swap_axes: data = np.transpose(data, axes=self.data_axis_order) assert data.shape == self.data_shape return data
[ "def", "read_data", "(", "self", ",", "dstart", "=", "None", ",", "dend", "=", "None", ",", "swap_axes", "=", "True", ")", ":", "data", "=", "super", "(", "FileReaderMRC", ",", "self", ")", ".", "read_data", "(", "dstart", ",", "dend", ")", "data", ...
Read the data from `file` and return it as Numpy array. Parameters ---------- dstart : int, optional Offset in bytes of the data field. By default, it is equal to ``header_size``. Backwards indexing with negative values is also supported. Use a value larger than the header size to extract a data subset. dend : int, optional End position in bytes until which data is read (exclusive). Backwards indexing with negative values is also supported. Use a value different from the file size to extract a data subset. swap_axes : bool, optional If ``True``, use `data_axis_order` to swap the axes in the returned array. In that case, the shape of the array may no longer agree with `data_storage_shape`. Returns ------- data : `numpy.ndarray` The data read from `file`.
[ "Read", "the", "data", "from", "file", "and", "return", "it", "as", "Numpy", "array", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/mrc/mrc.py#L591-L620
231,835
odlgroup/odl
odl/util/utility.py
dedent
def dedent(string, indent_str=' ', max_levels=None): """Revert the effect of indentation. Examples -------- Remove a simple one-level indentation: >>> text = '''<->This is line 1. ... <->Next line. ... <->And another one.''' >>> print(text) <->This is line 1. <->Next line. <->And another one. >>> print(dedent(text, '<->')) This is line 1. Next line. And another one. Multiple levels of indentation: >>> text = '''<->Level 1. ... <-><->Level 2. ... <-><-><->Level 3.''' >>> print(text) <->Level 1. <-><->Level 2. <-><-><->Level 3. >>> print(dedent(text, '<->')) Level 1. <->Level 2. <-><->Level 3. >>> text = '''<-><->Level 2. ... <-><-><->Level 3.''' >>> print(text) <-><->Level 2. <-><-><->Level 3. >>> print(dedent(text, '<->')) Level 2. <->Level 3. >>> print(dedent(text, '<->', max_levels=1)) <->Level 2. <-><->Level 3. """ if len(indent_str) == 0: return string lines = string.splitlines() # Determine common (minumum) number of indentation levels, capped at # `max_levels` if given def num_indents(line): max_num = int(np.ceil(len(line) / len(indent_str))) for i in range(max_num): if line.startswith(indent_str): line = line[len(indent_str):] else: break return i num_levels = num_indents(min(lines, key=num_indents)) if max_levels is not None: num_levels = min(num_levels, max_levels) # Dedent dedent_len = num_levels * len(indent_str) return '\n'.join(line[dedent_len:] for line in lines)
python
def dedent(string, indent_str=' ', max_levels=None): if len(indent_str) == 0: return string lines = string.splitlines() # Determine common (minumum) number of indentation levels, capped at # `max_levels` if given def num_indents(line): max_num = int(np.ceil(len(line) / len(indent_str))) for i in range(max_num): if line.startswith(indent_str): line = line[len(indent_str):] else: break return i num_levels = num_indents(min(lines, key=num_indents)) if max_levels is not None: num_levels = min(num_levels, max_levels) # Dedent dedent_len = num_levels * len(indent_str) return '\n'.join(line[dedent_len:] for line in lines)
[ "def", "dedent", "(", "string", ",", "indent_str", "=", "' '", ",", "max_levels", "=", "None", ")", ":", "if", "len", "(", "indent_str", ")", "==", "0", ":", "return", "string", "lines", "=", "string", ".", "splitlines", "(", ")", "# Determine common (...
Revert the effect of indentation. Examples -------- Remove a simple one-level indentation: >>> text = '''<->This is line 1. ... <->Next line. ... <->And another one.''' >>> print(text) <->This is line 1. <->Next line. <->And another one. >>> print(dedent(text, '<->')) This is line 1. Next line. And another one. Multiple levels of indentation: >>> text = '''<->Level 1. ... <-><->Level 2. ... <-><-><->Level 3.''' >>> print(text) <->Level 1. <-><->Level 2. <-><-><->Level 3. >>> print(dedent(text, '<->')) Level 1. <->Level 2. <-><->Level 3. >>> text = '''<-><->Level 2. ... <-><-><->Level 3.''' >>> print(text) <-><->Level 2. <-><-><->Level 3. >>> print(dedent(text, '<->')) Level 2. <->Level 3. >>> print(dedent(text, '<->', max_levels=1)) <->Level 2. <-><->Level 3.
[ "Revert", "the", "effect", "of", "indentation", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L89-L158
231,836
odlgroup/odl
odl/util/utility.py
array_str
def array_str(a, nprint=6): """Stringification of an array. Parameters ---------- a : `array-like` The array to print. nprint : int, optional Maximum number of elements to print per axis in ``a``. For larger arrays, a summary is printed, with ``nprint // 2`` elements on each side and ``...`` in the middle (per axis). Examples -------- Printing 1D arrays: >>> print(array_str(np.arange(4))) [0, 1, 2, 3] >>> print(array_str(np.arange(10))) [0, 1, 2, ..., 7, 8, 9] >>> print(array_str(np.arange(10), nprint=10)) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] For 2D and higher, the ``nprint`` limitation applies per axis: >>> print(array_str(np.arange(24).reshape(4, 6))) [[ 0, 1, 2, 3, 4, 5], [ 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23]] >>> print(array_str(np.arange(32).reshape(4, 8))) [[ 0, 1, 2, ..., 5, 6, 7], [ 8, 9, 10, ..., 13, 14, 15], [16, 17, 18, ..., 21, 22, 23], [24, 25, 26, ..., 29, 30, 31]] >>> print(array_str(np.arange(32).reshape(8, 4))) [[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], ..., [20, 21, 22, 23], [24, 25, 26, 27], [28, 29, 30, 31]] >>> print(array_str(np.arange(64).reshape(8, 8))) [[ 0, 1, 2, ..., 5, 6, 7], [ 8, 9, 10, ..., 13, 14, 15], [16, 17, 18, ..., 21, 22, 23], ..., [40, 41, 42, ..., 45, 46, 47], [48, 49, 50, ..., 53, 54, 55], [56, 57, 58, ..., 61, 62, 63]] Printing of empty arrays and 0D arrays: >>> print(array_str(np.array([]))) # 1D, size=0 [] >>> print(array_str(np.array(1.0))) # 0D, size=1 1.0 Small deviations from round numbers will be suppressed: >>> # 2.0000000000000004 in double precision >>> print(array_str((np.array([2.0]) ** 0.5) ** 2)) [ 2.] """ a = np.asarray(a) max_shape = tuple(n if n < nprint else nprint for n in a.shape) with npy_printoptions(threshold=int(np.prod(max_shape)), edgeitems=nprint // 2, suppress=True): a_str = np.array2string(a, separator=', ') return a_str
python
def array_str(a, nprint=6): a = np.asarray(a) max_shape = tuple(n if n < nprint else nprint for n in a.shape) with npy_printoptions(threshold=int(np.prod(max_shape)), edgeitems=nprint // 2, suppress=True): a_str = np.array2string(a, separator=', ') return a_str
[ "def", "array_str", "(", "a", ",", "nprint", "=", "6", ")", ":", "a", "=", "np", ".", "asarray", "(", "a", ")", "max_shape", "=", "tuple", "(", "n", "if", "n", "<", "nprint", "else", "nprint", "for", "n", "in", "a", ".", "shape", ")", "with", ...
Stringification of an array. Parameters ---------- a : `array-like` The array to print. nprint : int, optional Maximum number of elements to print per axis in ``a``. For larger arrays, a summary is printed, with ``nprint // 2`` elements on each side and ``...`` in the middle (per axis). Examples -------- Printing 1D arrays: >>> print(array_str(np.arange(4))) [0, 1, 2, 3] >>> print(array_str(np.arange(10))) [0, 1, 2, ..., 7, 8, 9] >>> print(array_str(np.arange(10), nprint=10)) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] For 2D and higher, the ``nprint`` limitation applies per axis: >>> print(array_str(np.arange(24).reshape(4, 6))) [[ 0, 1, 2, 3, 4, 5], [ 6, 7, 8, 9, 10, 11], [12, 13, 14, 15, 16, 17], [18, 19, 20, 21, 22, 23]] >>> print(array_str(np.arange(32).reshape(4, 8))) [[ 0, 1, 2, ..., 5, 6, 7], [ 8, 9, 10, ..., 13, 14, 15], [16, 17, 18, ..., 21, 22, 23], [24, 25, 26, ..., 29, 30, 31]] >>> print(array_str(np.arange(32).reshape(8, 4))) [[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], ..., [20, 21, 22, 23], [24, 25, 26, 27], [28, 29, 30, 31]] >>> print(array_str(np.arange(64).reshape(8, 8))) [[ 0, 1, 2, ..., 5, 6, 7], [ 8, 9, 10, ..., 13, 14, 15], [16, 17, 18, ..., 21, 22, 23], ..., [40, 41, 42, ..., 45, 46, 47], [48, 49, 50, ..., 53, 54, 55], [56, 57, 58, ..., 61, 62, 63]] Printing of empty arrays and 0D arrays: >>> print(array_str(np.array([]))) # 1D, size=0 [] >>> print(array_str(np.array(1.0))) # 0D, size=1 1.0 Small deviations from round numbers will be suppressed: >>> # 2.0000000000000004 in double precision >>> print(array_str((np.array([2.0]) ** 0.5) ** 2)) [ 2.]
[ "Stringification", "of", "an", "array", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L195-L267
231,837
odlgroup/odl
odl/util/utility.py
dtype_repr
def dtype_repr(dtype): """Stringify ``dtype`` for ``repr`` with default for int and float.""" dtype = np.dtype(dtype) if dtype == np.dtype(int): return "'int'" elif dtype == np.dtype(float): return "'float'" elif dtype == np.dtype(complex): return "'complex'" elif dtype.shape: return "('{}', {})".format(dtype.base, dtype.shape) else: return "'{}'".format(dtype)
python
def dtype_repr(dtype): dtype = np.dtype(dtype) if dtype == np.dtype(int): return "'int'" elif dtype == np.dtype(float): return "'float'" elif dtype == np.dtype(complex): return "'complex'" elif dtype.shape: return "('{}', {})".format(dtype.base, dtype.shape) else: return "'{}'".format(dtype)
[ "def", "dtype_repr", "(", "dtype", ")", ":", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "if", "dtype", "==", "np", ".", "dtype", "(", "int", ")", ":", "return", "\"'int'\"", "elif", "dtype", "==", "np", ".", "dtype", "(", "float", ")", ...
Stringify ``dtype`` for ``repr`` with default for int and float.
[ "Stringify", "dtype", "for", "repr", "with", "default", "for", "int", "and", "float", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L270-L282
231,838
odlgroup/odl
odl/util/utility.py
is_numeric_dtype
def is_numeric_dtype(dtype): """Return ``True`` if ``dtype`` is a numeric type.""" dtype = np.dtype(dtype) return np.issubsctype(getattr(dtype, 'base', None), np.number)
python
def is_numeric_dtype(dtype): dtype = np.dtype(dtype) return np.issubsctype(getattr(dtype, 'base', None), np.number)
[ "def", "is_numeric_dtype", "(", "dtype", ")", ":", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "return", "np", ".", "issubsctype", "(", "getattr", "(", "dtype", ",", "'base'", ",", "None", ")", ",", "np", ".", "number", ")" ]
Return ``True`` if ``dtype`` is a numeric type.
[ "Return", "True", "if", "dtype", "is", "a", "numeric", "type", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L355-L358
231,839
odlgroup/odl
odl/util/utility.py
is_int_dtype
def is_int_dtype(dtype): """Return ``True`` if ``dtype`` is an integer type.""" dtype = np.dtype(dtype) return np.issubsctype(getattr(dtype, 'base', None), np.integer)
python
def is_int_dtype(dtype): dtype = np.dtype(dtype) return np.issubsctype(getattr(dtype, 'base', None), np.integer)
[ "def", "is_int_dtype", "(", "dtype", ")", ":", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "return", "np", ".", "issubsctype", "(", "getattr", "(", "dtype", ",", "'base'", ",", "None", ")", ",", "np", ".", "integer", ")" ]
Return ``True`` if ``dtype`` is an integer type.
[ "Return", "True", "if", "dtype", "is", "an", "integer", "type", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L362-L365
231,840
odlgroup/odl
odl/util/utility.py
is_real_floating_dtype
def is_real_floating_dtype(dtype): """Return ``True`` if ``dtype`` is a real floating point type.""" dtype = np.dtype(dtype) return np.issubsctype(getattr(dtype, 'base', None), np.floating)
python
def is_real_floating_dtype(dtype): dtype = np.dtype(dtype) return np.issubsctype(getattr(dtype, 'base', None), np.floating)
[ "def", "is_real_floating_dtype", "(", "dtype", ")", ":", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "return", "np", ".", "issubsctype", "(", "getattr", "(", "dtype", ",", "'base'", ",", "None", ")", ",", "np", ".", "floating", ")" ]
Return ``True`` if ``dtype`` is a real floating point type.
[ "Return", "True", "if", "dtype", "is", "a", "real", "floating", "point", "type", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L381-L384
231,841
odlgroup/odl
odl/util/utility.py
is_complex_floating_dtype
def is_complex_floating_dtype(dtype): """Return ``True`` if ``dtype`` is a complex floating point type.""" dtype = np.dtype(dtype) return np.issubsctype(getattr(dtype, 'base', None), np.complexfloating)
python
def is_complex_floating_dtype(dtype): dtype = np.dtype(dtype) return np.issubsctype(getattr(dtype, 'base', None), np.complexfloating)
[ "def", "is_complex_floating_dtype", "(", "dtype", ")", ":", "dtype", "=", "np", ".", "dtype", "(", "dtype", ")", "return", "np", ".", "issubsctype", "(", "getattr", "(", "dtype", ",", "'base'", ",", "None", ")", ",", "np", ".", "complexfloating", ")" ]
Return ``True`` if ``dtype`` is a complex floating point type.
[ "Return", "True", "if", "dtype", "is", "a", "complex", "floating", "point", "type", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L388-L391
231,842
odlgroup/odl
odl/util/utility.py
real_dtype
def real_dtype(dtype, default=None): """Return the real counterpart of ``dtype`` if existing. Parameters ---------- dtype : Real or complex floating point data type. It can be given in any way the `numpy.dtype` constructor understands. default : Object to be returned if no real counterpart is found for ``dtype``, except for ``None``, in which case an error is raised. Returns ------- real_dtype : `numpy.dtype` The real counterpart of ``dtype``. Raises ------ ValueError if there is no real counterpart to the given data type and ``default == None``. See Also -------- complex_dtype Examples -------- Convert scalar dtypes: >>> real_dtype(complex) dtype('float64') >>> real_dtype('complex64') dtype('float32') >>> real_dtype(float) dtype('float64') Dtypes with shape are also supported: >>> real_dtype(np.dtype((complex, (3,)))) dtype(('<f8', (3,))) >>> real_dtype(('complex64', (3,))) dtype(('<f4', (3,))) """ dtype, dtype_in = np.dtype(dtype), dtype if is_real_floating_dtype(dtype): return dtype try: real_base_dtype = TYPE_MAP_C2R[dtype.base] except KeyError: if default is not None: return default else: raise ValueError('no real counterpart exists for `dtype` {}' ''.format(dtype_repr(dtype_in))) else: return np.dtype((real_base_dtype, dtype.shape))
python
def real_dtype(dtype, default=None): dtype, dtype_in = np.dtype(dtype), dtype if is_real_floating_dtype(dtype): return dtype try: real_base_dtype = TYPE_MAP_C2R[dtype.base] except KeyError: if default is not None: return default else: raise ValueError('no real counterpart exists for `dtype` {}' ''.format(dtype_repr(dtype_in))) else: return np.dtype((real_base_dtype, dtype.shape))
[ "def", "real_dtype", "(", "dtype", ",", "default", "=", "None", ")", ":", "dtype", ",", "dtype_in", "=", "np", ".", "dtype", "(", "dtype", ")", ",", "dtype", "if", "is_real_floating_dtype", "(", "dtype", ")", ":", "return", "dtype", "try", ":", "real_b...
Return the real counterpart of ``dtype`` if existing. Parameters ---------- dtype : Real or complex floating point data type. It can be given in any way the `numpy.dtype` constructor understands. default : Object to be returned if no real counterpart is found for ``dtype``, except for ``None``, in which case an error is raised. Returns ------- real_dtype : `numpy.dtype` The real counterpart of ``dtype``. Raises ------ ValueError if there is no real counterpart to the given data type and ``default == None``. See Also -------- complex_dtype Examples -------- Convert scalar dtypes: >>> real_dtype(complex) dtype('float64') >>> real_dtype('complex64') dtype('float32') >>> real_dtype(float) dtype('float64') Dtypes with shape are also supported: >>> real_dtype(np.dtype((complex, (3,)))) dtype(('<f8', (3,))) >>> real_dtype(('complex64', (3,))) dtype(('<f4', (3,)))
[ "Return", "the", "real", "counterpart", "of", "dtype", "if", "existing", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L394-L453
231,843
odlgroup/odl
odl/util/utility.py
complex_dtype
def complex_dtype(dtype, default=None): """Return complex counterpart of ``dtype`` if existing, else ``default``. Parameters ---------- dtype : Real or complex floating point data type. It can be given in any way the `numpy.dtype` constructor understands. default : Object to be returned if no complex counterpart is found for ``dtype``, except for ``None``, in which case an error is raised. Returns ------- complex_dtype : `numpy.dtype` The complex counterpart of ``dtype``. Raises ------ ValueError if there is no complex counterpart to the given data type and ``default == None``. Examples -------- Convert scalar dtypes: >>> complex_dtype(float) dtype('complex128') >>> complex_dtype('float32') dtype('complex64') >>> complex_dtype(complex) dtype('complex128') Dtypes with shape are also supported: >>> complex_dtype(np.dtype((float, (3,)))) dtype(('<c16', (3,))) >>> complex_dtype(('float32', (3,))) dtype(('<c8', (3,))) """ dtype, dtype_in = np.dtype(dtype), dtype if is_complex_floating_dtype(dtype): return dtype try: complex_base_dtype = TYPE_MAP_R2C[dtype.base] except KeyError: if default is not None: return default else: raise ValueError('no complex counterpart exists for `dtype` {}' ''.format(dtype_repr(dtype_in))) else: return np.dtype((complex_base_dtype, dtype.shape))
python
def complex_dtype(dtype, default=None): dtype, dtype_in = np.dtype(dtype), dtype if is_complex_floating_dtype(dtype): return dtype try: complex_base_dtype = TYPE_MAP_R2C[dtype.base] except KeyError: if default is not None: return default else: raise ValueError('no complex counterpart exists for `dtype` {}' ''.format(dtype_repr(dtype_in))) else: return np.dtype((complex_base_dtype, dtype.shape))
[ "def", "complex_dtype", "(", "dtype", ",", "default", "=", "None", ")", ":", "dtype", ",", "dtype_in", "=", "np", ".", "dtype", "(", "dtype", ")", ",", "dtype", "if", "is_complex_floating_dtype", "(", "dtype", ")", ":", "return", "dtype", "try", ":", "...
Return complex counterpart of ``dtype`` if existing, else ``default``. Parameters ---------- dtype : Real or complex floating point data type. It can be given in any way the `numpy.dtype` constructor understands. default : Object to be returned if no complex counterpart is found for ``dtype``, except for ``None``, in which case an error is raised. Returns ------- complex_dtype : `numpy.dtype` The complex counterpart of ``dtype``. Raises ------ ValueError if there is no complex counterpart to the given data type and ``default == None``. Examples -------- Convert scalar dtypes: >>> complex_dtype(float) dtype('complex128') >>> complex_dtype('float32') dtype('complex64') >>> complex_dtype(complex) dtype('complex128') Dtypes with shape are also supported: >>> complex_dtype(np.dtype((float, (3,)))) dtype(('<c16', (3,))) >>> complex_dtype(('float32', (3,))) dtype(('<c8', (3,)))
[ "Return", "complex", "counterpart", "of", "dtype", "if", "existing", "else", "default", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L456-L511
231,844
odlgroup/odl
odl/util/utility.py
preload_first_arg
def preload_first_arg(instance, mode): """Decorator to preload the first argument of a call method. Parameters ---------- instance : Class instance to preload the call with mode : {'out-of-place', 'in-place'} 'out-of-place': call is out-of-place -- ``f(x, **kwargs)`` 'in-place': call is in-place -- ``f(x, out, **kwargs)`` Notes ----- The decorated function has the signature according to ``mode``. Examples -------- Define two functions which need some instance to act on and decorate them manually: >>> class A(object): ... '''My name is A.''' >>> a = A() ... >>> def f_oop(inst, x): ... print(inst.__doc__) ... >>> def f_ip(inst, out, x): ... print(inst.__doc__) ... >>> f_oop_new = preload_first_arg(a, 'out-of-place')(f_oop) >>> f_ip_new = preload_first_arg(a, 'in-place')(f_ip) ... >>> f_oop_new(0) My name is A. >>> f_ip_new(0, out=1) My name is A. Decorate upon definition: >>> @preload_first_arg(a, 'out-of-place') ... def set_x(obj, x): ... '''Function to set x in ``obj`` to a given value.''' ... obj.x = x >>> set_x(0) >>> a.x 0 The function's name and docstring are preserved: >>> set_x.__name__ 'set_x' >>> set_x.__doc__ 'Function to set x in ``obj`` to a given value.' """ def decorator(call): @wraps(call) def oop_wrapper(x, **kwargs): return call(instance, x, **kwargs) @wraps(call) def ip_wrapper(x, out, **kwargs): return call(instance, x, out, **kwargs) if mode == 'out-of-place': return oop_wrapper elif mode == 'in-place': return ip_wrapper else: raise ValueError('bad mode {!r}'.format(mode)) return decorator
python
def preload_first_arg(instance, mode): def decorator(call): @wraps(call) def oop_wrapper(x, **kwargs): return call(instance, x, **kwargs) @wraps(call) def ip_wrapper(x, out, **kwargs): return call(instance, x, out, **kwargs) if mode == 'out-of-place': return oop_wrapper elif mode == 'in-place': return ip_wrapper else: raise ValueError('bad mode {!r}'.format(mode)) return decorator
[ "def", "preload_first_arg", "(", "instance", ",", "mode", ")", ":", "def", "decorator", "(", "call", ")", ":", "@", "wraps", "(", "call", ")", "def", "oop_wrapper", "(", "x", ",", "*", "*", "kwargs", ")", ":", "return", "call", "(", "instance", ",", ...
Decorator to preload the first argument of a call method. Parameters ---------- instance : Class instance to preload the call with mode : {'out-of-place', 'in-place'} 'out-of-place': call is out-of-place -- ``f(x, **kwargs)`` 'in-place': call is in-place -- ``f(x, out, **kwargs)`` Notes ----- The decorated function has the signature according to ``mode``. Examples -------- Define two functions which need some instance to act on and decorate them manually: >>> class A(object): ... '''My name is A.''' >>> a = A() ... >>> def f_oop(inst, x): ... print(inst.__doc__) ... >>> def f_ip(inst, out, x): ... print(inst.__doc__) ... >>> f_oop_new = preload_first_arg(a, 'out-of-place')(f_oop) >>> f_ip_new = preload_first_arg(a, 'in-place')(f_ip) ... >>> f_oop_new(0) My name is A. >>> f_ip_new(0, out=1) My name is A. Decorate upon definition: >>> @preload_first_arg(a, 'out-of-place') ... def set_x(obj, x): ... '''Function to set x in ``obj`` to a given value.''' ... obj.x = x >>> set_x(0) >>> a.x 0 The function's name and docstring are preserved: >>> set_x.__name__ 'set_x' >>> set_x.__doc__ 'Function to set x in ``obj`` to a given value.'
[ "Decorator", "to", "preload", "the", "first", "argument", "of", "a", "call", "method", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L573-L648
231,845
odlgroup/odl
odl/util/utility.py
signature_string
def signature_string(posargs, optargs, sep=', ', mod='!r'): """Return a stringified signature from given arguments. Parameters ---------- posargs : sequence Positional argument values, always included in the returned string. They appear in the string as (roughly):: sep.join(str(arg) for arg in posargs) optargs : sequence of 3-tuples Optional arguments with names and defaults, given in the form:: [(name1, value1, default1), (name2, value2, default2), ...] Only those parameters that are different from the given default are included as ``name=value`` keyword pairs. **Note:** The comparison is done by using ``if value == default:``, which is not valid for, e.g., NumPy arrays. sep : string or sequence of strings, optional Separator(s) for the argument strings. A provided single string is used for all joining operations. A given sequence must have 3 entries ``pos_sep, opt_sep, part_sep``. The ``pos_sep`` and ``opt_sep`` strings are used for joining the respective sequences of argument strings, and ``part_sep`` joins these two joined strings. mod : string or callable or sequence, optional Format modifier(s) for the argument strings. In its most general form, ``mod`` is a sequence of 2 sequences ``pos_mod, opt_mod`` with ``len(pos_mod) == len(posargs)`` and ``len(opt_mod) == len(optargs)``. Each entry ``m`` in those sequences can be eiter a string, resulting in the following stringification of ``arg``:: arg_fmt = {{{}}}.format(m) arg_str = arg_fmt.format(arg) For a callable ``to_str``, the stringification is simply ``arg_str = to_str(arg)``. The entries ``pos_mod, opt_mod`` of ``mod`` can also be strings or callables instead of sequences, in which case the modifier applies to all corresponding arguments. Finally, if ``mod`` is a string or callable, it is applied to all arguments. The default behavior is to apply the "{!r}" (``repr``) conversion. 
For floating point scalars, the number of digits printed is determined by the ``precision`` value in NumPy's printing options, which can be temporarily modified with `npy_printoptions`. Returns ------- signature : string Stringification of a signature, typically used in the form:: '{}({})'.format(self.__class__.__name__, signature) Examples -------- Usage with non-trivial entries in both sequences, with a typical use case: >>> posargs = [1, 'hello', None] >>> optargs = [('dtype', 'float32', 'float64')] >>> signature_string(posargs, optargs) "1, 'hello', None, dtype='float32'" >>> '{}({})'.format('MyClass', signature_string(posargs, optargs)) "MyClass(1, 'hello', None, dtype='float32')" Empty sequences and optargs values equal to default are omitted: >>> posargs = ['hello'] >>> optargs = [('size', 1, 1)] >>> signature_string(posargs, optargs) "'hello'" >>> posargs = [] >>> optargs = [('size', 2, 1)] >>> signature_string(posargs, optargs) 'size=2' >>> posargs = [] >>> optargs = [('size', 1, 1)] >>> signature_string(posargs, optargs) '' Using a different separator, globally or per argument "category": >>> posargs = [1, 'hello', None] >>> optargs = [('dtype', 'float32', 'float64'), ... ('order', 'F', 'C')] >>> signature_string(posargs, optargs) "1, 'hello', None, dtype='float32', order='F'" >>> signature_string(posargs, optargs, sep=(',', ',', ', ')) "1,'hello',None, dtype='float32',order='F'" Using format modifiers: >>> posargs = ['hello', 2.345] >>> optargs = [('extent', 1.442, 1.0), ('spacing', 0.0151, 1.0)] >>> signature_string(posargs, optargs) "'hello', 2.345, extent=1.442, spacing=0.0151" >>> # Print only two significant digits for all arguments. >>> # NOTE: this also affects the string! 
>>> mod = ':.2' >>> signature_string(posargs, optargs, mod=mod) 'he, 2.3, extent=1.4, spacing=0.015' >>> mod = [['', ''], [':.3', ':.2']] # one modifier per argument >>> signature_string(posargs, optargs, mod=mod) "'hello', 2.345, extent=1.44, spacing=0.015" Using callables for stringification: >>> posargs = ['arg1', np.ones(3)] >>> optargs = [] >>> signature_string(posargs, optargs, mod=[['', array_str], []]) "'arg1', [ 1., 1., 1.]" The number of printed digits in floating point numbers can be changed with `npy_printoptions`: >>> posargs = ['hello', 0.123456789012345] >>> optargs = [('extent', 1.234567890123456, 1.0)] >>> signature_string(posargs, optargs) # default is 8 digits "'hello', 0.12345679, extent=1.2345679" >>> with npy_printoptions(precision=2): ... sig_str = signature_string(posargs, optargs) >>> sig_str "'hello', 0.12, extent=1.2" """ # Define the separators for the two possible cases if is_string(sep): pos_sep = opt_sep = part_sep = sep else: pos_sep, opt_sep, part_sep = sep # Get the stringified parts posargs_conv, optargs_conv = signature_string_parts(posargs, optargs, mod) # Join the arguments using the separators parts = [] if posargs_conv: parts.append(pos_sep.join(argstr for argstr in posargs_conv)) if optargs_conv: parts.append(opt_sep.join(optargs_conv)) return part_sep.join(parts)
python
def signature_string(posargs, optargs, sep=', ', mod='!r'): # Define the separators for the two possible cases if is_string(sep): pos_sep = opt_sep = part_sep = sep else: pos_sep, opt_sep, part_sep = sep # Get the stringified parts posargs_conv, optargs_conv = signature_string_parts(posargs, optargs, mod) # Join the arguments using the separators parts = [] if posargs_conv: parts.append(pos_sep.join(argstr for argstr in posargs_conv)) if optargs_conv: parts.append(opt_sep.join(optargs_conv)) return part_sep.join(parts)
[ "def", "signature_string", "(", "posargs", ",", "optargs", ",", "sep", "=", "', '", ",", "mod", "=", "'!r'", ")", ":", "# Define the separators for the two possible cases", "if", "is_string", "(", "sep", ")", ":", "pos_sep", "=", "opt_sep", "=", "part_sep", "=...
Return a stringified signature from given arguments. Parameters ---------- posargs : sequence Positional argument values, always included in the returned string. They appear in the string as (roughly):: sep.join(str(arg) for arg in posargs) optargs : sequence of 3-tuples Optional arguments with names and defaults, given in the form:: [(name1, value1, default1), (name2, value2, default2), ...] Only those parameters that are different from the given default are included as ``name=value`` keyword pairs. **Note:** The comparison is done by using ``if value == default:``, which is not valid for, e.g., NumPy arrays. sep : string or sequence of strings, optional Separator(s) for the argument strings. A provided single string is used for all joining operations. A given sequence must have 3 entries ``pos_sep, opt_sep, part_sep``. The ``pos_sep`` and ``opt_sep`` strings are used for joining the respective sequences of argument strings, and ``part_sep`` joins these two joined strings. mod : string or callable or sequence, optional Format modifier(s) for the argument strings. In its most general form, ``mod`` is a sequence of 2 sequences ``pos_mod, opt_mod`` with ``len(pos_mod) == len(posargs)`` and ``len(opt_mod) == len(optargs)``. Each entry ``m`` in those sequences can be eiter a string, resulting in the following stringification of ``arg``:: arg_fmt = {{{}}}.format(m) arg_str = arg_fmt.format(arg) For a callable ``to_str``, the stringification is simply ``arg_str = to_str(arg)``. The entries ``pos_mod, opt_mod`` of ``mod`` can also be strings or callables instead of sequences, in which case the modifier applies to all corresponding arguments. Finally, if ``mod`` is a string or callable, it is applied to all arguments. The default behavior is to apply the "{!r}" (``repr``) conversion. For floating point scalars, the number of digits printed is determined by the ``precision`` value in NumPy's printing options, which can be temporarily modified with `npy_printoptions`. 
Returns ------- signature : string Stringification of a signature, typically used in the form:: '{}({})'.format(self.__class__.__name__, signature) Examples -------- Usage with non-trivial entries in both sequences, with a typical use case: >>> posargs = [1, 'hello', None] >>> optargs = [('dtype', 'float32', 'float64')] >>> signature_string(posargs, optargs) "1, 'hello', None, dtype='float32'" >>> '{}({})'.format('MyClass', signature_string(posargs, optargs)) "MyClass(1, 'hello', None, dtype='float32')" Empty sequences and optargs values equal to default are omitted: >>> posargs = ['hello'] >>> optargs = [('size', 1, 1)] >>> signature_string(posargs, optargs) "'hello'" >>> posargs = [] >>> optargs = [('size', 2, 1)] >>> signature_string(posargs, optargs) 'size=2' >>> posargs = [] >>> optargs = [('size', 1, 1)] >>> signature_string(posargs, optargs) '' Using a different separator, globally or per argument "category": >>> posargs = [1, 'hello', None] >>> optargs = [('dtype', 'float32', 'float64'), ... ('order', 'F', 'C')] >>> signature_string(posargs, optargs) "1, 'hello', None, dtype='float32', order='F'" >>> signature_string(posargs, optargs, sep=(',', ',', ', ')) "1,'hello',None, dtype='float32',order='F'" Using format modifiers: >>> posargs = ['hello', 2.345] >>> optargs = [('extent', 1.442, 1.0), ('spacing', 0.0151, 1.0)] >>> signature_string(posargs, optargs) "'hello', 2.345, extent=1.442, spacing=0.0151" >>> # Print only two significant digits for all arguments. >>> # NOTE: this also affects the string! 
>>> mod = ':.2' >>> signature_string(posargs, optargs, mod=mod) 'he, 2.3, extent=1.4, spacing=0.015' >>> mod = [['', ''], [':.3', ':.2']] # one modifier per argument >>> signature_string(posargs, optargs, mod=mod) "'hello', 2.345, extent=1.44, spacing=0.015" Using callables for stringification: >>> posargs = ['arg1', np.ones(3)] >>> optargs = [] >>> signature_string(posargs, optargs, mod=[['', array_str], []]) "'arg1', [ 1., 1., 1.]" The number of printed digits in floating point numbers can be changed with `npy_printoptions`: >>> posargs = ['hello', 0.123456789012345] >>> optargs = [('extent', 1.234567890123456, 1.0)] >>> signature_string(posargs, optargs) # default is 8 digits "'hello', 0.12345679, extent=1.2345679" >>> with npy_printoptions(precision=2): ... sig_str = signature_string(posargs, optargs) >>> sig_str "'hello', 0.12, extent=1.2"
[ "Return", "a", "stringified", "signature", "from", "given", "arguments", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L732-L881
231,846
odlgroup/odl
odl/util/utility.py
_separators
def _separators(strings, linewidth): """Return separators that keep joined strings within the line width.""" if len(strings) <= 1: return () indent_len = 4 separators = [] cur_line_len = indent_len + len(strings[0]) + 1 if cur_line_len + 2 <= linewidth and '\n' not in strings[0]: # Next string might fit on same line separators.append(', ') cur_line_len += 1 # for the extra space else: # Use linebreak if string contains newline or doesn't fit separators.append(',\n') cur_line_len = indent_len for i, s in enumerate(strings[1:-1]): cur_line_len += len(s) + 1 if '\n' in s: # Use linebreak before and after if string contains newline separators[i] = ',\n' cur_line_len = indent_len separators.append(',\n') elif cur_line_len + 2 <= linewidth: # This string fits, next one might also fit on same line separators.append(', ') cur_line_len += 1 # for the extra space elif cur_line_len <= linewidth: # This string fits, but next one won't separators.append(',\n') cur_line_len = indent_len else: # This string doesn't fit but has no newlines in it separators[i] = ',\n' cur_line_len = indent_len + len(s) + 1 # Need to determine again what should come next if cur_line_len + 2 <= linewidth: # Next string might fit on same line separators.append(', ') else: separators.append(',\n') cur_line_len += len(strings[-1]) if cur_line_len + 1 > linewidth or '\n' in strings[-1]: # This string and a comma don't fit on this line separators[-1] = ',\n' return tuple(separators)
python
def _separators(strings, linewidth): if len(strings) <= 1: return () indent_len = 4 separators = [] cur_line_len = indent_len + len(strings[0]) + 1 if cur_line_len + 2 <= linewidth and '\n' not in strings[0]: # Next string might fit on same line separators.append(', ') cur_line_len += 1 # for the extra space else: # Use linebreak if string contains newline or doesn't fit separators.append(',\n') cur_line_len = indent_len for i, s in enumerate(strings[1:-1]): cur_line_len += len(s) + 1 if '\n' in s: # Use linebreak before and after if string contains newline separators[i] = ',\n' cur_line_len = indent_len separators.append(',\n') elif cur_line_len + 2 <= linewidth: # This string fits, next one might also fit on same line separators.append(', ') cur_line_len += 1 # for the extra space elif cur_line_len <= linewidth: # This string fits, but next one won't separators.append(',\n') cur_line_len = indent_len else: # This string doesn't fit but has no newlines in it separators[i] = ',\n' cur_line_len = indent_len + len(s) + 1 # Need to determine again what should come next if cur_line_len + 2 <= linewidth: # Next string might fit on same line separators.append(', ') else: separators.append(',\n') cur_line_len += len(strings[-1]) if cur_line_len + 1 > linewidth or '\n' in strings[-1]: # This string and a comma don't fit on this line separators[-1] = ',\n' return tuple(separators)
[ "def", "_separators", "(", "strings", ",", "linewidth", ")", ":", "if", "len", "(", "strings", ")", "<=", "1", ":", "return", "(", ")", "indent_len", "=", "4", "separators", "=", "[", "]", "cur_line_len", "=", "indent_len", "+", "len", "(", "strings", ...
Return separators that keep joined strings within the line width.
[ "Return", "separators", "that", "keep", "joined", "strings", "within", "the", "line", "width", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L1019-L1072
231,847
odlgroup/odl
odl/util/utility.py
repr_string
def repr_string(outer_string, inner_strings, allow_mixed_seps=True): r"""Return a pretty string for ``repr``. The returned string is formatted such that it does not extend beyond the line boundary if avoidable. The line width is taken from NumPy's printing options that can be retrieved with `numpy.get_printoptions`. They can be temporarily overridden using the `npy_printoptions` context manager. See Examples for details. Parameters ---------- outer_str : str Name of the class or function that should be printed outside the parentheses. inner_strings : sequence of sequence of str Stringifications of the positional and optional arguments. This is usually the return value of `signature_string_parts`. allow_mixed_seps : bool, optional If ``False`` and the string does not fit on one line, use ``',\n'`` to separate all strings. By default, a mixture of ``', '`` and ``',\n'`` is used to fit as much on one line as possible. In case some of the ``inner_strings`` span multiple lines, it is usually advisable to set ``allow_mixed_seps`` to ``False`` since the result tends to be more readable that way. Returns ------- repr_string : str Full string that can be returned by a class' ``__repr__`` method. Examples -------- Things that fit into one line are printed on one line: >>> outer_string = 'MyClass' >>> inner_strings = [('1', "'hello'", 'None'), ... ("dtype='float32'",)] >>> print(repr_string(outer_string, inner_strings)) MyClass(1, 'hello', None, dtype='float32') Otherwise, if a part of ``inner_strings`` fits on a line of its own, it is printed on one line, but separated from the other part with a line break: >>> outer_string = 'MyClass' >>> inner_strings = [('2.0', "'this_is_a_very_long_argument_string'"), ... 
("long_opt_arg='another_quite_long_string'",)] >>> print(repr_string(outer_string, inner_strings)) MyClass( 2.0, 'this_is_a_very_long_argument_string', long_opt_arg='another_quite_long_string' ) If those parts are themselves too long, they are broken down into several lines: >>> outer_string = 'MyClass' >>> inner_strings = [("'this_is_a_very_long_argument_string'", ... "'another_very_long_argument_string'"), ... ("long_opt_arg='another_quite_long_string'", ... "long_opt2_arg='this_wont_fit_on_one_line_either'")] >>> print(repr_string(outer_string, inner_strings)) MyClass( 'this_is_a_very_long_argument_string', 'another_very_long_argument_string', long_opt_arg='another_quite_long_string', long_opt2_arg='this_wont_fit_on_one_line_either' ) The usage of mixed separators to optimally use horizontal space can be disabled by setting ``allow_mixed_seps=False``: >>> outer_string = 'MyClass' >>> inner_strings = [('2.0', "'this_is_a_very_long_argument_string'"), ... ("long_opt_arg='another_quite_long_string'",)] >>> print(repr_string(outer_string, inner_strings, allow_mixed_seps=False)) MyClass( 2.0, 'this_is_a_very_long_argument_string', long_opt_arg='another_quite_long_string' ) With the ``npy_printoptions`` context manager, the available line width can be changed: >>> outer_string = 'MyClass' >>> inner_strings = [('1', "'hello'", 'None'), ... ("dtype='float32'",)] >>> with npy_printoptions(linewidth=20): ... 
print(repr_string(outer_string, inner_strings)) MyClass( 1, 'hello', None, dtype='float32' ) """ linewidth = np.get_printoptions()['linewidth'] pos_strings, opt_strings = inner_strings # Length of the positional and optional argument parts of the signature, # including separators `', '` pos_sig_len = (sum(len(pstr) for pstr in pos_strings) + 2 * max((len(pos_strings) - 1), 0)) opt_sig_len = (sum(len(pstr) for pstr in opt_strings) + 2 * max((len(opt_strings) - 1), 0)) # Length of the one-line string, including 2 for the parentheses and # 2 for the joining ', ' repr_len = len(outer_string) + 2 + pos_sig_len + 2 + opt_sig_len if repr_len <= linewidth and not any('\n' in s for s in pos_strings + opt_strings): # Everything fits on one line fmt = '{}({})' pos_str = ', '.join(pos_strings) opt_str = ', '.join(opt_strings) parts_sep = ', ' else: # Need to split lines in some way fmt = '{}(\n{}\n)' if not allow_mixed_seps: pos_separators = [',\n'] * (len(pos_strings) - 1) else: pos_separators = _separators(pos_strings, linewidth) if len(pos_strings) == 0: pos_str = '' else: pos_str = pos_strings[0] for s, sep in zip(pos_strings[1:], pos_separators): pos_str = sep.join([pos_str, s]) if not allow_mixed_seps: opt_separators = [',\n'] * (len(opt_strings) - 1) else: opt_separators = _separators(opt_strings, linewidth) if len(opt_strings) == 0: opt_str = '' else: opt_str = opt_strings[0] for s, sep in zip(opt_strings[1:], opt_separators): opt_str = sep.join([opt_str, s]) # Check if we can put both parts on one line. This requires their # concatenation including 4 for indentation and 2 for ', ' to # be less than the line width. And they should contain no newline. 
if pos_str and opt_str: inner_len = 4 + len(pos_str) + 2 + len(opt_str) elif (pos_str and not opt_str) or (opt_str and not pos_str): inner_len = 4 + len(pos_str) + len(opt_str) else: inner_len = 0 if (not allow_mixed_seps or any('\n' in s for s in [pos_str, opt_str]) or inner_len > linewidth): parts_sep = ',\n' pos_str = indent(pos_str) opt_str = indent(opt_str) else: parts_sep = ', ' pos_str = indent(pos_str) # Don't indent `opt_str` parts = [s for s in [pos_str, opt_str] if s.strip()] # ignore empty inner_string = parts_sep.join(parts) return fmt.format(outer_string, inner_string)
python
def repr_string(outer_string, inner_strings, allow_mixed_seps=True): r"""Return a pretty string for ``repr``. The returned string is formatted such that it does not extend beyond the line boundary if avoidable. The line width is taken from NumPy's printing options that can be retrieved with `numpy.get_printoptions`. They can be temporarily overridden using the `npy_printoptions` context manager. See Examples for details. Parameters ---------- outer_str : str Name of the class or function that should be printed outside the parentheses. inner_strings : sequence of sequence of str Stringifications of the positional and optional arguments. This is usually the return value of `signature_string_parts`. allow_mixed_seps : bool, optional If ``False`` and the string does not fit on one line, use ``',\n'`` to separate all strings. By default, a mixture of ``', '`` and ``',\n'`` is used to fit as much on one line as possible. In case some of the ``inner_strings`` span multiple lines, it is usually advisable to set ``allow_mixed_seps`` to ``False`` since the result tends to be more readable that way. Returns ------- repr_string : str Full string that can be returned by a class' ``__repr__`` method. Examples -------- Things that fit into one line are printed on one line: >>> outer_string = 'MyClass' >>> inner_strings = [('1', "'hello'", 'None'), ... ("dtype='float32'",)] >>> print(repr_string(outer_string, inner_strings)) MyClass(1, 'hello', None, dtype='float32') Otherwise, if a part of ``inner_strings`` fits on a line of its own, it is printed on one line, but separated from the other part with a line break: >>> outer_string = 'MyClass' >>> inner_strings = [('2.0', "'this_is_a_very_long_argument_string'"), ... 
("long_opt_arg='another_quite_long_string'",)] >>> print(repr_string(outer_string, inner_strings)) MyClass( 2.0, 'this_is_a_very_long_argument_string', long_opt_arg='another_quite_long_string' ) If those parts are themselves too long, they are broken down into several lines: >>> outer_string = 'MyClass' >>> inner_strings = [("'this_is_a_very_long_argument_string'", ... "'another_very_long_argument_string'"), ... ("long_opt_arg='another_quite_long_string'", ... "long_opt2_arg='this_wont_fit_on_one_line_either'")] >>> print(repr_string(outer_string, inner_strings)) MyClass( 'this_is_a_very_long_argument_string', 'another_very_long_argument_string', long_opt_arg='another_quite_long_string', long_opt2_arg='this_wont_fit_on_one_line_either' ) The usage of mixed separators to optimally use horizontal space can be disabled by setting ``allow_mixed_seps=False``: >>> outer_string = 'MyClass' >>> inner_strings = [('2.0', "'this_is_a_very_long_argument_string'"), ... ("long_opt_arg='another_quite_long_string'",)] >>> print(repr_string(outer_string, inner_strings, allow_mixed_seps=False)) MyClass( 2.0, 'this_is_a_very_long_argument_string', long_opt_arg='another_quite_long_string' ) With the ``npy_printoptions`` context manager, the available line width can be changed: >>> outer_string = 'MyClass' >>> inner_strings = [('1', "'hello'", 'None'), ... ("dtype='float32'",)] >>> with npy_printoptions(linewidth=20): ... 
print(repr_string(outer_string, inner_strings)) MyClass( 1, 'hello', None, dtype='float32' ) """ linewidth = np.get_printoptions()['linewidth'] pos_strings, opt_strings = inner_strings # Length of the positional and optional argument parts of the signature, # including separators `', '` pos_sig_len = (sum(len(pstr) for pstr in pos_strings) + 2 * max((len(pos_strings) - 1), 0)) opt_sig_len = (sum(len(pstr) for pstr in opt_strings) + 2 * max((len(opt_strings) - 1), 0)) # Length of the one-line string, including 2 for the parentheses and # 2 for the joining ', ' repr_len = len(outer_string) + 2 + pos_sig_len + 2 + opt_sig_len if repr_len <= linewidth and not any('\n' in s for s in pos_strings + opt_strings): # Everything fits on one line fmt = '{}({})' pos_str = ', '.join(pos_strings) opt_str = ', '.join(opt_strings) parts_sep = ', ' else: # Need to split lines in some way fmt = '{}(\n{}\n)' if not allow_mixed_seps: pos_separators = [',\n'] * (len(pos_strings) - 1) else: pos_separators = _separators(pos_strings, linewidth) if len(pos_strings) == 0: pos_str = '' else: pos_str = pos_strings[0] for s, sep in zip(pos_strings[1:], pos_separators): pos_str = sep.join([pos_str, s]) if not allow_mixed_seps: opt_separators = [',\n'] * (len(opt_strings) - 1) else: opt_separators = _separators(opt_strings, linewidth) if len(opt_strings) == 0: opt_str = '' else: opt_str = opt_strings[0] for s, sep in zip(opt_strings[1:], opt_separators): opt_str = sep.join([opt_str, s]) # Check if we can put both parts on one line. This requires their # concatenation including 4 for indentation and 2 for ', ' to # be less than the line width. And they should contain no newline. 
if pos_str and opt_str: inner_len = 4 + len(pos_str) + 2 + len(opt_str) elif (pos_str and not opt_str) or (opt_str and not pos_str): inner_len = 4 + len(pos_str) + len(opt_str) else: inner_len = 0 if (not allow_mixed_seps or any('\n' in s for s in [pos_str, opt_str]) or inner_len > linewidth): parts_sep = ',\n' pos_str = indent(pos_str) opt_str = indent(opt_str) else: parts_sep = ', ' pos_str = indent(pos_str) # Don't indent `opt_str` parts = [s for s in [pos_str, opt_str] if s.strip()] # ignore empty inner_string = parts_sep.join(parts) return fmt.format(outer_string, inner_string)
[ "def", "repr_string", "(", "outer_string", ",", "inner_strings", ",", "allow_mixed_seps", "=", "True", ")", ":", "linewidth", "=", "np", ".", "get_printoptions", "(", ")", "[", "'linewidth'", "]", "pos_strings", ",", "opt_strings", "=", "inner_strings", "# Lengt...
r"""Return a pretty string for ``repr``. The returned string is formatted such that it does not extend beyond the line boundary if avoidable. The line width is taken from NumPy's printing options that can be retrieved with `numpy.get_printoptions`. They can be temporarily overridden using the `npy_printoptions` context manager. See Examples for details. Parameters ---------- outer_str : str Name of the class or function that should be printed outside the parentheses. inner_strings : sequence of sequence of str Stringifications of the positional and optional arguments. This is usually the return value of `signature_string_parts`. allow_mixed_seps : bool, optional If ``False`` and the string does not fit on one line, use ``',\n'`` to separate all strings. By default, a mixture of ``', '`` and ``',\n'`` is used to fit as much on one line as possible. In case some of the ``inner_strings`` span multiple lines, it is usually advisable to set ``allow_mixed_seps`` to ``False`` since the result tends to be more readable that way. Returns ------- repr_string : str Full string that can be returned by a class' ``__repr__`` method. Examples -------- Things that fit into one line are printed on one line: >>> outer_string = 'MyClass' >>> inner_strings = [('1', "'hello'", 'None'), ... ("dtype='float32'",)] >>> print(repr_string(outer_string, inner_strings)) MyClass(1, 'hello', None, dtype='float32') Otherwise, if a part of ``inner_strings`` fits on a line of its own, it is printed on one line, but separated from the other part with a line break: >>> outer_string = 'MyClass' >>> inner_strings = [('2.0', "'this_is_a_very_long_argument_string'"), ... 
("long_opt_arg='another_quite_long_string'",)] >>> print(repr_string(outer_string, inner_strings)) MyClass( 2.0, 'this_is_a_very_long_argument_string', long_opt_arg='another_quite_long_string' ) If those parts are themselves too long, they are broken down into several lines: >>> outer_string = 'MyClass' >>> inner_strings = [("'this_is_a_very_long_argument_string'", ... "'another_very_long_argument_string'"), ... ("long_opt_arg='another_quite_long_string'", ... "long_opt2_arg='this_wont_fit_on_one_line_either'")] >>> print(repr_string(outer_string, inner_strings)) MyClass( 'this_is_a_very_long_argument_string', 'another_very_long_argument_string', long_opt_arg='another_quite_long_string', long_opt2_arg='this_wont_fit_on_one_line_either' ) The usage of mixed separators to optimally use horizontal space can be disabled by setting ``allow_mixed_seps=False``: >>> outer_string = 'MyClass' >>> inner_strings = [('2.0', "'this_is_a_very_long_argument_string'"), ... ("long_opt_arg='another_quite_long_string'",)] >>> print(repr_string(outer_string, inner_strings, allow_mixed_seps=False)) MyClass( 2.0, 'this_is_a_very_long_argument_string', long_opt_arg='another_quite_long_string' ) With the ``npy_printoptions`` context manager, the available line width can be changed: >>> outer_string = 'MyClass' >>> inner_strings = [('1', "'hello'", 'None'), ... ("dtype='float32'",)] >>> with npy_printoptions(linewidth=20): ... print(repr_string(outer_string, inner_strings)) MyClass( 1, 'hello', None, dtype='float32' )
[ "r", "Return", "a", "pretty", "string", "for", "repr", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L1075-L1242
231,848
odlgroup/odl
odl/util/utility.py
attribute_repr_string
def attribute_repr_string(inst_str, attr_str): """Return a repr string for an attribute that respects line width. Parameters ---------- inst_str : str Stringification of a class instance. attr_str : str Name of the attribute (not including the ``'.'``). Returns ------- attr_repr_str : str Concatenation of the two strings in a way that the line width is respected. Examples -------- >>> inst_str = 'rn((2, 3))' >>> attr_str = 'byaxis' >>> print(attribute_repr_string(inst_str, attr_str)) rn((2, 3)).byaxis >>> inst_str = 'MyClass()' >>> attr_str = 'attr_name' >>> print(attribute_repr_string(inst_str, attr_str)) MyClass().attr_name >>> inst_str = 'MyClass' >>> attr_str = 'class_attr' >>> print(attribute_repr_string(inst_str, attr_str)) MyClass.class_attr >>> long_inst_str = ( ... "MyClass('long string that will definitely trigger a line break')" ... ) >>> long_attr_str = 'long_attribute_name' >>> print(attribute_repr_string(long_inst_str, long_attr_str)) MyClass( 'long string that will definitely trigger a line break' ).long_attribute_name """ linewidth = np.get_printoptions()['linewidth'] if (len(inst_str) + 1 + len(attr_str) <= linewidth or '(' not in inst_str): # Instance string + dot + attribute string fit in one line or # no parentheses -> keep instance string as-is and append attr string parts = [inst_str, attr_str] else: # TODO(kohr-h): use `maxsplit=1` kwarg, not supported in Py 2 left, rest = inst_str.split('(', 1) right, middle = rest[::-1].split(')', 1) middle, right = middle[::-1], right[::-1] if middle.startswith('\n') and middle.endswith('\n'): # Already on multiple lines new_inst_str = inst_str else: init_parts = [left] if middle: init_parts.append(indent(middle)) new_inst_str = '(\n'.join(init_parts) + '\n)' + right parts = [new_inst_str, attr_str] return '.'.join(parts)
python
def attribute_repr_string(inst_str, attr_str): linewidth = np.get_printoptions()['linewidth'] if (len(inst_str) + 1 + len(attr_str) <= linewidth or '(' not in inst_str): # Instance string + dot + attribute string fit in one line or # no parentheses -> keep instance string as-is and append attr string parts = [inst_str, attr_str] else: # TODO(kohr-h): use `maxsplit=1` kwarg, not supported in Py 2 left, rest = inst_str.split('(', 1) right, middle = rest[::-1].split(')', 1) middle, right = middle[::-1], right[::-1] if middle.startswith('\n') and middle.endswith('\n'): # Already on multiple lines new_inst_str = inst_str else: init_parts = [left] if middle: init_parts.append(indent(middle)) new_inst_str = '(\n'.join(init_parts) + '\n)' + right parts = [new_inst_str, attr_str] return '.'.join(parts)
[ "def", "attribute_repr_string", "(", "inst_str", ",", "attr_str", ")", ":", "linewidth", "=", "np", ".", "get_printoptions", "(", ")", "[", "'linewidth'", "]", "if", "(", "len", "(", "inst_str", ")", "+", "1", "+", "len", "(", "attr_str", ")", "<=", "l...
Return a repr string for an attribute that respects line width. Parameters ---------- inst_str : str Stringification of a class instance. attr_str : str Name of the attribute (not including the ``'.'``). Returns ------- attr_repr_str : str Concatenation of the two strings in a way that the line width is respected. Examples -------- >>> inst_str = 'rn((2, 3))' >>> attr_str = 'byaxis' >>> print(attribute_repr_string(inst_str, attr_str)) rn((2, 3)).byaxis >>> inst_str = 'MyClass()' >>> attr_str = 'attr_name' >>> print(attribute_repr_string(inst_str, attr_str)) MyClass().attr_name >>> inst_str = 'MyClass' >>> attr_str = 'class_attr' >>> print(attribute_repr_string(inst_str, attr_str)) MyClass.class_attr >>> long_inst_str = ( ... "MyClass('long string that will definitely trigger a line break')" ... ) >>> long_attr_str = 'long_attribute_name' >>> print(attribute_repr_string(long_inst_str, long_attr_str)) MyClass( 'long string that will definitely trigger a line break' ).long_attribute_name
[ "Return", "a", "repr", "string", "for", "an", "attribute", "that", "respects", "line", "width", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L1245-L1306
231,849
odlgroup/odl
odl/util/utility.py
method_repr_string
def method_repr_string(inst_str, meth_str, arg_strs=None, allow_mixed_seps=True): r"""Return a repr string for a method that respects line width. This function is useful to generate a ``repr`` string for a derived class that is created through a method, for instance :: functional.translated(x) as a better way of representing :: FunctionalTranslation(functional, x) Parameters ---------- inst_str : str Stringification of a class instance. meth_str : str Name of the method (not including the ``'.'``). arg_strs : sequence of str, optional Stringification of the arguments to the method. allow_mixed_seps : bool, optional If ``False`` and the argument strings do not fit on one line, use ``',\n'`` to separate all strings. By default, a mixture of ``', '`` and ``',\n'`` is used to fit as much on one line as possible. In case some of the ``arg_strs`` span multiple lines, it is usually advisable to set ``allow_mixed_seps`` to ``False`` since the result tends to be more readable that way. Returns ------- meth_repr_str : str Concatenation of all strings in a way that the line width is respected. Examples -------- >>> inst_str = 'MyClass' >>> meth_str = 'empty' >>> arg_strs = [] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.empty() >>> inst_str = 'MyClass' >>> meth_str = 'fromfile' >>> arg_strs = ["'tmpfile.txt'"] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.fromfile('tmpfile.txt') >>> inst_str = "MyClass('init string')" >>> meth_str = 'method' >>> arg_strs = ['2.0'] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass('init string').method(2.0) >>> long_inst_str = ( ... "MyClass('long string that will definitely trigger a line break')" ... 
) >>> meth_str = 'method' >>> long_arg1 = "'long argument string that should come on the next line'" >>> arg2 = 'param1=1' >>> arg3 = 'param2=2.0' >>> arg_strs = [long_arg1, arg2, arg3] >>> print(method_repr_string(long_inst_str, meth_str, arg_strs)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 ) >>> print(method_repr_string(long_inst_str, meth_str, arg_strs, ... allow_mixed_seps=False)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 ) """ linewidth = np.get_printoptions()['linewidth'] # Part up to the method name if (len(inst_str) + 1 + len(meth_str) + 1 <= linewidth or '(' not in inst_str): init_parts = [inst_str, meth_str] # Length of the line to the end of the method name meth_line_start_len = len(inst_str) + 1 + len(meth_str) else: # TODO(kohr-h): use `maxsplit=1` kwarg, not supported in Py 2 left, rest = inst_str.split('(', 1) right, middle = rest[::-1].split(')', 1) middle, right = middle[::-1], right[::-1] if middle.startswith('\n') and middle.endswith('\n'): # Already on multiple lines new_inst_str = inst_str else: new_inst_str = '(\n'.join([left, indent(middle)]) + '\n)' + right # Length of the line to the end of the method name, consisting of # ')' + '.' 
+ <method name> meth_line_start_len = 1 + 1 + len(meth_str) init_parts = [new_inst_str, meth_str] # Method call part arg_str_oneline = ', '.join(arg_strs) if meth_line_start_len + 1 + len(arg_str_oneline) + 1 <= linewidth: meth_call_str = '(' + arg_str_oneline + ')' elif not arg_str_oneline: meth_call_str = '(\n)' else: if allow_mixed_seps: arg_seps = _separators(arg_strs, linewidth - 4) # indented else: arg_seps = [',\n'] * (len(arg_strs) - 1) full_arg_str = '' for arg_str, sep in zip_longest(arg_strs, arg_seps, fillvalue=''): full_arg_str += arg_str + sep meth_call_str = '(\n' + indent(full_arg_str) + '\n)' return '.'.join(init_parts) + meth_call_str
python
def method_repr_string(inst_str, meth_str, arg_strs=None, allow_mixed_seps=True): r"""Return a repr string for a method that respects line width. This function is useful to generate a ``repr`` string for a derived class that is created through a method, for instance :: functional.translated(x) as a better way of representing :: FunctionalTranslation(functional, x) Parameters ---------- inst_str : str Stringification of a class instance. meth_str : str Name of the method (not including the ``'.'``). arg_strs : sequence of str, optional Stringification of the arguments to the method. allow_mixed_seps : bool, optional If ``False`` and the argument strings do not fit on one line, use ``',\n'`` to separate all strings. By default, a mixture of ``', '`` and ``',\n'`` is used to fit as much on one line as possible. In case some of the ``arg_strs`` span multiple lines, it is usually advisable to set ``allow_mixed_seps`` to ``False`` since the result tends to be more readable that way. Returns ------- meth_repr_str : str Concatenation of all strings in a way that the line width is respected. Examples -------- >>> inst_str = 'MyClass' >>> meth_str = 'empty' >>> arg_strs = [] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.empty() >>> inst_str = 'MyClass' >>> meth_str = 'fromfile' >>> arg_strs = ["'tmpfile.txt'"] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.fromfile('tmpfile.txt') >>> inst_str = "MyClass('init string')" >>> meth_str = 'method' >>> arg_strs = ['2.0'] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass('init string').method(2.0) >>> long_inst_str = ( ... "MyClass('long string that will definitely trigger a line break')" ... 
) >>> meth_str = 'method' >>> long_arg1 = "'long argument string that should come on the next line'" >>> arg2 = 'param1=1' >>> arg3 = 'param2=2.0' >>> arg_strs = [long_arg1, arg2, arg3] >>> print(method_repr_string(long_inst_str, meth_str, arg_strs)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 ) >>> print(method_repr_string(long_inst_str, meth_str, arg_strs, ... allow_mixed_seps=False)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 ) """ linewidth = np.get_printoptions()['linewidth'] # Part up to the method name if (len(inst_str) + 1 + len(meth_str) + 1 <= linewidth or '(' not in inst_str): init_parts = [inst_str, meth_str] # Length of the line to the end of the method name meth_line_start_len = len(inst_str) + 1 + len(meth_str) else: # TODO(kohr-h): use `maxsplit=1` kwarg, not supported in Py 2 left, rest = inst_str.split('(', 1) right, middle = rest[::-1].split(')', 1) middle, right = middle[::-1], right[::-1] if middle.startswith('\n') and middle.endswith('\n'): # Already on multiple lines new_inst_str = inst_str else: new_inst_str = '(\n'.join([left, indent(middle)]) + '\n)' + right # Length of the line to the end of the method name, consisting of # ')' + '.' 
+ <method name> meth_line_start_len = 1 + 1 + len(meth_str) init_parts = [new_inst_str, meth_str] # Method call part arg_str_oneline = ', '.join(arg_strs) if meth_line_start_len + 1 + len(arg_str_oneline) + 1 <= linewidth: meth_call_str = '(' + arg_str_oneline + ')' elif not arg_str_oneline: meth_call_str = '(\n)' else: if allow_mixed_seps: arg_seps = _separators(arg_strs, linewidth - 4) # indented else: arg_seps = [',\n'] * (len(arg_strs) - 1) full_arg_str = '' for arg_str, sep in zip_longest(arg_strs, arg_seps, fillvalue=''): full_arg_str += arg_str + sep meth_call_str = '(\n' + indent(full_arg_str) + '\n)' return '.'.join(init_parts) + meth_call_str
[ "def", "method_repr_string", "(", "inst_str", ",", "meth_str", ",", "arg_strs", "=", "None", ",", "allow_mixed_seps", "=", "True", ")", ":", "linewidth", "=", "np", ".", "get_printoptions", "(", ")", "[", "'linewidth'", "]", "# Part up to the method name", "if",...
r"""Return a repr string for a method that respects line width. This function is useful to generate a ``repr`` string for a derived class that is created through a method, for instance :: functional.translated(x) as a better way of representing :: FunctionalTranslation(functional, x) Parameters ---------- inst_str : str Stringification of a class instance. meth_str : str Name of the method (not including the ``'.'``). arg_strs : sequence of str, optional Stringification of the arguments to the method. allow_mixed_seps : bool, optional If ``False`` and the argument strings do not fit on one line, use ``',\n'`` to separate all strings. By default, a mixture of ``', '`` and ``',\n'`` is used to fit as much on one line as possible. In case some of the ``arg_strs`` span multiple lines, it is usually advisable to set ``allow_mixed_seps`` to ``False`` since the result tends to be more readable that way. Returns ------- meth_repr_str : str Concatenation of all strings in a way that the line width is respected. Examples -------- >>> inst_str = 'MyClass' >>> meth_str = 'empty' >>> arg_strs = [] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.empty() >>> inst_str = 'MyClass' >>> meth_str = 'fromfile' >>> arg_strs = ["'tmpfile.txt'"] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.fromfile('tmpfile.txt') >>> inst_str = "MyClass('init string')" >>> meth_str = 'method' >>> arg_strs = ['2.0'] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass('init string').method(2.0) >>> long_inst_str = ( ... "MyClass('long string that will definitely trigger a line break')" ... 
) >>> meth_str = 'method' >>> long_arg1 = "'long argument string that should come on the next line'" >>> arg2 = 'param1=1' >>> arg3 = 'param2=2.0' >>> arg_strs = [long_arg1, arg2, arg3] >>> print(method_repr_string(long_inst_str, meth_str, arg_strs)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 ) >>> print(method_repr_string(long_inst_str, meth_str, arg_strs, ... allow_mixed_seps=False)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 )
[ "r", "Return", "a", "repr", "string", "for", "a", "method", "that", "respects", "line", "width", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L1309-L1430
231,850
odlgroup/odl
odl/util/utility.py
pkg_supports
def pkg_supports(feature, pkg_version, pkg_feat_dict): """Return bool indicating whether a package supports ``feature``. Parameters ---------- feature : str Name of a potential feature of a package. pkg_version : str Version of the package that should be checked for presence of the feature. pkg_feat_dict : dict Specification of features of a package. Each item has the following form:: feature_name: version_specification Here, ``feature_name`` is a string that is matched against ``feature``, and ``version_specification`` is a string or a sequence of strings that specifies version sets. These specifications are the same as for ``setuptools`` requirements, just without the package name. A ``None`` entry signals "no support in any version", i.e., always ``False``. If a sequence of requirements are given, they are OR-ed together. See ``Examples`` for details. Returns ------- supports : bool ``True`` if ``pkg_version`` of the package in question supports ``feature``, ``False`` otherwise. Examples -------- >>> feat_dict = { ... 'feat1': '==0.5.1', ... 'feat2': '>0.6, <=0.9', # both required simultaneously ... 'feat3': ['>0.6', '<=0.9'], # only one required, i.e. always True ... 'feat4': ['==0.5.1', '>0.6, <=0.9'], ... 'feat5': None ... 
} >>> pkg_supports('feat1', '0.5.1', feat_dict) True >>> pkg_supports('feat1', '0.4', feat_dict) False >>> pkg_supports('feat2', '0.5.1', feat_dict) False >>> pkg_supports('feat2', '0.6.1', feat_dict) True >>> pkg_supports('feat2', '0.9', feat_dict) True >>> pkg_supports('feat2', '1.0', feat_dict) False >>> pkg_supports('feat3', '0.4', feat_dict) True >>> pkg_supports('feat3', '1.0', feat_dict) True >>> pkg_supports('feat4', '0.5.1', feat_dict) True >>> pkg_supports('feat4', '0.6', feat_dict) False >>> pkg_supports('feat4', '0.6.1', feat_dict) True >>> pkg_supports('feat4', '1.0', feat_dict) False >>> pkg_supports('feat5', '0.6.1', feat_dict) False >>> pkg_supports('feat5', '1.0', feat_dict) False """ from pkg_resources import parse_requirements feature = str(feature) pkg_version = str(pkg_version) supp_versions = pkg_feat_dict.get(feature, None) if supp_versions is None: return False # Make sequence from single string if is_string(supp_versions): supp_versions = [supp_versions] # Make valid package requirements ver_specs = ['pkg' + supp_ver for supp_ver in supp_versions] # Each parse_requirements list contains only one entry since we specify # only one package ver_reqs = [list(parse_requirements(ver_spec))[0] for ver_spec in ver_specs] # If one of the requirements in the list is met, return True for req in ver_reqs: if req.specifier.contains(pkg_version, prereleases=True): return True # No match return False
python
def pkg_supports(feature, pkg_version, pkg_feat_dict): from pkg_resources import parse_requirements feature = str(feature) pkg_version = str(pkg_version) supp_versions = pkg_feat_dict.get(feature, None) if supp_versions is None: return False # Make sequence from single string if is_string(supp_versions): supp_versions = [supp_versions] # Make valid package requirements ver_specs = ['pkg' + supp_ver for supp_ver in supp_versions] # Each parse_requirements list contains only one entry since we specify # only one package ver_reqs = [list(parse_requirements(ver_spec))[0] for ver_spec in ver_specs] # If one of the requirements in the list is met, return True for req in ver_reqs: if req.specifier.contains(pkg_version, prereleases=True): return True # No match return False
[ "def", "pkg_supports", "(", "feature", ",", "pkg_version", ",", "pkg_feat_dict", ")", ":", "from", "pkg_resources", "import", "parse_requirements", "feature", "=", "str", "(", "feature", ")", "pkg_version", "=", "str", "(", "pkg_version", ")", "supp_versions", "...
Return bool indicating whether a package supports ``feature``. Parameters ---------- feature : str Name of a potential feature of a package. pkg_version : str Version of the package that should be checked for presence of the feature. pkg_feat_dict : dict Specification of features of a package. Each item has the following form:: feature_name: version_specification Here, ``feature_name`` is a string that is matched against ``feature``, and ``version_specification`` is a string or a sequence of strings that specifies version sets. These specifications are the same as for ``setuptools`` requirements, just without the package name. A ``None`` entry signals "no support in any version", i.e., always ``False``. If a sequence of requirements are given, they are OR-ed together. See ``Examples`` for details. Returns ------- supports : bool ``True`` if ``pkg_version`` of the package in question supports ``feature``, ``False`` otherwise. Examples -------- >>> feat_dict = { ... 'feat1': '==0.5.1', ... 'feat2': '>0.6, <=0.9', # both required simultaneously ... 'feat3': ['>0.6', '<=0.9'], # only one required, i.e. always True ... 'feat4': ['==0.5.1', '>0.6, <=0.9'], ... 'feat5': None ... } >>> pkg_supports('feat1', '0.5.1', feat_dict) True >>> pkg_supports('feat1', '0.4', feat_dict) False >>> pkg_supports('feat2', '0.5.1', feat_dict) False >>> pkg_supports('feat2', '0.6.1', feat_dict) True >>> pkg_supports('feat2', '0.9', feat_dict) True >>> pkg_supports('feat2', '1.0', feat_dict) False >>> pkg_supports('feat3', '0.4', feat_dict) True >>> pkg_supports('feat3', '1.0', feat_dict) True >>> pkg_supports('feat4', '0.5.1', feat_dict) True >>> pkg_supports('feat4', '0.6', feat_dict) False >>> pkg_supports('feat4', '0.6.1', feat_dict) True >>> pkg_supports('feat4', '1.0', feat_dict) False >>> pkg_supports('feat5', '0.6.1', feat_dict) False >>> pkg_supports('feat5', '1.0', feat_dict) False
[ "Return", "bool", "indicating", "whether", "a", "package", "supports", "feature", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L1438-L1533
231,851
odlgroup/odl
odl/util/utility.py
unique
def unique(seq): """Return the unique values in a sequence. Parameters ---------- seq : sequence Sequence with (possibly duplicate) elements. Returns ------- unique : list Unique elements of ``seq``. Order is guaranteed to be the same as in seq. Examples -------- Determine unique elements in list >>> unique([1, 2, 3, 3]) [1, 2, 3] >>> unique((1, 'str', 'str')) [1, 'str'] The utility also works with unhashable types: >>> unique((1, [1], [1])) [1, [1]] """ # First check if all elements are hashable, if so O(n) can be done try: return list(OrderedDict.fromkeys(seq)) except TypeError: # Unhashable, resort to O(n^2) unique_values = [] for i in seq: if i not in unique_values: unique_values.append(i) return unique_values
python
def unique(seq): # First check if all elements are hashable, if so O(n) can be done try: return list(OrderedDict.fromkeys(seq)) except TypeError: # Unhashable, resort to O(n^2) unique_values = [] for i in seq: if i not in unique_values: unique_values.append(i) return unique_values
[ "def", "unique", "(", "seq", ")", ":", "# First check if all elements are hashable, if so O(n) can be done", "try", ":", "return", "list", "(", "OrderedDict", ".", "fromkeys", "(", "seq", ")", ")", "except", "TypeError", ":", "# Unhashable, resort to O(n^2)", "unique_va...
Return the unique values in a sequence. Parameters ---------- seq : sequence Sequence with (possibly duplicate) elements. Returns ------- unique : list Unique elements of ``seq``. Order is guaranteed to be the same as in seq. Examples -------- Determine unique elements in list >>> unique([1, 2, 3, 3]) [1, 2, 3] >>> unique((1, 'str', 'str')) [1, 'str'] The utility also works with unhashable types: >>> unique((1, [1], [1])) [1, [1]]
[ "Return", "the", "unique", "values", "in", "a", "sequence", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L1573-L1611
231,852
odlgroup/odl
odl/space/space_utils.py
vector
def vector(array, dtype=None, order=None, impl='numpy'): """Create a vector from an array-like object. Parameters ---------- array : `array-like` Array from which to create the vector. Scalars become one-dimensional vectors. dtype : optional Set the data type of the vector manually with this option. By default, the space type is inferred from the input data. order : {None, 'C', 'F'}, optional Axis ordering of the data storage. For the default ``None``, no contiguousness is enforced, avoiding a copy if possible. impl : str, optional Impmlementation back-end for the space. See `odl.space.entry_points.tensor_space_impl_names` for available options. Returns ------- vector : `Tensor` Vector created from the input array. Its concrete type depends on the provided arguments. Notes ----- This is a convenience function and not intended for use in speed-critical algorithms. Examples -------- Create one-dimensional vectors: >>> odl.vector([1, 2, 3]) # No automatic cast to float tensor_space(3, dtype=int).element([1, 2, 3]) >>> odl.vector([1, 2, 3], dtype=float) rn(3).element([ 1., 2., 3.]) >>> odl.vector([1, 2 - 1j, 3]) cn(3).element([ 1.+0.j, 2.-1.j, 3.+0.j]) Non-scalar types are also supported: >>> odl.vector([True, True, False]) tensor_space(3, dtype=bool).element([ True, True, False]) The function also supports multi-dimensional input: >>> odl.vector([[1, 2, 3], ... [4, 5, 6]]) tensor_space((2, 3), dtype=int).element( [[1, 2, 3], [4, 5, 6]] ) """ # Sanitize input arr = np.array(array, copy=False, order=order, ndmin=1) if arr.dtype is object: raise ValueError('invalid input data resulting in `dtype==object`') # Set dtype if dtype is not None: space_dtype = dtype else: space_dtype = arr.dtype space = tensor_space(arr.shape, dtype=space_dtype, impl=impl) return space.element(arr)
python
def vector(array, dtype=None, order=None, impl='numpy'): # Sanitize input arr = np.array(array, copy=False, order=order, ndmin=1) if arr.dtype is object: raise ValueError('invalid input data resulting in `dtype==object`') # Set dtype if dtype is not None: space_dtype = dtype else: space_dtype = arr.dtype space = tensor_space(arr.shape, dtype=space_dtype, impl=impl) return space.element(arr)
[ "def", "vector", "(", "array", ",", "dtype", "=", "None", ",", "order", "=", "None", ",", "impl", "=", "'numpy'", ")", ":", "# Sanitize input", "arr", "=", "np", ".", "array", "(", "array", ",", "copy", "=", "False", ",", "order", "=", "order", ","...
Create a vector from an array-like object. Parameters ---------- array : `array-like` Array from which to create the vector. Scalars become one-dimensional vectors. dtype : optional Set the data type of the vector manually with this option. By default, the space type is inferred from the input data. order : {None, 'C', 'F'}, optional Axis ordering of the data storage. For the default ``None``, no contiguousness is enforced, avoiding a copy if possible. impl : str, optional Impmlementation back-end for the space. See `odl.space.entry_points.tensor_space_impl_names` for available options. Returns ------- vector : `Tensor` Vector created from the input array. Its concrete type depends on the provided arguments. Notes ----- This is a convenience function and not intended for use in speed-critical algorithms. Examples -------- Create one-dimensional vectors: >>> odl.vector([1, 2, 3]) # No automatic cast to float tensor_space(3, dtype=int).element([1, 2, 3]) >>> odl.vector([1, 2, 3], dtype=float) rn(3).element([ 1., 2., 3.]) >>> odl.vector([1, 2 - 1j, 3]) cn(3).element([ 1.+0.j, 2.-1.j, 3.+0.j]) Non-scalar types are also supported: >>> odl.vector([True, True, False]) tensor_space(3, dtype=bool).element([ True, True, False]) The function also supports multi-dimensional input: >>> odl.vector([[1, 2, 3], ... [4, 5, 6]]) tensor_space((2, 3), dtype=int).element( [[1, 2, 3], [4, 5, 6]] )
[ "Create", "a", "vector", "from", "an", "array", "-", "like", "object", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/space_utils.py#L21-L88
231,853
odlgroup/odl
odl/space/space_utils.py
tensor_space
def tensor_space(shape, dtype=None, impl='numpy', **kwargs): """Return a tensor space with arbitrary scalar data type. Parameters ---------- shape : positive int or sequence of positive ints Number of entries per axis for elements in this space. A single integer results in a space with 1 axis. dtype : optional Data type of each element. Can be provided in any way the `numpy.dtype` function understands, e.g. as built-in type or as a string. For ``None``, the `TensorSpace.default_dtype` of the created space is used. impl : str, optional Impmlementation back-end for the space. See `odl.space.entry_points.tensor_space_impl_names` for available options. kwargs : Extra keyword arguments passed to the space constructor. Returns ------- space : `TensorSpace` Examples -------- Space of 3-tuples with ``uint64`` entries (although not strictly a vector space): >>> odl.tensor_space(3, dtype='uint64') tensor_space(3, dtype='uint64') 2x3 tensors with same data type: >>> odl.tensor_space((2, 3), dtype='uint64') tensor_space((2, 3), dtype='uint64') The default data type depends on the implementation. For ``impl='numpy'``, it is ``'float64'``: >>> ts = odl.tensor_space((2, 3)) >>> ts rn((2, 3)) >>> ts.dtype dtype('float64') See Also -------- rn, cn : Constructors for real and complex spaces """ tspace_cls = tensor_space_impl(impl) if dtype is None: dtype = tspace_cls.default_dtype() # Use args by keyword since the constructor may take other arguments # by position return tspace_cls(shape=shape, dtype=dtype, **kwargs)
python
def tensor_space(shape, dtype=None, impl='numpy', **kwargs): tspace_cls = tensor_space_impl(impl) if dtype is None: dtype = tspace_cls.default_dtype() # Use args by keyword since the constructor may take other arguments # by position return tspace_cls(shape=shape, dtype=dtype, **kwargs)
[ "def", "tensor_space", "(", "shape", ",", "dtype", "=", "None", ",", "impl", "=", "'numpy'", ",", "*", "*", "kwargs", ")", ":", "tspace_cls", "=", "tensor_space_impl", "(", "impl", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "tspace_cls", ".",...
Return a tensor space with arbitrary scalar data type. Parameters ---------- shape : positive int or sequence of positive ints Number of entries per axis for elements in this space. A single integer results in a space with 1 axis. dtype : optional Data type of each element. Can be provided in any way the `numpy.dtype` function understands, e.g. as built-in type or as a string. For ``None``, the `TensorSpace.default_dtype` of the created space is used. impl : str, optional Impmlementation back-end for the space. See `odl.space.entry_points.tensor_space_impl_names` for available options. kwargs : Extra keyword arguments passed to the space constructor. Returns ------- space : `TensorSpace` Examples -------- Space of 3-tuples with ``uint64`` entries (although not strictly a vector space): >>> odl.tensor_space(3, dtype='uint64') tensor_space(3, dtype='uint64') 2x3 tensors with same data type: >>> odl.tensor_space((2, 3), dtype='uint64') tensor_space((2, 3), dtype='uint64') The default data type depends on the implementation. For ``impl='numpy'``, it is ``'float64'``: >>> ts = odl.tensor_space((2, 3)) >>> ts rn((2, 3)) >>> ts.dtype dtype('float64') See Also -------- rn, cn : Constructors for real and complex spaces
[ "Return", "a", "tensor", "space", "with", "arbitrary", "scalar", "data", "type", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/space_utils.py#L91-L149
231,854
odlgroup/odl
odl/space/space_utils.py
cn
def cn(shape, dtype=None, impl='numpy', **kwargs): """Return a space of complex tensors. Parameters ---------- shape : positive int or sequence of positive ints Number of entries per axis for elements in this space. A single integer results in a space with 1 axis. dtype : optional Data type of each element. Can be provided in any way the `numpy.dtype` function understands, e.g. as built-in type or as a string. Only complex floating-point data types are allowed. For ``None``, the `TensorSpace.default_dtype` of the created space is used in the form ``default_dtype(ComplexNumbers())``. impl : str, optional Impmlementation back-end for the space. See `odl.space.entry_points.tensor_space_impl_names` for available options. kwargs : Extra keyword arguments passed to the space constructor. Returns ------- cn : `TensorSpace` Examples -------- Space of complex 3-tuples with ``complex64`` entries: >>> odl.cn(3, dtype='complex64') cn(3, dtype='complex64') Complex 2x3 tensors with ``complex64`` entries: >>> odl.cn((2, 3), dtype='complex64') cn((2, 3), dtype='complex64') The default data type depends on the implementation. For ``impl='numpy'``, it is ``'complex128'``: >>> space = odl.cn((2, 3)) >>> space cn((2, 3)) >>> space.dtype dtype('complex128') See Also -------- tensor_space : Space of tensors with arbitrary scalar data type. rn : Real tensor space. """ cn_cls = tensor_space_impl(impl) if dtype is None: dtype = cn_cls.default_dtype(ComplexNumbers()) # Use args by keyword since the constructor may take other arguments # by position cn = cn_cls(shape=shape, dtype=dtype, **kwargs) if not cn.is_complex: raise ValueError('data type {!r} not a complex floating-point type.' ''.format(dtype)) return cn
python
def cn(shape, dtype=None, impl='numpy', **kwargs): cn_cls = tensor_space_impl(impl) if dtype is None: dtype = cn_cls.default_dtype(ComplexNumbers()) # Use args by keyword since the constructor may take other arguments # by position cn = cn_cls(shape=shape, dtype=dtype, **kwargs) if not cn.is_complex: raise ValueError('data type {!r} not a complex floating-point type.' ''.format(dtype)) return cn
[ "def", "cn", "(", "shape", ",", "dtype", "=", "None", ",", "impl", "=", "'numpy'", ",", "*", "*", "kwargs", ")", ":", "cn_cls", "=", "tensor_space_impl", "(", "impl", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "cn_cls", ".", "default_dtype"...
Return a space of complex tensors. Parameters ---------- shape : positive int or sequence of positive ints Number of entries per axis for elements in this space. A single integer results in a space with 1 axis. dtype : optional Data type of each element. Can be provided in any way the `numpy.dtype` function understands, e.g. as built-in type or as a string. Only complex floating-point data types are allowed. For ``None``, the `TensorSpace.default_dtype` of the created space is used in the form ``default_dtype(ComplexNumbers())``. impl : str, optional Impmlementation back-end for the space. See `odl.space.entry_points.tensor_space_impl_names` for available options. kwargs : Extra keyword arguments passed to the space constructor. Returns ------- cn : `TensorSpace` Examples -------- Space of complex 3-tuples with ``complex64`` entries: >>> odl.cn(3, dtype='complex64') cn(3, dtype='complex64') Complex 2x3 tensors with ``complex64`` entries: >>> odl.cn((2, 3), dtype='complex64') cn((2, 3), dtype='complex64') The default data type depends on the implementation. For ``impl='numpy'``, it is ``'complex128'``: >>> space = odl.cn((2, 3)) >>> space cn((2, 3)) >>> space.dtype dtype('complex128') See Also -------- tensor_space : Space of tensors with arbitrary scalar data type. rn : Real tensor space.
[ "Return", "a", "space", "of", "complex", "tensors", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/space_utils.py#L152-L215
231,855
odlgroup/odl
odl/space/space_utils.py
rn
def rn(shape, dtype=None, impl='numpy', **kwargs): """Return a space of real tensors. Parameters ---------- shape : positive int or sequence of positive ints Number of entries per axis for elements in this space. A single integer results in a space with 1 axis. dtype : optional Data type of each element. Can be provided in any way the `numpy.dtype` function understands, e.g. as built-in type or as a string. Only real floating-point data types are allowed. For ``None``, the `TensorSpace.default_dtype` of the created space is used in the form ``default_dtype(RealNumbers())``. impl : str, optional Impmlementation back-end for the space. See `odl.space.entry_points.tensor_space_impl_names` for available options. kwargs : Extra keyword arguments passed to the space constructor. Returns ------- real_space : `TensorSpace` Examples -------- Space of real 3-tuples with ``float32`` entries: >>> odl.rn(3, dtype='float32') rn(3, dtype='float32') Real 2x3 tensors with ``float32`` entries: >>> odl.rn((2, 3), dtype='float32') rn((2, 3), dtype='float32') The default data type depends on the implementation. For ``impl='numpy'``, it is ``'float64'``: >>> ts = odl.rn((2, 3)) >>> ts rn((2, 3)) >>> ts.dtype dtype('float64') See Also -------- tensor_space : Space of tensors with arbitrary scalar data type. cn : Complex tensor space. """ rn_cls = tensor_space_impl(impl) if dtype is None: dtype = rn_cls.default_dtype(RealNumbers()) # Use args by keyword since the constructor may take other arguments # by position rn = rn_cls(shape=shape, dtype=dtype, **kwargs) if not rn.is_real: raise ValueError('data type {!r} not a real floating-point type.' ''.format(dtype)) return rn
python
def rn(shape, dtype=None, impl='numpy', **kwargs): rn_cls = tensor_space_impl(impl) if dtype is None: dtype = rn_cls.default_dtype(RealNumbers()) # Use args by keyword since the constructor may take other arguments # by position rn = rn_cls(shape=shape, dtype=dtype, **kwargs) if not rn.is_real: raise ValueError('data type {!r} not a real floating-point type.' ''.format(dtype)) return rn
[ "def", "rn", "(", "shape", ",", "dtype", "=", "None", ",", "impl", "=", "'numpy'", ",", "*", "*", "kwargs", ")", ":", "rn_cls", "=", "tensor_space_impl", "(", "impl", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "rn_cls", ".", "default_dtype"...
Return a space of real tensors. Parameters ---------- shape : positive int or sequence of positive ints Number of entries per axis for elements in this space. A single integer results in a space with 1 axis. dtype : optional Data type of each element. Can be provided in any way the `numpy.dtype` function understands, e.g. as built-in type or as a string. Only real floating-point data types are allowed. For ``None``, the `TensorSpace.default_dtype` of the created space is used in the form ``default_dtype(RealNumbers())``. impl : str, optional Impmlementation back-end for the space. See `odl.space.entry_points.tensor_space_impl_names` for available options. kwargs : Extra keyword arguments passed to the space constructor. Returns ------- real_space : `TensorSpace` Examples -------- Space of real 3-tuples with ``float32`` entries: >>> odl.rn(3, dtype='float32') rn(3, dtype='float32') Real 2x3 tensors with ``float32`` entries: >>> odl.rn((2, 3), dtype='float32') rn((2, 3), dtype='float32') The default data type depends on the implementation. For ``impl='numpy'``, it is ``'float64'``: >>> ts = odl.rn((2, 3)) >>> ts rn((2, 3)) >>> ts.dtype dtype('float64') See Also -------- tensor_space : Space of tensors with arbitrary scalar data type. cn : Complex tensor space.
[ "Return", "a", "space", "of", "real", "tensors", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/space_utils.py#L218-L281
231,856
odlgroup/odl
odl/trafos/wavelet.py
WaveletTransformBase.scales
def scales(self): """Get the scales of each coefficient. Returns ------- scales : ``range`` element The scale of each coefficient, given by an integer. 0 for the lowest resolution and self.nlevels for the highest. """ if self.impl == 'pywt': if self.__variant == 'forward': discr_space = self.domain wavelet_space = self.range else: discr_space = self.range wavelet_space = self.domain shapes = pywt.wavedecn_shapes(discr_space.shape, self.pywt_wavelet, mode=self.pywt_pad_mode, level=self.nlevels, axes=self.axes) coeff_list = [np.full(shapes[0], 0)] for i in range(1, 1 + len(shapes[1:])): coeff_list.append({k: np.full(shapes[i][k], i) for k in shapes[i].keys()}) coeffs = pywt.ravel_coeffs(coeff_list, axes=self.axes)[0] return wavelet_space.element(coeffs) else: raise RuntimeError("bad `impl` '{}'".format(self.impl))
python
def scales(self): if self.impl == 'pywt': if self.__variant == 'forward': discr_space = self.domain wavelet_space = self.range else: discr_space = self.range wavelet_space = self.domain shapes = pywt.wavedecn_shapes(discr_space.shape, self.pywt_wavelet, mode=self.pywt_pad_mode, level=self.nlevels, axes=self.axes) coeff_list = [np.full(shapes[0], 0)] for i in range(1, 1 + len(shapes[1:])): coeff_list.append({k: np.full(shapes[i][k], i) for k in shapes[i].keys()}) coeffs = pywt.ravel_coeffs(coeff_list, axes=self.axes)[0] return wavelet_space.element(coeffs) else: raise RuntimeError("bad `impl` '{}'".format(self.impl))
[ "def", "scales", "(", "self", ")", ":", "if", "self", ".", "impl", "==", "'pywt'", ":", "if", "self", ".", "__variant", "==", "'forward'", ":", "discr_space", "=", "self", ".", "domain", "wavelet_space", "=", "self", ".", "range", "else", ":", "discr_s...
Get the scales of each coefficient. Returns ------- scales : ``range`` element The scale of each coefficient, given by an integer. 0 for the lowest resolution and self.nlevels for the highest.
[ "Get", "the", "scales", "of", "each", "coefficient", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/wavelet.py#L235-L262
231,857
odlgroup/odl
odl/trafos/wavelet.py
WaveletTransform._call
def _call(self, x): """Return wavelet transform of ``x``.""" if self.impl == 'pywt': coeffs = pywt.wavedecn( x, wavelet=self.pywt_wavelet, level=self.nlevels, mode=self.pywt_pad_mode, axes=self.axes) return pywt.ravel_coeffs(coeffs, axes=self.axes)[0] else: raise RuntimeError("bad `impl` '{}'".format(self.impl))
python
def _call(self, x): if self.impl == 'pywt': coeffs = pywt.wavedecn( x, wavelet=self.pywt_wavelet, level=self.nlevels, mode=self.pywt_pad_mode, axes=self.axes) return pywt.ravel_coeffs(coeffs, axes=self.axes)[0] else: raise RuntimeError("bad `impl` '{}'".format(self.impl))
[ "def", "_call", "(", "self", ",", "x", ")", ":", "if", "self", ".", "impl", "==", "'pywt'", ":", "coeffs", "=", "pywt", ".", "wavedecn", "(", "x", ",", "wavelet", "=", "self", ".", "pywt_wavelet", ",", "level", "=", "self", ".", "nlevels", ",", "...
Return wavelet transform of ``x``.
[ "Return", "wavelet", "transform", "of", "x", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/wavelet.py#L424-L432
231,858
odlgroup/odl
odl/trafos/wavelet.py
WaveletTransform.adjoint
def adjoint(self): """Adjoint wavelet transform. Returns ------- adjoint : `WaveletTransformInverse` If the transform is orthogonal, the adjoint is the inverse. Raises ------ OpNotImplementedError if `is_orthogonal` is ``False`` """ if self.is_orthogonal: scale = 1 / self.domain.partition.cell_volume return scale * self.inverse else: # TODO: put adjoint here return super(WaveletTransform, self).adjoint
python
def adjoint(self): if self.is_orthogonal: scale = 1 / self.domain.partition.cell_volume return scale * self.inverse else: # TODO: put adjoint here return super(WaveletTransform, self).adjoint
[ "def", "adjoint", "(", "self", ")", ":", "if", "self", ".", "is_orthogonal", ":", "scale", "=", "1", "/", "self", ".", "domain", ".", "partition", ".", "cell_volume", "return", "scale", "*", "self", ".", "inverse", "else", ":", "# TODO: put adjoint here", ...
Adjoint wavelet transform. Returns ------- adjoint : `WaveletTransformInverse` If the transform is orthogonal, the adjoint is the inverse. Raises ------ OpNotImplementedError if `is_orthogonal` is ``False``
[ "Adjoint", "wavelet", "transform", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/wavelet.py#L435-L453
231,859
odlgroup/odl
odl/trafos/wavelet.py
WaveletTransform.inverse
def inverse(self): """Inverse wavelet transform. Returns ------- inverse : `WaveletTransformInverse` See Also -------- adjoint """ return WaveletTransformInverse( range=self.domain, wavelet=self.pywt_wavelet, nlevels=self.nlevels, pad_mode=self.pad_mode, pad_const=self.pad_const, impl=self.impl, axes=self.axes)
python
def inverse(self): return WaveletTransformInverse( range=self.domain, wavelet=self.pywt_wavelet, nlevels=self.nlevels, pad_mode=self.pad_mode, pad_const=self.pad_const, impl=self.impl, axes=self.axes)
[ "def", "inverse", "(", "self", ")", ":", "return", "WaveletTransformInverse", "(", "range", "=", "self", ".", "domain", ",", "wavelet", "=", "self", ".", "pywt_wavelet", ",", "nlevels", "=", "self", ".", "nlevels", ",", "pad_mode", "=", "self", ".", "pad...
Inverse wavelet transform. Returns ------- inverse : `WaveletTransformInverse` See Also -------- adjoint
[ "Inverse", "wavelet", "transform", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/wavelet.py#L456-L470
231,860
odlgroup/odl
odl/trafos/wavelet.py
WaveletTransformInverse._call
def _call(self, coeffs): """Return the inverse wavelet transform of ``coeffs``.""" if self.impl == 'pywt': coeffs = pywt.unravel_coeffs(coeffs, coeff_slices=self._coeff_slices, coeff_shapes=self._coeff_shapes, output_format='wavedecn') recon = pywt.waverecn( coeffs, wavelet=self.pywt_wavelet, mode=self.pywt_pad_mode, axes=self.axes) recon_shape = self.range.shape if recon.shape != recon_shape: # If the original shape was odd along any transformed axes it # will have been rounded up to the next even size after the # reconstruction. The extra sample should be discarded. # The underlying reason is decimation by two in reconstruction # must keep ceil(N/2) samples in each band for perfect # reconstruction. Reconstruction then upsamples by two. # When N is odd, (2 * np.ceil(N/2)) != N. recon_slc = [] for i, (n_recon, n_intended) in enumerate(zip(recon.shape, recon_shape)): if n_recon == n_intended + 1: # Upsampling added one entry too much in this axis, # drop last one recon_slc.append(slice(-1)) elif n_recon == n_intended: recon_slc.append(slice(None)) else: raise ValueError( 'in axis {}: expected size {} or {} in ' '`recon_shape`, got {}' ''.format(i, n_recon - 1, n_recon, n_intended)) recon = recon[tuple(recon_slc)] return recon else: raise RuntimeError("bad `impl` '{}'".format(self.impl))
python
def _call(self, coeffs): if self.impl == 'pywt': coeffs = pywt.unravel_coeffs(coeffs, coeff_slices=self._coeff_slices, coeff_shapes=self._coeff_shapes, output_format='wavedecn') recon = pywt.waverecn( coeffs, wavelet=self.pywt_wavelet, mode=self.pywt_pad_mode, axes=self.axes) recon_shape = self.range.shape if recon.shape != recon_shape: # If the original shape was odd along any transformed axes it # will have been rounded up to the next even size after the # reconstruction. The extra sample should be discarded. # The underlying reason is decimation by two in reconstruction # must keep ceil(N/2) samples in each band for perfect # reconstruction. Reconstruction then upsamples by two. # When N is odd, (2 * np.ceil(N/2)) != N. recon_slc = [] for i, (n_recon, n_intended) in enumerate(zip(recon.shape, recon_shape)): if n_recon == n_intended + 1: # Upsampling added one entry too much in this axis, # drop last one recon_slc.append(slice(-1)) elif n_recon == n_intended: recon_slc.append(slice(None)) else: raise ValueError( 'in axis {}: expected size {} or {} in ' '`recon_shape`, got {}' ''.format(i, n_recon - 1, n_recon, n_intended)) recon = recon[tuple(recon_slc)] return recon else: raise RuntimeError("bad `impl` '{}'".format(self.impl))
[ "def", "_call", "(", "self", ",", "coeffs", ")", ":", "if", "self", ".", "impl", "==", "'pywt'", ":", "coeffs", "=", "pywt", ".", "unravel_coeffs", "(", "coeffs", ",", "coeff_slices", "=", "self", ".", "_coeff_slices", ",", "coeff_shapes", "=", "self", ...
Return the inverse wavelet transform of ``coeffs``.
[ "Return", "the", "inverse", "wavelet", "transform", "of", "coeffs", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/wavelet.py#L605-L642
231,861
odlgroup/odl
odl/contrib/solvers/spdhg/stochastic_primal_dual_hybrid_gradient.py
pdhg
def pdhg(x, f, g, A, tau, sigma, niter, **kwargs): """Computes a saddle point with PDHG. This algorithm is the same as "algorithm 1" in [CP2011a] but with extrapolation on the dual variable. Parameters ---------- x : primal variable This variable is both input and output of the method. f : function Functional Y -> IR_infty that has a convex conjugate with a proximal operator, i.e. f.convex_conj.proximal(sigma) : Y -> Y. g : function Functional X -> IR_infty that has a proximal operator, i.e. g.proximal(tau) : X -> X. A : function Operator A : X -> Y that possesses an adjoint: A.adjoint tau : scalar / vector / matrix Step size for primal variable. Note that the proximal operator of g has to be well-defined for this input. sigma : scalar Scalar / vector / matrix used as step size for dual variable. Note that the proximal operator related to f (see above) has to be well-defined for this input. niter : int Number of iterations Other Parameters ---------------- y: dual variable Dual variable is part of a product space z: variable Adjoint of dual variable, z = A^* y. theta : scalar Extrapolation factor. callback : callable Function called with the current iterate after each iteration. References ---------- [CP2011a] Chambolle, A and Pock, T. *A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging*. Journal of Mathematical Imaging and Vision, 40 (2011), pp 120-145. """ def fun_select(k): return [0] f = odl.solvers.SeparableSum(f) A = odl.BroadcastOperator(A, 1) # Dual variable y = kwargs.pop('y', None) if y is None: y_new = None else: y_new = A.range.element([y]) spdhg_generic(x, f, g, A, tau, [sigma], niter, fun_select, y=y_new, **kwargs) if y is not None: y.assign(y_new[0])
python
def pdhg(x, f, g, A, tau, sigma, niter, **kwargs): def fun_select(k): return [0] f = odl.solvers.SeparableSum(f) A = odl.BroadcastOperator(A, 1) # Dual variable y = kwargs.pop('y', None) if y is None: y_new = None else: y_new = A.range.element([y]) spdhg_generic(x, f, g, A, tau, [sigma], niter, fun_select, y=y_new, **kwargs) if y is not None: y.assign(y_new[0])
[ "def", "pdhg", "(", "x", ",", "f", ",", "g", ",", "A", ",", "tau", ",", "sigma", ",", "niter", ",", "*", "*", "kwargs", ")", ":", "def", "fun_select", "(", "k", ")", ":", "return", "[", "0", "]", "f", "=", "odl", ".", "solvers", ".", "Separ...
Computes a saddle point with PDHG. This algorithm is the same as "algorithm 1" in [CP2011a] but with extrapolation on the dual variable. Parameters ---------- x : primal variable This variable is both input and output of the method. f : function Functional Y -> IR_infty that has a convex conjugate with a proximal operator, i.e. f.convex_conj.proximal(sigma) : Y -> Y. g : function Functional X -> IR_infty that has a proximal operator, i.e. g.proximal(tau) : X -> X. A : function Operator A : X -> Y that possesses an adjoint: A.adjoint tau : scalar / vector / matrix Step size for primal variable. Note that the proximal operator of g has to be well-defined for this input. sigma : scalar Scalar / vector / matrix used as step size for dual variable. Note that the proximal operator related to f (see above) has to be well-defined for this input. niter : int Number of iterations Other Parameters ---------------- y: dual variable Dual variable is part of a product space z: variable Adjoint of dual variable, z = A^* y. theta : scalar Extrapolation factor. callback : callable Function called with the current iterate after each iteration. References ---------- [CP2011a] Chambolle, A and Pock, T. *A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging*. Journal of Mathematical Imaging and Vision, 40 (2011), pp 120-145.
[ "Computes", "a", "saddle", "point", "with", "PDHG", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/solvers/spdhg/stochastic_primal_dual_hybrid_gradient.py#L19-L84
231,862
odlgroup/odl
odl/contrib/solvers/spdhg/stochastic_primal_dual_hybrid_gradient.py
da_spdhg
def da_spdhg(x, f, g, A, tau, sigma_tilde, niter, mu, **kwargs): r"""Computes a saddle point with a PDHG and dual acceleration. It therefore requires the functionals f*_i to be mu[i] strongly convex. Parameters ---------- x : primal variable This variable is both input and output of the method. f : functions Functionals Y[i] -> IR_infty that all have a convex conjugate with a proximal operator, i.e. f[i].convex_conj.proximal(sigma[i]) : Y[i] -> Y[i]. g : function Functional X -> IR_infty that has a proximal operator, i.e. g.proximal(tau) : X -> X. A : functions Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint tau : scalar Initial step size for primal variable. sigma_tilde : scalar Related to initial step size for dual variable. niter : int Number of iterations mu: list List of strong convexity constants of f*, i.e. mu[i] is the strong convexity constant of f*[i]. Other Parameters ---------------- y: dual variable Dual variable is part of a product space z: variable Adjoint of dual variable, z = A^* y. prob: list List of probabilities that an index i is selected each iteration. By default this is uniform serial sampling, p_i = 1/n. fun_select : function Function that selects blocks at every iteration IN -> {1,...,n}. By default this is serial sampling, fun_select(k) selects an index i \in {1,...,n} with probability p_i. extra: list List of local extrapolation paramters for every index i. By default extra_i = 1 / p_i. callback : callable, optional Function called with the current iterate after each iteration. References ---------- [CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb, *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017). 
""" # Callback object callback = kwargs.pop('callback', None) if callback is not None and not callable(callback): raise TypeError('`callback` {} is not callable' ''.format(callback)) # Probabilities prob = kwargs.pop('prob', None) if prob is None: prob = [1 / len(A)] * len(A) # Selection function fun_select = kwargs.pop('fun_select', None) if fun_select is None: def fun_select(x): return [int(np.random.choice(len(A), 1, p=prob))] # Dual variable y = kwargs.pop('y', None) if y is None: y = A.range.zero() # Adjoint of dual variable z = kwargs.pop('z', None) if z is None and y.norm() == 0: z = A.domain.zero() # Extrapolation extra = kwargs.pop('extra', None) if extra is None: extra = [1 / p for p in prob] # Initialize variables z_relax = z.copy() dz = A.domain.element() y_old = A.range.element() # Save proximal operators prox_dual = [fi.convex_conj.proximal for fi in f] prox_primal = g.proximal # run the iterations for k in range(niter): # select block selected = fun_select(k) # update extrapolation parameter theta theta = float(1 / np.sqrt(1 + 2 * sigma_tilde)) # update primal variable # tmp = x - tau * z_relax; z_relax used as tmp variable z_relax.lincomb(1, x, -tau, z_relax) # x = prox(tmp) prox_primal(tau)(z_relax, out=x) # update dual variable and z, z_relax z_relax.assign(z) for i in selected: # compute the step sizes sigma_i based on sigma_tilde sigma_i = sigma_tilde / ( mu[i] * (prob[i] - 2 * (1 - prob[i]) * sigma_tilde)) # save old yi y_old[i].assign(y[i]) # tmp = Ai(x) A[i](x, out=y[i]) # tmp = y_old + sigma_i * Ai(x) y[i].lincomb(1, y_old[i], sigma_i, y[i]) # yi++ = fi*.prox_sigmai(yi) prox_dual[i](sigma_i)(y[i], out=y[i]) # update adjoint of dual variable y_old[i].lincomb(-1, y_old[i], 1, y[i]) A[i].adjoint(y_old[i], out=dz) z += dz # compute extrapolation z_relax.lincomb(1, z_relax, 1 + theta * extra[i], dz) # update the step sizes tau and sigma_tilde for acceleration sigma_tilde *= theta tau /= theta if callback is not None: callback([x, y])
python
def da_spdhg(x, f, g, A, tau, sigma_tilde, niter, mu, **kwargs): r"""Computes a saddle point with a PDHG and dual acceleration. It therefore requires the functionals f*_i to be mu[i] strongly convex. Parameters ---------- x : primal variable This variable is both input and output of the method. f : functions Functionals Y[i] -> IR_infty that all have a convex conjugate with a proximal operator, i.e. f[i].convex_conj.proximal(sigma[i]) : Y[i] -> Y[i]. g : function Functional X -> IR_infty that has a proximal operator, i.e. g.proximal(tau) : X -> X. A : functions Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint tau : scalar Initial step size for primal variable. sigma_tilde : scalar Related to initial step size for dual variable. niter : int Number of iterations mu: list List of strong convexity constants of f*, i.e. mu[i] is the strong convexity constant of f*[i]. Other Parameters ---------------- y: dual variable Dual variable is part of a product space z: variable Adjoint of dual variable, z = A^* y. prob: list List of probabilities that an index i is selected each iteration. By default this is uniform serial sampling, p_i = 1/n. fun_select : function Function that selects blocks at every iteration IN -> {1,...,n}. By default this is serial sampling, fun_select(k) selects an index i \in {1,...,n} with probability p_i. extra: list List of local extrapolation paramters for every index i. By default extra_i = 1 / p_i. callback : callable, optional Function called with the current iterate after each iteration. References ---------- [CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb, *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017). 
""" # Callback object callback = kwargs.pop('callback', None) if callback is not None and not callable(callback): raise TypeError('`callback` {} is not callable' ''.format(callback)) # Probabilities prob = kwargs.pop('prob', None) if prob is None: prob = [1 / len(A)] * len(A) # Selection function fun_select = kwargs.pop('fun_select', None) if fun_select is None: def fun_select(x): return [int(np.random.choice(len(A), 1, p=prob))] # Dual variable y = kwargs.pop('y', None) if y is None: y = A.range.zero() # Adjoint of dual variable z = kwargs.pop('z', None) if z is None and y.norm() == 0: z = A.domain.zero() # Extrapolation extra = kwargs.pop('extra', None) if extra is None: extra = [1 / p for p in prob] # Initialize variables z_relax = z.copy() dz = A.domain.element() y_old = A.range.element() # Save proximal operators prox_dual = [fi.convex_conj.proximal for fi in f] prox_primal = g.proximal # run the iterations for k in range(niter): # select block selected = fun_select(k) # update extrapolation parameter theta theta = float(1 / np.sqrt(1 + 2 * sigma_tilde)) # update primal variable # tmp = x - tau * z_relax; z_relax used as tmp variable z_relax.lincomb(1, x, -tau, z_relax) # x = prox(tmp) prox_primal(tau)(z_relax, out=x) # update dual variable and z, z_relax z_relax.assign(z) for i in selected: # compute the step sizes sigma_i based on sigma_tilde sigma_i = sigma_tilde / ( mu[i] * (prob[i] - 2 * (1 - prob[i]) * sigma_tilde)) # save old yi y_old[i].assign(y[i]) # tmp = Ai(x) A[i](x, out=y[i]) # tmp = y_old + sigma_i * Ai(x) y[i].lincomb(1, y_old[i], sigma_i, y[i]) # yi++ = fi*.prox_sigmai(yi) prox_dual[i](sigma_i)(y[i], out=y[i]) # update adjoint of dual variable y_old[i].lincomb(-1, y_old[i], 1, y[i]) A[i].adjoint(y_old[i], out=dz) z += dz # compute extrapolation z_relax.lincomb(1, z_relax, 1 + theta * extra[i], dz) # update the step sizes tau and sigma_tilde for acceleration sigma_tilde *= theta tau /= theta if callback is not None: callback([x, y])
[ "def", "da_spdhg", "(", "x", ",", "f", ",", "g", ",", "A", ",", "tau", ",", "sigma_tilde", ",", "niter", ",", "mu", ",", "*", "*", "kwargs", ")", ":", "# Callback object", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "None", ")", ...
r"""Computes a saddle point with a PDHG and dual acceleration. It therefore requires the functionals f*_i to be mu[i] strongly convex. Parameters ---------- x : primal variable This variable is both input and output of the method. f : functions Functionals Y[i] -> IR_infty that all have a convex conjugate with a proximal operator, i.e. f[i].convex_conj.proximal(sigma[i]) : Y[i] -> Y[i]. g : function Functional X -> IR_infty that has a proximal operator, i.e. g.proximal(tau) : X -> X. A : functions Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint tau : scalar Initial step size for primal variable. sigma_tilde : scalar Related to initial step size for dual variable. niter : int Number of iterations mu: list List of strong convexity constants of f*, i.e. mu[i] is the strong convexity constant of f*[i]. Other Parameters ---------------- y: dual variable Dual variable is part of a product space z: variable Adjoint of dual variable, z = A^* y. prob: list List of probabilities that an index i is selected each iteration. By default this is uniform serial sampling, p_i = 1/n. fun_select : function Function that selects blocks at every iteration IN -> {1,...,n}. By default this is serial sampling, fun_select(k) selects an index i \in {1,...,n} with probability p_i. extra: list List of local extrapolation paramters for every index i. By default extra_i = 1 / p_i. callback : callable, optional Function called with the current iterate after each iteration. References ---------- [CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb, *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
[ "r", "Computes", "a", "saddle", "point", "with", "a", "PDHG", "and", "dual", "acceleration", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/solvers/spdhg/stochastic_primal_dual_hybrid_gradient.py#L410-L552
231,863
odlgroup/odl
odl/set/space.py
LinearSpace.dist
def dist(self, x1, x2): """Return the distance between ``x1`` and ``x2``. Parameters ---------- x1, x2 : `LinearSpaceElement` Elements whose distance to compute. Returns ------- dist : float Distance between ``x1`` and ``x2``. """ if x1 not in self: raise LinearSpaceTypeError('`x1` {!r} is not an element of ' '{!r}'.format(x1, self)) if x2 not in self: raise LinearSpaceTypeError('`x2` {!r} is not an element of ' '{!r}'.format(x2, self)) return float(self._dist(x1, x2))
python
def dist(self, x1, x2): if x1 not in self: raise LinearSpaceTypeError('`x1` {!r} is not an element of ' '{!r}'.format(x1, self)) if x2 not in self: raise LinearSpaceTypeError('`x2` {!r} is not an element of ' '{!r}'.format(x2, self)) return float(self._dist(x1, x2))
[ "def", "dist", "(", "self", ",", "x1", ",", "x2", ")", ":", "if", "x1", "not", "in", "self", ":", "raise", "LinearSpaceTypeError", "(", "'`x1` {!r} is not an element of '", "'{!r}'", ".", "format", "(", "x1", ",", "self", ")", ")", "if", "x2", "not", "...
Return the distance between ``x1`` and ``x2``. Parameters ---------- x1, x2 : `LinearSpaceElement` Elements whose distance to compute. Returns ------- dist : float Distance between ``x1`` and ``x2``.
[ "Return", "the", "distance", "between", "x1", "and", "x2", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/space.py#L228-L247
231,864
odlgroup/odl
odl/set/space.py
LinearSpace.inner
def inner(self, x1, x2): """Return the inner product of ``x1`` and ``x2``. Parameters ---------- x1, x2 : `LinearSpaceElement` Elements whose inner product to compute. Returns ------- inner : `LinearSpace.field` element Inner product of ``x1`` and ``x2``. """ if x1 not in self: raise LinearSpaceTypeError('`x1` {!r} is not an element of ' '{!r}'.format(x1, self)) if x2 not in self: raise LinearSpaceTypeError('`x2` {!r} is not an element of ' '{!r}'.format(x2, self)) inner = self._inner(x1, x2) if self.field is None: return inner else: return self.field.element(self._inner(x1, x2))
python
def inner(self, x1, x2): if x1 not in self: raise LinearSpaceTypeError('`x1` {!r} is not an element of ' '{!r}'.format(x1, self)) if x2 not in self: raise LinearSpaceTypeError('`x2` {!r} is not an element of ' '{!r}'.format(x2, self)) inner = self._inner(x1, x2) if self.field is None: return inner else: return self.field.element(self._inner(x1, x2))
[ "def", "inner", "(", "self", ",", "x1", ",", "x2", ")", ":", "if", "x1", "not", "in", "self", ":", "raise", "LinearSpaceTypeError", "(", "'`x1` {!r} is not an element of '", "'{!r}'", ".", "format", "(", "x1", ",", "self", ")", ")", "if", "x2", "not", ...
Return the inner product of ``x1`` and ``x2``. Parameters ---------- x1, x2 : `LinearSpaceElement` Elements whose inner product to compute. Returns ------- inner : `LinearSpace.field` element Inner product of ``x1`` and ``x2``.
[ "Return", "the", "inner", "product", "of", "x1", "and", "x2", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/space.py#L267-L290
231,865
odlgroup/odl
odl/set/space.py
LinearSpace.multiply
def multiply(self, x1, x2, out=None): """Return the pointwise product of ``x1`` and ``x2``. Parameters ---------- x1, x2 : `LinearSpaceElement` Multiplicands in the product. out : `LinearSpaceElement`, optional Element to which the result is written. Returns ------- out : `LinearSpaceElement` Product of the elements. If ``out`` was provided, the returned object is a reference to it. """ if out is None: out = self.element() if out not in self: raise LinearSpaceTypeError('`out` {!r} is not an element of ' '{!r}'.format(out, self)) if x1 not in self: raise LinearSpaceTypeError('`x1` {!r} is not an element of ' '{!r}'.format(x1, self)) if x2 not in self: raise LinearSpaceTypeError('`x2` {!r} is not an element of ' '{!r}'.format(x2, self)) self._multiply(x1, x2, out) return out
python
def multiply(self, x1, x2, out=None): if out is None: out = self.element() if out not in self: raise LinearSpaceTypeError('`out` {!r} is not an element of ' '{!r}'.format(out, self)) if x1 not in self: raise LinearSpaceTypeError('`x1` {!r} is not an element of ' '{!r}'.format(x1, self)) if x2 not in self: raise LinearSpaceTypeError('`x2` {!r} is not an element of ' '{!r}'.format(x2, self)) self._multiply(x1, x2, out) return out
[ "def", "multiply", "(", "self", ",", "x1", ",", "x2", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "self", ".", "element", "(", ")", "if", "out", "not", "in", "self", ":", "raise", "LinearSpaceTypeError", "(", "'...
Return the pointwise product of ``x1`` and ``x2``. Parameters ---------- x1, x2 : `LinearSpaceElement` Multiplicands in the product. out : `LinearSpaceElement`, optional Element to which the result is written. Returns ------- out : `LinearSpaceElement` Product of the elements. If ``out`` was provided, the returned object is a reference to it.
[ "Return", "the", "pointwise", "product", "of", "x1", "and", "x2", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/space.py#L292-L321
231,866
odlgroup/odl
odl/set/space.py
LinearSpace.divide
def divide(self, x1, x2, out=None): """Return the pointwise quotient of ``x1`` and ``x2`` Parameters ---------- x1 : `LinearSpaceElement` Dividend in the quotient. x2 : `LinearSpaceElement` Divisor in the quotient. out : `LinearSpaceElement`, optional Element to which the result is written. Returns ------- out : `LinearSpaceElement` Quotient of the elements. If ``out`` was provided, the returned object is a reference to it. """ if out is None: out = self.element() if out not in self: raise LinearSpaceTypeError('`out` {!r} is not an element of ' '{!r}'.format(out, self)) if x1 not in self: raise LinearSpaceTypeError('`x1` {!r} is not an element of ' '{!r}'.format(x1, self)) if x2 not in self: raise LinearSpaceTypeError('`x2` {!r} is not an element of ' '{!r}'.format(x2, self)) self._divide(x1, x2, out) return out
python
def divide(self, x1, x2, out=None): if out is None: out = self.element() if out not in self: raise LinearSpaceTypeError('`out` {!r} is not an element of ' '{!r}'.format(out, self)) if x1 not in self: raise LinearSpaceTypeError('`x1` {!r} is not an element of ' '{!r}'.format(x1, self)) if x2 not in self: raise LinearSpaceTypeError('`x2` {!r} is not an element of ' '{!r}'.format(x2, self)) self._divide(x1, x2, out) return out
[ "def", "divide", "(", "self", ",", "x1", ",", "x2", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "self", ".", "element", "(", ")", "if", "out", "not", "in", "self", ":", "raise", "LinearSpaceTypeError", "(", "'`o...
Return the pointwise quotient of ``x1`` and ``x2`` Parameters ---------- x1 : `LinearSpaceElement` Dividend in the quotient. x2 : `LinearSpaceElement` Divisor in the quotient. out : `LinearSpaceElement`, optional Element to which the result is written. Returns ------- out : `LinearSpaceElement` Quotient of the elements. If ``out`` was provided, the returned object is a reference to it.
[ "Return", "the", "pointwise", "quotient", "of", "x1", "and", "x2" ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/space.py#L323-L354
231,867
odlgroup/odl
odl/trafos/backends/pywt_bindings.py
pywt_wavelet
def pywt_wavelet(wavelet): """Convert ``wavelet`` to a `pywt.Wavelet` instance.""" if isinstance(wavelet, pywt.Wavelet): return wavelet else: return pywt.Wavelet(wavelet)
python
def pywt_wavelet(wavelet): if isinstance(wavelet, pywt.Wavelet): return wavelet else: return pywt.Wavelet(wavelet)
[ "def", "pywt_wavelet", "(", "wavelet", ")", ":", "if", "isinstance", "(", "wavelet", ",", "pywt", ".", "Wavelet", ")", ":", "return", "wavelet", "else", ":", "return", "pywt", ".", "Wavelet", "(", "wavelet", ")" ]
Convert ``wavelet`` to a `pywt.Wavelet` instance.
[ "Convert", "wavelet", "to", "a", "pywt", ".", "Wavelet", "instance", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/backends/pywt_bindings.py#L46-L51
231,868
odlgroup/odl
odl/trafos/backends/pywt_bindings.py
pywt_pad_mode
def pywt_pad_mode(pad_mode, pad_const=0): """Convert ODL-style padding mode to pywt-style padding mode. Parameters ---------- pad_mode : str The ODL padding mode to use at the boundaries. pad_const : float, optional Value to use outside the signal boundaries when ``pad_mode`` is 'constant'. Only a value of 0. is supported by PyWavelets Returns ------- pad_mode_pywt : str The corresponding name of the requested padding mode in PyWavelets. See `signal extension modes`_. References ---------- .. _signal extension modes: https://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html """ pad_mode = str(pad_mode).lower() if pad_mode == 'constant' and pad_const != 0.0: raise ValueError('constant padding with constant != 0 not supported ' 'for `pywt` back-end') try: return PAD_MODES_ODL2PYWT[pad_mode] except KeyError: raise ValueError("`pad_mode` '{}' not understood".format(pad_mode))
python
def pywt_pad_mode(pad_mode, pad_const=0): pad_mode = str(pad_mode).lower() if pad_mode == 'constant' and pad_const != 0.0: raise ValueError('constant padding with constant != 0 not supported ' 'for `pywt` back-end') try: return PAD_MODES_ODL2PYWT[pad_mode] except KeyError: raise ValueError("`pad_mode` '{}' not understood".format(pad_mode))
[ "def", "pywt_pad_mode", "(", "pad_mode", ",", "pad_const", "=", "0", ")", ":", "pad_mode", "=", "str", "(", "pad_mode", ")", ".", "lower", "(", ")", "if", "pad_mode", "==", "'constant'", "and", "pad_const", "!=", "0.0", ":", "raise", "ValueError", "(", ...
Convert ODL-style padding mode to pywt-style padding mode. Parameters ---------- pad_mode : str The ODL padding mode to use at the boundaries. pad_const : float, optional Value to use outside the signal boundaries when ``pad_mode`` is 'constant'. Only a value of 0. is supported by PyWavelets Returns ------- pad_mode_pywt : str The corresponding name of the requested padding mode in PyWavelets. See `signal extension modes`_. References ---------- .. _signal extension modes: https://pywavelets.readthedocs.io/en/latest/ref/signal-extension-modes.html
[ "Convert", "ODL", "-", "style", "padding", "mode", "to", "pywt", "-", "style", "padding", "mode", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/backends/pywt_bindings.py#L54-L83
231,869
odlgroup/odl
odl/trafos/backends/pywt_bindings.py
precompute_raveled_slices
def precompute_raveled_slices(coeff_shapes, axes=None): """Return slices and shapes for raveled multilevel wavelet coefficients. The output is equivalent to the ``coeff_slices`` output of `pywt.ravel_coeffs`, but this function does not require computing a wavelet transform first. Parameters ---------- coeff_shapes : array-like A list of multilevel wavelet coefficient shapes as returned by `pywt.wavedecn_shapes`. axes : sequence of ints, optional Axes over which the DWT that created ``coeffs`` was performed. The default value of None corresponds to all axes. Returns ------- coeff_slices : list List of slices corresponding to each coefficient. As a 2D example, ``coeff_arr[coeff_slices[1]['dd']]`` would extract the first level detail coefficients from ``coeff_arr``. Examples -------- >>> import pywt >>> data_shape = (64, 64) >>> coeff_shapes = pywt.wavedecn_shapes(data_shape, wavelet='db2', level=3, ... mode='periodization') >>> coeff_slices = precompute_raveled_slices(coeff_shapes) >>> print(coeff_slices[0]) # approximation coefficients slice(None, 64, None) >>> d1_coeffs = coeff_slices[-1] # first level detail coefficients >>> (d1_coeffs['ad'], d1_coeffs['da'], d1_coeffs['dd']) (slice(1024, 2048, None), slice(2048, 3072, None), slice(3072, 4096, None)) """ # initialize with the approximation coefficients. 
a_shape = coeff_shapes[0] a_size = np.prod(a_shape) if len(coeff_shapes) == 1: # only a single approximation coefficient array was found return [slice(a_size), ] a_slice = slice(a_size) # initialize list of coefficient slices coeff_slices = [] coeff_slices.append(a_slice) # loop over the detail cofficients, embedding them in coeff_arr details_list = coeff_shapes[1:] offset = a_size for shape_dict in details_list: # new dictionaries for detail coefficient slices and shapes coeff_slices.append({}) keys = sorted(shape_dict.keys()) for key in keys: shape = shape_dict[key] size = np.prod(shape) sl = slice(offset, offset + size) offset += size coeff_slices[-1][key] = sl return coeff_slices
python
def precompute_raveled_slices(coeff_shapes, axes=None): # initialize with the approximation coefficients. a_shape = coeff_shapes[0] a_size = np.prod(a_shape) if len(coeff_shapes) == 1: # only a single approximation coefficient array was found return [slice(a_size), ] a_slice = slice(a_size) # initialize list of coefficient slices coeff_slices = [] coeff_slices.append(a_slice) # loop over the detail cofficients, embedding them in coeff_arr details_list = coeff_shapes[1:] offset = a_size for shape_dict in details_list: # new dictionaries for detail coefficient slices and shapes coeff_slices.append({}) keys = sorted(shape_dict.keys()) for key in keys: shape = shape_dict[key] size = np.prod(shape) sl = slice(offset, offset + size) offset += size coeff_slices[-1][key] = sl return coeff_slices
[ "def", "precompute_raveled_slices", "(", "coeff_shapes", ",", "axes", "=", "None", ")", ":", "# initialize with the approximation coefficients.", "a_shape", "=", "coeff_shapes", "[", "0", "]", "a_size", "=", "np", ".", "prod", "(", "a_shape", ")", "if", "len", "...
Return slices and shapes for raveled multilevel wavelet coefficients. The output is equivalent to the ``coeff_slices`` output of `pywt.ravel_coeffs`, but this function does not require computing a wavelet transform first. Parameters ---------- coeff_shapes : array-like A list of multilevel wavelet coefficient shapes as returned by `pywt.wavedecn_shapes`. axes : sequence of ints, optional Axes over which the DWT that created ``coeffs`` was performed. The default value of None corresponds to all axes. Returns ------- coeff_slices : list List of slices corresponding to each coefficient. As a 2D example, ``coeff_arr[coeff_slices[1]['dd']]`` would extract the first level detail coefficients from ``coeff_arr``. Examples -------- >>> import pywt >>> data_shape = (64, 64) >>> coeff_shapes = pywt.wavedecn_shapes(data_shape, wavelet='db2', level=3, ... mode='periodization') >>> coeff_slices = precompute_raveled_slices(coeff_shapes) >>> print(coeff_slices[0]) # approximation coefficients slice(None, 64, None) >>> d1_coeffs = coeff_slices[-1] # first level detail coefficients >>> (d1_coeffs['ad'], d1_coeffs['da'], d1_coeffs['dd']) (slice(1024, 2048, None), slice(2048, 3072, None), slice(3072, 4096, None))
[ "Return", "slices", "and", "shapes", "for", "raveled", "multilevel", "wavelet", "coefficients", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/backends/pywt_bindings.py#L86-L149
231,870
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
combine_proximals
def combine_proximals(*factory_list): r"""Combine proximal operators into a diagonal product space operator. This assumes the functional to be separable across variables in order to make use of the separable sum property of proximal operators. Parameters ---------- factory_list : sequence of callables Proximal operator factories to be combined. Returns ------- diag_op : function Returns a diagonal product space operator factory to be initialized with the same step size parameter Notes ----- That two functionals :math:`F` and :math:`G` are separable across variables means that :math:`F((x, y)) = F(x)` and :math:`G((x, y)) = G(y)`, and in this case the proximal operator of the sum is given by .. math:: \mathrm{prox}_{\sigma (F(x) + G(y))}(x, y) = (\mathrm{prox}_{\sigma F}(x), \mathrm{prox}_{\sigma G}(y)). """ def diag_op_factory(sigma): """Diagonal matrix of operators. Parameters ---------- sigma : positive float or sequence of positive floats Step size parameter(s), if a sequence, the length must match the length of the ``factory_list``. Returns ------- diag_op : `DiagonalOperator` """ if np.isscalar(sigma): sigma = [sigma] * len(factory_list) return DiagonalOperator( *[factory(sigmai) for sigmai, factory in zip(sigma, factory_list)]) return diag_op_factory
python
def combine_proximals(*factory_list): r"""Combine proximal operators into a diagonal product space operator. This assumes the functional to be separable across variables in order to make use of the separable sum property of proximal operators. Parameters ---------- factory_list : sequence of callables Proximal operator factories to be combined. Returns ------- diag_op : function Returns a diagonal product space operator factory to be initialized with the same step size parameter Notes ----- That two functionals :math:`F` and :math:`G` are separable across variables means that :math:`F((x, y)) = F(x)` and :math:`G((x, y)) = G(y)`, and in this case the proximal operator of the sum is given by .. math:: \mathrm{prox}_{\sigma (F(x) + G(y))}(x, y) = (\mathrm{prox}_{\sigma F}(x), \mathrm{prox}_{\sigma G}(y)). """ def diag_op_factory(sigma): """Diagonal matrix of operators. Parameters ---------- sigma : positive float or sequence of positive floats Step size parameter(s), if a sequence, the length must match the length of the ``factory_list``. Returns ------- diag_op : `DiagonalOperator` """ if np.isscalar(sigma): sigma = [sigma] * len(factory_list) return DiagonalOperator( *[factory(sigmai) for sigmai, factory in zip(sigma, factory_list)]) return diag_op_factory
[ "def", "combine_proximals", "(", "*", "factory_list", ")", ":", "def", "diag_op_factory", "(", "sigma", ")", ":", "\"\"\"Diagonal matrix of operators.\n\n Parameters\n ----------\n sigma : positive float or sequence of positive floats\n Step size parameter(s...
r"""Combine proximal operators into a diagonal product space operator. This assumes the functional to be separable across variables in order to make use of the separable sum property of proximal operators. Parameters ---------- factory_list : sequence of callables Proximal operator factories to be combined. Returns ------- diag_op : function Returns a diagonal product space operator factory to be initialized with the same step size parameter Notes ----- That two functionals :math:`F` and :math:`G` are separable across variables means that :math:`F((x, y)) = F(x)` and :math:`G((x, y)) = G(y)`, and in this case the proximal operator of the sum is given by .. math:: \mathrm{prox}_{\sigma (F(x) + G(y))}(x, y) = (\mathrm{prox}_{\sigma F}(x), \mathrm{prox}_{\sigma G}(y)).
[ "r", "Combine", "proximal", "operators", "into", "a", "diagonal", "product", "space", "operator", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L48-L95
231,871
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
proximal_convex_conj
def proximal_convex_conj(prox_factory): r"""Calculate the proximal of the dual using Moreau decomposition. Parameters ---------- prox_factory : callable A factory function that, when called with a step size, returns the proximal operator of ``F`` Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- The Moreau identity states that for any convex function :math:`F` with convex conjugate :math:`F^*`, the proximals satisfy .. math:: \mathrm{prox}_{\sigma F^*}(x) +\sigma \, \mathrm{prox}_{F / \sigma}(x / \sigma) = x where :math:`\sigma` is a scalar step size. Using this, the proximal of the convex conjugate is given by .. math:: \mathrm{prox}_{\sigma F^*}(x) = x - \sigma \, \mathrm{prox}_{F / \sigma}(x / \sigma) Note that since :math:`(F^*)^* = F`, this can be used to get the proximal of the original function from the proximal of the convex conjugate. For reference on the Moreau identity, see [CP2011c]. References ---------- [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting methods in signal processing.* In: Bauschke, H H, Burachik, R S, Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point algorithms for inverse problems in science and engineering, Springer, 2011. """ def convex_conj_prox_factory(sigma): """Create proximal for the dual with a given sigma. Parameters ---------- sigma : positive float or array-like Step size parameter. Can be a pointwise positive space element or a sequence of positive floats if `prox_factory` supports that. Returns ------- proximal : `Operator` The proximal operator of ``s * F^*`` where ``s`` is the step size """ # Get the underlying space. At the same time, check if the given # prox_factory accepts stepsize objects of the type given by sigma. 
space = prox_factory(sigma).domain mult_inner = MultiplyOperator(1.0 / sigma, domain=space, range=space) mult_outer = MultiplyOperator(sigma, domain=space, range=space) result = (IdentityOperator(space) - mult_outer * prox_factory(1.0 / sigma) * mult_inner) return result return convex_conj_prox_factory
python
def proximal_convex_conj(prox_factory): r"""Calculate the proximal of the dual using Moreau decomposition. Parameters ---------- prox_factory : callable A factory function that, when called with a step size, returns the proximal operator of ``F`` Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- The Moreau identity states that for any convex function :math:`F` with convex conjugate :math:`F^*`, the proximals satisfy .. math:: \mathrm{prox}_{\sigma F^*}(x) +\sigma \, \mathrm{prox}_{F / \sigma}(x / \sigma) = x where :math:`\sigma` is a scalar step size. Using this, the proximal of the convex conjugate is given by .. math:: \mathrm{prox}_{\sigma F^*}(x) = x - \sigma \, \mathrm{prox}_{F / \sigma}(x / \sigma) Note that since :math:`(F^*)^* = F`, this can be used to get the proximal of the original function from the proximal of the convex conjugate. For reference on the Moreau identity, see [CP2011c]. References ---------- [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting methods in signal processing.* In: Bauschke, H H, Burachik, R S, Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point algorithms for inverse problems in science and engineering, Springer, 2011. """ def convex_conj_prox_factory(sigma): """Create proximal for the dual with a given sigma. Parameters ---------- sigma : positive float or array-like Step size parameter. Can be a pointwise positive space element or a sequence of positive floats if `prox_factory` supports that. Returns ------- proximal : `Operator` The proximal operator of ``s * F^*`` where ``s`` is the step size """ # Get the underlying space. At the same time, check if the given # prox_factory accepts stepsize objects of the type given by sigma. 
space = prox_factory(sigma).domain mult_inner = MultiplyOperator(1.0 / sigma, domain=space, range=space) mult_outer = MultiplyOperator(sigma, domain=space, range=space) result = (IdentityOperator(space) - mult_outer * prox_factory(1.0 / sigma) * mult_inner) return result return convex_conj_prox_factory
[ "def", "proximal_convex_conj", "(", "prox_factory", ")", ":", "def", "convex_conj_prox_factory", "(", "sigma", ")", ":", "\"\"\"Create proximal for the dual with a given sigma.\n\n Parameters\n ----------\n sigma : positive float or array-like\n Step size para...
r"""Calculate the proximal of the dual using Moreau decomposition. Parameters ---------- prox_factory : callable A factory function that, when called with a step size, returns the proximal operator of ``F`` Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- The Moreau identity states that for any convex function :math:`F` with convex conjugate :math:`F^*`, the proximals satisfy .. math:: \mathrm{prox}_{\sigma F^*}(x) +\sigma \, \mathrm{prox}_{F / \sigma}(x / \sigma) = x where :math:`\sigma` is a scalar step size. Using this, the proximal of the convex conjugate is given by .. math:: \mathrm{prox}_{\sigma F^*}(x) = x - \sigma \, \mathrm{prox}_{F / \sigma}(x / \sigma) Note that since :math:`(F^*)^* = F`, this can be used to get the proximal of the original function from the proximal of the convex conjugate. For reference on the Moreau identity, see [CP2011c]. References ---------- [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting methods in signal processing.* In: Bauschke, H H, Burachik, R S, Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point algorithms for inverse problems in science and engineering, Springer, 2011.
[ "r", "Calculate", "the", "proximal", "of", "the", "dual", "using", "Moreau", "decomposition", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L98-L166
231,872
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
proximal_composition
def proximal_composition(proximal, operator, mu): r"""Proximal operator factory of functional composed with unitary operator. For a functional ``F`` and a linear unitary `Operator` ``L`` this is the factory for the proximal operator of ``F * L``. Parameters ---------- proximal : callable A factory function that, when called with a step size returns the proximal operator of ``F`` operator : `Operator` The operator to compose the functional with mu : ``operator.field`` element Scalar such that ``(operator.adjoint * operator)(x) = mu * x`` Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- Given a linear operator :math:`L` with the property that for a scalar :math:`\mu` .. math:: L^*(L(x)) = \mu * x and a convex function :math:`F`, the following identity holds .. math:: \mathrm{prox}_{\sigma F \circ L}(x) = x + \frac{1}{\mu} L^* \left( \mathrm{prox}_{\mu \sigma F}(Lx) - Lx \right) This factory function implements this functionality. There is no simple formula for more general operators. The function cannot verify that the operator is unitary, the user needs to verify this. For reference on the identity used, see [CP2011c]. References ---------- [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting methods in signal processing.* In: Bauschke, H H, Burachik, R S, Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point algorithms for inverse problems in science and engineering, Springer, 2011. """ def proximal_composition_factory(sigma): """Create proximal for the dual with a given sigma Parameters ---------- sigma : positive float Step size parameter Returns ------- proximal : `Operator` The proximal operator of ``prox[sigma * F * L](x)`` """ Id = IdentityOperator(operator.domain) Ir = IdentityOperator(operator.range) prox_muf = proximal(mu * sigma) return (Id + (1.0 / mu) * operator.adjoint * ((prox_muf - Ir) * operator)) return proximal_composition_factory
python
def proximal_composition(proximal, operator, mu): r"""Proximal operator factory of functional composed with unitary operator. For a functional ``F`` and a linear unitary `Operator` ``L`` this is the factory for the proximal operator of ``F * L``. Parameters ---------- proximal : callable A factory function that, when called with a step size returns the proximal operator of ``F`` operator : `Operator` The operator to compose the functional with mu : ``operator.field`` element Scalar such that ``(operator.adjoint * operator)(x) = mu * x`` Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- Given a linear operator :math:`L` with the property that for a scalar :math:`\mu` .. math:: L^*(L(x)) = \mu * x and a convex function :math:`F`, the following identity holds .. math:: \mathrm{prox}_{\sigma F \circ L}(x) = x + \frac{1}{\mu} L^* \left( \mathrm{prox}_{\mu \sigma F}(Lx) - Lx \right) This factory function implements this functionality. There is no simple formula for more general operators. The function cannot verify that the operator is unitary, the user needs to verify this. For reference on the identity used, see [CP2011c]. References ---------- [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting methods in signal processing.* In: Bauschke, H H, Burachik, R S, Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point algorithms for inverse problems in science and engineering, Springer, 2011. """ def proximal_composition_factory(sigma): """Create proximal for the dual with a given sigma Parameters ---------- sigma : positive float Step size parameter Returns ------- proximal : `Operator` The proximal operator of ``prox[sigma * F * L](x)`` """ Id = IdentityOperator(operator.domain) Ir = IdentityOperator(operator.range) prox_muf = proximal(mu * sigma) return (Id + (1.0 / mu) * operator.adjoint * ((prox_muf - Ir) * operator)) return proximal_composition_factory
[ "def", "proximal_composition", "(", "proximal", ",", "operator", ",", "mu", ")", ":", "def", "proximal_composition_factory", "(", "sigma", ")", ":", "\"\"\"Create proximal for the dual with a given sigma\n\n Parameters\n ----------\n sigma : positive float\n ...
r"""Proximal operator factory of functional composed with unitary operator. For a functional ``F`` and a linear unitary `Operator` ``L`` this is the factory for the proximal operator of ``F * L``. Parameters ---------- proximal : callable A factory function that, when called with a step size returns the proximal operator of ``F`` operator : `Operator` The operator to compose the functional with mu : ``operator.field`` element Scalar such that ``(operator.adjoint * operator)(x) = mu * x`` Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- Given a linear operator :math:`L` with the property that for a scalar :math:`\mu` .. math:: L^*(L(x)) = \mu * x and a convex function :math:`F`, the following identity holds .. math:: \mathrm{prox}_{\sigma F \circ L}(x) = x + \frac{1}{\mu} L^* \left( \mathrm{prox}_{\mu \sigma F}(Lx) - Lx \right) This factory function implements this functionality. There is no simple formula for more general operators. The function cannot verify that the operator is unitary, the user needs to verify this. For reference on the identity used, see [CP2011c]. References ---------- [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting methods in signal processing.* In: Bauschke, H H, Burachik, R S, Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point algorithms for inverse problems in science and engineering, Springer, 2011.
[ "r", "Proximal", "operator", "factory", "of", "functional", "composed", "with", "unitary", "operator", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L403-L474
231,873
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
proximal_convex_conj_l2_squared
def proximal_convex_conj_l2_squared(space, lam=1, g=None): r"""Proximal operator factory of the convex conj of the squared l2-dist Function for the proximal operator of the convex conjugate of the functional F where F is the l2-norm (or distance to g, if given):: F(x) = lam ||x - g||_2^2 with x and g elements in ``space``, scaling factor lam, and given data g. Parameters ---------- space : `LinearSpace` Domain of F(x). Needs to be a Hilbert space. That is, have an inner product (`LinearSpace.inner`). lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional An element in ``space``. Default: ``space.zero``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- The squared :math:`L_2`-norm/distance :math:`F` is given by .. math:: F(x) = \lambda \|x - g\|_2^2. The convex conjugate :math:`F^*` of :math:`F` is given by .. math:: F^*(y) = \frac{1}{4\lambda} \left( \| y\|_2^2 + \langle y, g \rangle \right) For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F^*` is given by .. math:: \mathrm{prox}_{\sigma F^*}(y) = \frac{y - \sigma g}{1 + \sigma/(2 \lambda)} See Also -------- proximal_convex_conj_l2 : proximal without square proximal_l2_squared : proximal without convex conjugate """ lam = float(lam) if g is not None and g not in space: raise TypeError('{!r} is not an element of {!r}'.format(g, space)) class ProximalConvexConjL2Squared(Operator): """Proximal operator of the convex conj of the squared l2-norm/dist.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float or pointwise positive space.element Step size parameter. If scalar, it contains a global stepsize, otherwise the space.element defines a stepsize for each point. 
""" super(ProximalConvexConjL2Squared, self).__init__( domain=space, range=space, linear=g is None) if np.isscalar(sigma): self.sigma = float(sigma) else: self.sigma = space.element(sigma) def _call(self, x, out): """Apply the operator to ``x`` and store the result in ``out``""" # (x - sig*g) / (1 + sig/(2 lam)) sig = self.sigma if np.isscalar(sig): if g is None: out.lincomb(1 / (1 + 0.5 * sig / lam), x) else: out.lincomb(1 / (1 + 0.5 * sig / lam), x, -sig / (1 + 0.5 * sig / lam), g) elif sig in space: if g is None: x.divide(1 + 0.5 / lam * sig, out=out) else: if x is out: # Can't write to `out` since old `x` is still needed tmp = sig.multiply(g) out.lincomb(1, x, -1, tmp) else: sig.multiply(g, out=out) out.lincomb(1, x, -1, out) out.divide(1 + 0.5 / lam * sig, out=out) else: raise RuntimeError( '`sigma` is neither a scalar nor a space element.' ) return ProximalConvexConjL2Squared
python
def proximal_convex_conj_l2_squared(space, lam=1, g=None): r"""Proximal operator factory of the convex conj of the squared l2-dist Function for the proximal operator of the convex conjugate of the functional F where F is the l2-norm (or distance to g, if given):: F(x) = lam ||x - g||_2^2 with x and g elements in ``space``, scaling factor lam, and given data g. Parameters ---------- space : `LinearSpace` Domain of F(x). Needs to be a Hilbert space. That is, have an inner product (`LinearSpace.inner`). lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional An element in ``space``. Default: ``space.zero``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- The squared :math:`L_2`-norm/distance :math:`F` is given by .. math:: F(x) = \lambda \|x - g\|_2^2. The convex conjugate :math:`F^*` of :math:`F` is given by .. math:: F^*(y) = \frac{1}{4\lambda} \left( \| y\|_2^2 + \langle y, g \rangle \right) For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F^*` is given by .. math:: \mathrm{prox}_{\sigma F^*}(y) = \frac{y - \sigma g}{1 + \sigma/(2 \lambda)} See Also -------- proximal_convex_conj_l2 : proximal without square proximal_l2_squared : proximal without convex conjugate """ lam = float(lam) if g is not None and g not in space: raise TypeError('{!r} is not an element of {!r}'.format(g, space)) class ProximalConvexConjL2Squared(Operator): """Proximal operator of the convex conj of the squared l2-norm/dist.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float or pointwise positive space.element Step size parameter. If scalar, it contains a global stepsize, otherwise the space.element defines a stepsize for each point. 
""" super(ProximalConvexConjL2Squared, self).__init__( domain=space, range=space, linear=g is None) if np.isscalar(sigma): self.sigma = float(sigma) else: self.sigma = space.element(sigma) def _call(self, x, out): """Apply the operator to ``x`` and store the result in ``out``""" # (x - sig*g) / (1 + sig/(2 lam)) sig = self.sigma if np.isscalar(sig): if g is None: out.lincomb(1 / (1 + 0.5 * sig / lam), x) else: out.lincomb(1 / (1 + 0.5 * sig / lam), x, -sig / (1 + 0.5 * sig / lam), g) elif sig in space: if g is None: x.divide(1 + 0.5 / lam * sig, out=out) else: if x is out: # Can't write to `out` since old `x` is still needed tmp = sig.multiply(g) out.lincomb(1, x, -1, tmp) else: sig.multiply(g, out=out) out.lincomb(1, x, -1, out) out.divide(1 + 0.5 / lam * sig, out=out) else: raise RuntimeError( '`sigma` is neither a scalar nor a space element.' ) return ProximalConvexConjL2Squared
[ "def", "proximal_convex_conj_l2_squared", "(", "space", ",", "lam", "=", "1", ",", "g", "=", "None", ")", ":", "lam", "=", "float", "(", "lam", ")", "if", "g", "is", "not", "None", "and", "g", "not", "in", "space", ":", "raise", "TypeError", "(", "...
r"""Proximal operator factory of the convex conj of the squared l2-dist Function for the proximal operator of the convex conjugate of the functional F where F is the l2-norm (or distance to g, if given):: F(x) = lam ||x - g||_2^2 with x and g elements in ``space``, scaling factor lam, and given data g. Parameters ---------- space : `LinearSpace` Domain of F(x). Needs to be a Hilbert space. That is, have an inner product (`LinearSpace.inner`). lam : positive float, optional Scaling factor or regularization parameter. g : ``space`` element, optional An element in ``space``. Default: ``space.zero``. Returns ------- prox_factory : function Factory for the proximal operator to be initialized Notes ----- The squared :math:`L_2`-norm/distance :math:`F` is given by .. math:: F(x) = \lambda \|x - g\|_2^2. The convex conjugate :math:`F^*` of :math:`F` is given by .. math:: F^*(y) = \frac{1}{4\lambda} \left( \| y\|_2^2 + \langle y, g \rangle \right) For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F^*` is given by .. math:: \mathrm{prox}_{\sigma F^*}(y) = \frac{y - \sigma g}{1 + \sigma/(2 \lambda)} See Also -------- proximal_convex_conj_l2 : proximal without square proximal_l2_squared : proximal without convex conjugate
[ "r", "Proximal", "operator", "factory", "of", "the", "convex", "conj", "of", "the", "squared", "l2", "-", "dist" ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L814-L916
231,874
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
proximal_linfty
def proximal_linfty(space): r"""Proximal operator factory of the ``l_\infty``-norm. Function for the proximal operator of the functional ``F`` where ``F`` is the ``l_\infty``-norm:: ``F(x) = \sup_i |x_i|`` Parameters ---------- space : `LinearSpace` Domain of ``F``. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The proximal is computed by the Moreau identity and a projection onto an l1-ball [PB2014]. See Also -------- proj_l1 : projection onto l1-ball """ class ProximalLInfty(Operator): """Proximal operator of the linf-norm.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float Step size parameter """ super(ProximalLInfty, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): """Return ``self(x)``.""" radius = 1 if x is out: x = x.copy() proj_l1(x, radius, out) out.lincomb(-1, out, 1, x) return ProximalLInfty
python
def proximal_linfty(space): r"""Proximal operator factory of the ``l_\infty``-norm. Function for the proximal operator of the functional ``F`` where ``F`` is the ``l_\infty``-norm:: ``F(x) = \sup_i |x_i|`` Parameters ---------- space : `LinearSpace` Domain of ``F``. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The proximal is computed by the Moreau identity and a projection onto an l1-ball [PB2014]. See Also -------- proj_l1 : projection onto l1-ball """ class ProximalLInfty(Operator): """Proximal operator of the linf-norm.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float Step size parameter """ super(ProximalLInfty, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): """Return ``self(x)``.""" radius = 1 if x is out: x = x.copy() proj_l1(x, radius, out) out.lincomb(-1, out, 1, x) return ProximalLInfty
[ "def", "proximal_linfty", "(", "space", ")", ":", "class", "ProximalLInfty", "(", "Operator", ")", ":", "\"\"\"Proximal operator of the linf-norm.\"\"\"", "def", "__init__", "(", "self", ",", "sigma", ")", ":", "\"\"\"Initialize a new instance.\n\n Parameters\n ...
r"""Proximal operator factory of the ``l_\infty``-norm. Function for the proximal operator of the functional ``F`` where ``F`` is the ``l_\infty``-norm:: ``F(x) = \sup_i |x_i|`` Parameters ---------- space : `LinearSpace` Domain of ``F``. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The proximal is computed by the Moreau identity and a projection onto an l1-ball [PB2014]. See Also -------- proj_l1 : projection onto l1-ball
[ "r", "Proximal", "operator", "factory", "of", "the", "l_", "\\", "infty", "-", "norm", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L1451-L1505
231,875
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
proj_l1
def proj_l1(x, radius=1, out=None): r"""Projection onto l1-ball. Projection onto:: ``{ x \in X | ||x||_1 \leq r}`` with ``r`` being the radius. Parameters ---------- space : `LinearSpace` Space / domain ``X``. radius : positive float, optional Radius ``r`` of the ball. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The projection onto an l1-ball can be computed by projection onto a simplex, see [D+2008] for details. References ---------- [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T. *Efficient Projections onto the L1-ball for Learning in High dimensions*. ICML 2008, pp. 272-279. http://doi.org/10.1145/1390156.1390191 See Also -------- proximal_linfty : proximal for l-infinity norm proj_simplex : projection onto simplex """ if out is None: out = x.space.element() u = x.ufuncs.absolute() v = x.ufuncs.sign() proj_simplex(u, radius, out) out *= v return out
python
def proj_l1(x, radius=1, out=None): r"""Projection onto l1-ball. Projection onto:: ``{ x \in X | ||x||_1 \leq r}`` with ``r`` being the radius. Parameters ---------- space : `LinearSpace` Space / domain ``X``. radius : positive float, optional Radius ``r`` of the ball. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The projection onto an l1-ball can be computed by projection onto a simplex, see [D+2008] for details. References ---------- [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T. *Efficient Projections onto the L1-ball for Learning in High dimensions*. ICML 2008, pp. 272-279. http://doi.org/10.1145/1390156.1390191 See Also -------- proximal_linfty : proximal for l-infinity norm proj_simplex : projection onto simplex """ if out is None: out = x.space.element() u = x.ufuncs.absolute() v = x.ufuncs.sign() proj_simplex(u, radius, out) out *= v return out
[ "def", "proj_l1", "(", "x", ",", "radius", "=", "1", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "x", ".", "space", ".", "element", "(", ")", "u", "=", "x", ".", "ufuncs", ".", "absolute", "(", ")", "v", "...
r"""Projection onto l1-ball. Projection onto:: ``{ x \in X | ||x||_1 \leq r}`` with ``r`` being the radius. Parameters ---------- space : `LinearSpace` Space / domain ``X``. radius : positive float, optional Radius ``r`` of the ball. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The projection onto an l1-ball can be computed by projection onto a simplex, see [D+2008] for details. References ---------- [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T. *Efficient Projections onto the L1-ball for Learning in High dimensions*. ICML 2008, pp. 272-279. http://doi.org/10.1145/1390156.1390191 See Also -------- proximal_linfty : proximal for l-infinity norm proj_simplex : projection onto simplex
[ "r", "Projection", "onto", "l1", "-", "ball", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L1569-L1615
231,876
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
proj_simplex
def proj_simplex(x, diameter=1, out=None): r"""Projection onto simplex. Projection onto:: ``{ x \in X | x_i \geq 0, \sum_i x_i = r}`` with :math:`r` being the diameter. It is computed by the formula proposed in [D+2008]. Parameters ---------- space : `LinearSpace` Space / domain ``X``. diameter : positive float, optional Diameter of the simplex. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The projection onto a simplex is not of closed-form but can be solved by a non-iterative algorithm, see [D+2008] for details. References ---------- [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T. *Efficient Projections onto the L1-ball for Learning in High dimensions*. ICML 2008, pp. 272-279. http://doi.org/10.1145/1390156.1390191 See Also -------- proj_l1 : projection onto l1-norm ball """ if out is None: out = x.space.element() # sort values in descending order x_sor = x.asarray().flatten() x_sor.sort() x_sor = x_sor[::-1] # find critical index j = np.arange(1, x.size + 1) x_avrg = (1 / j) * (np.cumsum(x_sor) - diameter) crit = x_sor - x_avrg i = np.argwhere(crit >= 0).flatten().max() # output is a shifted and thresholded version of the input out[:] = np.maximum(x - x_avrg[i], 0) return out
python
def proj_simplex(x, diameter=1, out=None): r"""Projection onto simplex. Projection onto:: ``{ x \in X | x_i \geq 0, \sum_i x_i = r}`` with :math:`r` being the diameter. It is computed by the formula proposed in [D+2008]. Parameters ---------- space : `LinearSpace` Space / domain ``X``. diameter : positive float, optional Diameter of the simplex. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The projection onto a simplex is not of closed-form but can be solved by a non-iterative algorithm, see [D+2008] for details. References ---------- [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T. *Efficient Projections onto the L1-ball for Learning in High dimensions*. ICML 2008, pp. 272-279. http://doi.org/10.1145/1390156.1390191 See Also -------- proj_l1 : projection onto l1-norm ball """ if out is None: out = x.space.element() # sort values in descending order x_sor = x.asarray().flatten() x_sor.sort() x_sor = x_sor[::-1] # find critical index j = np.arange(1, x.size + 1) x_avrg = (1 / j) * (np.cumsum(x_sor) - diameter) crit = x_sor - x_avrg i = np.argwhere(crit >= 0).flatten().max() # output is a shifted and thresholded version of the input out[:] = np.maximum(x - x_avrg[i], 0) return out
[ "def", "proj_simplex", "(", "x", ",", "diameter", "=", "1", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "x", ".", "space", ".", "element", "(", ")", "# sort values in descending order", "x_sor", "=", "x", ".", "asar...
r"""Projection onto simplex. Projection onto:: ``{ x \in X | x_i \geq 0, \sum_i x_i = r}`` with :math:`r` being the diameter. It is computed by the formula proposed in [D+2008]. Parameters ---------- space : `LinearSpace` Space / domain ``X``. diameter : positive float, optional Diameter of the simplex. Returns ------- prox_factory : callable Factory for the proximal operator to be initialized. Notes ----- The projection onto a simplex is not of closed-form but can be solved by a non-iterative algorithm, see [D+2008] for details. References ---------- [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T. *Efficient Projections onto the L1-ball for Learning in High dimensions*. ICML 2008, pp. 272-279. http://doi.org/10.1145/1390156.1390191 See Also -------- proj_l1 : projection onto l1-norm ball
[ "r", "Projection", "onto", "simplex", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L1618-L1672
231,877
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
proximal_convex_conj_kl
def proximal_convex_conj_kl(space, lam=1, g=None): r"""Proximal operator factory of the convex conjugate of the KL divergence. Function returning the proximal operator of the convex conjugate of the functional F where F is the entropy-type Kullback-Leibler (KL) divergence:: F(x) = sum_i (x_i - g_i + g_i ln(g_i) - g_i ln(pos(x_i))) + ind_P(x) with ``x`` and ``g`` elements in the linear space ``X``, and ``g`` non-negative. Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator function for nonnegativity. Parameters ---------- space : `TensorSpace` Space X which is the domain of the functional F lam : positive float, optional Scaling factor. g : ``space`` element, optional Data term, positive. If None it is take as the one-element. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- proximal_convex_conj_kl_cross_entropy : proximal for releated functional Notes ----- The functional is given by the expression .. math:: F(x) = \sum_i (x_i - g_i + g_i \ln(g_i) - g_i \ln(pos(x_i))) + I_{x \geq 0}(x) The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the domain of :math:`F` such that :math:`F` is defined over whole space :math:`X`. The non-negativity thresholding :math:`pos` is used to define :math:`F` in the real numbers. Note that the functional is not well-defined without a prior g. Hence, if g is omitted this will be interpreted as if g is equal to the one-element. The convex conjugate :math:`F^*` of :math:`F` is .. math:: F^*(p) = \sum_i (-g_i \ln(\text{pos}({1_X}_i - p_i))) + I_{1_X - p \geq 0}(p) where :math:`p` is the variable dual to :math:`x`, and :math:`1_X` is an element of the space :math:`X` with all components set to 1. The proximal operator of the convex conjugate of F is .. 
math:: \mathrm{prox}_{\sigma (\lambda F)^*}(x) = \frac{\lambda 1_X + x - \sqrt{(x - \lambda 1_X)^2 + 4 \lambda \sigma g}}{2} where :math:`\sigma` is the step size-like parameter, and :math:`\lambda` is the weighting in front of the function :math:`F`. KL based objectives are common in MLEM optimization problems and are often used when data noise governed by a multivariate Poisson probability distribution is significant. The intermediate image estimates can have negative values even though the converged solution will be non-negative. Non-negative intermediate image estimates can be enforced by adding an indicator function ind_P the primal objective. This functional :math:`F`, described above, is related to the Kullback-Leibler cross entropy functional. The KL cross entropy is the one described in `this Wikipedia article <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_, and the functional :math:`F` is obtained by switching place of the prior and the varialbe in the KL cross entropy functional. See the See Also section. """ lam = float(lam) if g is not None and g not in space: raise TypeError('{} is not an element of {}'.format(g, space)) class ProximalConvexConjKL(Operator): """Proximal operator of the convex conjugate of the KL divergence.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float """ super(ProximalConvexConjKL, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): """Return ``self(x, out=out)``.""" # (x + lam - sqrt((x - lam)^2 + 4*lam*sig*g)) / 2 # out = (x - lam)^2 if x is out: # Handle aliased `x` and `out` (need original `x` later on) x = x.copy() else: out.assign(x) out -= lam out.ufuncs.square(out=out) # out = ... + 4*lam*sigma*g # If g is None, it is taken as the one element if g is None: out += 4.0 * lam * self.sigma else: out.lincomb(1, out, 4.0 * lam * self.sigma, g) # out = x - sqrt(...) 
+ lam out.ufuncs.sqrt(out=out) out.lincomb(1, x, -1, out) out += lam # out = 1/2 * ... out /= 2 return ProximalConvexConjKL
python
def proximal_convex_conj_kl(space, lam=1, g=None): r"""Proximal operator factory of the convex conjugate of the KL divergence. Function returning the proximal operator of the convex conjugate of the functional F where F is the entropy-type Kullback-Leibler (KL) divergence:: F(x) = sum_i (x_i - g_i + g_i ln(g_i) - g_i ln(pos(x_i))) + ind_P(x) with ``x`` and ``g`` elements in the linear space ``X``, and ``g`` non-negative. Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator function for nonnegativity. Parameters ---------- space : `TensorSpace` Space X which is the domain of the functional F lam : positive float, optional Scaling factor. g : ``space`` element, optional Data term, positive. If None it is take as the one-element. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- proximal_convex_conj_kl_cross_entropy : proximal for releated functional Notes ----- The functional is given by the expression .. math:: F(x) = \sum_i (x_i - g_i + g_i \ln(g_i) - g_i \ln(pos(x_i))) + I_{x \geq 0}(x) The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the domain of :math:`F` such that :math:`F` is defined over whole space :math:`X`. The non-negativity thresholding :math:`pos` is used to define :math:`F` in the real numbers. Note that the functional is not well-defined without a prior g. Hence, if g is omitted this will be interpreted as if g is equal to the one-element. The convex conjugate :math:`F^*` of :math:`F` is .. math:: F^*(p) = \sum_i (-g_i \ln(\text{pos}({1_X}_i - p_i))) + I_{1_X - p \geq 0}(p) where :math:`p` is the variable dual to :math:`x`, and :math:`1_X` is an element of the space :math:`X` with all components set to 1. The proximal operator of the convex conjugate of F is .. 
math:: \mathrm{prox}_{\sigma (\lambda F)^*}(x) = \frac{\lambda 1_X + x - \sqrt{(x - \lambda 1_X)^2 + 4 \lambda \sigma g}}{2} where :math:`\sigma` is the step size-like parameter, and :math:`\lambda` is the weighting in front of the function :math:`F`. KL based objectives are common in MLEM optimization problems and are often used when data noise governed by a multivariate Poisson probability distribution is significant. The intermediate image estimates can have negative values even though the converged solution will be non-negative. Non-negative intermediate image estimates can be enforced by adding an indicator function ind_P the primal objective. This functional :math:`F`, described above, is related to the Kullback-Leibler cross entropy functional. The KL cross entropy is the one described in `this Wikipedia article <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_, and the functional :math:`F` is obtained by switching place of the prior and the varialbe in the KL cross entropy functional. See the See Also section. """ lam = float(lam) if g is not None and g not in space: raise TypeError('{} is not an element of {}'.format(g, space)) class ProximalConvexConjKL(Operator): """Proximal operator of the convex conjugate of the KL divergence.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float """ super(ProximalConvexConjKL, self).__init__( domain=space, range=space, linear=False) self.sigma = float(sigma) def _call(self, x, out): """Return ``self(x, out=out)``.""" # (x + lam - sqrt((x - lam)^2 + 4*lam*sig*g)) / 2 # out = (x - lam)^2 if x is out: # Handle aliased `x` and `out` (need original `x` later on) x = x.copy() else: out.assign(x) out -= lam out.ufuncs.square(out=out) # out = ... + 4*lam*sigma*g # If g is None, it is taken as the one element if g is None: out += 4.0 * lam * self.sigma else: out.lincomb(1, out, 4.0 * lam * self.sigma, g) # out = x - sqrt(...) 
+ lam out.ufuncs.sqrt(out=out) out.lincomb(1, x, -1, out) out += lam # out = 1/2 * ... out /= 2 return ProximalConvexConjKL
[ "def", "proximal_convex_conj_kl", "(", "space", ",", "lam", "=", "1", ",", "g", "=", "None", ")", ":", "lam", "=", "float", "(", "lam", ")", "if", "g", "is", "not", "None", "and", "g", "not", "in", "space", ":", "raise", "TypeError", "(", "'{} is n...
r"""Proximal operator factory of the convex conjugate of the KL divergence. Function returning the proximal operator of the convex conjugate of the functional F where F is the entropy-type Kullback-Leibler (KL) divergence:: F(x) = sum_i (x_i - g_i + g_i ln(g_i) - g_i ln(pos(x_i))) + ind_P(x) with ``x`` and ``g`` elements in the linear space ``X``, and ``g`` non-negative. Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator function for nonnegativity. Parameters ---------- space : `TensorSpace` Space X which is the domain of the functional F lam : positive float, optional Scaling factor. g : ``space`` element, optional Data term, positive. If None it is take as the one-element. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- proximal_convex_conj_kl_cross_entropy : proximal for releated functional Notes ----- The functional is given by the expression .. math:: F(x) = \sum_i (x_i - g_i + g_i \ln(g_i) - g_i \ln(pos(x_i))) + I_{x \geq 0}(x) The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the domain of :math:`F` such that :math:`F` is defined over whole space :math:`X`. The non-negativity thresholding :math:`pos` is used to define :math:`F` in the real numbers. Note that the functional is not well-defined without a prior g. Hence, if g is omitted this will be interpreted as if g is equal to the one-element. The convex conjugate :math:`F^*` of :math:`F` is .. math:: F^*(p) = \sum_i (-g_i \ln(\text{pos}({1_X}_i - p_i))) + I_{1_X - p \geq 0}(p) where :math:`p` is the variable dual to :math:`x`, and :math:`1_X` is an element of the space :math:`X` with all components set to 1. The proximal operator of the convex conjugate of F is .. 
math:: \mathrm{prox}_{\sigma (\lambda F)^*}(x) = \frac{\lambda 1_X + x - \sqrt{(x - \lambda 1_X)^2 + 4 \lambda \sigma g}}{2} where :math:`\sigma` is the step size-like parameter, and :math:`\lambda` is the weighting in front of the function :math:`F`. KL based objectives are common in MLEM optimization problems and are often used when data noise governed by a multivariate Poisson probability distribution is significant. The intermediate image estimates can have negative values even though the converged solution will be non-negative. Non-negative intermediate image estimates can be enforced by adding an indicator function ind_P the primal objective. This functional :math:`F`, described above, is related to the Kullback-Leibler cross entropy functional. The KL cross entropy is the one described in `this Wikipedia article <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_, and the functional :math:`F` is obtained by switching place of the prior and the varialbe in the KL cross entropy functional. See the See Also section.
[ "r", "Proximal", "operator", "factory", "of", "the", "convex", "conjugate", "of", "the", "KL", "divergence", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L1675-L1804
231,878
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
proximal_convex_conj_kl_cross_entropy
def proximal_convex_conj_kl_cross_entropy(space, lam=1, g=None): r"""Proximal factory of the convex conj of cross entropy KL divergence. Function returning the proximal factory of the convex conjugate of the functional F, where F is the cross entropy Kullback-Leibler (KL) divergence given by:: F(x) = sum_i (x_i ln(pos(x_i)) - x_i ln(g_i) + g_i - x_i) + ind_P(x) with ``x`` and ``g`` in the linear space ``X``, and ``g`` non-negative. Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator function for nonnegativity. Parameters ---------- space : `TensorSpace` Space X which is the domain of the functional F lam : positive float, optional Scaling factor. g : ``space`` element, optional Data term, positive. If None it is take as the one-element. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- proximal_convex_conj_kl : proximal for related functional Notes ----- The functional is given by the expression .. math:: F(x) = \sum_i (x_i \ln(pos(x_i)) - x_i \ln(g_i) + g_i - x_i) + I_{x \geq 0}(x) The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the domain of :math:`F` such that :math:`F` is defined over whole space :math:`X`. The non-negativity thresholding :math:`pos` is used to define :math:`F` in the real numbers. Note that the functional is not well-defined without a prior g. Hence, if g is omitted this will be interpreted as if g is equal to the one-element. The convex conjugate :math:`F^*` of :math:`F` is .. math:: F^*(p) = \sum_i g_i (exp(p_i) - 1) where :math:`p` is the variable dual to :math:`x`. The proximal operator of the convex conjugate of :math:`F` is .. 
math:: \mathrm{prox}_{\sigma (\lambda F)^*}(x) = x - \lambda W(\frac{\sigma}{\lambda} g e^{x/\lambda}) where :math:`\sigma` is the step size-like parameter, :math:`\lambda` is the weighting in front of the function :math:`F`, and :math:`W` is the Lambert W function (see, for example, the `Wikipedia article <https://en.wikipedia.org/wiki/Lambert_W_function>`_). For real-valued input x, the Lambert :math:`W` function is defined only for :math:`x \geq -1/e`, and it has two branches for values :math:`-1/e \leq x < 0`. However, for inteneded use-cases, where :math:`\lambda` and :math:`g` are positive, the argument of :math:`W` will always be positive. `Wikipedia article on Kullback Leibler divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_. For further information about the functional, see for example `this article <http://ieeexplore.ieee.org/document/1056144/?arnumber=1056144>`_. The KL cross entropy functional :math:`F`, described above, is related to another functional functional also know as KL divergence. This functional is often used as data discrepancy term in inverse problems, when data is corrupted with Poisson noise. This functional is obtained by changing place of the prior and the variable. See the See Also section. """ lam = float(lam) if g is not None and g not in space: raise TypeError('{} is not an element of {}'.format(g, space)) class ProximalConvexConjKLCrossEntropy(Operator): """Proximal operator of conjugate of cross entropy KL divergence.""" def __init__(self, sigma): """Initialize a new instance. 
Parameters ---------- sigma : positive float """ self.sigma = float(sigma) super(ProximalConvexConjKLCrossEntropy, self).__init__( domain=space, range=space, linear=False) def _call(self, x, out): """Return ``self(x, out=out)``.""" # Lazy import to improve `import odl` time import scipy.special if g is None: # If g is None, it is taken as the one element # Different branches of lambertw is not an issue, see Notes lambw = scipy.special.lambertw( (self.sigma / lam) * np.exp(x / lam)) else: # Different branches of lambertw is not an issue, see Notes lambw = scipy.special.lambertw( (self.sigma / lam) * g * np.exp(x / lam)) if not np.issubsctype(self.domain.dtype, np.complexfloating): lambw = lambw.real lambw = x.space.element(lambw) out.lincomb(1, x, -lam, lambw) return ProximalConvexConjKLCrossEntropy
python
def proximal_convex_conj_kl_cross_entropy(space, lam=1, g=None): r"""Proximal factory of the convex conj of cross entropy KL divergence. Function returning the proximal factory of the convex conjugate of the functional F, where F is the cross entropy Kullback-Leibler (KL) divergence given by:: F(x) = sum_i (x_i ln(pos(x_i)) - x_i ln(g_i) + g_i - x_i) + ind_P(x) with ``x`` and ``g`` in the linear space ``X``, and ``g`` non-negative. Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator function for nonnegativity. Parameters ---------- space : `TensorSpace` Space X which is the domain of the functional F lam : positive float, optional Scaling factor. g : ``space`` element, optional Data term, positive. If None it is take as the one-element. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- proximal_convex_conj_kl : proximal for related functional Notes ----- The functional is given by the expression .. math:: F(x) = \sum_i (x_i \ln(pos(x_i)) - x_i \ln(g_i) + g_i - x_i) + I_{x \geq 0}(x) The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the domain of :math:`F` such that :math:`F` is defined over whole space :math:`X`. The non-negativity thresholding :math:`pos` is used to define :math:`F` in the real numbers. Note that the functional is not well-defined without a prior g. Hence, if g is omitted this will be interpreted as if g is equal to the one-element. The convex conjugate :math:`F^*` of :math:`F` is .. math:: F^*(p) = \sum_i g_i (exp(p_i) - 1) where :math:`p` is the variable dual to :math:`x`. The proximal operator of the convex conjugate of :math:`F` is .. 
math:: \mathrm{prox}_{\sigma (\lambda F)^*}(x) = x - \lambda W(\frac{\sigma}{\lambda} g e^{x/\lambda}) where :math:`\sigma` is the step size-like parameter, :math:`\lambda` is the weighting in front of the function :math:`F`, and :math:`W` is the Lambert W function (see, for example, the `Wikipedia article <https://en.wikipedia.org/wiki/Lambert_W_function>`_). For real-valued input x, the Lambert :math:`W` function is defined only for :math:`x \geq -1/e`, and it has two branches for values :math:`-1/e \leq x < 0`. However, for inteneded use-cases, where :math:`\lambda` and :math:`g` are positive, the argument of :math:`W` will always be positive. `Wikipedia article on Kullback Leibler divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_. For further information about the functional, see for example `this article <http://ieeexplore.ieee.org/document/1056144/?arnumber=1056144>`_. The KL cross entropy functional :math:`F`, described above, is related to another functional functional also know as KL divergence. This functional is often used as data discrepancy term in inverse problems, when data is corrupted with Poisson noise. This functional is obtained by changing place of the prior and the variable. See the See Also section. """ lam = float(lam) if g is not None and g not in space: raise TypeError('{} is not an element of {}'.format(g, space)) class ProximalConvexConjKLCrossEntropy(Operator): """Proximal operator of conjugate of cross entropy KL divergence.""" def __init__(self, sigma): """Initialize a new instance. 
Parameters ---------- sigma : positive float """ self.sigma = float(sigma) super(ProximalConvexConjKLCrossEntropy, self).__init__( domain=space, range=space, linear=False) def _call(self, x, out): """Return ``self(x, out=out)``.""" # Lazy import to improve `import odl` time import scipy.special if g is None: # If g is None, it is taken as the one element # Different branches of lambertw is not an issue, see Notes lambw = scipy.special.lambertw( (self.sigma / lam) * np.exp(x / lam)) else: # Different branches of lambertw is not an issue, see Notes lambw = scipy.special.lambertw( (self.sigma / lam) * g * np.exp(x / lam)) if not np.issubsctype(self.domain.dtype, np.complexfloating): lambw = lambw.real lambw = x.space.element(lambw) out.lincomb(1, x, -lam, lambw) return ProximalConvexConjKLCrossEntropy
[ "def", "proximal_convex_conj_kl_cross_entropy", "(", "space", ",", "lam", "=", "1", ",", "g", "=", "None", ")", ":", "lam", "=", "float", "(", "lam", ")", "if", "g", "is", "not", "None", "and", "g", "not", "in", "space", ":", "raise", "TypeError", "(...
r"""Proximal factory of the convex conj of cross entropy KL divergence. Function returning the proximal factory of the convex conjugate of the functional F, where F is the cross entropy Kullback-Leibler (KL) divergence given by:: F(x) = sum_i (x_i ln(pos(x_i)) - x_i ln(g_i) + g_i - x_i) + ind_P(x) with ``x`` and ``g`` in the linear space ``X``, and ``g`` non-negative. Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator function for nonnegativity. Parameters ---------- space : `TensorSpace` Space X which is the domain of the functional F lam : positive float, optional Scaling factor. g : ``space`` element, optional Data term, positive. If None it is take as the one-element. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- proximal_convex_conj_kl : proximal for related functional Notes ----- The functional is given by the expression .. math:: F(x) = \sum_i (x_i \ln(pos(x_i)) - x_i \ln(g_i) + g_i - x_i) + I_{x \geq 0}(x) The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the domain of :math:`F` such that :math:`F` is defined over whole space :math:`X`. The non-negativity thresholding :math:`pos` is used to define :math:`F` in the real numbers. Note that the functional is not well-defined without a prior g. Hence, if g is omitted this will be interpreted as if g is equal to the one-element. The convex conjugate :math:`F^*` of :math:`F` is .. math:: F^*(p) = \sum_i g_i (exp(p_i) - 1) where :math:`p` is the variable dual to :math:`x`. The proximal operator of the convex conjugate of :math:`F` is .. math:: \mathrm{prox}_{\sigma (\lambda F)^*}(x) = x - \lambda W(\frac{\sigma}{\lambda} g e^{x/\lambda}) where :math:`\sigma` is the step size-like parameter, :math:`\lambda` is the weighting in front of the function :math:`F`, and :math:`W` is the Lambert W function (see, for example, the `Wikipedia article <https://en.wikipedia.org/wiki/Lambert_W_function>`_). 
For real-valued input x, the Lambert :math:`W` function is defined only for :math:`x \geq -1/e`, and it has two branches for values :math:`-1/e \leq x < 0`. However, for inteneded use-cases, where :math:`\lambda` and :math:`g` are positive, the argument of :math:`W` will always be positive. `Wikipedia article on Kullback Leibler divergence <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_. For further information about the functional, see for example `this article <http://ieeexplore.ieee.org/document/1056144/?arnumber=1056144>`_. The KL cross entropy functional :math:`F`, described above, is related to another functional functional also know as KL divergence. This functional is often used as data discrepancy term in inverse problems, when data is corrupted with Poisson noise. This functional is obtained by changing place of the prior and the variable. See the See Also section.
[ "r", "Proximal", "factory", "of", "the", "convex", "conj", "of", "cross", "entropy", "KL", "divergence", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L1807-L1931
231,879
odlgroup/odl
odl/solvers/nonsmooth/proximal_operators.py
proximal_huber
def proximal_huber(space, gamma): """Proximal factory of the Huber norm. Parameters ---------- space : `TensorSpace` The domain of the functional gamma : float The smoothing parameter of the Huber norm functional. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- odl.solvers.default_functionals.Huber : the Huber norm functional Notes ----- The proximal operator is given by given by the proximal operator of ``1/(2*gamma) * L2 norm`` in points that are ``<= gamma``, and by the proximal operator of the l1 norm in points that are ``> gamma``. """ gamma = float(gamma) class ProximalHuber(Operator): """Proximal operator of Huber norm.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float """ self.sigma = float(sigma) super(ProximalHuber, self).__init__(domain=space, range=space, linear=False) def _call(self, x, out): """Return ``self(x, out=out)``.""" if isinstance(self.domain, ProductSpace): norm = PointwiseNorm(self.domain, 2)(x) else: norm = x.ufuncs.absolute() mask = norm.ufuncs.less_equal(gamma + self.sigma) out[mask] = gamma / (gamma + self.sigma) * x[mask] mask.ufuncs.logical_not(out=mask) sign_x = x.ufuncs.sign() out[mask] = x[mask] - self.sigma * sign_x[mask] return out return ProximalHuber
python
def proximal_huber(space, gamma): gamma = float(gamma) class ProximalHuber(Operator): """Proximal operator of Huber norm.""" def __init__(self, sigma): """Initialize a new instance. Parameters ---------- sigma : positive float """ self.sigma = float(sigma) super(ProximalHuber, self).__init__(domain=space, range=space, linear=False) def _call(self, x, out): """Return ``self(x, out=out)``.""" if isinstance(self.domain, ProductSpace): norm = PointwiseNorm(self.domain, 2)(x) else: norm = x.ufuncs.absolute() mask = norm.ufuncs.less_equal(gamma + self.sigma) out[mask] = gamma / (gamma + self.sigma) * x[mask] mask.ufuncs.logical_not(out=mask) sign_x = x.ufuncs.sign() out[mask] = x[mask] - self.sigma * sign_x[mask] return out return ProximalHuber
[ "def", "proximal_huber", "(", "space", ",", "gamma", ")", ":", "gamma", "=", "float", "(", "gamma", ")", "class", "ProximalHuber", "(", "Operator", ")", ":", "\"\"\"Proximal operator of Huber norm.\"\"\"", "def", "__init__", "(", "self", ",", "sigma", ")", ":"...
Proximal factory of the Huber norm. Parameters ---------- space : `TensorSpace` The domain of the functional gamma : float The smoothing parameter of the Huber norm functional. Returns ------- prox_factory : function Factory for the proximal operator to be initialized. See Also -------- odl.solvers.default_functionals.Huber : the Huber norm functional Notes ----- The proximal operator is given by given by the proximal operator of ``1/(2*gamma) * L2 norm`` in points that are ``<= gamma``, and by the proximal operator of the l1 norm in points that are ``> gamma``.
[ "Proximal", "factory", "of", "the", "Huber", "norm", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/proximal_operators.py#L1934-L1993
231,880
odlgroup/odl
odl/contrib/datasets/mri/tugraz.py
mri_head_reco_op_32_channel
def mri_head_reco_op_32_channel(): """Reconstruction operator for 32 channel MRI of a head. This is a T2 weighted TSE scan of a healthy volunteer. The reconstruction operator is the sum of the modulus of each channel. See the data source with DOI `10.5281/zenodo.800527`_ or the `project webpage`_ for further information. See Also -------- mri_head_data_32_channel References ---------- .. _10.5281/zenodo.800529: https://zenodo.org/record/800527 .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\ platform_aktuell.html """ # To get the same rotation as in the reference article space = odl.uniform_discr(min_pt=[-115.2, -115.2], max_pt=[115.2, 115.2], shape=[256, 256], dtype=complex) trafo = odl.trafos.FourierTransform(space) return odl.ReductionOperator(odl.ComplexModulus(space) * trafo.inverse, 32)
python
def mri_head_reco_op_32_channel(): # To get the same rotation as in the reference article space = odl.uniform_discr(min_pt=[-115.2, -115.2], max_pt=[115.2, 115.2], shape=[256, 256], dtype=complex) trafo = odl.trafos.FourierTransform(space) return odl.ReductionOperator(odl.ComplexModulus(space) * trafo.inverse, 32)
[ "def", "mri_head_reco_op_32_channel", "(", ")", ":", "# To get the same rotation as in the reference article", "space", "=", "odl", ".", "uniform_discr", "(", "min_pt", "=", "[", "-", "115.2", ",", "-", "115.2", "]", ",", "max_pt", "=", "[", "115.2", ",", "115.2...
Reconstruction operator for 32 channel MRI of a head. This is a T2 weighted TSE scan of a healthy volunteer. The reconstruction operator is the sum of the modulus of each channel. See the data source with DOI `10.5281/zenodo.800527`_ or the `project webpage`_ for further information. See Also -------- mri_head_data_32_channel References ---------- .. _10.5281/zenodo.800529: https://zenodo.org/record/800527 .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\ platform_aktuell.html
[ "Reconstruction", "operator", "for", "32", "channel", "MRI", "of", "a", "head", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/mri/tugraz.py#L130-L158
231,881
odlgroup/odl
odl/contrib/datasets/mri/tugraz.py
mri_knee_data_8_channel
def mri_knee_data_8_channel(): """Raw data for 8 channel MRI of a knee. This is an SE measurement of the knee of a healthy volunteer. The data has been rescaled so that the reconstruction fits approximately in [0, 1]. See the data source with DOI `10.5281/zenodo.800529`_ or the `project webpage`_ for further information. See Also -------- mri_knee_inverse_8_channel References ---------- .. _10.5281/zenodo.800529: https://zenodo.org/record/800529 .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\ platform_aktuell.html """ # TODO: Store data in some ODL controlled url url = 'https://zenodo.org/record/800529/files/3_rawdata_knee_8ch.mat' dct = get_data('3_rawdata_knee_8ch.mat', subset=DATA_SUBSET, url=url) # Change axes to match ODL definitions data = flip(np.swapaxes(dct['rawdata'], 0, -1) * 9e3, 2) return data
python
def mri_knee_data_8_channel(): # TODO: Store data in some ODL controlled url url = 'https://zenodo.org/record/800529/files/3_rawdata_knee_8ch.mat' dct = get_data('3_rawdata_knee_8ch.mat', subset=DATA_SUBSET, url=url) # Change axes to match ODL definitions data = flip(np.swapaxes(dct['rawdata'], 0, -1) * 9e3, 2) return data
[ "def", "mri_knee_data_8_channel", "(", ")", ":", "# TODO: Store data in some ODL controlled url", "url", "=", "'https://zenodo.org/record/800529/files/3_rawdata_knee_8ch.mat'", "dct", "=", "get_data", "(", "'3_rawdata_knee_8ch.mat'", ",", "subset", "=", "DATA_SUBSET", ",", "url...
Raw data for 8 channel MRI of a knee. This is an SE measurement of the knee of a healthy volunteer. The data has been rescaled so that the reconstruction fits approximately in [0, 1]. See the data source with DOI `10.5281/zenodo.800529`_ or the `project webpage`_ for further information. See Also -------- mri_knee_inverse_8_channel References ---------- .. _10.5281/zenodo.800529: https://zenodo.org/record/800529 .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\ platform_aktuell.html
[ "Raw", "data", "for", "8", "channel", "MRI", "of", "a", "knee", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/mri/tugraz.py#L161-L189
231,882
odlgroup/odl
odl/contrib/datasets/images/examples/cambridge_images.py
convert_to_odl
def convert_to_odl(image): """Convert image to ODL object.""" shape = image.shape if len(shape) == 2: space = odl.uniform_discr([0, 0], shape, shape) elif len(shape) == 3: d = shape[2] shape = shape[:2] image = np.transpose(image, (2, 0, 1)) space = odl.uniform_discr([0, 0], shape, shape) ** d image = space.element(image) return image
python
def convert_to_odl(image): shape = image.shape if len(shape) == 2: space = odl.uniform_discr([0, 0], shape, shape) elif len(shape) == 3: d = shape[2] shape = shape[:2] image = np.transpose(image, (2, 0, 1)) space = odl.uniform_discr([0, 0], shape, shape) ** d image = space.element(image) return image
[ "def", "convert_to_odl", "(", "image", ")", ":", "shape", "=", "image", ".", "shape", "if", "len", "(", "shape", ")", "==", "2", ":", "space", "=", "odl", ".", "uniform_discr", "(", "[", "0", ",", "0", "]", ",", "shape", ",", "shape", ")", "elif"...
Convert image to ODL object.
[ "Convert", "image", "to", "ODL", "object", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/images/examples/cambridge_images.py#L8-L23
231,883
odlgroup/odl
odl/set/domain.py
IntervalProd.mid_pt
def mid_pt(self): """Midpoint of this interval product.""" midp = (self.max_pt + self.min_pt) / 2. midp[~self.nondegen_byaxis] = self.min_pt[~self.nondegen_byaxis] return midp
python
def mid_pt(self): midp = (self.max_pt + self.min_pt) / 2. midp[~self.nondegen_byaxis] = self.min_pt[~self.nondegen_byaxis] return midp
[ "def", "mid_pt", "(", "self", ")", ":", "midp", "=", "(", "self", ".", "max_pt", "+", "self", ".", "min_pt", ")", "/", "2.", "midp", "[", "~", "self", ".", "nondegen_byaxis", "]", "=", "self", ".", "min_pt", "[", "~", "self", ".", "nondegen_byaxis"...
Midpoint of this interval product.
[ "Midpoint", "of", "this", "interval", "product", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L115-L119
231,884
odlgroup/odl
odl/set/domain.py
IntervalProd.element
def element(self, inp=None): """Return an element of this interval product. Parameters ---------- inp : float or `array-like`, optional Point to be cast to an element. Returns ------- element : `numpy.ndarray` or float Array (`ndim` > 1) or float version of ``inp`` if provided, otherwise ``self.mid_pt``. Examples -------- >>> interv = IntervalProd(0, 1) >>> interv.element(0.5) 0.5 """ if inp is None: return self.mid_pt elif inp in self: if self.ndim == 1: return float(inp) else: return np.asarray(inp) else: raise TypeError('`inp` {!r} is not a valid element of {!r}' ''.format(inp, self))
python
def element(self, inp=None): if inp is None: return self.mid_pt elif inp in self: if self.ndim == 1: return float(inp) else: return np.asarray(inp) else: raise TypeError('`inp` {!r} is not a valid element of {!r}' ''.format(inp, self))
[ "def", "element", "(", "self", ",", "inp", "=", "None", ")", ":", "if", "inp", "is", "None", ":", "return", "self", ".", "mid_pt", "elif", "inp", "in", "self", ":", "if", "self", ".", "ndim", "==", "1", ":", "return", "float", "(", "inp", ")", ...
Return an element of this interval product. Parameters ---------- inp : float or `array-like`, optional Point to be cast to an element. Returns ------- element : `numpy.ndarray` or float Array (`ndim` > 1) or float version of ``inp`` if provided, otherwise ``self.mid_pt``. Examples -------- >>> interv = IntervalProd(0, 1) >>> interv.element(0.5) 0.5
[ "Return", "an", "element", "of", "this", "interval", "product", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L158-L187
231,885
odlgroup/odl
odl/set/domain.py
IntervalProd.approx_equals
def approx_equals(self, other, atol): """Return ``True`` if ``other`` is equal to this set up to ``atol``. Parameters ---------- other : Object to be tested. atol : float Maximum allowed difference in maximum norm between the interval endpoints. Examples -------- >>> rbox1 = IntervalProd(0, 0.5) >>> rbox2 = IntervalProd(0, np.sqrt(0.5)**2) >>> rbox1.approx_equals(rbox2, atol=0) # Numerical error False >>> rbox1.approx_equals(rbox2, atol=1e-15) True """ if other is self: return True elif not isinstance(other, IntervalProd): return False return (np.allclose(self.min_pt, other.min_pt, atol=atol, rtol=0.0) and np.allclose(self.max_pt, other.max_pt, atol=atol, rtol=0.0))
python
def approx_equals(self, other, atol): if other is self: return True elif not isinstance(other, IntervalProd): return False return (np.allclose(self.min_pt, other.min_pt, atol=atol, rtol=0.0) and np.allclose(self.max_pt, other.max_pt, atol=atol, rtol=0.0))
[ "def", "approx_equals", "(", "self", ",", "other", ",", "atol", ")", ":", "if", "other", "is", "self", ":", "return", "True", "elif", "not", "isinstance", "(", "other", ",", "IntervalProd", ")", ":", "return", "False", "return", "(", "np", ".", "allclo...
Return ``True`` if ``other`` is equal to this set up to ``atol``. Parameters ---------- other : Object to be tested. atol : float Maximum allowed difference in maximum norm between the interval endpoints. Examples -------- >>> rbox1 = IntervalProd(0, 0.5) >>> rbox2 = IntervalProd(0, np.sqrt(0.5)**2) >>> rbox1.approx_equals(rbox2, atol=0) # Numerical error False >>> rbox1.approx_equals(rbox2, atol=1e-15) True
[ "Return", "True", "if", "other", "is", "equal", "to", "this", "set", "up", "to", "atol", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L189-L215
231,886
odlgroup/odl
odl/set/domain.py
IntervalProd.approx_contains
def approx_contains(self, point, atol): """Return ``True`` if ``point`` is "almost" contained in this set. Parameters ---------- point : `array-like` or float Point to be tested. Its length must be equal to `ndim`. In the 1d case, ``point`` can be given as a float. atol : float Maximum allowed distance in maximum norm from ``point`` to ``self``. Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3] >>> rbox = IntervalProd(min_pt, max_pt) >>> # Numerical error >>> rbox.approx_contains([-1 + np.sqrt(0.5)**2, 0., 2.9], atol=0) False >>> rbox.approx_contains([-1 + np.sqrt(0.5)**2, 0., 2.9], atol=1e-9) True """ try: # Duck-typed check of type point = np.array(point, dtype=np.float, copy=False, ndmin=1) except (ValueError, TypeError): return False if point.size == 0: return True elif point.shape != (self.ndim,): return False return self.dist(point, exponent=np.inf) <= atol
python
def approx_contains(self, point, atol): try: # Duck-typed check of type point = np.array(point, dtype=np.float, copy=False, ndmin=1) except (ValueError, TypeError): return False if point.size == 0: return True elif point.shape != (self.ndim,): return False return self.dist(point, exponent=np.inf) <= atol
[ "def", "approx_contains", "(", "self", ",", "point", ",", "atol", ")", ":", "try", ":", "# Duck-typed check of type", "point", "=", "np", ".", "array", "(", "point", ",", "dtype", "=", "np", ".", "float", ",", "copy", "=", "False", ",", "ndmin", "=", ...
Return ``True`` if ``point`` is "almost" contained in this set. Parameters ---------- point : `array-like` or float Point to be tested. Its length must be equal to `ndim`. In the 1d case, ``point`` can be given as a float. atol : float Maximum allowed distance in maximum norm from ``point`` to ``self``. Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3] >>> rbox = IntervalProd(min_pt, max_pt) >>> # Numerical error >>> rbox.approx_contains([-1 + np.sqrt(0.5)**2, 0., 2.9], atol=0) False >>> rbox.approx_contains([-1 + np.sqrt(0.5)**2, 0., 2.9], atol=1e-9) True
[ "Return", "True", "if", "point", "is", "almost", "contained", "in", "this", "set", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L232-L265
231,887
odlgroup/odl
odl/set/domain.py
IntervalProd.contains_all
def contains_all(self, other, atol=0.0): """Return ``True`` if all points defined by ``other`` are contained. Parameters ---------- other : Collection of points to be tested. Can be given as a single point, a ``(d, N)`` array-like where ``d`` is the number of dimensions, or a length-``d`` `meshgrid` tuple. atol : float, optional The maximum allowed distance in 'inf'-norm between the other set and this interval product. Returns ------- contains : bool ``True`` if all points are contained, ``False`` otherwise. Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3] >>> rbox = IntervalProd(min_pt, max_pt) Arrays are expected in ``(ndim, npoints)`` shape: >>> arr = np.array([[-1, 0, 2], # defining one point at a time ... [-0.5, 0, 2]]) >>> rbox.contains_all(arr.T) True Implicit meshgrids defined by coordinate vectors: >>> from odl.discr.grid import sparse_meshgrid >>> vec1 = (-1, -0.9, -0.7) >>> vec2 = (0, 0, 0) >>> vec3 = (2.5, 2.75, 3) >>> mg = sparse_meshgrid(vec1, vec2, vec3) >>> rbox.contains_all(mg) True Works also with an arbitrary iterable: >>> rbox.contains_all([[-1, -0.5], # define points by axis ... [0, 0], ... 
[2, 2]]) True Grids are also accepted as input: >>> agrid = odl.uniform_grid(rbox.min_pt, rbox.max_pt, [3, 1, 3]) >>> rbox.contains_all(agrid) True """ atol = float(atol) # First try optimized methods if other in self: return True if hasattr(other, 'meshgrid'): return self.contains_all(other.meshgrid, atol=atol) elif is_valid_input_meshgrid(other, self.ndim): vecs = tuple(vec.squeeze() for vec in other) mins = np.fromiter((np.min(vec) for vec in vecs), dtype=float) maxs = np.fromiter((np.max(vec) for vec in vecs), dtype=float) return (np.all(mins >= self.min_pt - atol) and np.all(maxs <= self.max_pt + atol)) # Convert to array and check each element other = np.asarray(other) if is_valid_input_array(other, self.ndim): if self.ndim == 1: mins = np.min(other) maxs = np.max(other) else: mins = np.min(other, axis=1) maxs = np.max(other, axis=1) return np.all(mins >= self.min_pt) and np.all(maxs <= self.max_pt) else: return False
python
def contains_all(self, other, atol=0.0): atol = float(atol) # First try optimized methods if other in self: return True if hasattr(other, 'meshgrid'): return self.contains_all(other.meshgrid, atol=atol) elif is_valid_input_meshgrid(other, self.ndim): vecs = tuple(vec.squeeze() for vec in other) mins = np.fromiter((np.min(vec) for vec in vecs), dtype=float) maxs = np.fromiter((np.max(vec) for vec in vecs), dtype=float) return (np.all(mins >= self.min_pt - atol) and np.all(maxs <= self.max_pt + atol)) # Convert to array and check each element other = np.asarray(other) if is_valid_input_array(other, self.ndim): if self.ndim == 1: mins = np.min(other) maxs = np.max(other) else: mins = np.min(other, axis=1) maxs = np.max(other, axis=1) return np.all(mins >= self.min_pt) and np.all(maxs <= self.max_pt) else: return False
[ "def", "contains_all", "(", "self", ",", "other", ",", "atol", "=", "0.0", ")", ":", "atol", "=", "float", "(", "atol", ")", "# First try optimized methods", "if", "other", "in", "self", ":", "return", "True", "if", "hasattr", "(", "other", ",", "'meshgr...
Return ``True`` if all points defined by ``other`` are contained. Parameters ---------- other : Collection of points to be tested. Can be given as a single point, a ``(d, N)`` array-like where ``d`` is the number of dimensions, or a length-``d`` `meshgrid` tuple. atol : float, optional The maximum allowed distance in 'inf'-norm between the other set and this interval product. Returns ------- contains : bool ``True`` if all points are contained, ``False`` otherwise. Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3] >>> rbox = IntervalProd(min_pt, max_pt) Arrays are expected in ``(ndim, npoints)`` shape: >>> arr = np.array([[-1, 0, 2], # defining one point at a time ... [-0.5, 0, 2]]) >>> rbox.contains_all(arr.T) True Implicit meshgrids defined by coordinate vectors: >>> from odl.discr.grid import sparse_meshgrid >>> vec1 = (-1, -0.9, -0.7) >>> vec2 = (0, 0, 0) >>> vec3 = (2.5, 2.75, 3) >>> mg = sparse_meshgrid(vec1, vec2, vec3) >>> rbox.contains_all(mg) True Works also with an arbitrary iterable: >>> rbox.contains_all([[-1, -0.5], # define points by axis ... [0, 0], ... [2, 2]]) True Grids are also accepted as input: >>> agrid = odl.uniform_grid(rbox.min_pt, rbox.max_pt, [3, 1, 3]) >>> rbox.contains_all(agrid) True
[ "Return", "True", "if", "all", "points", "defined", "by", "other", "are", "contained", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L327-L405
231,888
odlgroup/odl
odl/set/domain.py
IntervalProd.measure
def measure(self, ndim=None): """Return the Lebesgue measure of this interval product. Parameters ---------- ndim : int, optional Dimension of the measure to apply. ``None`` is interpreted as `true_ndim`, which always results in a finite and positive result (unless the set is a single point). Examples -------- >>> min_pt, max_pt = [-1, 2.5, 0], [-0.5, 10, 0] >>> rbox = IntervalProd(min_pt, max_pt) >>> rbox.measure() 3.75 >>> rbox.measure(ndim=3) 0.0 >>> rbox.measure(ndim=3) == rbox.volume True >>> rbox.measure(ndim=1) inf >>> rbox.measure() == rbox.squeeze().volume True """ if self.true_ndim == 0: return 0.0 if ndim is None: return self.measure(ndim=self.true_ndim) elif ndim < self.true_ndim: return np.inf elif ndim > self.true_ndim: return 0.0 else: return np.prod(self.extent[self.nondegen_byaxis])
python
def measure(self, ndim=None): if self.true_ndim == 0: return 0.0 if ndim is None: return self.measure(ndim=self.true_ndim) elif ndim < self.true_ndim: return np.inf elif ndim > self.true_ndim: return 0.0 else: return np.prod(self.extent[self.nondegen_byaxis])
[ "def", "measure", "(", "self", ",", "ndim", "=", "None", ")", ":", "if", "self", ".", "true_ndim", "==", "0", ":", "return", "0.0", "if", "ndim", "is", "None", ":", "return", "self", ".", "measure", "(", "ndim", "=", "self", ".", "true_ndim", ")", ...
Return the Lebesgue measure of this interval product. Parameters ---------- ndim : int, optional Dimension of the measure to apply. ``None`` is interpreted as `true_ndim`, which always results in a finite and positive result (unless the set is a single point). Examples -------- >>> min_pt, max_pt = [-1, 2.5, 0], [-0.5, 10, 0] >>> rbox = IntervalProd(min_pt, max_pt) >>> rbox.measure() 3.75 >>> rbox.measure(ndim=3) 0.0 >>> rbox.measure(ndim=3) == rbox.volume True >>> rbox.measure(ndim=1) inf >>> rbox.measure() == rbox.squeeze().volume True
[ "Return", "the", "Lebesgue", "measure", "of", "this", "interval", "product", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L407-L442
231,889
odlgroup/odl
odl/set/domain.py
IntervalProd.dist
def dist(self, point, exponent=2.0): """Return the distance of ``point`` to this set. Parameters ---------- point : `array-like` or float Point whose distance to calculate. Its length must be equal to the set's dimension. Can be a float in the 1d case. exponent : non-zero float or ``float('inf')``, optional Exponent of the norm used in the distance calculation. Returns ------- dist : float Distance to the interior of the IntervalProd. Points strictly inside have distance ``0.0``, points with ``NaN`` have distance ``float('inf')``. See Also -------- numpy.linalg.norm : norm used to compute the distance Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3] >>> rbox = IntervalProd(min_pt, max_pt) >>> rbox.dist([-5, 3, 2]) 5.0 >>> rbox.dist([-5, 3, 2], exponent=float('inf')) 4.0 """ point = np.atleast_1d(point) if len(point) != self.ndim: raise ValueError('`point` must have length {}, got {}' ''.format(self.ndim, len(point))) if np.any(np.isnan(point)): return float('inf') i_larger = np.where(point > self.max_pt) i_smaller = np.where(point < self.min_pt) # Access [0] since np.where returns a tuple. if len(i_larger[0]) == 0 and len(i_smaller[0]) == 0: return 0.0 else: proj = np.concatenate((point[i_larger], point[i_smaller])) border = np.concatenate((self.max_pt[i_larger], self.min_pt[i_smaller])) return np.linalg.norm(proj - border, ord=exponent)
python
def dist(self, point, exponent=2.0): point = np.atleast_1d(point) if len(point) != self.ndim: raise ValueError('`point` must have length {}, got {}' ''.format(self.ndim, len(point))) if np.any(np.isnan(point)): return float('inf') i_larger = np.where(point > self.max_pt) i_smaller = np.where(point < self.min_pt) # Access [0] since np.where returns a tuple. if len(i_larger[0]) == 0 and len(i_smaller[0]) == 0: return 0.0 else: proj = np.concatenate((point[i_larger], point[i_smaller])) border = np.concatenate((self.max_pt[i_larger], self.min_pt[i_smaller])) return np.linalg.norm(proj - border, ord=exponent)
[ "def", "dist", "(", "self", ",", "point", ",", "exponent", "=", "2.0", ")", ":", "point", "=", "np", ".", "atleast_1d", "(", "point", ")", "if", "len", "(", "point", ")", "!=", "self", ".", "ndim", ":", "raise", "ValueError", "(", "'`point` must have...
Return the distance of ``point`` to this set. Parameters ---------- point : `array-like` or float Point whose distance to calculate. Its length must be equal to the set's dimension. Can be a float in the 1d case. exponent : non-zero float or ``float('inf')``, optional Exponent of the norm used in the distance calculation. Returns ------- dist : float Distance to the interior of the IntervalProd. Points strictly inside have distance ``0.0``, points with ``NaN`` have distance ``float('inf')``. See Also -------- numpy.linalg.norm : norm used to compute the distance Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3] >>> rbox = IntervalProd(min_pt, max_pt) >>> rbox.dist([-5, 3, 2]) 5.0 >>> rbox.dist([-5, 3, 2], exponent=float('inf')) 4.0
[ "Return", "the", "distance", "of", "point", "to", "this", "set", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L444-L493
231,890
odlgroup/odl
odl/set/domain.py
IntervalProd.collapse
def collapse(self, indices, values): """Partly collapse the interval product to single values. Note that no changes are made in-place. Parameters ---------- indices : int or sequence of ints The indices of the dimensions along which to collapse. values : `array-like` or float The values to which to collapse. Must have the same length as ``indices``. Values must lie within the interval boundaries. Returns ------- collapsed : `IntervalProd` The collapsed set. Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3] >>> rbox = IntervalProd(min_pt, max_pt) >>> rbox.collapse(1, 0) IntervalProd([-1., 0., 2.], [-0.5, 0. , 3. ]) >>> rbox.collapse([1, 2], [0, 2.5]) IntervalProd([-1. , 0. , 2.5], [-0.5, 0. , 2.5]) """ indices = np.atleast_1d(indices).astype('int64', casting='safe') values = np.atleast_1d(values) if len(indices) != len(values): raise ValueError('lengths of indices {} and values {} do not ' 'match ({} != {})' ''.format(indices, values, len(indices), len(values))) for axis, index in enumerate(indices): if not 0 <= index <= self.ndim: raise IndexError('in axis {}: index {} out of range 0 --> {}' ''.format(axis, index, self.ndim - 1)) if np.any(values < self.min_pt[indices]): raise ValueError('values {} not above the lower interval ' 'boundaries {}' ''.format(values, self.min_pt[indices])) if np.any(values > self.max_pt[indices]): raise ValueError('values {} not below the upper interval ' 'boundaries {}' ''.format(values, self.max_pt[indices])) b_new = self.min_pt.copy() b_new[indices] = values e_new = self.max_pt.copy() e_new[indices] = values return IntervalProd(b_new, e_new)
python
def collapse(self, indices, values): indices = np.atleast_1d(indices).astype('int64', casting='safe') values = np.atleast_1d(values) if len(indices) != len(values): raise ValueError('lengths of indices {} and values {} do not ' 'match ({} != {})' ''.format(indices, values, len(indices), len(values))) for axis, index in enumerate(indices): if not 0 <= index <= self.ndim: raise IndexError('in axis {}: index {} out of range 0 --> {}' ''.format(axis, index, self.ndim - 1)) if np.any(values < self.min_pt[indices]): raise ValueError('values {} not above the lower interval ' 'boundaries {}' ''.format(values, self.min_pt[indices])) if np.any(values > self.max_pt[indices]): raise ValueError('values {} not below the upper interval ' 'boundaries {}' ''.format(values, self.max_pt[indices])) b_new = self.min_pt.copy() b_new[indices] = values e_new = self.max_pt.copy() e_new[indices] = values return IntervalProd(b_new, e_new)
[ "def", "collapse", "(", "self", ",", "indices", ",", "values", ")", ":", "indices", "=", "np", ".", "atleast_1d", "(", "indices", ")", ".", "astype", "(", "'int64'", ",", "casting", "=", "'safe'", ")", "values", "=", "np", ".", "atleast_1d", "(", "va...
Partly collapse the interval product to single values. Note that no changes are made in-place. Parameters ---------- indices : int or sequence of ints The indices of the dimensions along which to collapse. values : `array-like` or float The values to which to collapse. Must have the same length as ``indices``. Values must lie within the interval boundaries. Returns ------- collapsed : `IntervalProd` The collapsed set. Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3] >>> rbox = IntervalProd(min_pt, max_pt) >>> rbox.collapse(1, 0) IntervalProd([-1., 0., 2.], [-0.5, 0. , 3. ]) >>> rbox.collapse([1, 2], [0, 2.5]) IntervalProd([-1. , 0. , 2.5], [-0.5, 0. , 2.5])
[ "Partly", "collapse", "the", "interval", "product", "to", "single", "values", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L495-L551
231,891
odlgroup/odl
odl/set/domain.py
IntervalProd.squeeze
def squeeze(self): """Remove the degenerate dimensions. Note that no changes are made in-place. Returns ------- squeezed : `IntervalProd` Squeezed set. Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3] >>> rbox = IntervalProd(min_pt, max_pt) >>> rbox.collapse(1, 0).squeeze() IntervalProd([-1., 2.], [-0.5, 3. ]) >>> rbox.collapse([1, 2], [0, 2.5]).squeeze() IntervalProd(-1.0, -0.5) >>> rbox.collapse([0, 1, 2], [-1, 0, 2.5]).squeeze() IntervalProd([], []) """ b_new = self.min_pt[self.nondegen_byaxis] e_new = self.max_pt[self.nondegen_byaxis] return IntervalProd(b_new, e_new)
python
def squeeze(self): b_new = self.min_pt[self.nondegen_byaxis] e_new = self.max_pt[self.nondegen_byaxis] return IntervalProd(b_new, e_new)
[ "def", "squeeze", "(", "self", ")", ":", "b_new", "=", "self", ".", "min_pt", "[", "self", ".", "nondegen_byaxis", "]", "e_new", "=", "self", ".", "max_pt", "[", "self", ".", "nondegen_byaxis", "]", "return", "IntervalProd", "(", "b_new", ",", "e_new", ...
Remove the degenerate dimensions. Note that no changes are made in-place. Returns ------- squeezed : `IntervalProd` Squeezed set. Examples -------- >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3] >>> rbox = IntervalProd(min_pt, max_pt) >>> rbox.collapse(1, 0).squeeze() IntervalProd([-1., 2.], [-0.5, 3. ]) >>> rbox.collapse([1, 2], [0, 2.5]).squeeze() IntervalProd(-1.0, -0.5) >>> rbox.collapse([0, 1, 2], [-1, 0, 2.5]).squeeze() IntervalProd([], [])
[ "Remove", "the", "degenerate", "dimensions", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L553-L576
231,892
odlgroup/odl
odl/set/domain.py
IntervalProd.insert
def insert(self, index, *intvs): """Return a copy with ``intvs`` inserted before ``index``. The given interval products are inserted (as a block) into ``self``, yielding a new interval product whose number of dimensions is the sum of the numbers of dimensions of all involved interval products. Note that no changes are made in-place. Parameters ---------- index : int Index of the dimension before which ``other`` is to be inserted. Must fulfill ``-ndim <= index <= ndim``. Negative indices count backwards from ``self.ndim``. intv1, ..., intvN : `IntervalProd` Interval products to be inserted into ``self``. Returns ------- newintvp : `IntervalProd` The enlarged interval product. Examples -------- >>> intv = IntervalProd([-1, 2], [-0.5, 3]) >>> intv2 = IntervalProd(0, 1) >>> intv.insert(0, intv2) IntervalProd([ 0., -1., 2.], [ 1. , -0.5, 3. ]) >>> intv.insert(-1, intv2) IntervalProd([-1., 0., 2.], [-0.5, 1. , 3. ]) >>> intv.insert(1, intv2, intv2) IntervalProd([-1., 0., 0., 2.], [-0.5, 1. , 1. , 3. ]) """ index, index_in = safe_int_conv(index), index if not -self.ndim <= index <= self.ndim: raise IndexError('index {0} outside the valid range -{1} ... 
{1}' ''.format(index_in, self.ndim)) if index < 0: index += self.ndim if len(intvs) == 0: # Copy of `self` return IntervalProd(self.min_pt, self.max_pt) elif len(intvs) == 1: # Insert single interval product intv = intvs[0] if not isinstance(intv, IntervalProd): raise TypeError('{!r} is not a `IntervalProd` instance' ''.format(intv)) new_min_pt = np.empty(self.ndim + intv.ndim) new_max_pt = np.empty(self.ndim + intv.ndim) new_min_pt[: index] = self.min_pt[: index] new_max_pt[: index] = self.max_pt[: index] new_min_pt[index: index + intv.ndim] = intv.min_pt new_max_pt[index: index + intv.ndim] = intv.max_pt if index < self.ndim: # Avoid IndexError new_min_pt[index + intv.ndim:] = self.min_pt[index:] new_max_pt[index + intv.ndim:] = self.max_pt[index:] return IntervalProd(new_min_pt, new_max_pt) else: # Recursively insert one, then rest into the result return self.insert(index, intvs[0]).insert( index + intvs[0].ndim, *(intvs[1:]))
python
def insert(self, index, *intvs): index, index_in = safe_int_conv(index), index if not -self.ndim <= index <= self.ndim: raise IndexError('index {0} outside the valid range -{1} ... {1}' ''.format(index_in, self.ndim)) if index < 0: index += self.ndim if len(intvs) == 0: # Copy of `self` return IntervalProd(self.min_pt, self.max_pt) elif len(intvs) == 1: # Insert single interval product intv = intvs[0] if not isinstance(intv, IntervalProd): raise TypeError('{!r} is not a `IntervalProd` instance' ''.format(intv)) new_min_pt = np.empty(self.ndim + intv.ndim) new_max_pt = np.empty(self.ndim + intv.ndim) new_min_pt[: index] = self.min_pt[: index] new_max_pt[: index] = self.max_pt[: index] new_min_pt[index: index + intv.ndim] = intv.min_pt new_max_pt[index: index + intv.ndim] = intv.max_pt if index < self.ndim: # Avoid IndexError new_min_pt[index + intv.ndim:] = self.min_pt[index:] new_max_pt[index + intv.ndim:] = self.max_pt[index:] return IntervalProd(new_min_pt, new_max_pt) else: # Recursively insert one, then rest into the result return self.insert(index, intvs[0]).insert( index + intvs[0].ndim, *(intvs[1:]))
[ "def", "insert", "(", "self", ",", "index", ",", "*", "intvs", ")", ":", "index", ",", "index_in", "=", "safe_int_conv", "(", "index", ")", ",", "index", "if", "not", "-", "self", ".", "ndim", "<=", "index", "<=", "self", ".", "ndim", ":", "raise",...
Return a copy with ``intvs`` inserted before ``index``. The given interval products are inserted (as a block) into ``self``, yielding a new interval product whose number of dimensions is the sum of the numbers of dimensions of all involved interval products. Note that no changes are made in-place. Parameters ---------- index : int Index of the dimension before which ``other`` is to be inserted. Must fulfill ``-ndim <= index <= ndim``. Negative indices count backwards from ``self.ndim``. intv1, ..., intvN : `IntervalProd` Interval products to be inserted into ``self``. Returns ------- newintvp : `IntervalProd` The enlarged interval product. Examples -------- >>> intv = IntervalProd([-1, 2], [-0.5, 3]) >>> intv2 = IntervalProd(0, 1) >>> intv.insert(0, intv2) IntervalProd([ 0., -1., 2.], [ 1. , -0.5, 3. ]) >>> intv.insert(-1, intv2) IntervalProd([-1., 0., 2.], [-0.5, 1. , 3. ]) >>> intv.insert(1, intv2, intv2) IntervalProd([-1., 0., 0., 2.], [-0.5, 1. , 1. , 3. ])
[ "Return", "a", "copy", "with", "intvs", "inserted", "before", "index", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L578-L645
231,893
odlgroup/odl
odl/set/domain.py
IntervalProd.corners
def corners(self, order='C'): """Return the corner points as a single array. Parameters ---------- order : {'C', 'F'}, optional Ordering of the axes in which the corners appear in the output. ``'C'`` means that the first axis varies slowest and the last one fastest, vice versa in ``'F'`` ordering. Returns ------- corners : `numpy.ndarray` Array containing the corner coordinates. The size of the array is ``2^m x ndim``, where ``m`` is the number of non-degenerate axes, i.e. the corners are stored as rows. Examples -------- >>> intv = IntervalProd([-1, 2, 0], [-0.5, 3, 0.5]) >>> intv.corners() array([[-1. , 2. , 0. ], [-1. , 2. , 0.5], [-1. , 3. , 0. ], [-1. , 3. , 0.5], [-0.5, 2. , 0. ], [-0.5, 2. , 0.5], [-0.5, 3. , 0. ], [-0.5, 3. , 0.5]]) >>> intv.corners(order='F') array([[-1. , 2. , 0. ], [-0.5, 2. , 0. ], [-1. , 3. , 0. ], [-0.5, 3. , 0. ], [-1. , 2. , 0.5], [-0.5, 2. , 0.5], [-1. , 3. , 0.5], [-0.5, 3. , 0.5]]) """ from odl.discr.grid import RectGrid minmax_vecs = [0] * self.ndim for axis in np.where(~self.nondegen_byaxis)[0]: minmax_vecs[axis] = self.min_pt[axis] for axis in np.where(self.nondegen_byaxis)[0]: minmax_vecs[axis] = (self.min_pt[axis], self.max_pt[axis]) minmax_grid = RectGrid(*minmax_vecs) return minmax_grid.points(order=order)
python
def corners(self, order='C'): from odl.discr.grid import RectGrid minmax_vecs = [0] * self.ndim for axis in np.where(~self.nondegen_byaxis)[0]: minmax_vecs[axis] = self.min_pt[axis] for axis in np.where(self.nondegen_byaxis)[0]: minmax_vecs[axis] = (self.min_pt[axis], self.max_pt[axis]) minmax_grid = RectGrid(*minmax_vecs) return minmax_grid.points(order=order)
[ "def", "corners", "(", "self", ",", "order", "=", "'C'", ")", ":", "from", "odl", ".", "discr", ".", "grid", "import", "RectGrid", "minmax_vecs", "=", "[", "0", "]", "*", "self", ".", "ndim", "for", "axis", "in", "np", ".", "where", "(", "~", "se...
Return the corner points as a single array. Parameters ---------- order : {'C', 'F'}, optional Ordering of the axes in which the corners appear in the output. ``'C'`` means that the first axis varies slowest and the last one fastest, vice versa in ``'F'`` ordering. Returns ------- corners : `numpy.ndarray` Array containing the corner coordinates. The size of the array is ``2^m x ndim``, where ``m`` is the number of non-degenerate axes, i.e. the corners are stored as rows. Examples -------- >>> intv = IntervalProd([-1, 2, 0], [-0.5, 3, 0.5]) >>> intv.corners() array([[-1. , 2. , 0. ], [-1. , 2. , 0.5], [-1. , 3. , 0. ], [-1. , 3. , 0.5], [-0.5, 2. , 0. ], [-0.5, 2. , 0.5], [-0.5, 3. , 0. ], [-0.5, 3. , 0.5]]) >>> intv.corners(order='F') array([[-1. , 2. , 0. ], [-0.5, 2. , 0. ], [-1. , 3. , 0. ], [-0.5, 3. , 0. ], [-1. , 2. , 0.5], [-0.5, 2. , 0.5], [-1. , 3. , 0.5], [-0.5, 3. , 0.5]])
[ "Return", "the", "corner", "points", "as", "a", "single", "array", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/domain.py#L675-L723
231,894
odlgroup/odl
odl/tomo/operators/ray_trafo.py
RayTransform._call_real
def _call_real(self, x_real, out_real): """Real-space forward projection for the current set-up. This method also sets ``self._astra_projector`` for ``impl='astra_cuda'`` and enabled cache. """ if self.impl.startswith('astra'): backend, data_impl = self.impl.split('_') if data_impl == 'cpu': return astra_cpu_forward_projector( x_real, self.geometry, self.range.real_space, out_real) elif data_impl == 'cuda': if self._astra_wrapper is None: astra_wrapper = AstraCudaProjectorImpl( self.geometry, self.domain.real_space, self.range.real_space) if self.use_cache: self._astra_wrapper = astra_wrapper else: astra_wrapper = self._astra_wrapper return astra_wrapper.call_forward(x_real, out_real) else: # Should never happen raise RuntimeError('bad `impl` {!r}'.format(self.impl)) elif self.impl == 'skimage': return skimage_radon_forward(x_real, self.geometry, self.range.real_space, out_real) else: # Should never happen raise RuntimeError('bad `impl` {!r}'.format(self.impl))
python
def _call_real(self, x_real, out_real): if self.impl.startswith('astra'): backend, data_impl = self.impl.split('_') if data_impl == 'cpu': return astra_cpu_forward_projector( x_real, self.geometry, self.range.real_space, out_real) elif data_impl == 'cuda': if self._astra_wrapper is None: astra_wrapper = AstraCudaProjectorImpl( self.geometry, self.domain.real_space, self.range.real_space) if self.use_cache: self._astra_wrapper = astra_wrapper else: astra_wrapper = self._astra_wrapper return astra_wrapper.call_forward(x_real, out_real) else: # Should never happen raise RuntimeError('bad `impl` {!r}'.format(self.impl)) elif self.impl == 'skimage': return skimage_radon_forward(x_real, self.geometry, self.range.real_space, out_real) else: # Should never happen raise RuntimeError('bad `impl` {!r}'.format(self.impl))
[ "def", "_call_real", "(", "self", ",", "x_real", ",", "out_real", ")", ":", "if", "self", ".", "impl", ".", "startswith", "(", "'astra'", ")", ":", "backend", ",", "data_impl", "=", "self", ".", "impl", ".", "split", "(", "'_'", ")", "if", "data_impl...
Real-space forward projection for the current set-up. This method also sets ``self._astra_projector`` for ``impl='astra_cuda'`` and enabled cache.
[ "Real", "-", "space", "forward", "projection", "for", "the", "current", "set", "-", "up", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/operators/ray_trafo.py#L385-L417
231,895
odlgroup/odl
odl/tomo/operators/ray_trafo.py
RayBackProjection._call_real
def _call_real(self, x_real, out_real): """Real-space back-projection for the current set-up. This method also sets ``self._astra_backprojector`` for ``impl='astra_cuda'`` and enabled cache. """ if self.impl.startswith('astra'): backend, data_impl = self.impl.split('_') if data_impl == 'cpu': return astra_cpu_back_projector(x_real, self.geometry, self.range.real_space, out_real) elif data_impl == 'cuda': if self._astra_wrapper is None: astra_wrapper = AstraCudaBackProjectorImpl( self.geometry, self.range.real_space, self.domain.real_space) if self.use_cache: self._astra_wrapper = astra_wrapper else: astra_wrapper = self._astra_wrapper return astra_wrapper.call_backward(x_real, out_real) else: # Should never happen raise RuntimeError('bad `impl` {!r}'.format(self.impl)) elif self.impl == 'skimage': return skimage_radon_back_projector(x_real, self.geometry, self.range.real_space, out_real) else: # Should never happen raise RuntimeError('bad `impl` {!r}'.format(self.impl))
python
def _call_real(self, x_real, out_real): if self.impl.startswith('astra'): backend, data_impl = self.impl.split('_') if data_impl == 'cpu': return astra_cpu_back_projector(x_real, self.geometry, self.range.real_space, out_real) elif data_impl == 'cuda': if self._astra_wrapper is None: astra_wrapper = AstraCudaBackProjectorImpl( self.geometry, self.range.real_space, self.domain.real_space) if self.use_cache: self._astra_wrapper = astra_wrapper else: astra_wrapper = self._astra_wrapper return astra_wrapper.call_backward(x_real, out_real) else: # Should never happen raise RuntimeError('bad `impl` {!r}'.format(self.impl)) elif self.impl == 'skimage': return skimage_radon_back_projector(x_real, self.geometry, self.range.real_space, out_real) else: # Should never happen raise RuntimeError('bad `impl` {!r}'.format(self.impl))
[ "def", "_call_real", "(", "self", ",", "x_real", ",", "out_real", ")", ":", "if", "self", ".", "impl", ".", "startswith", "(", "'astra'", ")", ":", "backend", ",", "data_impl", "=", "self", ".", "impl", ".", "split", "(", "'_'", ")", "if", "data_impl...
Real-space back-projection for the current set-up. This method also sets ``self._astra_backprojector`` for ``impl='astra_cuda'`` and enabled cache.
[ "Real", "-", "space", "back", "-", "projection", "for", "the", "current", "set", "-", "up", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/operators/ray_trafo.py#L493-L526
231,896
odlgroup/odl
odl/solvers/iterative/statistical.py
mlem
def mlem(op, x, data, niter, callback=None, **kwargs): """Maximum Likelihood Expectation Maximation algorithm. Attempts to solve:: max_x L(x | data) where ``L(x | data)`` is the Poisson likelihood of ``x`` given ``data``. The likelihood depends on the forward operator ``op`` such that (approximately):: op(x) = data Parameters ---------- op : `Operator` Forward operator in the inverse problem. x : ``op.domain`` element Vector to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. The initial value of ``x`` should be non-negative. data : ``op.range`` `element-like` Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. callback : callable, optional Function called with the current iterate after each iteration. Other Parameters ---------------- sensitivities : float or ``op.domain`` `element-like`, optional The algorithm contains a ``A^T 1`` term, if this parameter is given, it is replaced by it. Default: ``op.adjoint(op.range.one())`` Notes ----- Given a forward model :math:`A` and data :math:`g`, the algorithm attempts to find an :math:`x` that maximizes: .. math:: P(g | g \text{ is } Poisson(A(x)) \text{ distributed}). The algorithm is explicitly given by: .. math:: x_{n+1} = \frac{x_n}{A^* 1} A^* (g / A(x_n)) See Also -------- osmlem : Ordered subsets MLEM loglikelihood : Function for calculating the logarithm of the likelihood """ osmlem([op], x, [data], niter=niter, callback=callback, **kwargs)
python
def mlem(op, x, data, niter, callback=None, **kwargs): osmlem([op], x, [data], niter=niter, callback=callback, **kwargs)
[ "def", "mlem", "(", "op", ",", "x", ",", "data", ",", "niter", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "osmlem", "(", "[", "op", "]", ",", "x", ",", "[", "data", "]", ",", "niter", "=", "niter", ",", "callback", "=", ...
Maximum Likelihood Expectation Maximation algorithm. Attempts to solve:: max_x L(x | data) where ``L(x | data)`` is the Poisson likelihood of ``x`` given ``data``. The likelihood depends on the forward operator ``op`` such that (approximately):: op(x) = data Parameters ---------- op : `Operator` Forward operator in the inverse problem. x : ``op.domain`` element Vector to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. The initial value of ``x`` should be non-negative. data : ``op.range`` `element-like` Right-hand side of the equation defining the inverse problem. niter : int Number of iterations. callback : callable, optional Function called with the current iterate after each iteration. Other Parameters ---------------- sensitivities : float or ``op.domain`` `element-like`, optional The algorithm contains a ``A^T 1`` term, if this parameter is given, it is replaced by it. Default: ``op.adjoint(op.range.one())`` Notes ----- Given a forward model :math:`A` and data :math:`g`, the algorithm attempts to find an :math:`x` that maximizes: .. math:: P(g | g \text{ is } Poisson(A(x)) \text{ distributed}). The algorithm is explicitly given by: .. math:: x_{n+1} = \frac{x_n}{A^* 1} A^* (g / A(x_n)) See Also -------- osmlem : Ordered subsets MLEM loglikelihood : Function for calculating the logarithm of the likelihood
[ "Maximum", "Likelihood", "Expectation", "Maximation", "algorithm", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/iterative/statistical.py#L17-L74
231,897
odlgroup/odl
odl/solvers/iterative/statistical.py
osmlem
def osmlem(op, x, data, niter, callback=None, **kwargs): r"""Ordered Subsets Maximum Likelihood Expectation Maximation algorithm. This solver attempts to solve:: max_x L(x | data) where ``L(x, | data)`` is the likelihood of ``x`` given ``data``. The likelihood depends on the forward operators ``op[0], ..., op[n-1]`` such that (approximately):: op[i](x) = data[i] Parameters ---------- op : sequence of `Operator` Forward operators in the inverse problem. x : ``op.domain`` element Vector to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. The initial value of ``x`` should be non-negative. data : sequence of ``op.range`` `element-like` Right-hand sides of the equation defining the inverse problem. niter : int Number of iterations. callback : callable, optional Function called with the current iterate after each iteration. Other Parameters ---------------- sensitivities : float or ``op.domain`` `element-like`, optional The algorithm contains an ``A^T 1`` term, if this parameter is given, it is replaced by it. Default: ``op[i].adjoint(op[i].range.one())`` Notes ----- Given forward models :math:`A_i`, and data :math:`g_i`, :math:`i = 1, ..., M`, the algorithm attempts to find an :math:`x` that maximizes: .. math:: \prod_{i=1}^M P(g_i | g_i \text{ is } Poisson(A_i(x)) \text{ distributed}). The algorithm is explicitly given by partial updates: .. math:: x_{n + m/M} = \frac{x_{n + (m - 1)/M}}{A_i^* 1} A_i^* (g_i / A_i(x_{n + (m - 1)/M})) for :math:`m = 1, ..., M` and :math:`x_{n+1} = x_{n + M/M}`. The algorithm is not guaranteed to converge, but works for many practical problems. References ---------- Natterer, F. Mathematical Methods in Image Reconstruction, section 5.3.2. See Also -------- mlem : Ordinary MLEM algorithm without subsets. 
loglikelihood : Function for calculating the logarithm of the likelihood """ n_ops = len(op) if len(data) != n_ops: raise ValueError('number of data ({}) does not match number of ' 'operators ({})'.format(len(data), n_ops)) if not all(x in opi.domain for opi in op): raise ValueError('`x` not an element in the domains of all operators') # Convert data to range elements data = [op[i].range.element(data[i]) for i in range(len(op))] # Parameter used to enforce positivity. # TODO: let users give this. eps = 1e-8 if np.any(np.less(x, 0)): raise ValueError('`x` must be non-negative') # Extract the sensitivites parameter sensitivities = kwargs.pop('sensitivities', None) if sensitivities is None: sensitivities = [np.maximum(opi.adjoint(opi.range.one()), eps) for opi in op] else: # Make sure the sensitivities is a list of the correct size. try: list(sensitivities) except TypeError: sensitivities = [sensitivities] * n_ops tmp_dom = op[0].domain.element() tmp_ran = [opi.range.element() for opi in op] for _ in range(niter): for i in range(n_ops): op[i](x, out=tmp_ran[i]) tmp_ran[i].ufuncs.maximum(eps, out=tmp_ran[i]) data[i].divide(tmp_ran[i], out=tmp_ran[i]) op[i].adjoint(tmp_ran[i], out=tmp_dom) tmp_dom /= sensitivities[i] x *= tmp_dom if callback is not None: callback(x)
python
def osmlem(op, x, data, niter, callback=None, **kwargs): r"""Ordered Subsets Maximum Likelihood Expectation Maximation algorithm. This solver attempts to solve:: max_x L(x | data) where ``L(x, | data)`` is the likelihood of ``x`` given ``data``. The likelihood depends on the forward operators ``op[0], ..., op[n-1]`` such that (approximately):: op[i](x) = data[i] Parameters ---------- op : sequence of `Operator` Forward operators in the inverse problem. x : ``op.domain`` element Vector to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. The initial value of ``x`` should be non-negative. data : sequence of ``op.range`` `element-like` Right-hand sides of the equation defining the inverse problem. niter : int Number of iterations. callback : callable, optional Function called with the current iterate after each iteration. Other Parameters ---------------- sensitivities : float or ``op.domain`` `element-like`, optional The algorithm contains an ``A^T 1`` term, if this parameter is given, it is replaced by it. Default: ``op[i].adjoint(op[i].range.one())`` Notes ----- Given forward models :math:`A_i`, and data :math:`g_i`, :math:`i = 1, ..., M`, the algorithm attempts to find an :math:`x` that maximizes: .. math:: \prod_{i=1}^M P(g_i | g_i \text{ is } Poisson(A_i(x)) \text{ distributed}). The algorithm is explicitly given by partial updates: .. math:: x_{n + m/M} = \frac{x_{n + (m - 1)/M}}{A_i^* 1} A_i^* (g_i / A_i(x_{n + (m - 1)/M})) for :math:`m = 1, ..., M` and :math:`x_{n+1} = x_{n + M/M}`. The algorithm is not guaranteed to converge, but works for many practical problems. References ---------- Natterer, F. Mathematical Methods in Image Reconstruction, section 5.3.2. See Also -------- mlem : Ordinary MLEM algorithm without subsets. 
loglikelihood : Function for calculating the logarithm of the likelihood """ n_ops = len(op) if len(data) != n_ops: raise ValueError('number of data ({}) does not match number of ' 'operators ({})'.format(len(data), n_ops)) if not all(x in opi.domain for opi in op): raise ValueError('`x` not an element in the domains of all operators') # Convert data to range elements data = [op[i].range.element(data[i]) for i in range(len(op))] # Parameter used to enforce positivity. # TODO: let users give this. eps = 1e-8 if np.any(np.less(x, 0)): raise ValueError('`x` must be non-negative') # Extract the sensitivites parameter sensitivities = kwargs.pop('sensitivities', None) if sensitivities is None: sensitivities = [np.maximum(opi.adjoint(opi.range.one()), eps) for opi in op] else: # Make sure the sensitivities is a list of the correct size. try: list(sensitivities) except TypeError: sensitivities = [sensitivities] * n_ops tmp_dom = op[0].domain.element() tmp_ran = [opi.range.element() for opi in op] for _ in range(niter): for i in range(n_ops): op[i](x, out=tmp_ran[i]) tmp_ran[i].ufuncs.maximum(eps, out=tmp_ran[i]) data[i].divide(tmp_ran[i], out=tmp_ran[i]) op[i].adjoint(tmp_ran[i], out=tmp_dom) tmp_dom /= sensitivities[i] x *= tmp_dom if callback is not None: callback(x)
[ "def", "osmlem", "(", "op", ",", "x", ",", "data", ",", "niter", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "n_ops", "=", "len", "(", "op", ")", "if", "len", "(", "data", ")", "!=", "n_ops", ":", "raise", "ValueError", "("...
r"""Ordered Subsets Maximum Likelihood Expectation Maximation algorithm. This solver attempts to solve:: max_x L(x | data) where ``L(x, | data)`` is the likelihood of ``x`` given ``data``. The likelihood depends on the forward operators ``op[0], ..., op[n-1]`` such that (approximately):: op[i](x) = data[i] Parameters ---------- op : sequence of `Operator` Forward operators in the inverse problem. x : ``op.domain`` element Vector to which the result is written. Its initial value is used as starting point of the iteration, and its values are updated in each iteration step. The initial value of ``x`` should be non-negative. data : sequence of ``op.range`` `element-like` Right-hand sides of the equation defining the inverse problem. niter : int Number of iterations. callback : callable, optional Function called with the current iterate after each iteration. Other Parameters ---------------- sensitivities : float or ``op.domain`` `element-like`, optional The algorithm contains an ``A^T 1`` term, if this parameter is given, it is replaced by it. Default: ``op[i].adjoint(op[i].range.one())`` Notes ----- Given forward models :math:`A_i`, and data :math:`g_i`, :math:`i = 1, ..., M`, the algorithm attempts to find an :math:`x` that maximizes: .. math:: \prod_{i=1}^M P(g_i | g_i \text{ is } Poisson(A_i(x)) \text{ distributed}). The algorithm is explicitly given by partial updates: .. math:: x_{n + m/M} = \frac{x_{n + (m - 1)/M}}{A_i^* 1} A_i^* (g_i / A_i(x_{n + (m - 1)/M})) for :math:`m = 1, ..., M` and :math:`x_{n+1} = x_{n + M/M}`. The algorithm is not guaranteed to converge, but works for many practical problems. References ---------- Natterer, F. Mathematical Methods in Image Reconstruction, section 5.3.2. See Also -------- mlem : Ordinary MLEM algorithm without subsets. loglikelihood : Function for calculating the logarithm of the likelihood
[ "r", "Ordered", "Subsets", "Maximum", "Likelihood", "Expectation", "Maximation", "algorithm", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/iterative/statistical.py#L77-L189
231,898
odlgroup/odl
odl/solvers/iterative/statistical.py
poisson_log_likelihood
def poisson_log_likelihood(x, data): """Poisson log-likelihood of ``data`` given noise parametrized by ``x``. Parameters ---------- x : ``op.domain`` element Value to condition the log-likelihood on. data : ``op.range`` element Data whose log-likelihood given ``x`` shall be calculated. """ if np.any(np.less(x, 0)): raise ValueError('`x` must be non-negative') return np.sum(data * np.log(x + 1e-8) - x)
python
def poisson_log_likelihood(x, data): if np.any(np.less(x, 0)): raise ValueError('`x` must be non-negative') return np.sum(data * np.log(x + 1e-8) - x)
[ "def", "poisson_log_likelihood", "(", "x", ",", "data", ")", ":", "if", "np", ".", "any", "(", "np", ".", "less", "(", "x", ",", "0", ")", ")", ":", "raise", "ValueError", "(", "'`x` must be non-negative'", ")", "return", "np", ".", "sum", "(", "data...
Poisson log-likelihood of ``data`` given noise parametrized by ``x``. Parameters ---------- x : ``op.domain`` element Value to condition the log-likelihood on. data : ``op.range`` element Data whose log-likelihood given ``x`` shall be calculated.
[ "Poisson", "log", "-", "likelihood", "of", "data", "given", "noise", "parametrized", "by", "x", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/iterative/statistical.py#L192-L205
231,899
odlgroup/odl
odl/contrib/param_opt/examples/find_optimal_parameters.py
fom
def fom(reco, true_image): """Sobolev type FoM enforcing both gradient and absolute similarity.""" gradient = odl.Gradient(reco.space) return (gradient(reco - true_image).norm() + reco.space.dist(reco, true_image))
python
def fom(reco, true_image): gradient = odl.Gradient(reco.space) return (gradient(reco - true_image).norm() + reco.space.dist(reco, true_image))
[ "def", "fom", "(", "reco", ",", "true_image", ")", ":", "gradient", "=", "odl", ".", "Gradient", "(", "reco", ".", "space", ")", "return", "(", "gradient", "(", "reco", "-", "true_image", ")", ".", "norm", "(", ")", "+", "reco", ".", "space", ".", ...
Sobolev type FoM enforcing both gradient and absolute similarity.
[ "Sobolev", "type", "FoM", "enforcing", "both", "gradient", "and", "absolute", "similarity", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/param_opt/examples/find_optimal_parameters.py#L147-L151