id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
51
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
231,700
odlgroup/odl
odl/operator/operator.py
_default_call_out_of_place
def _default_call_out_of_place(op, x, **kwargs): """Default out-of-place evaluation. Parameters ---------- op : `Operator` Operator to call x : ``op.domain`` element Point in which to call the operator. kwargs: Optional arguments to the operator. Returns ------- out : `range` element An object in the operator range. The result of an operator evaluation. """ out = op.range.element() result = op._call_in_place(x, out, **kwargs) if result is not None and result is not out: raise ValueError('`op` returned a different value than `out`.' 'With in-place evaluation, the operator can ' 'only return nothing (`None`) or the `out` ' 'parameter.') return out
python
def _default_call_out_of_place(op, x, **kwargs): out = op.range.element() result = op._call_in_place(x, out, **kwargs) if result is not None and result is not out: raise ValueError('`op` returned a different value than `out`.' 'With in-place evaluation, the operator can ' 'only return nothing (`None`) or the `out` ' 'parameter.') return out
[ "def", "_default_call_out_of_place", "(", "op", ",", "x", ",", "*", "*", "kwargs", ")", ":", "out", "=", "op", ".", "range", ".", "element", "(", ")", "result", "=", "op", ".", "_call_in_place", "(", "x", ",", "out", ",", "*", "*", "kwargs", ")", ...
Default out-of-place evaluation. Parameters ---------- op : `Operator` Operator to call x : ``op.domain`` element Point in which to call the operator. kwargs: Optional arguments to the operator. Returns ------- out : `range` element An object in the operator range. The result of an operator evaluation.
[ "Default", "out", "-", "of", "-", "place", "evaluation", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/operator.py#L31-L56
231,701
odlgroup/odl
odl/operator/operator.py
_function_signature
def _function_signature(func): """Return the signature of a callable as a string. Parameters ---------- func : callable Function whose signature to extract. Returns ------- sig : string Signature of the function. """ if sys.version_info.major > 2: # Python 3 already implements this functionality return func.__name__ + str(inspect.signature(func)) # In Python 2 we have to do it manually, unfortunately spec = inspect.getargspec(func) posargs = spec.args defaults = spec.defaults if spec.defaults is not None else [] varargs = spec.varargs kwargs = spec.keywords deflen = 0 if defaults is None else len(defaults) nodeflen = 0 if posargs is None else len(posargs) - deflen args = ['{}'.format(arg) for arg in posargs[:nodeflen]] args.extend('{}={}'.format(arg, dval) for arg, dval in zip(posargs[nodeflen:], defaults)) if varargs: args.append('*{}'.format(varargs)) if kwargs: args.append('**{}'.format(kwargs)) argstr = ', '.join(args) return '{}({})'.format(func.__name__, argstr)
python
def _function_signature(func): if sys.version_info.major > 2: # Python 3 already implements this functionality return func.__name__ + str(inspect.signature(func)) # In Python 2 we have to do it manually, unfortunately spec = inspect.getargspec(func) posargs = spec.args defaults = spec.defaults if spec.defaults is not None else [] varargs = spec.varargs kwargs = spec.keywords deflen = 0 if defaults is None else len(defaults) nodeflen = 0 if posargs is None else len(posargs) - deflen args = ['{}'.format(arg) for arg in posargs[:nodeflen]] args.extend('{}={}'.format(arg, dval) for arg, dval in zip(posargs[nodeflen:], defaults)) if varargs: args.append('*{}'.format(varargs)) if kwargs: args.append('**{}'.format(kwargs)) argstr = ', '.join(args) return '{}({})'.format(func.__name__, argstr)
[ "def", "_function_signature", "(", "func", ")", ":", "if", "sys", ".", "version_info", ".", "major", ">", "2", ":", "# Python 3 already implements this functionality", "return", "func", ".", "__name__", "+", "str", "(", "inspect", ".", "signature", "(", "func", ...
Return the signature of a callable as a string. Parameters ---------- func : callable Function whose signature to extract. Returns ------- sig : string Signature of the function.
[ "Return", "the", "signature", "of", "a", "callable", "as", "a", "string", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/operator.py#L77-L113
231,702
odlgroup/odl
odl/operator/operator.py
Operator.norm
def norm(self, estimate=False, **kwargs):
    """Return the operator norm of this operator.

    If this operator is non-linear, this should be the Lipschitz
    constant.

    Parameters
    ----------
    estimate : bool
        If true, estimate the operator norm. By default, it is
        estimated using `power_method_opnorm`, which is only applicable
        for linear operators. Subclasses are allowed to ignore this
        parameter if they can provide an exact value.

    Other Parameters
    ----------------
    kwargs :
        If ``estimate`` is True, pass these arguments to the
        `power_method_opnorm` call.

    Returns
    -------
    norm : float

    Examples
    --------
    Some operators know their own operator norm and do not need an
    estimate:

    >>> spc = odl.rn(3)
    >>> id = odl.IdentityOperator(spc)
    >>> id.norm(True)
    1.0

    For others, there is no closed form expression and an estimate is
    needed:

    >>> spc = odl.uniform_discr(0, 1, 3)
    >>> grad = odl.Gradient(spc)
    >>> opnorm = grad.norm(estimate=True)
    """
    if not estimate:
        raise NotImplementedError('`Operator.norm()` not implemented, use '
                                  '`Operator.norm(estimate=True)` to '
                                  'obtain an estimate.')

    # BUG FIX: the original read the cache with
    # ``getattr(self, '__norm', None)``.  ``getattr`` does not apply
    # private-name mangling to its string argument, while the write
    # ``self.__norm = ...`` stores the value under the mangled name
    # ``_Operator__norm`` -- so the lookup always missed and the norm
    # was re-estimated on every call.  Accessing ``self.__norm``
    # directly lets the compiler mangle both accesses consistently.
    try:
        return self.__norm
    except AttributeError:
        from odl.operator.oputils import power_method_opnorm
        self.__norm = power_method_opnorm(self, **kwargs)
        return self.__norm
[ "def", "norm", "(", "self", ",", "estimate", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "estimate", ":", "raise", "NotImplementedError", "(", "'`Operator.norm()` not implemented, use '", "'`Operator.norm(estimate=True)` to '", "'obtain an estimate.'",...
Return the operator norm of this operator. If this operator is non-linear, this should be the Lipschitz constant. Parameters ---------- estimate : bool If true, estimate the operator norm. By default, it is estimated using `power_method_opnorm`, which is only applicable for linear operators. Subclasses are allowed to ignore this parameter if they can provide an exact value. Other Parameters ---------------- kwargs : If ``estimate`` is True, pass these arguments to the `power_method_opnorm` call. Returns ------- norm : float Examples -------- Some operators know their own operator norm and do not need an estimate >>> spc = odl.rn(3) >>> id = odl.IdentityOperator(spc) >>> id.norm(True) 1.0 For others, there is no closed form expression and an estimate is needed: >>> spc = odl.uniform_discr(0, 1, 3) >>> grad = odl.Gradient(spc) >>> opnorm = grad.norm(estimate=True)
[ "Return", "the", "operator", "norm", "of", "this", "operator", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/operator.py#L697-L748
231,703
odlgroup/odl
odl/operator/operator.py
OperatorSum.derivative
def derivative(self, x):
    """Return the operator derivative at ``x``.

    The derivative of a sum of two operators equals the sum of the
    derivatives of the summands.

    Parameters
    ----------
    x : `domain` `element-like`
        Evaluation point of the derivative.
    """
    # A linear operator is its own derivative.
    if self.is_linear:
        return self

    left_deriv = self.left.derivative(x)
    right_deriv = self.right.derivative(x)
    return OperatorSum(left_deriv, right_deriv,
                       self.__tmp_dom, self.__tmp_ran)
[ "def", "derivative", "(", "self", ",", "x", ")", ":", "if", "self", ".", "is_linear", ":", "return", "self", "else", ":", "return", "OperatorSum", "(", "self", ".", "left", ".", "derivative", "(", "x", ")", ",", "self", ".", "right", ".", "derivative...
Return the operator derivative at ``x``. The derivative of a sum of two operators is equal to the sum of the derivatives. Parameters ---------- x : `domain` `element-like` Evaluation point of the derivative
[ "Return", "the", "operator", "derivative", "at", "x", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/operator.py#L1162-L1178
231,704
odlgroup/odl
odl/operator/operator.py
OperatorComp.derivative
def derivative(self, x):
    """Return the operator derivative.

    The derivative of the operator composition follows the chain
    rule:

        ``OperatorComp(left, right).derivative(y) ==
        OperatorComp(left.derivative(right(y)), right.derivative(y))``

    Parameters
    ----------
    x : `domain` `element-like`
        Evaluation point of the derivative. Needs to be usable as
        input for the ``right`` operator.
    """
    # A linear composition is its own derivative.
    if self.is_linear:
        return self

    # A linear left factor is its own derivative; otherwise evaluate
    # the chain rule at ``right(x)``.
    if self.left.is_linear:
        left_deriv = self.left
    else:
        left_deriv = self.left.derivative(self.right(x))

    return OperatorComp(left_deriv, self.right.derivative(x), self.__tmp)
[ "def", "derivative", "(", "self", ",", "x", ")", ":", "if", "self", ".", "is_linear", ":", "return", "self", "else", ":", "if", "self", ".", "left", ".", "is_linear", ":", "left_deriv", "=", "self", ".", "left", "else", ":", "left_deriv", "=", "self"...
Return the operator derivative. The derivative of the operator composition follows the chain rule: ``OperatorComp(left, right).derivative(y) == OperatorComp(left.derivative(right(y)), right.derivative(y))`` Parameters ---------- x : `domain` `element-like` Evaluation point of the derivative. Needs to be usable as input for the ``right`` operator.
[ "Return", "the", "operator", "derivative", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/operator.py#L1384-L1410
231,705
odlgroup/odl
odl/space/pspace.py
_indent
def _indent(x): """Indent a string by 4 characters.""" lines = x.splitlines() for i, line in enumerate(lines): lines[i] = ' ' + line return '\n'.join(lines)
python
def _indent(x): lines = x.splitlines() for i, line in enumerate(lines): lines[i] = ' ' + line return '\n'.join(lines)
[ "def", "_indent", "(", "x", ")", ":", "lines", "=", "x", ".", "splitlines", "(", ")", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "lines", "[", "i", "]", "=", "' '", "+", "line", "return", "'\\n'", ".", "join", "(", "...
Indent a string by 4 characters.
[ "Indent", "a", "string", "by", "4", "characters", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1865-L1870
231,706
odlgroup/odl
odl/space/pspace.py
ProductSpace.shape
def shape(self):
    """Total spaces per axis, computed recursively.

    The recursion ends at the first level that does not have a shape.

    Examples
    --------
    >>> r2, r3 = odl.rn(2), odl.rn(3)
    >>> pspace = odl.ProductSpace(r2, r3)
    >>> pspace.shape
    (2,)
    >>> pspace2 = odl.ProductSpace(pspace, 3)
    >>> pspace2.shape
    (3, 2)

    If the space is a "pure" product space, shape recurses all the
    way into the components:

    >>> r2_2 = odl.ProductSpace(r2, 3)
    >>> r2_2.shape
    (3, 2)
    """
    n_spaces = len(self)
    if n_spaces == 0:
        return ()

    sub_shape = ()
    if self.is_power_space:
        # All components of a power space are equal, so the first one
        # is representative; stop recursing if it has no `shape`.
        try:
            sub_shape = self[0].shape
        except AttributeError:
            pass

    return (n_spaces,) + sub_shape
[ "def", "shape", "(", "self", ")", ":", "if", "len", "(", "self", ")", "==", "0", ":", "return", "(", ")", "elif", "self", ".", "is_power_space", ":", "try", ":", "sub_shape", "=", "self", "[", "0", "]", ".", "shape", "except", "AttributeError", ":"...
Total spaces per axis, computed recursively. The recursion ends at the fist level that does not have a shape. Examples -------- >>> r2, r3 = odl.rn(2), odl.rn(3) >>> pspace = odl.ProductSpace(r2, r3) >>> pspace.shape (2,) >>> pspace2 = odl.ProductSpace(pspace, 3) >>> pspace2.shape (3, 2) If the space is a "pure" product space, shape recurses all the way into the components: >>> r2_2 = odl.ProductSpace(r2, 3) >>> r2_2.shape (3, 2)
[ "Total", "spaces", "per", "axis", "computed", "recursively", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L290-L322
231,707
odlgroup/odl
odl/space/pspace.py
ProductSpace.dtype
def dtype(self):
    """The data type of this space.

    This is only well defined if all subspaces have the same dtype.

    Raises
    ------
    AttributeError
        If any of the subspaces does not implement `dtype` or if the
        dtype of the subspaces does not match.
    """
    sub_dtypes = [space.dtype for space in self.spaces]
    first = sub_dtypes[0]
    if any(dt != first for dt in sub_dtypes):
        raise AttributeError("`dtype`'s of subspaces not equal")
    return first
[ "def", "dtype", "(", "self", ")", ":", "dtypes", "=", "[", "space", ".", "dtype", "for", "space", "in", "self", ".", "spaces", "]", "if", "all", "(", "dtype", "==", "dtypes", "[", "0", "]", "for", "dtype", "in", "dtypes", ")", ":", "return", "dty...
The data type of this space. This is only well defined if all subspaces have the same dtype. Raises ------ AttributeError If any of the subspaces does not implement `dtype` or if the dtype of the subspaces does not match.
[ "The", "data", "type", "of", "this", "space", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L372-L388
231,708
odlgroup/odl
odl/space/pspace.py
ProductSpace.element
def element(self, inp=None, cast=True):
    """Create an element in the product space.

    Parameters
    ----------
    inp : optional
        If ``inp`` is ``None``, a new element is created from scratch
        by allocation in the spaces. If ``inp`` is already an element
        of this space, it is re-wrapped. Otherwise, a new element is
        created from the components by calling the ``element()``
        methods in the component spaces.
    cast : bool, optional
        If ``True``, casting is allowed. Otherwise, a ``TypeError``
        is raised for input that is not a sequence of elements of
        the spaces that make up this product space.

    Returns
    -------
    element : `ProductSpaceElement`
        The new element.

    Examples
    --------
    >>> r2, r3 = odl.rn(2), odl.rn(3)
    >>> prod = ProductSpace(r2, r3)
    >>> x2 = r2.element([1, 2])
    >>> x3 = r3.element([1, 2, 3])
    >>> x = prod.element([x2, x3])
    >>> x
    ProductSpace(rn(2), rn(3)).element([
        [ 1.,  2.],
        [ 1.,  2.,  3.]
    ])
    """
    # No input: allocate a fresh element in every component space.
    if inp is None:
        inp = [space.element() for space in self.spaces]

    # Elements of this very space pass through unchanged.
    if inp in self:
        return inp

    if len(inp) != len(self):
        raise ValueError('length of `inp` {} does not match length of '
                         'space {}'.format(len(inp), len(self)))

    is_elem_seq = all(isinstance(v, LinearSpaceElement) and v.space == space
                      for v, space in zip(inp, self.spaces))
    if is_elem_seq:
        parts = list(inp)
    elif cast:
        # Delegate to the component spaces' constructors.
        parts = [space.element(arg)
                 for arg, space in zip(inp, self.spaces)]
    else:
        raise TypeError('input {!r} not a sequence of elements of the '
                        'component spaces'.format(inp))

    return self.element_type(self, parts)
[ "def", "element", "(", "self", ",", "inp", "=", "None", ",", "cast", "=", "True", ")", ":", "# If data is given as keyword arg, prefer it over arg list", "if", "inp", "is", "None", ":", "inp", "=", "[", "space", ".", "element", "(", ")", "for", "space", "i...
Create an element in the product space. Parameters ---------- inp : optional If ``inp`` is ``None``, a new element is created from scratch by allocation in the spaces. If ``inp`` is already an element of this space, it is re-wrapped. Otherwise, a new element is created from the components by calling the ``element()`` methods in the component spaces. cast : bool, optional If ``True``, casting is allowed. Otherwise, a ``TypeError`` is raised for input that is not a sequence of elements of the spaces that make up this product space. Returns ------- element : `ProductSpaceElement` The new element Examples -------- >>> r2, r3 = odl.rn(2), odl.rn(3) >>> vec_2, vec_3 = r2.element(), r3.element() >>> r2x3 = ProductSpace(r2, r3) >>> vec_2x3 = r2x3.element() >>> vec_2.space == vec_2x3[0].space True >>> vec_3.space == vec_2x3[1].space True Create an element of the product space >>> r2, r3 = odl.rn(2), odl.rn(3) >>> prod = ProductSpace(r2, r3) >>> x2 = r2.element([1, 2]) >>> x3 = r3.element([1, 2, 3]) >>> x = prod.element([x2, x3]) >>> x ProductSpace(rn(2), rn(3)).element([ [ 1., 2.], [ 1., 2., 3.] ])
[ "Create", "an", "element", "in", "the", "product", "space", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L439-L507
231,709
odlgroup/odl
odl/space/pspace.py
ProductSpace.examples
def examples(self):
    """Return examples from all sub-spaces."""
    # Cartesian product of the component examples; each combination
    # yields one (name, element) pair.
    per_space = [spc.examples for spc in self.spaces]
    for combo in product(*per_space):
        names = [name for name, _ in combo]
        elems = [elem for _, elem in combo]
        yield ', '.join(names), self.element(elems)
[ "def", "examples", "(", "self", ")", ":", "for", "examples", "in", "product", "(", "*", "[", "spc", ".", "examples", "for", "spc", "in", "self", ".", "spaces", "]", ")", ":", "name", "=", "', '", ".", "join", "(", "name", "for", "name", ",", "_",...
Return examples from all sub-spaces.
[ "Return", "examples", "from", "all", "sub", "-", "spaces", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L510-L515
231,710
odlgroup/odl
odl/space/pspace.py
ProductSpaceElement.asarray
def asarray(self, out=None):
    """Extract the data of this vector as a numpy array.

    Only available if `is_power_space` is True.

    The ordering is such that it commutes with indexing::

        self[ind].asarray() == self.asarray()[ind]

    Parameters
    ----------
    out : `numpy.ndarray`, optional
        Array in which the result should be written in-place.
        Has to be contiguous and of the correct dtype and shape.

    Raises
    ------
    ValueError
        If `is_power_space` is false.

    Examples
    --------
    >>> spc = odl.ProductSpace(odl.rn(3), 2)
    >>> x = spc.element([[ 1.,  2.,  3.],
    ...                  [ 4.,  5.,  6.]])
    >>> x.asarray()
    array([[ 1.,  2.,  3.],
           [ 4.,  5.,  6.]])
    """
    if not self.space.is_power_space:
        raise ValueError('cannot use `asarray` if `space.is_power_space` '
                         'is `False`')

    if out is None:
        out = np.empty(self.shape, self.dtype)
    # Copy part by part; each part converts itself to an array.
    for idx in range(len(self)):
        out[idx] = np.asarray(self[idx])
    return out
[ "def", "asarray", "(", "self", ",", "out", "=", "None", ")", ":", "if", "not", "self", ".", "space", ".", "is_power_space", ":", "raise", "ValueError", "(", "'cannot use `asarray` if `space.is_power_space` '", "'is `False`'", ")", "else", ":", "if", "out", "is...
Extract the data of this vector as a numpy array. Only available if `is_power_space` is True. The ordering is such that it commutes with indexing:: self[ind].asarray() == self.asarray()[ind] Parameters ---------- out : `numpy.ndarray`, optional Array in which the result should be written in-place. Has to be contiguous and of the correct dtype and shape. Raises ------ ValueError If `is_power_space` is false. Examples -------- >>> spc = odl.ProductSpace(odl.rn(3), 2) >>> x = spc.element([[ 1., 2., 3.], ... [ 4., 5., 6.]]) >>> x.asarray() array([[ 1., 2., 3.], [ 4., 5., 6.]])
[ "Extract", "the", "data", "of", "this", "vector", "as", "a", "numpy", "array", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1002-L1041
231,711
odlgroup/odl
odl/space/pspace.py
ProductSpaceElement.real
def real(self):
    """Real part of the element.

    The real part can also be set using ``x.real = other``, where
    ``other`` is array-like or scalar.

    Examples
    --------
    >>> space = odl.ProductSpace(odl.cn(3), odl.cn(2))
    >>> x = space.element([[1 + 1j, 2, 3 - 3j],
    ...                    [-1 + 2j, -2 - 3j]])
    >>> x.real
    ProductSpace(rn(3), rn(2)).element([
        [ 1.,  2.,  3.],
        [-1., -2.]
    ])
    """
    # Collect the real parts componentwise and wrap them in the
    # corresponding real space.
    return self.space.real_space.element([p.real for p in self.parts])
[ "def", "real", "(", "self", ")", ":", "real_part", "=", "[", "part", ".", "real", "for", "part", "in", "self", ".", "parts", "]", "return", "self", ".", "space", ".", "real_space", ".", "element", "(", "real_part", ")" ]
Real part of the element. The real part can also be set using ``x.real = other``, where ``other`` is array-like or scalar. Examples -------- >>> space = odl.ProductSpace(odl.cn(3), odl.cn(2)) >>> x = space.element([[1 + 1j, 2, 3 - 3j], ... [-1 + 2j, -2 - 3j]]) >>> x.real ProductSpace(rn(3), rn(2)).element([ [ 1., 2., 3.], [-1., -2.] ]) The real part can also be set using different array-like types: >>> x.real = space.real_space.zero() >>> x ProductSpace(cn(3), cn(2)).element([ [ 0.+1.j, 0.+0.j, 0.-3.j], [ 0.+2.j, 0.-3.j] ]) >>> x.real = 1.0 >>> x ProductSpace(cn(3), cn(2)).element([ [ 1.+1.j, 1.+0.j, 1.-3.j], [ 1.+2.j, 1.-3.j] ]) >>> x.real = [[2, 3, 4], [5, 6]] >>> x ProductSpace(cn(3), cn(2)).element([ [ 2.+1.j, 3.+0.j, 4.-3.j], [ 5.+2.j, 6.-3.j] ])
[ "Real", "part", "of", "the", "element", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1144-L1185
231,712
odlgroup/odl
odl/space/pspace.py
ProductSpaceElement.real
def real(self, newreal):
    """Setter for the real part.

    This method is invoked by ``x.real = other``.

    Parameters
    ----------
    newreal : array-like or scalar
        Values to be assigned to the real part of this element.
    """
    # Scalar (non-iterable) input: broadcast to every part.
    try:
        iter(newreal)
    except TypeError:
        for part in self.parts:
            part.real = newreal
        return

    if self.space.is_power_space:
        try:
            # First attempt: assign the whole value to each part
            # (works when `newreal` matches one part's shape).
            for part in self.parts:
                part.real = newreal
        except (ValueError, TypeError):
            # Fallback: one entry of `newreal` per part.
            for part, new_re in zip(self.parts, newreal):
                part.real = new_re
    elif len(newreal) == len(self):
        for part, new_re in zip(self.parts, newreal):
            part.real = new_re
    else:
        raise ValueError(
            'dimensions of the new real part does not match the space, '
            'got element {} to set real part of {}'.format(newreal, self))
[ "def", "real", "(", "self", ",", "newreal", ")", ":", "try", ":", "iter", "(", "newreal", ")", "except", "TypeError", ":", "# `newreal` is not iterable, assume it can be assigned to", "# all indexed parts", "for", "part", "in", "self", ".", "parts", ":", "part", ...
Setter for the real part. This method is invoked by ``x.real = other``. Parameters ---------- newreal : array-like or scalar Values to be assigned to the real part of this element.
[ "Setter", "for", "the", "real", "part", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1188-L1223
231,713
odlgroup/odl
odl/space/pspace.py
ProductSpaceElement.imag
def imag(self):
    """Imaginary part of the element.

    The imaginary part can also be set using ``x.imag = other``,
    where ``other`` is array-like or scalar.

    Examples
    --------
    >>> space = odl.ProductSpace(odl.cn(3), odl.cn(2))
    >>> x = space.element([[1 + 1j, 2, 3 - 3j],
    ...                    [-1 + 2j, -2 - 3j]])
    >>> x.imag
    ProductSpace(rn(3), rn(2)).element([
        [ 1.,  0., -3.],
        [ 2., -3.]
    ])
    """
    # Collect the imaginary parts componentwise and wrap them in the
    # corresponding real space.
    return self.space.real_space.element([p.imag for p in self.parts])
[ "def", "imag", "(", "self", ")", ":", "imag_part", "=", "[", "part", ".", "imag", "for", "part", "in", "self", ".", "parts", "]", "return", "self", ".", "space", ".", "real_space", ".", "element", "(", "imag_part", ")" ]
Imaginary part of the element. The imaginary part can also be set using ``x.imag = other``, where ``other`` is array-like or scalar. Examples -------- >>> space = odl.ProductSpace(odl.cn(3), odl.cn(2)) >>> x = space.element([[1 + 1j, 2, 3 - 3j], ... [-1 + 2j, -2 - 3j]]) >>> x.imag ProductSpace(rn(3), rn(2)).element([ [ 1., 0., -3.], [ 2., -3.] ]) The imaginary part can also be set using different array-like types: >>> x.imag = space.real_space.zero() >>> x ProductSpace(cn(3), cn(2)).element([ [ 1.+0.j, 2.+0.j, 3.+0.j], [-1.+0.j, -2.+0.j] ]) >>> x.imag = 1.0 >>> x ProductSpace(cn(3), cn(2)).element([ [ 1.+1.j, 2.+1.j, 3.+1.j], [-1.+1.j, -2.+1.j] ]) >>> x.imag = [[2, 3, 4], [5, 6]] >>> x ProductSpace(cn(3), cn(2)).element([ [ 1.+2.j, 2.+3.j, 3.+4.j], [-1.+5.j, -2.+6.j] ])
[ "Imaginary", "part", "of", "the", "element", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1226-L1268
231,714
odlgroup/odl
odl/space/pspace.py
ProductSpaceElement.conj
def conj(self):
    """Complex conjugate of the element."""
    # Conjugate componentwise and re-wrap in the same space.
    return self.space.element([part.conj() for part in self.parts])
[ "def", "conj", "(", "self", ")", ":", "complex_conj", "=", "[", "part", ".", "conj", "(", ")", "for", "part", "in", "self", ".", "parts", "]", "return", "self", ".", "space", ".", "element", "(", "complex_conj", ")" ]
Complex conjugate of the element.
[ "Complex", "conjugate", "of", "the", "element", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1309-L1312
231,715
odlgroup/odl
odl/space/pspace.py
ProductSpaceElement.show
def show(self, title=None, indices=None, **kwargs):
    """Display the parts of this product space element graphically.

    Parameters
    ----------
    title : string, optional
        Title of the figures.
    indices : int, slice, tuple or list, optional
        Display parts of ``self`` in the way described in the
        following.

        A single list of integers selects the corresponding parts of
        this vector. For other tuples or lists, the first entry
        indexes the parts of this vector, and the remaining entries
        (if any) are used to slice into the parts. Handling those
        remaining indices is up to the ``show`` methods of the parts
        to be displayed.

        The types of the first entry trigger the following behaviors:

        - ``int``: take the part corresponding to this index
        - ``slice``: take a subset of the parts
        - ``None``: equivalent to ``slice(None)``, i.e., everything

        Typical use cases are displaying of selected parts, which can
        be achieved with a list, e.g., ``indices=[0, 2]`` for parts 0
        and 2, and plotting of all parts sliced in a certain way,
        e.g., ``indices=[None, 20, None]`` for showing all parts
        sliced with indices ``[20, None]``.

        A single ``int``, ``slice``, ``list`` or ``None`` object
        indexes the parts only, i.e., is treated roughly as
        ``(indices, Ellipsis)``. In particular, for ``None``, all
        parts are shown with default slicing.
    in_figs : sequence of `matplotlib.figure.Figure`, optional
        Update these figures instead of creating new ones. Typically
        the return value of an earlier call to ``show`` is used for
        this parameter.
    kwargs
        Additional arguments passed on to the ``show`` methods of the
        parts.

    Returns
    -------
    figs : tuple of `matplotlib.figure.Figure`
        The resulting figures. In an interactive shell, they are
        automatically displayed.

    See Also
    --------
    odl.discr.lp_discr.DiscreteLpElement.show :
        Display of a discretized function
    odl.space.base_tensors.Tensor.show :
        Display of sequence type data
    odl.util.graphics.show_discrete_data :
        Underlying implementation
    """
    if title is None:
        title = 'ProductSpaceElement'

    if indices is None:
        # Default: show all parts for small vectors, otherwise 4
        # evenly spaced ones.
        if len(self) < 5:
            indices = list(range(len(self)))
        else:
            indices = list(np.linspace(0, len(self) - 1, 4, dtype=int))
    else:
        if (isinstance(indices, tuple) or
                (isinstance(indices, list) and
                 not all(isinstance(idx, Integral) for idx in indices))):
            # Tuples or lists containing non-integers index by axis:
            # the first entry indexes this pspace, the rest is passed
            # on to the parts' `show` methods.
            indices, kwargs['indices'] = indices[0], indices[1:]

        # `indices=[None, 0, None]`-style syntax leaves `None` as the
        # first entry; treat it as "all parts".
        if indices is None:
            indices = slice(None)

        if isinstance(indices, slice):
            indices = list(range(*indices.indices(len(self))))
        elif isinstance(indices, Integral):
            indices = [indices]
        # Otherwise `indices` is used as-is.

    in_figs = kwargs.pop('fig', None)
    if in_figs is None:
        in_figs = [None] * len(indices)

    parts = self[indices]
    if len(parts) == 0:
        return ()

    if len(parts) == 1:
        # A single plot keeps the plain title.
        return (parts[0].show(title=title, fig=in_figs[0], **kwargs),)

    # Extend titles by the indexed part to make them distinguishable.
    figs = []
    for i, part, fig in zip(indices, parts, in_figs):
        figs.append(part.show(title='{}. Part {}'.format(title, i),
                              fig=fig, **kwargs))
    return tuple(figs)
[ "def", "show", "(", "self", ",", "title", "=", "None", ",", "indices", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "title", "is", "None", ":", "title", "=", "'ProductSpaceElement'", "if", "indices", "is", "None", ":", "if", "len", "(", "s...
Display the parts of this product space element graphically. Parameters ---------- title : string, optional Title of the figures indices : int, slice, tuple or list, optional Display parts of ``self`` in the way described in the following. A single list of integers selects the corresponding parts of this vector. For other tuples or lists, the first entry indexes the parts of this vector, and the remaining entries (if any) are used to slice into the parts. Handling those remaining indices is up to the ``show`` methods of the parts to be displayed. The types of the first entry trigger the following behaviors: - ``int``: take the part corresponding to this index - ``slice``: take a subset of the parts - ``None``: equivalent to ``slice(None)``, i.e., everything Typical use cases are displaying of selected parts, which can be achieved with a list, e.g., ``indices=[0, 2]`` for parts 0 and 2, and plotting of all parts sliced in a certain way, e.g., ``indices=[None, 20, None]`` for showing all parts sliced with indices ``[20, None]``. A single ``int``, ``slice``, ``list`` or ``None`` object indexes the parts only, i.e., is treated roughly as ``(indices, Ellipsis)``. In particular, for ``None``, all parts are shown with default slicing. in_figs : sequence of `matplotlib.figure.Figure`, optional Update these figures instead of creating new ones. Typically the return value of an earlier call to ``show`` is used for this parameter. kwargs Additional arguments passed on to the ``show`` methods of the parts. Returns ------- figs : tuple of `matplotlib.figure.Figure` The resulting figures. In an interactive shell, they are automatically displayed. See Also -------- odl.discr.lp_discr.DiscreteLpElement.show : Display of a discretized function odl.space.base_tensors.Tensor.show : Display of sequence type data odl.util.graphics.show_discrete_data : Underlying implementation
[ "Display", "the", "parts", "of", "this", "product", "space", "element", "graphically", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1371-L1479
231,716
odlgroup/odl
odl/space/pspace.py
ProductSpaceArrayWeighting.inner
def inner(self, x1, x2): """Calculate the array-weighted inner product of two elements. Parameters ---------- x1, x2 : `ProductSpaceElement` Elements whose inner product is calculated. Returns ------- inner : float or complex The inner product of the two provided elements. """ if self.exponent != 2.0: raise NotImplementedError('no inner product defined for ' 'exponent != 2 (got {})' ''.format(self.exponent)) inners = np.fromiter( (x1i.inner(x2i) for x1i, x2i in zip(x1, x2)), dtype=x1[0].space.dtype, count=len(x1)) inner = np.dot(inners, self.array) if is_real_dtype(x1[0].dtype): return float(inner) else: return complex(inner)
python
def inner(self, x1, x2): if self.exponent != 2.0: raise NotImplementedError('no inner product defined for ' 'exponent != 2 (got {})' ''.format(self.exponent)) inners = np.fromiter( (x1i.inner(x2i) for x1i, x2i in zip(x1, x2)), dtype=x1[0].space.dtype, count=len(x1)) inner = np.dot(inners, self.array) if is_real_dtype(x1[0].dtype): return float(inner) else: return complex(inner)
[ "def", "inner", "(", "self", ",", "x1", ",", "x2", ")", ":", "if", "self", ".", "exponent", "!=", "2.0", ":", "raise", "NotImplementedError", "(", "'no inner product defined for '", "'exponent != 2 (got {})'", "''", ".", "format", "(", "self", ".", "exponent",...
Calculate the array-weighted inner product of two elements. Parameters ---------- x1, x2 : `ProductSpaceElement` Elements whose inner product is calculated. Returns ------- inner : float or complex The inner product of the two provided elements.
[ "Calculate", "the", "array", "-", "weighted", "inner", "product", "of", "two", "elements", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1596-L1622
231,717
odlgroup/odl
odl/space/pspace.py
ProductSpaceArrayWeighting.norm
def norm(self, x): """Calculate the array-weighted norm of an element. Parameters ---------- x : `ProductSpaceElement` Element whose norm is calculated. Returns ------- norm : float The norm of the provided element. """ if self.exponent == 2.0: norm_squared = self.inner(x, x).real # TODO: optimize?! return np.sqrt(norm_squared) else: norms = np.fromiter( (xi.norm() for xi in x), dtype=np.float64, count=len(x)) if self.exponent in (1.0, float('inf')): norms *= self.array else: norms *= self.array ** (1.0 / self.exponent) return float(np.linalg.norm(norms, ord=self.exponent))
python
def norm(self, x): if self.exponent == 2.0: norm_squared = self.inner(x, x).real # TODO: optimize?! return np.sqrt(norm_squared) else: norms = np.fromiter( (xi.norm() for xi in x), dtype=np.float64, count=len(x)) if self.exponent in (1.0, float('inf')): norms *= self.array else: norms *= self.array ** (1.0 / self.exponent) return float(np.linalg.norm(norms, ord=self.exponent))
[ "def", "norm", "(", "self", ",", "x", ")", ":", "if", "self", ".", "exponent", "==", "2.0", ":", "norm_squared", "=", "self", ".", "inner", "(", "x", ",", "x", ")", ".", "real", "# TODO: optimize?!", "return", "np", ".", "sqrt", "(", "norm_squared", ...
Calculate the array-weighted norm of an element. Parameters ---------- x : `ProductSpaceElement` Element whose norm is calculated. Returns ------- norm : float The norm of the provided element.
[ "Calculate", "the", "array", "-", "weighted", "norm", "of", "an", "element", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1624-L1648
231,718
odlgroup/odl
odl/space/pspace.py
ProductSpaceConstWeighting.inner
def inner(self, x1, x2): """Calculate the constant-weighted inner product of two elements. Parameters ---------- x1, x2 : `ProductSpaceElement` Elements whose inner product is calculated. Returns ------- inner : float or complex The inner product of the two provided elements. """ if self.exponent != 2.0: raise NotImplementedError('no inner product defined for ' 'exponent != 2 (got {})' ''.format(self.exponent)) inners = np.fromiter( (x1i.inner(x2i) for x1i, x2i in zip(x1, x2)), dtype=x1[0].space.dtype, count=len(x1)) inner = self.const * np.sum(inners) return x1.space.field.element(inner)
python
def inner(self, x1, x2): if self.exponent != 2.0: raise NotImplementedError('no inner product defined for ' 'exponent != 2 (got {})' ''.format(self.exponent)) inners = np.fromiter( (x1i.inner(x2i) for x1i, x2i in zip(x1, x2)), dtype=x1[0].space.dtype, count=len(x1)) inner = self.const * np.sum(inners) return x1.space.field.element(inner)
[ "def", "inner", "(", "self", ",", "x1", ",", "x2", ")", ":", "if", "self", ".", "exponent", "!=", "2.0", ":", "raise", "NotImplementedError", "(", "'no inner product defined for '", "'exponent != 2 (got {})'", "''", ".", "format", "(", "self", ".", "exponent",...
Calculate the constant-weighted inner product of two elements. Parameters ---------- x1, x2 : `ProductSpaceElement` Elements whose inner product is calculated. Returns ------- inner : float or complex The inner product of the two provided elements.
[ "Calculate", "the", "constant", "-", "weighted", "inner", "product", "of", "two", "elements", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1704-L1727
231,719
odlgroup/odl
odl/space/pspace.py
ProductSpaceConstWeighting.dist
def dist(self, x1, x2): """Calculate the constant-weighted distance between two elements. Parameters ---------- x1, x2 : `ProductSpaceElement` Elements whose mutual distance is calculated. Returns ------- dist : float The distance between the elements. """ dnorms = np.fromiter( ((x1i - x2i).norm() for x1i, x2i in zip(x1, x2)), dtype=np.float64, count=len(x1)) if self.exponent == float('inf'): return self.const * np.linalg.norm(dnorms, ord=self.exponent) else: return (self.const ** (1 / self.exponent) * np.linalg.norm(dnorms, ord=self.exponent))
python
def dist(self, x1, x2): dnorms = np.fromiter( ((x1i - x2i).norm() for x1i, x2i in zip(x1, x2)), dtype=np.float64, count=len(x1)) if self.exponent == float('inf'): return self.const * np.linalg.norm(dnorms, ord=self.exponent) else: return (self.const ** (1 / self.exponent) * np.linalg.norm(dnorms, ord=self.exponent))
[ "def", "dist", "(", "self", ",", "x1", ",", "x2", ")", ":", "dnorms", "=", "np", ".", "fromiter", "(", "(", "(", "x1i", "-", "x2i", ")", ".", "norm", "(", ")", "for", "x1i", ",", "x2i", "in", "zip", "(", "x1", ",", "x2", ")", ")", ",", "d...
Calculate the constant-weighted distance between two elements. Parameters ---------- x1, x2 : `ProductSpaceElement` Elements whose mutual distance is calculated. Returns ------- dist : float The distance between the elements.
[ "Calculate", "the", "constant", "-", "weighted", "distance", "between", "two", "elements", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L1756-L1777
231,720
odlgroup/odl
odl/tomo/util/utility.py
euler_matrix
def euler_matrix(phi, theta=None, psi=None): """Rotation matrix in 2 and 3 dimensions. Its rows represent the canonical unit vectors as seen from the rotated system while the columns are the rotated unit vectors as seen from the canonical system. Parameters ---------- phi : float or `array-like` Either 2D counter-clockwise rotation angle (in radians) or first Euler angle. theta, psi : float or `array-like`, optional Second and third Euler angles in radians. If both are ``None``, a 2D rotation matrix is computed. Otherwise a 3D rotation is computed, where the default ``None`` is equivalent to ``0.0``. The rotation is performed in "ZXZ" rotation order, see the Wikipedia article `Euler angles`_. Returns ------- mat : `numpy.ndarray` Rotation matrix corresponding to the given angles. The returned array has shape ``(ndim, ndim)`` if all angles represent single parameters, with ``ndim == 2`` for ``phi`` only and ``ndim == 3`` for 2 or 3 Euler angles. If any of the angle parameters is an array, the shape of the returned array is ``broadcast(phi, theta, psi).shape + (ndim, ndim)``. References ---------- .. 
_Euler angles: https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix """ if theta is None and psi is None: squeeze_out = (np.shape(phi) == ()) ndim = 2 phi = np.array(phi, dtype=float, copy=False, ndmin=1) theta = psi = 0.0 else: # `None` broadcasts like a scalar squeeze_out = (np.broadcast(phi, theta, psi).shape == ()) ndim = 3 phi = np.array(phi, dtype=float, copy=False, ndmin=1) if theta is None: theta = 0.0 if psi is None: psi = 0.0 theta = np.array(theta, dtype=float, copy=False, ndmin=1) psi = np.array(psi, dtype=float, copy=False, ndmin=1) ndim = 3 cph = np.cos(phi) sph = np.sin(phi) cth = np.cos(theta) sth = np.sin(theta) cps = np.cos(psi) sps = np.sin(psi) if ndim == 2: mat = np.array([[cph, -sph], [sph, cph]]) else: mat = np.array([ [cph * cps - sph * cth * sps, -cph * sps - sph * cth * cps, sph * sth], [sph * cps + cph * cth * sps, -sph * sps + cph * cth * cps, -cph * sth], [sth * sps + 0 * cph, sth * cps + 0 * cph, cth + 0 * (cph + cps)]]) # Make sure all components broadcast if squeeze_out: return mat.squeeze() else: # Move the `(ndim, ndim)` axes to the end extra_dims = len(np.broadcast(phi, theta, psi).shape) newaxes = list(range(2, 2 + extra_dims)) + [0, 1] return np.transpose(mat, newaxes)
python
def euler_matrix(phi, theta=None, psi=None): if theta is None and psi is None: squeeze_out = (np.shape(phi) == ()) ndim = 2 phi = np.array(phi, dtype=float, copy=False, ndmin=1) theta = psi = 0.0 else: # `None` broadcasts like a scalar squeeze_out = (np.broadcast(phi, theta, psi).shape == ()) ndim = 3 phi = np.array(phi, dtype=float, copy=False, ndmin=1) if theta is None: theta = 0.0 if psi is None: psi = 0.0 theta = np.array(theta, dtype=float, copy=False, ndmin=1) psi = np.array(psi, dtype=float, copy=False, ndmin=1) ndim = 3 cph = np.cos(phi) sph = np.sin(phi) cth = np.cos(theta) sth = np.sin(theta) cps = np.cos(psi) sps = np.sin(psi) if ndim == 2: mat = np.array([[cph, -sph], [sph, cph]]) else: mat = np.array([ [cph * cps - sph * cth * sps, -cph * sps - sph * cth * cps, sph * sth], [sph * cps + cph * cth * sps, -sph * sps + cph * cth * cps, -cph * sth], [sth * sps + 0 * cph, sth * cps + 0 * cph, cth + 0 * (cph + cps)]]) # Make sure all components broadcast if squeeze_out: return mat.squeeze() else: # Move the `(ndim, ndim)` axes to the end extra_dims = len(np.broadcast(phi, theta, psi).shape) newaxes = list(range(2, 2 + extra_dims)) + [0, 1] return np.transpose(mat, newaxes)
[ "def", "euler_matrix", "(", "phi", ",", "theta", "=", "None", ",", "psi", "=", "None", ")", ":", "if", "theta", "is", "None", "and", "psi", "is", "None", ":", "squeeze_out", "=", "(", "np", ".", "shape", "(", "phi", ")", "==", "(", ")", ")", "n...
Rotation matrix in 2 and 3 dimensions. Its rows represent the canonical unit vectors as seen from the rotated system while the columns are the rotated unit vectors as seen from the canonical system. Parameters ---------- phi : float or `array-like` Either 2D counter-clockwise rotation angle (in radians) or first Euler angle. theta, psi : float or `array-like`, optional Second and third Euler angles in radians. If both are ``None``, a 2D rotation matrix is computed. Otherwise a 3D rotation is computed, where the default ``None`` is equivalent to ``0.0``. The rotation is performed in "ZXZ" rotation order, see the Wikipedia article `Euler angles`_. Returns ------- mat : `numpy.ndarray` Rotation matrix corresponding to the given angles. The returned array has shape ``(ndim, ndim)`` if all angles represent single parameters, with ``ndim == 2`` for ``phi`` only and ``ndim == 3`` for 2 or 3 Euler angles. If any of the angle parameters is an array, the shape of the returned array is ``broadcast(phi, theta, psi).shape + (ndim, ndim)``. References ---------- .. _Euler angles: https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix
[ "Rotation", "matrix", "in", "2", "and", "3", "dimensions", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/util/utility.py#L17-L97
231,721
odlgroup/odl
odl/tomo/util/utility.py
axis_rotation
def axis_rotation(axis, angle, vectors, axis_shift=(0, 0, 0)): """Rotate a vector or an array of vectors around an axis in 3d. The rotation is computed by `Rodrigues' rotation formula`_. Parameters ---------- axis : `array-like`, shape ``(3,)`` Rotation axis, assumed to be a unit vector. angle : float Angle of the counter-clockwise rotation. vectors : `array-like`, shape ``(3,)`` or ``(N, 3)`` The vector(s) to be rotated. axis_shift : `array_like`, shape ``(3,)``, optional Shift the rotation center by this vector. Note that only shifts perpendicular to ``axis`` matter. Returns ------- rot_vec : `numpy.ndarray` The rotated vector(s). References ---------- .. _Rodrigues' rotation formula: https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula Examples -------- Rotating around the third coordinate axis by and angle of 90 degrees: >>> axis = (0, 0, 1) >>> rot1 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0)) >>> np.allclose(rot1, (0, 1, 0)) True >>> rot2 = axis_rotation(axis, angle=np.pi / 2, vectors=(0, 1, 0)) >>> np.allclose(rot2, (-1, 0, 0)) True The rotation can be performed with shifted rotation center. A shift along the axis does not matter: >>> rot3 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0), ... axis_shift=(0, 0, 2)) >>> np.allclose(rot3, (0, 1, 0)) True The distance between the rotation center and the vector to be rotated determines the radius of the rotation circle: >>> # Rotation center in the point to be rotated, should do nothing >>> rot4 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0), ... axis_shift=(1, 0, 0)) >>> np.allclose(rot4, (1, 0, 0)) True >>> # Distance 2, thus rotates to (0, 2, 0) in the shifted system, >>> # resulting in (-1, 2, 0) from shifting back after rotating >>> rot5 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0), ... 
axis_shift=(-1, 0, 0)) >>> np.allclose(rot5, (-1, 2, 0)) True Rotation of multiple vectors can be done in bulk: >>> vectors = [[1, 0, 0], [0, 1, 0]] >>> rot = axis_rotation(axis, angle=np.pi / 2, vectors=vectors) >>> np.allclose(rot[0], (0, 1, 0)) True >>> np.allclose(rot[1], (-1, 0, 0)) True """ rot_matrix = axis_rotation_matrix(axis, angle) vectors = np.asarray(vectors, dtype=float) if vectors.shape == (3,): vectors = vectors[None, :] elif vectors.ndim == 2 and vectors.shape[1] == 3: pass else: raise ValueError('`vectors` must have shape (3,) or (N, 3), got array ' 'with shape {}'.format(vectors.shape)) # Get `axis_shift` part that is perpendicular to `axis` axis_shift = np.asarray(axis_shift, dtype=float) axis = np.asarray(axis, dtype=float) axis_shift = axis_shift - axis.dot(axis_shift) * axis # Shift vectors with the negative of the axis shift to move the rotation # center to the origin. Then rotate and shift back. centered_vecs = vectors - axis_shift[None, :] # Need to transpose the vectors to make the axis of length 3 come first rot_vecs = rot_matrix.dot(centered_vecs.T).T return axis_shift[None, :] + rot_vecs
python
def axis_rotation(axis, angle, vectors, axis_shift=(0, 0, 0)): rot_matrix = axis_rotation_matrix(axis, angle) vectors = np.asarray(vectors, dtype=float) if vectors.shape == (3,): vectors = vectors[None, :] elif vectors.ndim == 2 and vectors.shape[1] == 3: pass else: raise ValueError('`vectors` must have shape (3,) or (N, 3), got array ' 'with shape {}'.format(vectors.shape)) # Get `axis_shift` part that is perpendicular to `axis` axis_shift = np.asarray(axis_shift, dtype=float) axis = np.asarray(axis, dtype=float) axis_shift = axis_shift - axis.dot(axis_shift) * axis # Shift vectors with the negative of the axis shift to move the rotation # center to the origin. Then rotate and shift back. centered_vecs = vectors - axis_shift[None, :] # Need to transpose the vectors to make the axis of length 3 come first rot_vecs = rot_matrix.dot(centered_vecs.T).T return axis_shift[None, :] + rot_vecs
[ "def", "axis_rotation", "(", "axis", ",", "angle", ",", "vectors", ",", "axis_shift", "=", "(", "0", ",", "0", ",", "0", ")", ")", ":", "rot_matrix", "=", "axis_rotation_matrix", "(", "axis", ",", "angle", ")", "vectors", "=", "np", ".", "asarray", "...
Rotate a vector or an array of vectors around an axis in 3d. The rotation is computed by `Rodrigues' rotation formula`_. Parameters ---------- axis : `array-like`, shape ``(3,)`` Rotation axis, assumed to be a unit vector. angle : float Angle of the counter-clockwise rotation. vectors : `array-like`, shape ``(3,)`` or ``(N, 3)`` The vector(s) to be rotated. axis_shift : `array_like`, shape ``(3,)``, optional Shift the rotation center by this vector. Note that only shifts perpendicular to ``axis`` matter. Returns ------- rot_vec : `numpy.ndarray` The rotated vector(s). References ---------- .. _Rodrigues' rotation formula: https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula Examples -------- Rotating around the third coordinate axis by and angle of 90 degrees: >>> axis = (0, 0, 1) >>> rot1 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0)) >>> np.allclose(rot1, (0, 1, 0)) True >>> rot2 = axis_rotation(axis, angle=np.pi / 2, vectors=(0, 1, 0)) >>> np.allclose(rot2, (-1, 0, 0)) True The rotation can be performed with shifted rotation center. A shift along the axis does not matter: >>> rot3 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0), ... axis_shift=(0, 0, 2)) >>> np.allclose(rot3, (0, 1, 0)) True The distance between the rotation center and the vector to be rotated determines the radius of the rotation circle: >>> # Rotation center in the point to be rotated, should do nothing >>> rot4 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0), ... axis_shift=(1, 0, 0)) >>> np.allclose(rot4, (1, 0, 0)) True >>> # Distance 2, thus rotates to (0, 2, 0) in the shifted system, >>> # resulting in (-1, 2, 0) from shifting back after rotating >>> rot5 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0), ... 
axis_shift=(-1, 0, 0)) >>> np.allclose(rot5, (-1, 2, 0)) True Rotation of multiple vectors can be done in bulk: >>> vectors = [[1, 0, 0], [0, 1, 0]] >>> rot = axis_rotation(axis, angle=np.pi / 2, vectors=vectors) >>> np.allclose(rot[0], (0, 1, 0)) True >>> np.allclose(rot[1], (-1, 0, 0)) True
[ "Rotate", "a", "vector", "or", "an", "array", "of", "vectors", "around", "an", "axis", "in", "3d", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/util/utility.py#L100-L191
231,722
odlgroup/odl
odl/tomo/util/utility.py
axis_rotation_matrix
def axis_rotation_matrix(axis, angle): """Matrix of the rotation around an axis in 3d. The matrix is computed according to `Rodriguez' rotation formula`_. Parameters ---------- axis : `array-like`, shape ``(3,)`` Rotation axis, assumed to be a unit vector. angle : float or `array-like` Angle(s) of counter-clockwise rotation. Returns ------- mat : `numpy.ndarray`, shape ``(3, 3)`` The axis rotation matrix. References ---------- .. _Rodriguez' rotation formula: https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula """ scalar_out = (np.shape(angle) == ()) axis = np.asarray(axis) if axis.shape != (3,): raise ValueError('`axis` shape must be (3,), got {}' ''.format(axis.shape)) angle = np.array(angle, dtype=float, copy=False, ndmin=1) cross_mat = np.array([[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]], [-axis[1], axis[0], 0]]) dy_mat = np.outer(axis, axis) id_mat = np.eye(3) cos_ang = np.cos(angle) sin_ang = np.sin(angle) # Add extra dimensions for broadcasting extra_dims = cos_ang.ndim mat_slc = (None,) * extra_dims + (slice(None), slice(None)) ang_slc = (slice(None),) * extra_dims + (None, None) # Matrices will have shape (1, ..., 1, ndim, ndim) cross_mat = cross_mat[mat_slc] dy_mat = dy_mat[mat_slc] id_mat = id_mat[mat_slc] # Angle arrays will have shape (..., 1, 1) cos_ang = cos_ang[ang_slc] sin_ang = sin_ang[ang_slc] axis_mat = cos_ang * id_mat + (1. - cos_ang) * dy_mat + sin_ang * cross_mat if scalar_out: return axis_mat.squeeze() else: return axis_mat
python
def axis_rotation_matrix(axis, angle): scalar_out = (np.shape(angle) == ()) axis = np.asarray(axis) if axis.shape != (3,): raise ValueError('`axis` shape must be (3,), got {}' ''.format(axis.shape)) angle = np.array(angle, dtype=float, copy=False, ndmin=1) cross_mat = np.array([[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]], [-axis[1], axis[0], 0]]) dy_mat = np.outer(axis, axis) id_mat = np.eye(3) cos_ang = np.cos(angle) sin_ang = np.sin(angle) # Add extra dimensions for broadcasting extra_dims = cos_ang.ndim mat_slc = (None,) * extra_dims + (slice(None), slice(None)) ang_slc = (slice(None),) * extra_dims + (None, None) # Matrices will have shape (1, ..., 1, ndim, ndim) cross_mat = cross_mat[mat_slc] dy_mat = dy_mat[mat_slc] id_mat = id_mat[mat_slc] # Angle arrays will have shape (..., 1, 1) cos_ang = cos_ang[ang_slc] sin_ang = sin_ang[ang_slc] axis_mat = cos_ang * id_mat + (1. - cos_ang) * dy_mat + sin_ang * cross_mat if scalar_out: return axis_mat.squeeze() else: return axis_mat
[ "def", "axis_rotation_matrix", "(", "axis", ",", "angle", ")", ":", "scalar_out", "=", "(", "np", ".", "shape", "(", "angle", ")", "==", "(", ")", ")", "axis", "=", "np", ".", "asarray", "(", "axis", ")", "if", "axis", ".", "shape", "!=", "(", "3...
Matrix of the rotation around an axis in 3d. The matrix is computed according to `Rodriguez' rotation formula`_. Parameters ---------- axis : `array-like`, shape ``(3,)`` Rotation axis, assumed to be a unit vector. angle : float or `array-like` Angle(s) of counter-clockwise rotation. Returns ------- mat : `numpy.ndarray`, shape ``(3, 3)`` The axis rotation matrix. References ---------- .. _Rodriguez' rotation formula: https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula
[ "Matrix", "of", "the", "rotation", "around", "an", "axis", "in", "3d", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/util/utility.py#L194-L248
231,723
odlgroup/odl
odl/tomo/util/utility.py
rotation_matrix_from_to
def rotation_matrix_from_to(from_vec, to_vec): r"""Return a matrix that rotates ``from_vec`` to ``to_vec`` in 2d or 3d. Since a rotation from one vector to another in 3 dimensions has (at least) one degree of freedom, this function makes deliberate but still arbitrary choices to fix these free parameters. See Notes for details. For the applied formula in 3d, see `this Wikipedia page about Rodrigues' rotation formula <https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula>`_. Parameters ---------- from_vec, to_vec : `array-like`, shape ``(2,)`` or ``(3,)`` Vectors between which the returned matrix rotates. They should not be very close to zero or collinear. Returns ------- matrix : `numpy.ndarray`, shape ``(2, 2)`` or ``(3, 3)`` A matrix rotating ``from_vec`` to ``to_vec``. Note that the matrix does *not* include scaling, i.e. it is not guaranteed that ``matrix.dot(from_vec) == to_vec``. Examples -------- In two dimensions, rotation is simple: >>> from_vec, to_vec = [1, 0], [1, 1] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0]), to_vec_normalized) True >>> from_vec, to_vec = [1, 0], [-1, 1] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0]), to_vec_normalized) True Rotation in 3d by less than ``pi``: >>> from_vec, to_vec = [1, 0, 0], [-1, 1, 0] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0, 0]), to_vec_normalized) True Rotation by more than ``pi``: >>> from_vec, to_vec = [1, 0, 0], [-1, -1, 0] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0, 0]), to_vec_normalized) True Notes ----- In 3d, the matrix corresponds to a rotation around the normal vector :math:`\hat n = 
\hat u \times \hat v`, where :math:`\hat u` and :math:`\hat v` are the normalized versions of :math:`u`, the vector from which to rotate, and :math:`v`, the vector to which should be rotated. The rotation angle is determined as :math:`\alpha = \pm \arccos(\langle \hat u, \hat v \rangle)`. Its sign corresponds to the sign of :math:`\langle \hat b, \hat v\rangle`, where :math:`\hat b = \hat n \times \hat u` is the binormal vector. In the case that :math:`\hat u` and :math:`\hat v` are collinear, a perpendicular vector is chosen as :math:`\hat n = (1, 0, 0)` if :math:`v_1 = v_2 = 0`, else :math:`\hat n = (-v_2, v_1, v_3)`. The angle in this case is :math:`\alpha = 0` if :math:`\langle \hat u, \hat v \rangle > 0`, otherwise :math:`\alpha = \pi`. """ from_vec, from_vec_in = (np.array(from_vec, dtype=float, copy=True), from_vec) to_vec, to_vec_in = np.array(to_vec, dtype=float, copy=True), to_vec if from_vec.shape not in ((2,), (3,)): raise ValueError('`from_vec.shape` must be (2,) or (3,), got {}' ''.format(from_vec.shape)) if to_vec.shape not in ((2,), (3,)): raise ValueError('`to_vec.shape` must be (2,) or (3,), got {}' ''.format(to_vec.shape)) if from_vec.shape != to_vec.shape: raise ValueError('`from_vec.shape` and `to_vec.shape` not equal: ' '{} != {}' ''.format(from_vec.shape, to_vec.shape)) ndim = len(from_vec) # Normalize vectors from_vec_norm = np.linalg.norm(from_vec) if from_vec_norm < 1e-10: raise ValueError('`from_vec` {} too close to zero'.format(from_vec_in)) from_vec /= from_vec_norm to_vec_norm = np.linalg.norm(to_vec) if to_vec_norm < 1e-10: raise ValueError('`to_vec` {} too close to zero'.format(to_vec_in)) to_vec /= to_vec_norm if ndim == 2: dot = np.dot(from_vec, to_vec) from_rot = (-from_vec[1], from_vec[0]) if dot == 0: angle = np.pi / 2 if np.dot(from_rot, to_vec) > 0 else -np.pi / 2 elif np.array_equal(to_vec, -from_vec): angle = np.pi else: angle = (np.sign(np.dot(from_rot, to_vec)) * np.arccos(np.dot(from_vec, to_vec))) return 
np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) elif ndim == 3: # Determine normal normal = np.cross(from_vec, to_vec) normal_norm = np.linalg.norm(normal) if normal_norm < 1e-10: # Collinear vectors, use perpendicular vector and angle = 0 or pi normal = perpendicular_vector(from_vec) angle = 0 if np.dot(from_vec, to_vec) > 0 else np.pi return axis_rotation_matrix(normal, angle) else: # Usual case, determine binormal and sign of rotation angle normal /= normal_norm binormal = np.cross(normal, from_vec) angle = (np.sign(np.dot(binormal, to_vec)) * np.arccos(np.dot(from_vec, to_vec))) return axis_rotation_matrix(normal, angle) else: raise RuntimeError('bad ndim')
python
def rotation_matrix_from_to(from_vec, to_vec): r"""Return a matrix that rotates ``from_vec`` to ``to_vec`` in 2d or 3d. Since a rotation from one vector to another in 3 dimensions has (at least) one degree of freedom, this function makes deliberate but still arbitrary choices to fix these free parameters. See Notes for details. For the applied formula in 3d, see `this Wikipedia page about Rodrigues' rotation formula <https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula>`_. Parameters ---------- from_vec, to_vec : `array-like`, shape ``(2,)`` or ``(3,)`` Vectors between which the returned matrix rotates. They should not be very close to zero or collinear. Returns ------- matrix : `numpy.ndarray`, shape ``(2, 2)`` or ``(3, 3)`` A matrix rotating ``from_vec`` to ``to_vec``. Note that the matrix does *not* include scaling, i.e. it is not guaranteed that ``matrix.dot(from_vec) == to_vec``. Examples -------- In two dimensions, rotation is simple: >>> from_vec, to_vec = [1, 0], [1, 1] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0]), to_vec_normalized) True >>> from_vec, to_vec = [1, 0], [-1, 1] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0]), to_vec_normalized) True Rotation in 3d by less than ``pi``: >>> from_vec, to_vec = [1, 0, 0], [-1, 1, 0] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0, 0]), to_vec_normalized) True Rotation by more than ``pi``: >>> from_vec, to_vec = [1, 0, 0], [-1, -1, 0] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0, 0]), to_vec_normalized) True Notes ----- In 3d, the matrix corresponds to a rotation around the normal vector :math:`\hat n = 
\hat u \times \hat v`, where :math:`\hat u` and :math:`\hat v` are the normalized versions of :math:`u`, the vector from which to rotate, and :math:`v`, the vector to which should be rotated. The rotation angle is determined as :math:`\alpha = \pm \arccos(\langle \hat u, \hat v \rangle)`. Its sign corresponds to the sign of :math:`\langle \hat b, \hat v\rangle`, where :math:`\hat b = \hat n \times \hat u` is the binormal vector. In the case that :math:`\hat u` and :math:`\hat v` are collinear, a perpendicular vector is chosen as :math:`\hat n = (1, 0, 0)` if :math:`v_1 = v_2 = 0`, else :math:`\hat n = (-v_2, v_1, v_3)`. The angle in this case is :math:`\alpha = 0` if :math:`\langle \hat u, \hat v \rangle > 0`, otherwise :math:`\alpha = \pi`. """ from_vec, from_vec_in = (np.array(from_vec, dtype=float, copy=True), from_vec) to_vec, to_vec_in = np.array(to_vec, dtype=float, copy=True), to_vec if from_vec.shape not in ((2,), (3,)): raise ValueError('`from_vec.shape` must be (2,) or (3,), got {}' ''.format(from_vec.shape)) if to_vec.shape not in ((2,), (3,)): raise ValueError('`to_vec.shape` must be (2,) or (3,), got {}' ''.format(to_vec.shape)) if from_vec.shape != to_vec.shape: raise ValueError('`from_vec.shape` and `to_vec.shape` not equal: ' '{} != {}' ''.format(from_vec.shape, to_vec.shape)) ndim = len(from_vec) # Normalize vectors from_vec_norm = np.linalg.norm(from_vec) if from_vec_norm < 1e-10: raise ValueError('`from_vec` {} too close to zero'.format(from_vec_in)) from_vec /= from_vec_norm to_vec_norm = np.linalg.norm(to_vec) if to_vec_norm < 1e-10: raise ValueError('`to_vec` {} too close to zero'.format(to_vec_in)) to_vec /= to_vec_norm if ndim == 2: dot = np.dot(from_vec, to_vec) from_rot = (-from_vec[1], from_vec[0]) if dot == 0: angle = np.pi / 2 if np.dot(from_rot, to_vec) > 0 else -np.pi / 2 elif np.array_equal(to_vec, -from_vec): angle = np.pi else: angle = (np.sign(np.dot(from_rot, to_vec)) * np.arccos(np.dot(from_vec, to_vec))) return 
np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) elif ndim == 3: # Determine normal normal = np.cross(from_vec, to_vec) normal_norm = np.linalg.norm(normal) if normal_norm < 1e-10: # Collinear vectors, use perpendicular vector and angle = 0 or pi normal = perpendicular_vector(from_vec) angle = 0 if np.dot(from_vec, to_vec) > 0 else np.pi return axis_rotation_matrix(normal, angle) else: # Usual case, determine binormal and sign of rotation angle normal /= normal_norm binormal = np.cross(normal, from_vec) angle = (np.sign(np.dot(binormal, to_vec)) * np.arccos(np.dot(from_vec, to_vec))) return axis_rotation_matrix(normal, angle) else: raise RuntimeError('bad ndim')
[ "def", "rotation_matrix_from_to", "(", "from_vec", ",", "to_vec", ")", ":", "from_vec", ",", "from_vec_in", "=", "(", "np", ".", "array", "(", "from_vec", ",", "dtype", "=", "float", ",", "copy", "=", "True", ")", ",", "from_vec", ")", "to_vec", ",", "...
r"""Return a matrix that rotates ``from_vec`` to ``to_vec`` in 2d or 3d. Since a rotation from one vector to another in 3 dimensions has (at least) one degree of freedom, this function makes deliberate but still arbitrary choices to fix these free parameters. See Notes for details. For the applied formula in 3d, see `this Wikipedia page about Rodrigues' rotation formula <https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula>`_. Parameters ---------- from_vec, to_vec : `array-like`, shape ``(2,)`` or ``(3,)`` Vectors between which the returned matrix rotates. They should not be very close to zero or collinear. Returns ------- matrix : `numpy.ndarray`, shape ``(2, 2)`` or ``(3, 3)`` A matrix rotating ``from_vec`` to ``to_vec``. Note that the matrix does *not* include scaling, i.e. it is not guaranteed that ``matrix.dot(from_vec) == to_vec``. Examples -------- In two dimensions, rotation is simple: >>> from_vec, to_vec = [1, 0], [1, 1] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0]), to_vec_normalized) True >>> from_vec, to_vec = [1, 0], [-1, 1] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0]), to_vec_normalized) True Rotation in 3d by less than ``pi``: >>> from_vec, to_vec = [1, 0, 0], [-1, 1, 0] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0, 0]), to_vec_normalized) True Rotation by more than ``pi``: >>> from_vec, to_vec = [1, 0, 0], [-1, -1, 0] >>> mat = rotation_matrix_from_to(from_vec, to_vec) >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec)) >>> np.allclose(mat.dot([1, 0, 0]), to_vec_normalized) True Notes ----- In 3d, the matrix corresponds to a rotation around the normal vector :math:`\hat n = \hat u \times \hat v`, where :math:`\hat u` and 
:math:`\hat v` are the normalized versions of :math:`u`, the vector from which to rotate, and :math:`v`, the vector to which should be rotated. The rotation angle is determined as :math:`\alpha = \pm \arccos(\langle \hat u, \hat v \rangle)`. Its sign corresponds to the sign of :math:`\langle \hat b, \hat v\rangle`, where :math:`\hat b = \hat n \times \hat u` is the binormal vector. In the case that :math:`\hat u` and :math:`\hat v` are collinear, a perpendicular vector is chosen as :math:`\hat n = (1, 0, 0)` if :math:`v_1 = v_2 = 0`, else :math:`\hat n = (-v_2, v_1, v_3)`. The angle in this case is :math:`\alpha = 0` if :math:`\langle \hat u, \hat v \rangle > 0`, otherwise :math:`\alpha = \pi`.
[ "r", "Return", "a", "matrix", "that", "rotates", "from_vec", "to", "to_vec", "in", "2d", "or", "3d", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/util/utility.py#L251-L385
231,724
odlgroup/odl
odl/tomo/util/utility.py
transform_system
def transform_system(principal_vec, principal_default, other_vecs, matrix=None): """Transform vectors with either ``matrix`` or based on ``principal_vec``. The logic of this function is as follows: - If ``matrix`` is not ``None``, transform ``principal_vec`` and all vectors in ``other_vecs`` by ``matrix``, ignoring ``principal_default``. - If ``matrix`` is ``None``, compute the rotation matrix from ``principal_default`` to ``principal_vec``, not including the dilation. Apply that rotation to all vectors in ``other_vecs``. **Note:** All vectors must have the same shape and match the shape of ``matrix`` if given. Parameters ---------- principal_vec : `array-like`, shape ``(ndim,)`` Vector that defines the transformation if ``matrix`` is not provided. principal_default : `array-like`, shape ``(ndim,)`` Default value for ``principal_vec``. The deviation from this determines the transformation. If ``matrix`` is given, this has no effect. other_vecs : sequence of ``None`` or `array-like`'s with shape ``(ndim,)`` The other vectors that should be transformed. ``None`` entries are just appended as-is. matrix : `array-like`, shape ``(ndim, ndim)``, optional Explicit transformation matrix to be applied to the vectors. It is allowed to include a constant scaling but shouldn't have strongly varying directional scaling (bad condition). Returns ------- transformed_vecs : tuple of `numpy.ndarray`, shape ``(ndim,)`` The transformed vectors. The first entry is (the transformed) ``principal_vec``, followed by the transformed ``other_vecs``. Thus the length of the tuple is ``len(other_vecs) + 1``. """ transformed_vecs = [] principal_vec = np.asarray(principal_vec, dtype=float) ndim = principal_vec.shape[0] if matrix is None: # Separate into dilation and rotation. The dilation is only used # for comparison, not in the final matrix. 
principal_default = np.asarray(principal_default, dtype=float) pr_norm = np.linalg.norm(principal_vec) pr_default_norm = np.linalg.norm(principal_default) if pr_default_norm == 0.0 and pr_norm != 0.0: raise ValueError('no transformation from {} to {}' ''.format(principal_default, principal_vec)) elif pr_norm == 0.0 and pr_default_norm != 0.0: raise ValueError('transformation from {} to {} is singular' ''.format(principal_default, principal_vec)) elif pr_norm == 0.0 and pr_default_norm == 0.0: dilation = 1.0 else: dilation = (np.linalg.norm(principal_vec) / np.linalg.norm(principal_default)) # Determine the rotation part if np.allclose(principal_vec, dilation * principal_default): # Dilation only matrix = np.eye(ndim) else: matrix = rotation_matrix_from_to(principal_default, principal_vec) # This one goes straight in transformed_vecs.append(principal_vec) else: matrix = np.asarray(matrix, dtype=float) if matrix.shape != (ndim, ndim): raise ValueError('matrix shape must be {}, got {}' ''.format((ndim, ndim), matrix.shape)) # Check matrix condition svals = np.linalg.svd(matrix, compute_uv=False) condition = np.inf if 0.0 in svals else svals[0] / svals[-1] if condition > 1e6: raise np.linalg.LinAlgError( 'matrix is badly conditioned: condition number is {}' ''.format(condition)) transformed_vecs.append(matrix.dot(principal_vec)) for vec in other_vecs: if vec is None: transformed_vecs.append(None) else: transformed_vecs.append(matrix.dot(vec)) return tuple(transformed_vecs)
python
def transform_system(principal_vec, principal_default, other_vecs, matrix=None): transformed_vecs = [] principal_vec = np.asarray(principal_vec, dtype=float) ndim = principal_vec.shape[0] if matrix is None: # Separate into dilation and rotation. The dilation is only used # for comparison, not in the final matrix. principal_default = np.asarray(principal_default, dtype=float) pr_norm = np.linalg.norm(principal_vec) pr_default_norm = np.linalg.norm(principal_default) if pr_default_norm == 0.0 and pr_norm != 0.0: raise ValueError('no transformation from {} to {}' ''.format(principal_default, principal_vec)) elif pr_norm == 0.0 and pr_default_norm != 0.0: raise ValueError('transformation from {} to {} is singular' ''.format(principal_default, principal_vec)) elif pr_norm == 0.0 and pr_default_norm == 0.0: dilation = 1.0 else: dilation = (np.linalg.norm(principal_vec) / np.linalg.norm(principal_default)) # Determine the rotation part if np.allclose(principal_vec, dilation * principal_default): # Dilation only matrix = np.eye(ndim) else: matrix = rotation_matrix_from_to(principal_default, principal_vec) # This one goes straight in transformed_vecs.append(principal_vec) else: matrix = np.asarray(matrix, dtype=float) if matrix.shape != (ndim, ndim): raise ValueError('matrix shape must be {}, got {}' ''.format((ndim, ndim), matrix.shape)) # Check matrix condition svals = np.linalg.svd(matrix, compute_uv=False) condition = np.inf if 0.0 in svals else svals[0] / svals[-1] if condition > 1e6: raise np.linalg.LinAlgError( 'matrix is badly conditioned: condition number is {}' ''.format(condition)) transformed_vecs.append(matrix.dot(principal_vec)) for vec in other_vecs: if vec is None: transformed_vecs.append(None) else: transformed_vecs.append(matrix.dot(vec)) return tuple(transformed_vecs)
[ "def", "transform_system", "(", "principal_vec", ",", "principal_default", ",", "other_vecs", ",", "matrix", "=", "None", ")", ":", "transformed_vecs", "=", "[", "]", "principal_vec", "=", "np", ".", "asarray", "(", "principal_vec", ",", "dtype", "=", "float",...
Transform vectors with either ``matrix`` or based on ``principal_vec``. The logic of this function is as follows: - If ``matrix`` is not ``None``, transform ``principal_vec`` and all vectors in ``other_vecs`` by ``matrix``, ignoring ``principal_default``. - If ``matrix`` is ``None``, compute the rotation matrix from ``principal_default`` to ``principal_vec``, not including the dilation. Apply that rotation to all vectors in ``other_vecs``. **Note:** All vectors must have the same shape and match the shape of ``matrix`` if given. Parameters ---------- principal_vec : `array-like`, shape ``(ndim,)`` Vector that defines the transformation if ``matrix`` is not provided. principal_default : `array-like`, shape ``(ndim,)`` Default value for ``principal_vec``. The deviation from this determines the transformation. If ``matrix`` is given, this has no effect. other_vecs : sequence of ``None`` or `array-like`'s with shape ``(ndim,)`` The other vectors that should be transformed. ``None`` entries are just appended as-is. matrix : `array-like`, shape ``(ndim, ndim)``, optional Explicit transformation matrix to be applied to the vectors. It is allowed to include a constant scaling but shouldn't have strongly varying directional scaling (bad condition). Returns ------- transformed_vecs : tuple of `numpy.ndarray`, shape ``(ndim,)`` The transformed vectors. The first entry is (the transformed) ``principal_vec``, followed by the transformed ``other_vecs``. Thus the length of the tuple is ``len(other_vecs) + 1``.
[ "Transform", "vectors", "with", "either", "matrix", "or", "based", "on", "principal_vec", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/util/utility.py#L388-L484
231,725
odlgroup/odl
odl/tomo/util/utility.py
perpendicular_vector
def perpendicular_vector(vec): """Return a vector perpendicular to ``vec``. Parameters ---------- vec : `array-like` Vector(s) of arbitrary length. The axis along the vector components must come last. Returns ------- perp_vec : `numpy.ndarray` Array of same shape as ``vec`` such that ``dot(vec, perp_vec) == 0`` (along the last axis if there are multiple vectors). Examples -------- Works in 2d: >>> perpendicular_vector([0, 1]) array([-1., 0.]) >>> np.allclose(perpendicular_vector([1, 0]), [0, 1]) # would print -0 True And in 3d: >>> perpendicular_vector([0, 1, 0]) array([-1., 0., 0.]) >>> perpendicular_vector([0, 0, 1]) array([ 1., 0., 0.]) >>> np.allclose(perpendicular_vector([1, 0, 0]), [0, 1, 0]) True The function is vectorized, i.e., it can be called with multiple vectors at once (additional axes being added to the left): >>> perpendicular_vector([[0, 1, 0], ... [0, 0, 1]]) # 2 vectors array([[-1., 0., 0.], [ 1., 0., 0.]]) >>> vecs = np.zeros((2, 3, 3)) >>> vecs[..., 1] = 1 # (2, 3) array of vectors (0, 1, 0) >>> perpendicular_vector(vecs) array([[[-1., 0., 0.], [-1., 0., 0.], [-1., 0., 0.]], <BLANKLINE> [[-1., 0., 0.], [-1., 0., 0.], [-1., 0., 0.]]]) """ squeeze_out = (np.ndim(vec) == 1) vec = np.array(vec, dtype=float, copy=False, ndmin=2) if np.any(np.all(vec == 0, axis=-1)): raise ValueError('zero vector') result = np.zeros(vec.shape) cond = np.any(vec[..., :2] != 0, axis=-1) result[cond, 0] = -vec[cond, 1] result[cond, 1] = vec[cond, 0] result[~cond, 0] = 1 result /= np.linalg.norm(result, axis=-1, keepdims=True) if squeeze_out: result = result.squeeze() return result
python
def perpendicular_vector(vec): squeeze_out = (np.ndim(vec) == 1) vec = np.array(vec, dtype=float, copy=False, ndmin=2) if np.any(np.all(vec == 0, axis=-1)): raise ValueError('zero vector') result = np.zeros(vec.shape) cond = np.any(vec[..., :2] != 0, axis=-1) result[cond, 0] = -vec[cond, 1] result[cond, 1] = vec[cond, 0] result[~cond, 0] = 1 result /= np.linalg.norm(result, axis=-1, keepdims=True) if squeeze_out: result = result.squeeze() return result
[ "def", "perpendicular_vector", "(", "vec", ")", ":", "squeeze_out", "=", "(", "np", ".", "ndim", "(", "vec", ")", "==", "1", ")", "vec", "=", "np", ".", "array", "(", "vec", ",", "dtype", "=", "float", ",", "copy", "=", "False", ",", "ndmin", "="...
Return a vector perpendicular to ``vec``. Parameters ---------- vec : `array-like` Vector(s) of arbitrary length. The axis along the vector components must come last. Returns ------- perp_vec : `numpy.ndarray` Array of same shape as ``vec`` such that ``dot(vec, perp_vec) == 0`` (along the last axis if there are multiple vectors). Examples -------- Works in 2d: >>> perpendicular_vector([0, 1]) array([-1., 0.]) >>> np.allclose(perpendicular_vector([1, 0]), [0, 1]) # would print -0 True And in 3d: >>> perpendicular_vector([0, 1, 0]) array([-1., 0., 0.]) >>> perpendicular_vector([0, 0, 1]) array([ 1., 0., 0.]) >>> np.allclose(perpendicular_vector([1, 0, 0]), [0, 1, 0]) True The function is vectorized, i.e., it can be called with multiple vectors at once (additional axes being added to the left): >>> perpendicular_vector([[0, 1, 0], ... [0, 0, 1]]) # 2 vectors array([[-1., 0., 0.], [ 1., 0., 0.]]) >>> vecs = np.zeros((2, 3, 3)) >>> vecs[..., 1] = 1 # (2, 3) array of vectors (0, 1, 0) >>> perpendicular_vector(vecs) array([[[-1., 0., 0.], [-1., 0., 0.], [-1., 0., 0.]], <BLANKLINE> [[-1., 0., 0.], [-1., 0., 0.], [-1., 0., 0.]]])
[ "Return", "a", "vector", "perpendicular", "to", "vec", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/util/utility.py#L551-L618
231,726
odlgroup/odl
odl/tomo/util/utility.py
is_inside_bounds
def is_inside_bounds(value, params): """Return ``True`` if ``value`` is contained in ``params``. This method supports broadcasting in the sense that for ``params.ndim >= 2``, if more than one value is given, the inputs are broadcast against each other. Parameters ---------- value : `array-like` Value(s) to be checked. For several inputs, the final bool tells whether all inputs pass the check or not. params : `IntervalProd` Set in which the value is / the values are supposed to lie. Returns ------- is_inside_bounds : bool ``True`` is all values lie in ``params``, ``False`` otherwise. Examples -------- Check a single point: >>> params = odl.IntervalProd([0, 0], [1, 2]) >>> is_inside_bounds([0, 0], params) True >>> is_inside_bounds([0, -1], params) False Using broadcasting: >>> pts_ax0 = np.array([0, 0, 1, 0, 1])[:, None] >>> pts_ax1 = np.array([2, 0, 1])[None, :] >>> is_inside_bounds([pts_ax0, pts_ax1], params) True >>> pts_ax1 = np.array([-2, 1])[None, :] >>> is_inside_bounds([pts_ax0, pts_ax1], params) False """ if value in params: # Single parameter return True else: if params.ndim == 1: return params.contains_all(np.ravel(value)) else: # Flesh out and flatten to check bounds bcast_value = np.broadcast_arrays(*value) stacked_value = np.vstack(bcast_value) flat_value = stacked_value.reshape(params.ndim, -1) return params.contains_all(flat_value)
python
def is_inside_bounds(value, params): if value in params: # Single parameter return True else: if params.ndim == 1: return params.contains_all(np.ravel(value)) else: # Flesh out and flatten to check bounds bcast_value = np.broadcast_arrays(*value) stacked_value = np.vstack(bcast_value) flat_value = stacked_value.reshape(params.ndim, -1) return params.contains_all(flat_value)
[ "def", "is_inside_bounds", "(", "value", ",", "params", ")", ":", "if", "value", "in", "params", ":", "# Single parameter", "return", "True", "else", ":", "if", "params", ".", "ndim", "==", "1", ":", "return", "params", ".", "contains_all", "(", "np", "....
Return ``True`` if ``value`` is contained in ``params``. This method supports broadcasting in the sense that for ``params.ndim >= 2``, if more than one value is given, the inputs are broadcast against each other. Parameters ---------- value : `array-like` Value(s) to be checked. For several inputs, the final bool tells whether all inputs pass the check or not. params : `IntervalProd` Set in which the value is / the values are supposed to lie. Returns ------- is_inside_bounds : bool ``True`` is all values lie in ``params``, ``False`` otherwise. Examples -------- Check a single point: >>> params = odl.IntervalProd([0, 0], [1, 2]) >>> is_inside_bounds([0, 0], params) True >>> is_inside_bounds([0, -1], params) False Using broadcasting: >>> pts_ax0 = np.array([0, 0, 1, 0, 1])[:, None] >>> pts_ax1 = np.array([2, 0, 1])[None, :] >>> is_inside_bounds([pts_ax0, pts_ax1], params) True >>> pts_ax1 = np.array([-2, 1])[None, :] >>> is_inside_bounds([pts_ax0, pts_ax1], params) False
[ "Return", "True", "if", "value", "is", "contained", "in", "params", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/util/utility.py#L621-L672
231,727
odlgroup/odl
odl/trafos/backends/pyfftw_bindings.py
pyfftw_call
def pyfftw_call(array_in, array_out, direction='forward', axes=None, halfcomplex=False, **kwargs): """Calculate the DFT with pyfftw. The discrete Fourier (forward) transform calcuates the sum:: f_hat[k] = sum_j( f[j] * exp(-2*pi*1j * j*k/N) ) where the summation is taken over all indices ``j = (j[0], ..., j[d-1])`` in the range ``0 <= j < N`` (component-wise), with ``N`` being the shape of the input array. The output indices ``k`` lie in the same range, except for half-complex transforms, where the last axis ``i`` in ``axes`` is shortened to ``0 <= k[i] < floor(N[i]/2) + 1``. In the backward transform, sign of the the exponential argument is flipped. Parameters ---------- array_in : `numpy.ndarray` Array to be transformed array_out : `numpy.ndarray` Output array storing the transformed values, may be aliased with ``array_in``. direction : {'forward', 'backward'}, optional Direction of the transform axes : int or sequence of ints, optional Dimensions along which to take the transform. ``None`` means using all axes and is equivalent to ``np.arange(ndim)``. halfcomplex : bool, optional If ``True``, calculate only the negative frequency part along the last axis. If ``False``, calculate the full complex FFT. This option can only be used with real input data. Other Parameters ---------------- fftw_plan : ``pyfftw.FFTW``, optional Use this plan instead of calculating a new one. If specified, the options ``planning_effort``, ``planning_timelimit`` and ``threads`` have no effect. planning_effort : str, optional Flag for the amount of effort put into finding an optimal FFTW plan. See the `FFTW doc on planner flags <http://www.fftw.org/fftw3_doc/Planner-Flags.html>`_. Available options: {'estimate', 'measure', 'patient', 'exhaustive'} Default: 'estimate' planning_timelimit : float or ``None``, optional Limit planning time to roughly this many seconds. Default: ``None`` (no limit) threads : int, optional Number of threads to use. 
Default: Number of CPUs if the number of data points is larger than 4096, else 1. normalise_idft : bool, optional If ``True``, the result of the backward transform is divided by ``1 / N``, where ``N`` is the total number of points in ``array_in[axes]``. This ensures that the IDFT is the true inverse of the forward DFT. Default: ``False`` import_wisdom : filename or file handle, optional File to load FFTW wisdom from. If the file does not exist, it is ignored. export_wisdom : filename or file handle, optional File to append the accumulated FFTW wisdom to Returns ------- fftw_plan : ``pyfftw.FFTW`` The plan object created from the input arguments. It can be reused for transforms of the same size with the same data types. Note that reuse only gives a speedup if the initial plan used a planner flag other than ``'estimate'``. If ``fftw_plan`` was specified, the returned object is a reference to it. Notes ----- * The planning and direction flags can also be specified as capitalized and prepended by ``'FFTW_'``, i.e. in the original FFTW form. * For a ``halfcomplex`` forward transform, the arrays must fulfill ``array_out.shape[axes[-1]] == array_in.shape[axes[-1]] // 2 + 1``, and vice versa for backward transforms. * All planning schemes except ``'estimate'`` require an internal copy of the input array but are often several times faster after the first call (measuring results are cached). Typically, 'measure' is a good compromise. If you cannot afford the copy, use ``'estimate'``. * If a plan is provided via the ``fftw_plan`` parameter, no copy is needed internally. 
""" import pickle if not array_in.flags.aligned: raise ValueError('input array not aligned') if not array_out.flags.aligned: raise ValueError('output array not aligned') if axes is None: axes = tuple(range(array_in.ndim)) axes = normalized_axes_tuple(axes, array_in.ndim) direction = _pyfftw_to_local(direction) fftw_plan_in = kwargs.pop('fftw_plan', None) planning_effort = _pyfftw_to_local(kwargs.pop('planning_effort', 'estimate')) planning_timelimit = kwargs.pop('planning_timelimit', None) threads = kwargs.pop('threads', None) normalise_idft = kwargs.pop('normalise_idft', False) wimport = kwargs.pop('import_wisdom', '') wexport = kwargs.pop('export_wisdom', '') # Cast input to complex if necessary array_in_copied = False if is_real_dtype(array_in.dtype) and not halfcomplex: # Need to cast array_in to complex dtype array_in = array_in.astype(complex_dtype(array_in.dtype)) array_in_copied = True # Do consistency checks on the arguments _pyfftw_check_args(array_in, array_out, axes, halfcomplex, direction) # Import wisdom if possible if wimport: try: with open(wimport, 'rb') as wfile: wisdom = pickle.load(wfile) except IOError: wisdom = [] except TypeError: # Got file handle wisdom = pickle.load(wimport) if wisdom: pyfftw.import_wisdom(wisdom) # Copy input array if it hasn't been done yet and the planner is likely # to destroy it. If we already have a plan, we don't have to worry. 
planner_destroys = _pyfftw_destroys_input( [planning_effort], direction, halfcomplex, array_in.ndim) must_copy_array_in = fftw_plan_in is None and planner_destroys if must_copy_array_in and not array_in_copied: plan_arr_in = np.empty_like(array_in) flags = [_local_to_pyfftw(planning_effort), 'FFTW_DESTROY_INPUT'] else: plan_arr_in = array_in flags = [_local_to_pyfftw(planning_effort)] if fftw_plan_in is None: if threads is None: if plan_arr_in.size <= 4096: # Trade-off wrt threading overhead threads = 1 else: threads = cpu_count() fftw_plan = pyfftw.FFTW( plan_arr_in, array_out, direction=_local_to_pyfftw(direction), flags=flags, planning_timelimit=planning_timelimit, threads=threads, axes=axes) else: fftw_plan = fftw_plan_in fftw_plan(array_in, array_out, normalise_idft=normalise_idft) if wexport: try: with open(wexport, 'ab') as wfile: pickle.dump(pyfftw.export_wisdom(), wfile) except TypeError: # Got file handle pickle.dump(pyfftw.export_wisdom(), wexport) return fftw_plan
python
def pyfftw_call(array_in, array_out, direction='forward', axes=None, halfcomplex=False, **kwargs): import pickle if not array_in.flags.aligned: raise ValueError('input array not aligned') if not array_out.flags.aligned: raise ValueError('output array not aligned') if axes is None: axes = tuple(range(array_in.ndim)) axes = normalized_axes_tuple(axes, array_in.ndim) direction = _pyfftw_to_local(direction) fftw_plan_in = kwargs.pop('fftw_plan', None) planning_effort = _pyfftw_to_local(kwargs.pop('planning_effort', 'estimate')) planning_timelimit = kwargs.pop('planning_timelimit', None) threads = kwargs.pop('threads', None) normalise_idft = kwargs.pop('normalise_idft', False) wimport = kwargs.pop('import_wisdom', '') wexport = kwargs.pop('export_wisdom', '') # Cast input to complex if necessary array_in_copied = False if is_real_dtype(array_in.dtype) and not halfcomplex: # Need to cast array_in to complex dtype array_in = array_in.astype(complex_dtype(array_in.dtype)) array_in_copied = True # Do consistency checks on the arguments _pyfftw_check_args(array_in, array_out, axes, halfcomplex, direction) # Import wisdom if possible if wimport: try: with open(wimport, 'rb') as wfile: wisdom = pickle.load(wfile) except IOError: wisdom = [] except TypeError: # Got file handle wisdom = pickle.load(wimport) if wisdom: pyfftw.import_wisdom(wisdom) # Copy input array if it hasn't been done yet and the planner is likely # to destroy it. If we already have a plan, we don't have to worry. 
planner_destroys = _pyfftw_destroys_input( [planning_effort], direction, halfcomplex, array_in.ndim) must_copy_array_in = fftw_plan_in is None and planner_destroys if must_copy_array_in and not array_in_copied: plan_arr_in = np.empty_like(array_in) flags = [_local_to_pyfftw(planning_effort), 'FFTW_DESTROY_INPUT'] else: plan_arr_in = array_in flags = [_local_to_pyfftw(planning_effort)] if fftw_plan_in is None: if threads is None: if plan_arr_in.size <= 4096: # Trade-off wrt threading overhead threads = 1 else: threads = cpu_count() fftw_plan = pyfftw.FFTW( plan_arr_in, array_out, direction=_local_to_pyfftw(direction), flags=flags, planning_timelimit=planning_timelimit, threads=threads, axes=axes) else: fftw_plan = fftw_plan_in fftw_plan(array_in, array_out, normalise_idft=normalise_idft) if wexport: try: with open(wexport, 'ab') as wfile: pickle.dump(pyfftw.export_wisdom(), wfile) except TypeError: # Got file handle pickle.dump(pyfftw.export_wisdom(), wexport) return fftw_plan
[ "def", "pyfftw_call", "(", "array_in", ",", "array_out", ",", "direction", "=", "'forward'", ",", "axes", "=", "None", ",", "halfcomplex", "=", "False", ",", "*", "*", "kwargs", ")", ":", "import", "pickle", "if", "not", "array_in", ".", "flags", ".", ...
Calculate the DFT with pyfftw. The discrete Fourier (forward) transform calcuates the sum:: f_hat[k] = sum_j( f[j] * exp(-2*pi*1j * j*k/N) ) where the summation is taken over all indices ``j = (j[0], ..., j[d-1])`` in the range ``0 <= j < N`` (component-wise), with ``N`` being the shape of the input array. The output indices ``k`` lie in the same range, except for half-complex transforms, where the last axis ``i`` in ``axes`` is shortened to ``0 <= k[i] < floor(N[i]/2) + 1``. In the backward transform, sign of the the exponential argument is flipped. Parameters ---------- array_in : `numpy.ndarray` Array to be transformed array_out : `numpy.ndarray` Output array storing the transformed values, may be aliased with ``array_in``. direction : {'forward', 'backward'}, optional Direction of the transform axes : int or sequence of ints, optional Dimensions along which to take the transform. ``None`` means using all axes and is equivalent to ``np.arange(ndim)``. halfcomplex : bool, optional If ``True``, calculate only the negative frequency part along the last axis. If ``False``, calculate the full complex FFT. This option can only be used with real input data. Other Parameters ---------------- fftw_plan : ``pyfftw.FFTW``, optional Use this plan instead of calculating a new one. If specified, the options ``planning_effort``, ``planning_timelimit`` and ``threads`` have no effect. planning_effort : str, optional Flag for the amount of effort put into finding an optimal FFTW plan. See the `FFTW doc on planner flags <http://www.fftw.org/fftw3_doc/Planner-Flags.html>`_. Available options: {'estimate', 'measure', 'patient', 'exhaustive'} Default: 'estimate' planning_timelimit : float or ``None``, optional Limit planning time to roughly this many seconds. Default: ``None`` (no limit) threads : int, optional Number of threads to use. Default: Number of CPUs if the number of data points is larger than 4096, else 1. 
normalise_idft : bool, optional If ``True``, the result of the backward transform is divided by ``1 / N``, where ``N`` is the total number of points in ``array_in[axes]``. This ensures that the IDFT is the true inverse of the forward DFT. Default: ``False`` import_wisdom : filename or file handle, optional File to load FFTW wisdom from. If the file does not exist, it is ignored. export_wisdom : filename or file handle, optional File to append the accumulated FFTW wisdom to Returns ------- fftw_plan : ``pyfftw.FFTW`` The plan object created from the input arguments. It can be reused for transforms of the same size with the same data types. Note that reuse only gives a speedup if the initial plan used a planner flag other than ``'estimate'``. If ``fftw_plan`` was specified, the returned object is a reference to it. Notes ----- * The planning and direction flags can also be specified as capitalized and prepended by ``'FFTW_'``, i.e. in the original FFTW form. * For a ``halfcomplex`` forward transform, the arrays must fulfill ``array_out.shape[axes[-1]] == array_in.shape[axes[-1]] // 2 + 1``, and vice versa for backward transforms. * All planning schemes except ``'estimate'`` require an internal copy of the input array but are often several times faster after the first call (measuring results are cached). Typically, 'measure' is a good compromise. If you cannot afford the copy, use ``'estimate'``. * If a plan is provided via the ``fftw_plan`` parameter, no copy is needed internally.
[ "Calculate", "the", "DFT", "with", "pyfftw", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/backends/pyfftw_bindings.py#L39-L214
231,728
odlgroup/odl
odl/trafos/backends/pyfftw_bindings.py
_pyfftw_destroys_input
def _pyfftw_destroys_input(flags, direction, halfcomplex, ndim): """Return ``True`` if FFTW destroys an input array, ``False`` otherwise.""" if any(flag in flags or _pyfftw_to_local(flag) in flags for flag in ('FFTW_MEASURE', 'FFTW_PATIENT', 'FFTW_EXHAUSTIVE', 'FFTW_DESTROY_INPUT')): return True elif (direction in ('backward', 'FFTW_BACKWARD') and halfcomplex and ndim != 1): return True else: return False
python
def _pyfftw_destroys_input(flags, direction, halfcomplex, ndim): if any(flag in flags or _pyfftw_to_local(flag) in flags for flag in ('FFTW_MEASURE', 'FFTW_PATIENT', 'FFTW_EXHAUSTIVE', 'FFTW_DESTROY_INPUT')): return True elif (direction in ('backward', 'FFTW_BACKWARD') and halfcomplex and ndim != 1): return True else: return False
[ "def", "_pyfftw_destroys_input", "(", "flags", ",", "direction", ",", "halfcomplex", ",", "ndim", ")", ":", "if", "any", "(", "flag", "in", "flags", "or", "_pyfftw_to_local", "(", "flag", ")", "in", "flags", "for", "flag", "in", "(", "'FFTW_MEASURE'", ",",...
Return ``True`` if FFTW destroys an input array, ``False`` otherwise.
[ "Return", "True", "if", "FFTW", "destroys", "an", "input", "array", "False", "otherwise", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/backends/pyfftw_bindings.py#L225-L235
231,729
odlgroup/odl
odl/trafos/backends/pyfftw_bindings.py
_pyfftw_check_args
def _pyfftw_check_args(arr_in, arr_out, axes, halfcomplex, direction): """Raise an error if anything is not ok with in and out.""" if len(set(axes)) != len(axes): raise ValueError('duplicate axes are not allowed') if direction == 'forward': out_shape = list(arr_in.shape) if halfcomplex: try: out_shape[axes[-1]] = arr_in.shape[axes[-1]] // 2 + 1 except IndexError: raise IndexError('axis index {} out of range for array ' 'with {} axes' ''.format(axes[-1], arr_in.ndim)) if arr_out.shape != tuple(out_shape): raise ValueError('expected output shape {}, got {}' ''.format(tuple(out_shape), arr_out.shape)) if is_real_dtype(arr_in.dtype): out_dtype = complex_dtype(arr_in.dtype) elif halfcomplex: raise ValueError('cannot combine halfcomplex forward transform ' 'with complex input') else: out_dtype = arr_in.dtype if arr_out.dtype != out_dtype: raise ValueError('expected output dtype {}, got {}' ''.format(dtype_repr(out_dtype), dtype_repr(arr_out.dtype))) elif direction == 'backward': in_shape = list(arr_out.shape) if halfcomplex: try: in_shape[axes[-1]] = arr_out.shape[axes[-1]] // 2 + 1 except IndexError as err: raise IndexError('axis index {} out of range for array ' 'with {} axes' ''.format(axes[-1], arr_out.ndim)) if arr_in.shape != tuple(in_shape): raise ValueError('expected input shape {}, got {}' ''.format(tuple(in_shape), arr_in.shape)) if is_real_dtype(arr_out.dtype): in_dtype = complex_dtype(arr_out.dtype) elif halfcomplex: raise ValueError('cannot combine halfcomplex backward transform ' 'with complex output') else: in_dtype = arr_out.dtype if arr_in.dtype != in_dtype: raise ValueError('expected input dtype {}, got {}' ''.format(dtype_repr(in_dtype), dtype_repr(arr_in.dtype))) else: # Shouldn't happen raise RuntimeError
python
def _pyfftw_check_args(arr_in, arr_out, axes, halfcomplex, direction): if len(set(axes)) != len(axes): raise ValueError('duplicate axes are not allowed') if direction == 'forward': out_shape = list(arr_in.shape) if halfcomplex: try: out_shape[axes[-1]] = arr_in.shape[axes[-1]] // 2 + 1 except IndexError: raise IndexError('axis index {} out of range for array ' 'with {} axes' ''.format(axes[-1], arr_in.ndim)) if arr_out.shape != tuple(out_shape): raise ValueError('expected output shape {}, got {}' ''.format(tuple(out_shape), arr_out.shape)) if is_real_dtype(arr_in.dtype): out_dtype = complex_dtype(arr_in.dtype) elif halfcomplex: raise ValueError('cannot combine halfcomplex forward transform ' 'with complex input') else: out_dtype = arr_in.dtype if arr_out.dtype != out_dtype: raise ValueError('expected output dtype {}, got {}' ''.format(dtype_repr(out_dtype), dtype_repr(arr_out.dtype))) elif direction == 'backward': in_shape = list(arr_out.shape) if halfcomplex: try: in_shape[axes[-1]] = arr_out.shape[axes[-1]] // 2 + 1 except IndexError as err: raise IndexError('axis index {} out of range for array ' 'with {} axes' ''.format(axes[-1], arr_out.ndim)) if arr_in.shape != tuple(in_shape): raise ValueError('expected input shape {}, got {}' ''.format(tuple(in_shape), arr_in.shape)) if is_real_dtype(arr_out.dtype): in_dtype = complex_dtype(arr_out.dtype) elif halfcomplex: raise ValueError('cannot combine halfcomplex backward transform ' 'with complex output') else: in_dtype = arr_out.dtype if arr_in.dtype != in_dtype: raise ValueError('expected input dtype {}, got {}' ''.format(dtype_repr(in_dtype), dtype_repr(arr_in.dtype))) else: # Shouldn't happen raise RuntimeError
[ "def", "_pyfftw_check_args", "(", "arr_in", ",", "arr_out", ",", "axes", ",", "halfcomplex", ",", "direction", ")", ":", "if", "len", "(", "set", "(", "axes", ")", ")", "!=", "len", "(", "axes", ")", ":", "raise", "ValueError", "(", "'duplicate axes are ...
Raise an error if anything is not ok with in and out.
[ "Raise", "an", "error", "if", "anything", "is", "not", "ok", "with", "in", "and", "out", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/backends/pyfftw_bindings.py#L238-L298
231,730
odlgroup/odl
odl/solvers/nonsmooth/admm.py
admm_linearized
def admm_linearized(x, f, g, L, tau, sigma, niter, **kwargs): r"""Generic linearized ADMM method for convex problems. ADMM stands for "Alternating Direction Method of Multipliers" and is a popular convex optimization method. This variant solves problems of the form :: min_x [ f(x) + g(Lx) ] with convex ``f`` and ``g``, and a linear operator ``L``. See Section 4.4 of `[PB2014] <http://web.stanford.edu/~boyd/papers/prox_algs.html>`_ and the Notes for more mathematical details. Parameters ---------- x : ``L.domain`` element Starting point of the iteration, updated in-place. f, g : `Functional` The functions ``f`` and ``g`` in the problem definition. They need to implement the ``proximal`` method. L : linear `Operator` The linear operator that is composed with ``g`` in the problem definition. It must fulfill ``L.domain == f.domain`` and ``L.range == g.domain``. tau, sigma : positive float Step size parameters for the update of the variables. niter : non-negative int Number of iterations. Other Parameters ---------------- callback : callable, optional Function called with the current iterate after each iteration. Notes ----- Given :math:`x^{(0)}` (the provided ``x``) and :math:`u^{(0)} = z^{(0)} = 0`, linearized ADMM applies the following iteration: .. math:: x^{(k+1)} &= \mathrm{prox}_{\tau f} \left[ x^{(k)} - \sigma^{-1}\tau L^*\big( L x^{(k)} - z^{(k)} + u^{(k)} \big) \right] z^{(k+1)} &= \mathrm{prox}_{\sigma g}\left( L x^{(k+1)} + u^{(k)} \right) u^{(k+1)} &= u^{(k)} + L x^{(k+1)} - z^{(k+1)} The step size parameters :math:`\tau` and :math:`\sigma` must satisfy .. math:: 0 < \tau < \frac{\sigma}{\|L\|^2} to guarantee convergence. The name "linearized ADMM" comes from the fact that in the minimization subproblem for the :math:`x` variable, this variant uses a linearization of a quadratic term in the augmented Lagrangian of the generic ADMM, in order to make the step expressible with the proximal operator of :math:`f`. 
Another name for this algorithm is *split inexact Uzawa method*. References ---------- [PB2014] Parikh, N and Boyd, S. *Proximal Algorithms*. Foundations and Trends in Optimization, 1(3) (2014), pp 123-231. """ if not isinstance(L, Operator): raise TypeError('`op` {!r} is not an `Operator` instance' ''.format(L)) if x not in L.domain: raise OpDomainError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, L.domain)) tau, tau_in = float(tau), tau if tau <= 0: raise ValueError('`tau` must be positive, got {}'.format(tau_in)) sigma, sigma_in = float(sigma), sigma if sigma <= 0: raise ValueError('`sigma` must be positive, got {}'.format(sigma_in)) niter, niter_in = int(niter), niter if niter < 0 or niter != niter_in: raise ValueError('`niter` must be a non-negative integer, got {}' ''.format(niter_in)) # Callback object callback = kwargs.pop('callback', None) if callback is not None and not callable(callback): raise TypeError('`callback` {} is not callable'.format(callback)) # Initialize range variables z = L.range.zero() u = L.range.zero() # Temporary for Lx + u [- z] tmp_ran = L(x) # Temporary for L^*(Lx + u - z) tmp_dom = L.domain.element() # Store proximals since their initialization may involve computation prox_tau_f = f.proximal(tau) prox_sigma_g = g.proximal(sigma) for _ in range(niter): # tmp_ran has value Lx^k here # tmp_dom <- L^*(Lx^k + u^k - z^k) tmp_ran += u tmp_ran -= z L.adjoint(tmp_ran, out=tmp_dom) # x <- x^k - (tau/sigma) L^*(Lx^k + u^k - z^k) x.lincomb(1, x, -tau / sigma, tmp_dom) # x^(k+1) <- prox[tau*f](x) prox_tau_f(x, out=x) # tmp_ran <- Lx^(k+1) L(x, out=tmp_ran) # z^(k+1) <- prox[sigma*g](Lx^(k+1) + u^k) prox_sigma_g(tmp_ran + u, out=z) # 1 copy here # u^(k+1) = u^k + Lx^(k+1) - z^(k+1) u += tmp_ran u -= z if callback is not None: callback(x)
python
def admm_linearized(x, f, g, L, tau, sigma, niter, **kwargs): r"""Generic linearized ADMM method for convex problems. ADMM stands for "Alternating Direction Method of Multipliers" and is a popular convex optimization method. This variant solves problems of the form :: min_x [ f(x) + g(Lx) ] with convex ``f`` and ``g``, and a linear operator ``L``. See Section 4.4 of `[PB2014] <http://web.stanford.edu/~boyd/papers/prox_algs.html>`_ and the Notes for more mathematical details. Parameters ---------- x : ``L.domain`` element Starting point of the iteration, updated in-place. f, g : `Functional` The functions ``f`` and ``g`` in the problem definition. They need to implement the ``proximal`` method. L : linear `Operator` The linear operator that is composed with ``g`` in the problem definition. It must fulfill ``L.domain == f.domain`` and ``L.range == g.domain``. tau, sigma : positive float Step size parameters for the update of the variables. niter : non-negative int Number of iterations. Other Parameters ---------------- callback : callable, optional Function called with the current iterate after each iteration. Notes ----- Given :math:`x^{(0)}` (the provided ``x``) and :math:`u^{(0)} = z^{(0)} = 0`, linearized ADMM applies the following iteration: .. math:: x^{(k+1)} &= \mathrm{prox}_{\tau f} \left[ x^{(k)} - \sigma^{-1}\tau L^*\big( L x^{(k)} - z^{(k)} + u^{(k)} \big) \right] z^{(k+1)} &= \mathrm{prox}_{\sigma g}\left( L x^{(k+1)} + u^{(k)} \right) u^{(k+1)} &= u^{(k)} + L x^{(k+1)} - z^{(k+1)} The step size parameters :math:`\tau` and :math:`\sigma` must satisfy .. math:: 0 < \tau < \frac{\sigma}{\|L\|^2} to guarantee convergence. The name "linearized ADMM" comes from the fact that in the minimization subproblem for the :math:`x` variable, this variant uses a linearization of a quadratic term in the augmented Lagrangian of the generic ADMM, in order to make the step expressible with the proximal operator of :math:`f`. 
Another name for this algorithm is *split inexact Uzawa method*. References ---------- [PB2014] Parikh, N and Boyd, S. *Proximal Algorithms*. Foundations and Trends in Optimization, 1(3) (2014), pp 123-231. """ if not isinstance(L, Operator): raise TypeError('`op` {!r} is not an `Operator` instance' ''.format(L)) if x not in L.domain: raise OpDomainError('`x` {!r} is not in the domain of `op` {!r}' ''.format(x, L.domain)) tau, tau_in = float(tau), tau if tau <= 0: raise ValueError('`tau` must be positive, got {}'.format(tau_in)) sigma, sigma_in = float(sigma), sigma if sigma <= 0: raise ValueError('`sigma` must be positive, got {}'.format(sigma_in)) niter, niter_in = int(niter), niter if niter < 0 or niter != niter_in: raise ValueError('`niter` must be a non-negative integer, got {}' ''.format(niter_in)) # Callback object callback = kwargs.pop('callback', None) if callback is not None and not callable(callback): raise TypeError('`callback` {} is not callable'.format(callback)) # Initialize range variables z = L.range.zero() u = L.range.zero() # Temporary for Lx + u [- z] tmp_ran = L(x) # Temporary for L^*(Lx + u - z) tmp_dom = L.domain.element() # Store proximals since their initialization may involve computation prox_tau_f = f.proximal(tau) prox_sigma_g = g.proximal(sigma) for _ in range(niter): # tmp_ran has value Lx^k here # tmp_dom <- L^*(Lx^k + u^k - z^k) tmp_ran += u tmp_ran -= z L.adjoint(tmp_ran, out=tmp_dom) # x <- x^k - (tau/sigma) L^*(Lx^k + u^k - z^k) x.lincomb(1, x, -tau / sigma, tmp_dom) # x^(k+1) <- prox[tau*f](x) prox_tau_f(x, out=x) # tmp_ran <- Lx^(k+1) L(x, out=tmp_ran) # z^(k+1) <- prox[sigma*g](Lx^(k+1) + u^k) prox_sigma_g(tmp_ran + u, out=z) # 1 copy here # u^(k+1) = u^k + Lx^(k+1) - z^(k+1) u += tmp_ran u -= z if callback is not None: callback(x)
[ "def", "admm_linearized", "(", "x", ",", "f", ",", "g", ",", "L", ",", "tau", ",", "sigma", ",", "niter", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "L", ",", "Operator", ")", ":", "raise", "TypeError", "(", "'`op` {!r} is n...
r"""Generic linearized ADMM method for convex problems. ADMM stands for "Alternating Direction Method of Multipliers" and is a popular convex optimization method. This variant solves problems of the form :: min_x [ f(x) + g(Lx) ] with convex ``f`` and ``g``, and a linear operator ``L``. See Section 4.4 of `[PB2014] <http://web.stanford.edu/~boyd/papers/prox_algs.html>`_ and the Notes for more mathematical details. Parameters ---------- x : ``L.domain`` element Starting point of the iteration, updated in-place. f, g : `Functional` The functions ``f`` and ``g`` in the problem definition. They need to implement the ``proximal`` method. L : linear `Operator` The linear operator that is composed with ``g`` in the problem definition. It must fulfill ``L.domain == f.domain`` and ``L.range == g.domain``. tau, sigma : positive float Step size parameters for the update of the variables. niter : non-negative int Number of iterations. Other Parameters ---------------- callback : callable, optional Function called with the current iterate after each iteration. Notes ----- Given :math:`x^{(0)}` (the provided ``x``) and :math:`u^{(0)} = z^{(0)} = 0`, linearized ADMM applies the following iteration: .. math:: x^{(k+1)} &= \mathrm{prox}_{\tau f} \left[ x^{(k)} - \sigma^{-1}\tau L^*\big( L x^{(k)} - z^{(k)} + u^{(k)} \big) \right] z^{(k+1)} &= \mathrm{prox}_{\sigma g}\left( L x^{(k+1)} + u^{(k)} \right) u^{(k+1)} &= u^{(k)} + L x^{(k+1)} - z^{(k+1)} The step size parameters :math:`\tau` and :math:`\sigma` must satisfy .. math:: 0 < \tau < \frac{\sigma}{\|L\|^2} to guarantee convergence. The name "linearized ADMM" comes from the fact that in the minimization subproblem for the :math:`x` variable, this variant uses a linearization of a quadratic term in the augmented Lagrangian of the generic ADMM, in order to make the step expressible with the proximal operator of :math:`f`. Another name for this algorithm is *split inexact Uzawa method*. 
References ---------- [PB2014] Parikh, N and Boyd, S. *Proximal Algorithms*. Foundations and Trends in Optimization, 1(3) (2014), pp 123-231.
[ "r", "Generic", "linearized", "ADMM", "method", "for", "convex", "problems", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/admm.py#L20-L154
231,731
odlgroup/odl
odl/solvers/nonsmooth/admm.py
admm_linearized_simple
def admm_linearized_simple(x, f, g, L, tau, sigma, niter, **kwargs): """Non-optimized version of ``admm_linearized``. This function is intended for debugging. It makes a lot of copies and performs no error checking. """ callback = kwargs.pop('callback', None) z = L.range.zero() u = L.range.zero() for _ in range(niter): x[:] = f.proximal(tau)(x - tau / sigma * L.adjoint(L(x) + u - z)) z = g.proximal(sigma)(L(x) + u) u = L(x) + u - z if callback is not None: callback(x)
python
def admm_linearized_simple(x, f, g, L, tau, sigma, niter, **kwargs): callback = kwargs.pop('callback', None) z = L.range.zero() u = L.range.zero() for _ in range(niter): x[:] = f.proximal(tau)(x - tau / sigma * L.adjoint(L(x) + u - z)) z = g.proximal(sigma)(L(x) + u) u = L(x) + u - z if callback is not None: callback(x)
[ "def", "admm_linearized_simple", "(", "x", ",", "f", ",", "g", ",", "L", ",", "tau", ",", "sigma", ",", "niter", ",", "*", "*", "kwargs", ")", ":", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "None", ")", "z", "=", "L", ".", ...
Non-optimized version of ``admm_linearized``. This function is intended for debugging. It makes a lot of copies and performs no error checking.
[ "Non", "-", "optimized", "version", "of", "admm_linearized", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/admm.py#L157-L171
231,732
odlgroup/odl
odl/solvers/functional/derivatives.py
NumericalGradient.derivative
def derivative(self, point): """Return the derivative in ``point``. The derivative of the gradient is often called the Hessian. Parameters ---------- point : `domain` `element-like` The point that the derivative should be taken in. Returns ------- derivative : `NumericalDerivative` Numerical estimate of the derivative. Uses the same method as this operator does, but with half the number of significant digits in the step size in order to preserve numerical stability. Examples -------- Compute a numerical estimate of the derivative of the squared L2 norm: >>> space = odl.rn(3) >>> func = odl.solvers.L2NormSquared(space) >>> grad = NumericalGradient(func) >>> hess = grad.derivative([1, 1, 1]) >>> hess([1, 0, 0]) rn(3).element([ 2., 0., 0.]) Find the Hessian matrix: >>> hess_matrix = odl.matrix_representation(hess) >>> np.allclose(hess_matrix, 2 * np.eye(3)) True """ return NumericalDerivative(self, point, method=self.method, step=np.sqrt(self.step))
python
def derivative(self, point): return NumericalDerivative(self, point, method=self.method, step=np.sqrt(self.step))
[ "def", "derivative", "(", "self", ",", "point", ")", ":", "return", "NumericalDerivative", "(", "self", ",", "point", ",", "method", "=", "self", ".", "method", ",", "step", "=", "np", ".", "sqrt", "(", "self", ".", "step", ")", ")" ]
Return the derivative in ``point``. The derivative of the gradient is often called the Hessian. Parameters ---------- point : `domain` `element-like` The point that the derivative should be taken in. Returns ------- derivative : `NumericalDerivative` Numerical estimate of the derivative. Uses the same method as this operator does, but with half the number of significant digits in the step size in order to preserve numerical stability. Examples -------- Compute a numerical estimate of the derivative of the squared L2 norm: >>> space = odl.rn(3) >>> func = odl.solvers.L2NormSquared(space) >>> grad = NumericalGradient(func) >>> hess = grad.derivative([1, 1, 1]) >>> hess([1, 0, 0]) rn(3).element([ 2., 0., 0.]) Find the Hessian matrix: >>> hess_matrix = odl.matrix_representation(hess) >>> np.allclose(hess_matrix, 2 * np.eye(3)) True
[ "Return", "the", "derivative", "in", "point", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/functional/derivatives.py#L271-L306
231,733
odlgroup/odl
odl/discr/discr_ops.py
_offset_from_spaces
def _offset_from_spaces(dom, ran): """Return index offset corresponding to given spaces.""" affected = np.not_equal(dom.shape, ran.shape) diff_l = np.abs(ran.grid.min() - dom.grid.min()) offset_float = diff_l / dom.cell_sides offset = np.around(offset_float).astype(int) for i in range(dom.ndim): if affected[i] and not np.isclose(offset[i], offset_float[i]): raise ValueError('in axis {}: range is shifted relative to domain ' 'by a non-multiple {} of cell_sides' ''.format(i, offset_float[i] - offset[i])) offset[~affected] = 0 return tuple(offset)
python
def _offset_from_spaces(dom, ran): affected = np.not_equal(dom.shape, ran.shape) diff_l = np.abs(ran.grid.min() - dom.grid.min()) offset_float = diff_l / dom.cell_sides offset = np.around(offset_float).astype(int) for i in range(dom.ndim): if affected[i] and not np.isclose(offset[i], offset_float[i]): raise ValueError('in axis {}: range is shifted relative to domain ' 'by a non-multiple {} of cell_sides' ''.format(i, offset_float[i] - offset[i])) offset[~affected] = 0 return tuple(offset)
[ "def", "_offset_from_spaces", "(", "dom", ",", "ran", ")", ":", "affected", "=", "np", ".", "not_equal", "(", "dom", ".", "shape", ",", "ran", ".", "shape", ")", "diff_l", "=", "np", ".", "abs", "(", "ran", ".", "grid", ".", "min", "(", ")", "-",...
Return index offset corresponding to given spaces.
[ "Return", "index", "offset", "corresponding", "to", "given", "spaces", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discr_ops.py#L441-L453
231,734
odlgroup/odl
odl/discr/discr_ops.py
Resampling._call
def _call(self, x, out=None): """Apply resampling operator. The element ``x`` is resampled using the sampling and interpolation operators of the underlying spaces. """ if out is None: return x.interpolation else: out.sampling(x.interpolation)
python
def _call(self, x, out=None): if out is None: return x.interpolation else: out.sampling(x.interpolation)
[ "def", "_call", "(", "self", ",", "x", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "return", "x", ".", "interpolation", "else", ":", "out", ".", "sampling", "(", "x", ".", "interpolation", ")" ]
Apply resampling operator. The element ``x`` is resampled using the sampling and interpolation operators of the underlying spaces.
[ "Apply", "resampling", "operator", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discr_ops.py#L83-L92
231,735
odlgroup/odl
odl/discr/discr_ops.py
ResizingOperatorBase.axes
def axes(self): """Dimensions in which an actual resizing is performed.""" return tuple(i for i in range(self.domain.ndim) if self.domain.shape[i] != self.range.shape[i])
python
def axes(self): return tuple(i for i in range(self.domain.ndim) if self.domain.shape[i] != self.range.shape[i])
[ "def", "axes", "(", "self", ")", ":", "return", "tuple", "(", "i", "for", "i", "in", "range", "(", "self", ".", "domain", ".", "ndim", ")", "if", "self", ".", "domain", ".", "shape", "[", "i", "]", "!=", "self", ".", "range", ".", "shape", "[",...
Dimensions in which an actual resizing is performed.
[ "Dimensions", "in", "which", "an", "actual", "resizing", "is", "performed", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discr_ops.py#L335-L338
231,736
odlgroup/odl
odl/discr/discr_ops.py
ResizingOperator.derivative
def derivative(self, point): """Derivative of this operator at ``point``. For the particular case of constant padding with non-zero constant, the derivative is the corresponding zero-padding variant. In all other cases, this operator is linear, i.e. the derivative is equal to ``self``. """ if self.pad_mode == 'constant' and self.pad_const != 0: return ResizingOperator( domain=self.domain, range=self.range, pad_mode='constant', pad_const=0.0) else: # operator is linear return self
python
def derivative(self, point): if self.pad_mode == 'constant' and self.pad_const != 0: return ResizingOperator( domain=self.domain, range=self.range, pad_mode='constant', pad_const=0.0) else: # operator is linear return self
[ "def", "derivative", "(", "self", ",", "point", ")", ":", "if", "self", ".", "pad_mode", "==", "'constant'", "and", "self", ".", "pad_const", "!=", "0", ":", "return", "ResizingOperator", "(", "domain", "=", "self", ".", "domain", ",", "range", "=", "s...
Derivative of this operator at ``point``. For the particular case of constant padding with non-zero constant, the derivative is the corresponding zero-padding variant. In all other cases, this operator is linear, i.e. the derivative is equal to ``self``.
[ "Derivative", "of", "this", "operator", "at", "point", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/discr_ops.py#L367-L380
231,737
odlgroup/odl
odl/set/sets.py
Strings.contains_all
def contains_all(self, other): """Return ``True`` if all strings in ``other`` have size `length`.""" dtype = getattr(other, 'dtype', None) if dtype is None: dtype = np.result_type(*other) dtype_str = np.dtype('S{}'.format(self.length)) dtype_uni = np.dtype('<U{}'.format(self.length)) return dtype in (dtype_str, dtype_uni)
python
def contains_all(self, other): dtype = getattr(other, 'dtype', None) if dtype is None: dtype = np.result_type(*other) dtype_str = np.dtype('S{}'.format(self.length)) dtype_uni = np.dtype('<U{}'.format(self.length)) return dtype in (dtype_str, dtype_uni)
[ "def", "contains_all", "(", "self", ",", "other", ")", ":", "dtype", "=", "getattr", "(", "other", ",", "'dtype'", ",", "None", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "np", ".", "result_type", "(", "*", "other", ")", "dtype_str", "=", ...
Return ``True`` if all strings in ``other`` have size `length`.
[ "Return", "True", "if", "all", "strings", "in", "other", "have", "size", "length", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/sets.py#L250-L257
231,738
odlgroup/odl
odl/set/sets.py
Strings.element
def element(self, inp=None): """Return an element from ``inp`` or from scratch.""" if inp is not None: s = str(inp)[:self.length] s += ' ' * (self.length - len(s)) return s else: return ' ' * self.length
python
def element(self, inp=None): if inp is not None: s = str(inp)[:self.length] s += ' ' * (self.length - len(s)) return s else: return ' ' * self.length
[ "def", "element", "(", "self", ",", "inp", "=", "None", ")", ":", "if", "inp", "is", "not", "None", ":", "s", "=", "str", "(", "inp", ")", "[", ":", "self", ".", "length", "]", "s", "+=", "' '", "*", "(", "self", ".", "length", "-", "len", ...
Return an element from ``inp`` or from scratch.
[ "Return", "an", "element", "from", "inp", "or", "from", "scratch", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/sets.py#L267-L274
231,739
odlgroup/odl
odl/set/sets.py
ComplexNumbers.contains_all
def contains_all(self, other): """Return ``True`` if ``other`` is a sequence of complex numbers.""" dtype = getattr(other, 'dtype', None) if dtype is None: dtype = np.result_type(*other) return is_numeric_dtype(dtype)
python
def contains_all(self, other): dtype = getattr(other, 'dtype', None) if dtype is None: dtype = np.result_type(*other) return is_numeric_dtype(dtype)
[ "def", "contains_all", "(", "self", ",", "other", ")", ":", "dtype", "=", "getattr", "(", "other", ",", "'dtype'", ",", "None", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "np", ".", "result_type", "(", "*", "other", ")", "return", "is_numer...
Return ``True`` if ``other`` is a sequence of complex numbers.
[ "Return", "True", "if", "other", "is", "a", "sequence", "of", "complex", "numbers", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/sets.py#L344-L349
231,740
odlgroup/odl
odl/set/sets.py
ComplexNumbers.element
def element(self, inp=None): """Return a complex number from ``inp`` or from scratch.""" if inp is not None: # Workaround for missing __complex__ of numpy.ndarray # for Numpy version < 1.12 # TODO: remove when Numpy >= 1.12 is required if isinstance(inp, np.ndarray): return complex(inp.reshape([1])[0]) else: return complex(inp) else: return complex(0.0, 0.0)
python
def element(self, inp=None): if inp is not None: # Workaround for missing __complex__ of numpy.ndarray # for Numpy version < 1.12 # TODO: remove when Numpy >= 1.12 is required if isinstance(inp, np.ndarray): return complex(inp.reshape([1])[0]) else: return complex(inp) else: return complex(0.0, 0.0)
[ "def", "element", "(", "self", ",", "inp", "=", "None", ")", ":", "if", "inp", "is", "not", "None", ":", "# Workaround for missing __complex__ of numpy.ndarray", "# for Numpy version < 1.12", "# TODO: remove when Numpy >= 1.12 is required", "if", "isinstance", "(", "inp",...
Return a complex number from ``inp`` or from scratch.
[ "Return", "a", "complex", "number", "from", "inp", "or", "from", "scratch", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/sets.py#L362-L373
231,741
odlgroup/odl
odl/set/sets.py
RealNumbers.contains_set
def contains_set(self, other): """Return ``True`` if ``other`` is a subset of the real numbers. Returns ------- contained : bool ``True`` if other is an instance of `RealNumbers` or `Integers` False otherwise. Examples -------- >>> real_numbers = RealNumbers() >>> real_numbers.contains_set(RealNumbers()) True """ if other is self: return True return (isinstance(other, RealNumbers) or isinstance(other, Integers))
python
def contains_set(self, other): if other is self: return True return (isinstance(other, RealNumbers) or isinstance(other, Integers))
[ "def", "contains_set", "(", "self", ",", "other", ")", ":", "if", "other", "is", "self", ":", "return", "True", "return", "(", "isinstance", "(", "other", ",", "RealNumbers", ")", "or", "isinstance", "(", "other", ",", "Integers", ")", ")" ]
Return ``True`` if ``other`` is a subset of the real numbers. Returns ------- contained : bool ``True`` if other is an instance of `RealNumbers` or `Integers` False otherwise. Examples -------- >>> real_numbers = RealNumbers() >>> real_numbers.contains_set(RealNumbers()) True
[ "Return", "True", "if", "other", "is", "a", "subset", "of", "the", "real", "numbers", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/sets.py#L390-L409
231,742
odlgroup/odl
odl/set/sets.py
RealNumbers.contains_all
def contains_all(self, array): """Test if `array` is an array of real numbers.""" dtype = getattr(array, 'dtype', None) if dtype is None: dtype = np.result_type(*array) return is_real_dtype(dtype)
python
def contains_all(self, array): dtype = getattr(array, 'dtype', None) if dtype is None: dtype = np.result_type(*array) return is_real_dtype(dtype)
[ "def", "contains_all", "(", "self", ",", "array", ")", ":", "dtype", "=", "getattr", "(", "array", ",", "'dtype'", ",", "None", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "np", ".", "result_type", "(", "*", "array", ")", "return", "is_real_...
Test if `array` is an array of real numbers.
[ "Test", "if", "array", "is", "an", "array", "of", "real", "numbers", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/sets.py#L411-L416
231,743
odlgroup/odl
odl/set/sets.py
Integers.contains_all
def contains_all(self, other): """Return ``True`` if ``other`` is a sequence of integers.""" dtype = getattr(other, 'dtype', None) if dtype is None: dtype = np.result_type(*other) return is_int_dtype(dtype)
python
def contains_all(self, other): dtype = getattr(other, 'dtype', None) if dtype is None: dtype = np.result_type(*other) return is_int_dtype(dtype)
[ "def", "contains_all", "(", "self", ",", "other", ")", ":", "dtype", "=", "getattr", "(", "other", ",", "'dtype'", ",", "None", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "np", ".", "result_type", "(", "*", "other", ")", "return", "is_int_d...
Return ``True`` if ``other`` is a sequence of integers.
[ "Return", "True", "if", "other", "is", "a", "sequence", "of", "integers", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/sets.py#L471-L476
231,744
odlgroup/odl
odl/set/sets.py
CartesianProduct.element
def element(self, inp=None): """Create a `CartesianProduct` element. Parameters ---------- inp : iterable, optional Collection of input values for the `LinearSpace.element` methods of all sets in the Cartesian product. Returns ------- element : tuple A tuple of the given input """ if inp is None: tpl = tuple(set_.element() for set_ in self.sets) else: tpl = tuple(set_.element(inpt) for inpt, set_ in zip(inp, self.sets)) if len(tpl) != len(self): raise ValueError('input provides only {} values, needed ' 'are {}'.format(len(tpl), len(self))) return tpl
python
def element(self, inp=None): if inp is None: tpl = tuple(set_.element() for set_ in self.sets) else: tpl = tuple(set_.element(inpt) for inpt, set_ in zip(inp, self.sets)) if len(tpl) != len(self): raise ValueError('input provides only {} values, needed ' 'are {}'.format(len(tpl), len(self))) return tpl
[ "def", "element", "(", "self", ",", "inp", "=", "None", ")", ":", "if", "inp", "is", "None", ":", "tpl", "=", "tuple", "(", "set_", ".", "element", "(", ")", "for", "set_", "in", "self", ".", "sets", ")", "else", ":", "tpl", "=", "tuple", "(", ...
Create a `CartesianProduct` element. Parameters ---------- inp : iterable, optional Collection of input values for the `LinearSpace.element` methods of all sets in the Cartesian product. Returns ------- element : tuple A tuple of the given input
[ "Create", "a", "CartesianProduct", "element", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/set/sets.py#L563-L588
231,745
odlgroup/odl
odl/trafos/fourier.py
DiscreteFourierTransformBase.adjoint
def adjoint(self): """Adjoint transform, equal to the inverse. See Also -------- inverse """ if self.domain.exponent == 2.0 and self.range.exponent == 2.0: return self.inverse else: raise NotImplementedError( 'no adjoint defined for exponents ({}, {}) != (2, 2)' ''.format(self.domain.exponent, self.range.exponent))
python
def adjoint(self): if self.domain.exponent == 2.0 and self.range.exponent == 2.0: return self.inverse else: raise NotImplementedError( 'no adjoint defined for exponents ({}, {}) != (2, 2)' ''.format(self.domain.exponent, self.range.exponent))
[ "def", "adjoint", "(", "self", ")", ":", "if", "self", ".", "domain", ".", "exponent", "==", "2.0", "and", "self", ".", "range", ".", "exponent", "==", "2.0", ":", "return", "self", ".", "inverse", "else", ":", "raise", "NotImplementedError", "(", "'no...
Adjoint transform, equal to the inverse. See Also -------- inverse
[ "Adjoint", "transform", "equal", "to", "the", "inverse", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/fourier.py#L194-L206
231,746
odlgroup/odl
odl/trafos/fourier.py
FourierTransformBase.create_temporaries
def create_temporaries(self, r=True, f=True): """Allocate and store reusable temporaries. Existing temporaries are overridden. Parameters ---------- r : bool, optional Create temporary for the real space f : bool, optional Create temporary for the frequency space Notes ----- To save memory, clear the temporaries when the transform is no longer used. See Also -------- clear_temporaries clear_fftw_plan : can also hold references to the temporaries """ inverse = isinstance(self, FourierTransformInverse) if inverse: rspace = self.range fspace = self.domain else: rspace = self.domain fspace = self.range if r: self._tmp_r = rspace.element().asarray() if f: self._tmp_f = fspace.element().asarray()
python
def create_temporaries(self, r=True, f=True): inverse = isinstance(self, FourierTransformInverse) if inverse: rspace = self.range fspace = self.domain else: rspace = self.domain fspace = self.range if r: self._tmp_r = rspace.element().asarray() if f: self._tmp_f = fspace.element().asarray()
[ "def", "create_temporaries", "(", "self", ",", "r", "=", "True", ",", "f", "=", "True", ")", ":", "inverse", "=", "isinstance", "(", "self", ",", "FourierTransformInverse", ")", "if", "inverse", ":", "rspace", "=", "self", ".", "range", "fspace", "=", ...
Allocate and store reusable temporaries. Existing temporaries are overridden. Parameters ---------- r : bool, optional Create temporary for the real space f : bool, optional Create temporary for the frequency space Notes ----- To save memory, clear the temporaries when the transform is no longer used. See Also -------- clear_temporaries clear_fftw_plan : can also hold references to the temporaries
[ "Allocate", "and", "store", "reusable", "temporaries", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/fourier.py#L999-L1033
231,747
odlgroup/odl
odl/trafos/fourier.py
FourierTransform._preprocess
def _preprocess(self, x, out=None): """Return the pre-processed version of ``x``. C2C: use ``tmp_r`` or ``tmp_f`` (C2C operation) R2C: use ``tmp_f`` (R2C operation) HALFC: use ``tmp_r`` (R2R operation) The result is stored in ``out`` if given, otherwise in a temporary or a new array. """ if out is None: if self.domain.field == ComplexNumbers(): out = self._tmp_r if self._tmp_r is not None else self._tmp_f elif self.domain.field == RealNumbers() and not self.halfcomplex: out = self._tmp_f else: out = self._tmp_r return dft_preprocess_data( x, shift=self.shifts, axes=self.axes, sign=self.sign, out=out)
python
def _preprocess(self, x, out=None): if out is None: if self.domain.field == ComplexNumbers(): out = self._tmp_r if self._tmp_r is not None else self._tmp_f elif self.domain.field == RealNumbers() and not self.halfcomplex: out = self._tmp_f else: out = self._tmp_r return dft_preprocess_data( x, shift=self.shifts, axes=self.axes, sign=self.sign, out=out)
[ "def", "_preprocess", "(", "self", ",", "x", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "if", "self", ".", "domain", ".", "field", "==", "ComplexNumbers", "(", ")", ":", "out", "=", "self", ".", "_tmp_r", "if", "self", "."...
Return the pre-processed version of ``x``. C2C: use ``tmp_r`` or ``tmp_f`` (C2C operation) R2C: use ``tmp_f`` (R2C operation) HALFC: use ``tmp_r`` (R2R operation) The result is stored in ``out`` if given, otherwise in a temporary or a new array.
[ "Return", "the", "pre", "-", "processed", "version", "of", "x", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/fourier.py#L1249-L1268
231,748
odlgroup/odl
odl/trafos/fourier.py
FourierTransformInverse.inverse
def inverse(self): """Inverse of the inverse, the forward FT.""" sign = '+' if self.sign == '-' else '-' return FourierTransform( domain=self.range, range=self.domain, impl=self.impl, axes=self.axes, halfcomplex=self.halfcomplex, shift=self.shifts, sign=sign, tmp_r=self._tmp_r, tmp_f=self._tmp_f)
python
def inverse(self): sign = '+' if self.sign == '-' else '-' return FourierTransform( domain=self.range, range=self.domain, impl=self.impl, axes=self.axes, halfcomplex=self.halfcomplex, shift=self.shifts, sign=sign, tmp_r=self._tmp_r, tmp_f=self._tmp_f)
[ "def", "inverse", "(", "self", ")", ":", "sign", "=", "'+'", "if", "self", ".", "sign", "==", "'-'", "else", "'-'", "return", "FourierTransform", "(", "domain", "=", "self", ".", "range", ",", "range", "=", "self", ".", "domain", ",", "impl", "=", ...
Inverse of the inverse, the forward FT.
[ "Inverse", "of", "the", "inverse", "the", "forward", "FT", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/trafos/fourier.py#L1639-L1645
231,749
odlgroup/odl
odl/util/npy_compat.py
moveaxis
def moveaxis(a, source, destination): """Move axes of an array to new positions. Other axes remain in their original order. This function is a backport of `numpy.moveaxis` introduced in NumPy 1.11. See Also -------- numpy.moveaxis """ import numpy if hasattr(numpy, 'moveaxis'): return numpy.moveaxis(a, source, destination) try: source = list(source) except TypeError: source = [source] try: destination = list(destination) except TypeError: destination = [destination] source = [ax + a.ndim if ax < 0 else ax for ax in source] destination = [ax + a.ndim if ax < 0 else ax for ax in destination] order = [n for n in range(a.ndim) if n not in source] for dest, src in sorted(zip(destination, source)): order.insert(dest, src) return a.transpose(order)
python
def moveaxis(a, source, destination): import numpy if hasattr(numpy, 'moveaxis'): return numpy.moveaxis(a, source, destination) try: source = list(source) except TypeError: source = [source] try: destination = list(destination) except TypeError: destination = [destination] source = [ax + a.ndim if ax < 0 else ax for ax in source] destination = [ax + a.ndim if ax < 0 else ax for ax in destination] order = [n for n in range(a.ndim) if n not in source] for dest, src in sorted(zip(destination, source)): order.insert(dest, src) return a.transpose(order)
[ "def", "moveaxis", "(", "a", ",", "source", ",", "destination", ")", ":", "import", "numpy", "if", "hasattr", "(", "numpy", ",", "'moveaxis'", ")", ":", "return", "numpy", ".", "moveaxis", "(", "a", ",", "source", ",", "destination", ")", "try", ":", ...
Move axes of an array to new positions. Other axes remain in their original order. This function is a backport of `numpy.moveaxis` introduced in NumPy 1.11. See Also -------- numpy.moveaxis
[ "Move", "axes", "of", "an", "array", "to", "new", "positions", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/npy_compat.py#L19-L52
231,750
odlgroup/odl
odl/util/npy_compat.py
flip
def flip(a, axis): """Reverse the order of elements in an array along the given axis. This function is a backport of `numpy.flip` introduced in NumPy 1.12. See Also -------- numpy.flip """ if not hasattr(a, 'ndim'): a = np.asarray(a) indexer = [slice(None)] * a.ndim try: indexer[axis] = slice(None, None, -1) except IndexError: raise ValueError('axis={} is invalid for the {}-dimensional input ' 'array'.format(axis, a.ndim)) return a[tuple(indexer)]
python
def flip(a, axis): if not hasattr(a, 'ndim'): a = np.asarray(a) indexer = [slice(None)] * a.ndim try: indexer[axis] = slice(None, None, -1) except IndexError: raise ValueError('axis={} is invalid for the {}-dimensional input ' 'array'.format(axis, a.ndim)) return a[tuple(indexer)]
[ "def", "flip", "(", "a", ",", "axis", ")", ":", "if", "not", "hasattr", "(", "a", ",", "'ndim'", ")", ":", "a", "=", "np", ".", "asarray", "(", "a", ")", "indexer", "=", "[", "slice", "(", "None", ")", "]", "*", "a", ".", "ndim", "try", ":"...
Reverse the order of elements in an array along the given axis. This function is a backport of `numpy.flip` introduced in NumPy 1.12. See Also -------- numpy.flip
[ "Reverse", "the", "order", "of", "elements", "in", "an", "array", "along", "the", "given", "axis", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/npy_compat.py#L56-L73
231,751
odlgroup/odl
odl/contrib/datasets/ct/mayo.py
_read_projections
def _read_projections(folder, indices): """Read mayo projections from a folder.""" datasets = [] # Get the relevant file names file_names = sorted([f for f in os.listdir(folder) if f.endswith(".dcm")]) if len(file_names) == 0: raise ValueError('No DICOM files found in {}'.format(folder)) file_names = file_names[indices] data_array = None for i, file_name in enumerate(tqdm.tqdm(file_names, 'Loading projection data')): # read the file dataset = dicom.read_file(folder + '/' + file_name) # Get some required data rows = dataset.NumberofDetectorRows cols = dataset.NumberofDetectorColumns hu_factor = dataset.HUCalibrationFactor rescale_intercept = dataset.RescaleIntercept rescale_slope = dataset.RescaleSlope # Load the array as bytes proj_array = np.array(np.frombuffer(dataset.PixelData, 'H'), dtype='float32') proj_array = proj_array.reshape([rows, cols], order='F').T # Rescale array proj_array *= rescale_slope proj_array += rescale_intercept proj_array /= hu_factor # Store results if data_array is None: # We need to load the first dataset before we know the shape data_array = np.empty((len(file_names), cols, rows), dtype='float32') data_array[i] = proj_array[:, ::-1] datasets.append(dataset) return datasets, data_array
python
def _read_projections(folder, indices): datasets = [] # Get the relevant file names file_names = sorted([f for f in os.listdir(folder) if f.endswith(".dcm")]) if len(file_names) == 0: raise ValueError('No DICOM files found in {}'.format(folder)) file_names = file_names[indices] data_array = None for i, file_name in enumerate(tqdm.tqdm(file_names, 'Loading projection data')): # read the file dataset = dicom.read_file(folder + '/' + file_name) # Get some required data rows = dataset.NumberofDetectorRows cols = dataset.NumberofDetectorColumns hu_factor = dataset.HUCalibrationFactor rescale_intercept = dataset.RescaleIntercept rescale_slope = dataset.RescaleSlope # Load the array as bytes proj_array = np.array(np.frombuffer(dataset.PixelData, 'H'), dtype='float32') proj_array = proj_array.reshape([rows, cols], order='F').T # Rescale array proj_array *= rescale_slope proj_array += rescale_intercept proj_array /= hu_factor # Store results if data_array is None: # We need to load the first dataset before we know the shape data_array = np.empty((len(file_names), cols, rows), dtype='float32') data_array[i] = proj_array[:, ::-1] datasets.append(dataset) return datasets, data_array
[ "def", "_read_projections", "(", "folder", ",", "indices", ")", ":", "datasets", "=", "[", "]", "# Get the relevant file names", "file_names", "=", "sorted", "(", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "folder", ")", "if", "f", ".", "ends...
Read mayo projections from a folder.
[ "Read", "mayo", "projections", "from", "a", "folder", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/ct/mayo.py#L37-L82
231,752
odlgroup/odl
odl/contrib/datasets/ct/mayo.py
load_projections
def load_projections(folder, indices=None): """Load geometry and data stored in Mayo format from folder. Parameters ---------- folder : str Path to the folder where the Mayo DICOM files are stored. indices : optional Indices of the projections to load. Accepts advanced indexing such as slice or list of indices. Returns ------- geometry : ConeFlatGeometry Geometry corresponding to the Mayo projector. proj_data : `numpy.ndarray` Projection data, given as the line integral of the linear attenuation coefficient (g/cm^3). Its unit is thus g/cm^2. """ datasets, data_array = _read_projections(folder, indices) # Get the angles angles = [d.DetectorFocalCenterAngularPosition for d in datasets] angles = -np.unwrap(angles) - np.pi # different defintion of angles # Set minimum and maximum corners shape = np.array([datasets[0].NumberofDetectorColumns, datasets[0].NumberofDetectorRows]) pixel_size = np.array([datasets[0].DetectorElementTransverseSpacing, datasets[0].DetectorElementAxialSpacing]) # Correct from center of pixel to corner of pixel minp = -(np.array(datasets[0].DetectorCentralElement) - 0.5) * pixel_size maxp = minp + shape * pixel_size # Select geometry parameters src_radius = datasets[0].DetectorFocalCenterRadialDistance det_radius = (datasets[0].ConstantRadialDistance - datasets[0].DetectorFocalCenterRadialDistance) # For unknown reasons, mayo does not include the tag # "TableFeedPerRotation", which is what we want. # Instead we manually compute the pitch pitch = ((datasets[-1].DetectorFocalCenterAxialPosition - datasets[0].DetectorFocalCenterAxialPosition) / ((np.max(angles) - np.min(angles)) / (2 * np.pi))) # Get flying focal spot data offset_axial = np.array([d.SourceAxialPositionShift for d in datasets]) offset_angular = np.array([d.SourceAngularPositionShift for d in datasets]) offset_radial = np.array([d.SourceRadialDistanceShift for d in datasets]) # TODO(adler-j): Implement proper handling of flying focal spot. 
# Currently we do not fully account for it, merely making some "first # order corrections" to the detector position and radial offset. # Update angles with flying focal spot (in plane direction). # This increases the resolution of the reconstructions. angles = angles - offset_angular # We correct for the mean offset due to the rotated angles, we need to # shift the detector. offset_detector_by_angles = det_radius * np.mean(offset_angular) minp[0] -= offset_detector_by_angles maxp[0] -= offset_detector_by_angles # We currently apply only the mean of the offsets src_radius = src_radius + np.mean(offset_radial) # Partially compensate for a movement of the source by moving the object # instead. We need to rescale by the magnification to get the correct # change in the detector. This approximation is only exactly valid on the # axis of rotation. mean_offset_along_axis_for_ffz = np.mean(offset_axial) * ( src_radius / (src_radius + det_radius)) # Create partition for detector detector_partition = odl.uniform_partition(minp, maxp, shape) # Convert offset to odl defintions offset_along_axis = (mean_offset_along_axis_for_ffz + datasets[0].DetectorFocalCenterAxialPosition - angles[0] / (2 * np.pi) * pitch) # Assemble geometry angle_partition = odl.nonuniform_partition(angles) geometry = odl.tomo.ConeFlatGeometry(angle_partition, detector_partition, src_radius=src_radius, det_radius=det_radius, pitch=pitch, offset_along_axis=offset_along_axis) # Create a *temporary* ray transform (we need its range) spc = odl.uniform_discr([-1] * 3, [1] * 3, [32] * 3) ray_trafo = odl.tomo.RayTransform(spc, geometry, interp='linear') # convert coordinates theta, up, vp = ray_trafo.range.grid.meshgrid d = src_radius + det_radius u = d * np.arctan(up / d) v = d / np.sqrt(d**2 + up**2) * vp # Calculate projection data in rectangular coordinates since we have no # backend that supports cylindrical proj_data_cylinder = ray_trafo.range.element(data_array) interpolated_values = 
proj_data_cylinder.interpolation((theta, u, v)) proj_data = ray_trafo.range.element(interpolated_values) return geometry, proj_data.asarray()
python
def load_projections(folder, indices=None): datasets, data_array = _read_projections(folder, indices) # Get the angles angles = [d.DetectorFocalCenterAngularPosition for d in datasets] angles = -np.unwrap(angles) - np.pi # different defintion of angles # Set minimum and maximum corners shape = np.array([datasets[0].NumberofDetectorColumns, datasets[0].NumberofDetectorRows]) pixel_size = np.array([datasets[0].DetectorElementTransverseSpacing, datasets[0].DetectorElementAxialSpacing]) # Correct from center of pixel to corner of pixel minp = -(np.array(datasets[0].DetectorCentralElement) - 0.5) * pixel_size maxp = minp + shape * pixel_size # Select geometry parameters src_radius = datasets[0].DetectorFocalCenterRadialDistance det_radius = (datasets[0].ConstantRadialDistance - datasets[0].DetectorFocalCenterRadialDistance) # For unknown reasons, mayo does not include the tag # "TableFeedPerRotation", which is what we want. # Instead we manually compute the pitch pitch = ((datasets[-1].DetectorFocalCenterAxialPosition - datasets[0].DetectorFocalCenterAxialPosition) / ((np.max(angles) - np.min(angles)) / (2 * np.pi))) # Get flying focal spot data offset_axial = np.array([d.SourceAxialPositionShift for d in datasets]) offset_angular = np.array([d.SourceAngularPositionShift for d in datasets]) offset_radial = np.array([d.SourceRadialDistanceShift for d in datasets]) # TODO(adler-j): Implement proper handling of flying focal spot. # Currently we do not fully account for it, merely making some "first # order corrections" to the detector position and radial offset. # Update angles with flying focal spot (in plane direction). # This increases the resolution of the reconstructions. angles = angles - offset_angular # We correct for the mean offset due to the rotated angles, we need to # shift the detector. 
offset_detector_by_angles = det_radius * np.mean(offset_angular) minp[0] -= offset_detector_by_angles maxp[0] -= offset_detector_by_angles # We currently apply only the mean of the offsets src_radius = src_radius + np.mean(offset_radial) # Partially compensate for a movement of the source by moving the object # instead. We need to rescale by the magnification to get the correct # change in the detector. This approximation is only exactly valid on the # axis of rotation. mean_offset_along_axis_for_ffz = np.mean(offset_axial) * ( src_radius / (src_radius + det_radius)) # Create partition for detector detector_partition = odl.uniform_partition(minp, maxp, shape) # Convert offset to odl defintions offset_along_axis = (mean_offset_along_axis_for_ffz + datasets[0].DetectorFocalCenterAxialPosition - angles[0] / (2 * np.pi) * pitch) # Assemble geometry angle_partition = odl.nonuniform_partition(angles) geometry = odl.tomo.ConeFlatGeometry(angle_partition, detector_partition, src_radius=src_radius, det_radius=det_radius, pitch=pitch, offset_along_axis=offset_along_axis) # Create a *temporary* ray transform (we need its range) spc = odl.uniform_discr([-1] * 3, [1] * 3, [32] * 3) ray_trafo = odl.tomo.RayTransform(spc, geometry, interp='linear') # convert coordinates theta, up, vp = ray_trafo.range.grid.meshgrid d = src_radius + det_radius u = d * np.arctan(up / d) v = d / np.sqrt(d**2 + up**2) * vp # Calculate projection data in rectangular coordinates since we have no # backend that supports cylindrical proj_data_cylinder = ray_trafo.range.element(data_array) interpolated_values = proj_data_cylinder.interpolation((theta, u, v)) proj_data = ray_trafo.range.element(interpolated_values) return geometry, proj_data.asarray()
[ "def", "load_projections", "(", "folder", ",", "indices", "=", "None", ")", ":", "datasets", ",", "data_array", "=", "_read_projections", "(", "folder", ",", "indices", ")", "# Get the angles", "angles", "=", "[", "d", ".", "DetectorFocalCenterAngularPosition", ...
Load geometry and data stored in Mayo format from folder. Parameters ---------- folder : str Path to the folder where the Mayo DICOM files are stored. indices : optional Indices of the projections to load. Accepts advanced indexing such as slice or list of indices. Returns ------- geometry : ConeFlatGeometry Geometry corresponding to the Mayo projector. proj_data : `numpy.ndarray` Projection data, given as the line integral of the linear attenuation coefficient (g/cm^3). Its unit is thus g/cm^2.
[ "Load", "geometry", "and", "data", "stored", "in", "Mayo", "format", "from", "folder", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/ct/mayo.py#L85-L194
231,753
odlgroup/odl
odl/contrib/datasets/ct/mayo.py
load_reconstruction
def load_reconstruction(folder, slice_start=0, slice_end=-1): """Load a volume from folder, also returns the corresponding partition. Parameters ---------- folder : str Path to the folder where the DICOM files are stored. slice_start : int Index of the first slice to use. Used for subsampling. slice_end : int Index of the final slice to use. Returns ------- partition : `odl.RectPartition` Partition describing the geometric positioning of the voxels. data : `numpy.ndarray` Volumetric data. Scaled such that data = 1 for water (0 HU). Notes ----- DICOM data is highly non trivial. Typically, each slice has been computed with a slice tickness (e.g. 3mm) but the slice spacing might be different from that. Further, the coordinates in DICOM is typically the *middle* of the pixel, not the corners as in ODL. This function should handle all of these peculiarities and give a volume with the correct coordinate system attached. """ file_names = sorted([f for f in os.listdir(folder) if f.endswith(".IMA")]) if len(file_names) == 0: raise ValueError('No DICOM files found in {}'.format(folder)) volumes = [] datasets = [] file_names = file_names[slice_start:slice_end] for file_name in tqdm.tqdm(file_names, 'loading volume data'): # read the file dataset = dicom.read_file(folder + '/' + file_name) # Get parameters pixel_size = np.array(dataset.PixelSpacing) pixel_thickness = float(dataset.SliceThickness) rows = dataset.Rows cols = dataset.Columns # Get data array and convert to correct coordinates data_array = np.array(np.frombuffer(dataset.PixelData, 'H'), dtype='float32') data_array = data_array.reshape([cols, rows], order='C') data_array = np.rot90(data_array, -1) # Convert from storage type to densities # TODO: Optimize these computations hu_values = (dataset.RescaleSlope * data_array + dataset.RescaleIntercept) densities = (hu_values + 1000) / 1000 # Store results volumes.append(densities) datasets.append(dataset) voxel_size = np.array(list(pixel_size) + [pixel_thickness]) shape 
= np.array([rows, cols, len(volumes)]) # Compute geometry parameters mid_pt = (np.array(dataset.ReconstructionTargetCenterPatient) - np.array(dataset.DataCollectionCenterPatient)) reconstruction_size = (voxel_size * shape) min_pt = mid_pt - reconstruction_size / 2 max_pt = mid_pt + reconstruction_size / 2 # axis 1 has reversed convention min_pt[1], max_pt[1] = -max_pt[1], -min_pt[1] if len(datasets) > 1: slice_distance = np.abs( float(datasets[1].DataCollectionCenterPatient[2]) - float(datasets[0].DataCollectionCenterPatient[2])) else: # If we only have one slice, we must approximate the distance. slice_distance = pixel_thickness # The middle of the minimum/maximum slice can be computed from the # DICOM attribute "DataCollectionCenterPatient". Since ODL uses corner # points (e.g. edge of volume) we need to add half a voxel thickness to # both sides. min_pt[2] = -np.array(datasets[0].DataCollectionCenterPatient)[2] min_pt[2] -= 0.5 * slice_distance max_pt[2] = -np.array(datasets[-1].DataCollectionCenterPatient)[2] max_pt[2] += 0.5 * slice_distance partition = odl.uniform_partition(min_pt, max_pt, shape) volume = np.transpose(np.array(volumes), (1, 2, 0)) return partition, volume
python
def load_reconstruction(folder, slice_start=0, slice_end=-1): file_names = sorted([f for f in os.listdir(folder) if f.endswith(".IMA")]) if len(file_names) == 0: raise ValueError('No DICOM files found in {}'.format(folder)) volumes = [] datasets = [] file_names = file_names[slice_start:slice_end] for file_name in tqdm.tqdm(file_names, 'loading volume data'): # read the file dataset = dicom.read_file(folder + '/' + file_name) # Get parameters pixel_size = np.array(dataset.PixelSpacing) pixel_thickness = float(dataset.SliceThickness) rows = dataset.Rows cols = dataset.Columns # Get data array and convert to correct coordinates data_array = np.array(np.frombuffer(dataset.PixelData, 'H'), dtype='float32') data_array = data_array.reshape([cols, rows], order='C') data_array = np.rot90(data_array, -1) # Convert from storage type to densities # TODO: Optimize these computations hu_values = (dataset.RescaleSlope * data_array + dataset.RescaleIntercept) densities = (hu_values + 1000) / 1000 # Store results volumes.append(densities) datasets.append(dataset) voxel_size = np.array(list(pixel_size) + [pixel_thickness]) shape = np.array([rows, cols, len(volumes)]) # Compute geometry parameters mid_pt = (np.array(dataset.ReconstructionTargetCenterPatient) - np.array(dataset.DataCollectionCenterPatient)) reconstruction_size = (voxel_size * shape) min_pt = mid_pt - reconstruction_size / 2 max_pt = mid_pt + reconstruction_size / 2 # axis 1 has reversed convention min_pt[1], max_pt[1] = -max_pt[1], -min_pt[1] if len(datasets) > 1: slice_distance = np.abs( float(datasets[1].DataCollectionCenterPatient[2]) - float(datasets[0].DataCollectionCenterPatient[2])) else: # If we only have one slice, we must approximate the distance. slice_distance = pixel_thickness # The middle of the minimum/maximum slice can be computed from the # DICOM attribute "DataCollectionCenterPatient". Since ODL uses corner # points (e.g. edge of volume) we need to add half a voxel thickness to # both sides. 
min_pt[2] = -np.array(datasets[0].DataCollectionCenterPatient)[2] min_pt[2] -= 0.5 * slice_distance max_pt[2] = -np.array(datasets[-1].DataCollectionCenterPatient)[2] max_pt[2] += 0.5 * slice_distance partition = odl.uniform_partition(min_pt, max_pt, shape) volume = np.transpose(np.array(volumes), (1, 2, 0)) return partition, volume
[ "def", "load_reconstruction", "(", "folder", ",", "slice_start", "=", "0", ",", "slice_end", "=", "-", "1", ")", ":", "file_names", "=", "sorted", "(", "[", "f", "for", "f", "in", "os", ".", "listdir", "(", "folder", ")", "if", "f", ".", "endswith", ...
Load a volume from folder, also returns the corresponding partition. Parameters ---------- folder : str Path to the folder where the DICOM files are stored. slice_start : int Index of the first slice to use. Used for subsampling. slice_end : int Index of the final slice to use. Returns ------- partition : `odl.RectPartition` Partition describing the geometric positioning of the voxels. data : `numpy.ndarray` Volumetric data. Scaled such that data = 1 for water (0 HU). Notes ----- DICOM data is highly non trivial. Typically, each slice has been computed with a slice tickness (e.g. 3mm) but the slice spacing might be different from that. Further, the coordinates in DICOM is typically the *middle* of the pixel, not the corners as in ODL. This function should handle all of these peculiarities and give a volume with the correct coordinate system attached.
[ "Load", "a", "volume", "from", "folder", "also", "returns", "the", "corresponding", "partition", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/ct/mayo.py#L197-L298
231,754
odlgroup/odl
odl/solvers/smooth/newton.py
newtons_method
def newtons_method(f, x, line_search=1.0, maxiter=1000, tol=1e-16, cg_iter=None, callback=None): r"""Newton's method for minimizing a functional. Notes ----- This is a general and optimized implementation of Newton's method for solving the problem: .. math:: \min f(x) for a differentiable function :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X}. of finding a root of a function. The algorithm is well-known and there is a vast literature about it. Among others, the method is described in [BV2004], Sections 9.5 and 10.2 (`book available online <http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_), [GNS2009], Section 2.7 for solving nonlinear equations and Section 11.3 for its use in minimization, and wikipedia on `Newton's_method <https://en.wikipedia.org/wiki/Newton's_method>`_. The algorithm works by iteratively solving .. math:: \partial f(x_k)p_k = -f(x_k) and then updating as .. math:: x_{k+1} = x_k + \alpha x_k, where :math:`\alpha` is a suitable step length (see the references). In this implementation the system of equations are solved using the conjugate gradient method. Parameters ---------- f : `Functional` Goal functional. Needs to have ``f.gradient`` and ``f.gradient.derivative``. x : ``op.domain`` element Starting point of the iteration line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, uses it as a fixed step length. maxiter : int, optional Maximum number of iterations. tol : float, optional Tolerance that should be used for terminating the iteration. cg_iter : int, optional Number of iterations in the the conjugate gradient solver, for computing the search direction. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate References ---------- [BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*. Cambridge university press, 2004. 
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear optimization*. Siam, 2009. """ # TODO: update doc grad = f.gradient if x not in grad.domain: raise TypeError('`x` {!r} is not in the domain of `f` {!r}' ''.format(x, grad.domain)) if not callable(line_search): line_search = ConstantLineSearch(line_search) if cg_iter is None: # Motivated by that if it is Ax = b, x and b in Rn, it takes at most n # iterations to solve with cg cg_iter = grad.domain.size # TODO: optimize by using lincomb and avoiding to create copies for _ in range(maxiter): # Initialize the search direction to 0 search_direction = x.space.zero() # Compute hessian (as operator) and gradient in the current point hessian = grad.derivative(x) deriv_in_point = grad(x) # Solving A*x = b for x, in this case f''(x)*p = -f'(x) # TODO: Let the user provide/choose method for how to solve this? try: hessian_inverse = hessian.inverse except NotImplementedError: conjugate_gradient(hessian, search_direction, -deriv_in_point, cg_iter) else: hessian_inverse(-deriv_in_point, out=search_direction) # Computing step length dir_deriv = search_direction.inner(deriv_in_point) if np.abs(dir_deriv) <= tol: return step_length = line_search(x, search_direction, dir_deriv) # Updating x += step_length * search_direction if callback is not None: callback(x)
python
def newtons_method(f, x, line_search=1.0, maxiter=1000, tol=1e-16, cg_iter=None, callback=None): r"""Newton's method for minimizing a functional. Notes ----- This is a general and optimized implementation of Newton's method for solving the problem: .. math:: \min f(x) for a differentiable function :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X}. of finding a root of a function. The algorithm is well-known and there is a vast literature about it. Among others, the method is described in [BV2004], Sections 9.5 and 10.2 (`book available online <http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_), [GNS2009], Section 2.7 for solving nonlinear equations and Section 11.3 for its use in minimization, and wikipedia on `Newton's_method <https://en.wikipedia.org/wiki/Newton's_method>`_. The algorithm works by iteratively solving .. math:: \partial f(x_k)p_k = -f(x_k) and then updating as .. math:: x_{k+1} = x_k + \alpha x_k, where :math:`\alpha` is a suitable step length (see the references). In this implementation the system of equations are solved using the conjugate gradient method. Parameters ---------- f : `Functional` Goal functional. Needs to have ``f.gradient`` and ``f.gradient.derivative``. x : ``op.domain`` element Starting point of the iteration line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, uses it as a fixed step length. maxiter : int, optional Maximum number of iterations. tol : float, optional Tolerance that should be used for terminating the iteration. cg_iter : int, optional Number of iterations in the the conjugate gradient solver, for computing the search direction. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate References ---------- [BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*. Cambridge university press, 2004. 
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear optimization*. Siam, 2009. """ # TODO: update doc grad = f.gradient if x not in grad.domain: raise TypeError('`x` {!r} is not in the domain of `f` {!r}' ''.format(x, grad.domain)) if not callable(line_search): line_search = ConstantLineSearch(line_search) if cg_iter is None: # Motivated by that if it is Ax = b, x and b in Rn, it takes at most n # iterations to solve with cg cg_iter = grad.domain.size # TODO: optimize by using lincomb and avoiding to create copies for _ in range(maxiter): # Initialize the search direction to 0 search_direction = x.space.zero() # Compute hessian (as operator) and gradient in the current point hessian = grad.derivative(x) deriv_in_point = grad(x) # Solving A*x = b for x, in this case f''(x)*p = -f'(x) # TODO: Let the user provide/choose method for how to solve this? try: hessian_inverse = hessian.inverse except NotImplementedError: conjugate_gradient(hessian, search_direction, -deriv_in_point, cg_iter) else: hessian_inverse(-deriv_in_point, out=search_direction) # Computing step length dir_deriv = search_direction.inner(deriv_in_point) if np.abs(dir_deriv) <= tol: return step_length = line_search(x, search_direction, dir_deriv) # Updating x += step_length * search_direction if callback is not None: callback(x)
[ "def", "newtons_method", "(", "f", ",", "x", ",", "line_search", "=", "1.0", ",", "maxiter", "=", "1000", ",", "tol", "=", "1e-16", ",", "cg_iter", "=", "None", ",", "callback", "=", "None", ")", ":", "# TODO: update doc", "grad", "=", "f", ".", "gra...
r"""Newton's method for minimizing a functional. Notes ----- This is a general and optimized implementation of Newton's method for solving the problem: .. math:: \min f(x) for a differentiable function :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X}. of finding a root of a function. The algorithm is well-known and there is a vast literature about it. Among others, the method is described in [BV2004], Sections 9.5 and 10.2 (`book available online <http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_), [GNS2009], Section 2.7 for solving nonlinear equations and Section 11.3 for its use in minimization, and wikipedia on `Newton's_method <https://en.wikipedia.org/wiki/Newton's_method>`_. The algorithm works by iteratively solving .. math:: \partial f(x_k)p_k = -f(x_k) and then updating as .. math:: x_{k+1} = x_k + \alpha x_k, where :math:`\alpha` is a suitable step length (see the references). In this implementation the system of equations are solved using the conjugate gradient method. Parameters ---------- f : `Functional` Goal functional. Needs to have ``f.gradient`` and ``f.gradient.derivative``. x : ``op.domain`` element Starting point of the iteration line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, uses it as a fixed step length. maxiter : int, optional Maximum number of iterations. tol : float, optional Tolerance that should be used for terminating the iteration. cg_iter : int, optional Number of iterations in the the conjugate gradient solver, for computing the search direction. callback : callable, optional Object executing code per iteration, e.g. plotting each iterate References ---------- [BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*. Cambridge university press, 2004. [GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear optimization*. Siam, 2009.
[ "r", "Newton", "s", "method", "for", "minimizing", "a", "functional", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/smooth/newton.py#L127-L243
231,755
odlgroup/odl
odl/solvers/smooth/newton.py
bfgs_method
def bfgs_method(f, x, line_search=1.0, maxiter=1000, tol=1e-15, num_store=None, hessinv_estimate=None, callback=None): r"""Quasi-Newton BFGS method to minimize a differentiable function. Can use either the regular BFGS method, or the limited memory BFGS method. Notes ----- This is a general and optimized implementation of a quasi-Newton method with BFGS update for solving a general unconstrained optimization problem .. math:: \min f(x) for a differentiable function :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X}. The QN method is an approximate Newton method, where the Hessian is approximated and gradually updated in each step. This implementation uses the rank-one BFGS update schema where the inverse of the Hessian is recalculated in each iteration. The algorithm is described in [GNS2009], Section 12.3 and in the `BFGS Wikipedia article <https://en.wikipedia.org/wiki/Broyden%E2%80%93Fletcher%E2%80%93\ Goldfarb%E2%80%93Shanno_algorithm>`_ Parameters ---------- f : `Functional` Functional with ``f.gradient``. x : ``f.domain`` element Starting point of the iteration line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, uses it as a fixed step length. maxiter : int, optional Maximum number of iterations. tol : float, optional Tolerance that should be used for terminating the iteration. num_store : int, optional Maximum number of correction factors to store. For ``None``, the method is the regular BFGS method. For an integer, the method becomes the Limited Memory BFGS method. hessinv_estimate : `Operator`, optional Initial estimate of the inverse of the Hessian operator. Needs to be an operator from ``f.domain`` to ``f.domain``. Default: Identity on ``f.domain`` callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. 
References ---------- [GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear optimization*. Siam, 2009. """ grad = f.gradient if x not in grad.domain: raise TypeError('`x` {!r} is not in the domain of `grad` {!r}' ''.format(x, grad.domain)) if not callable(line_search): line_search = ConstantLineSearch(line_search) ys = [] ss = [] grad_x = grad(x) for i in range(maxiter): # Determine a stepsize using line search search_dir = -_bfgs_direction(ss, ys, grad_x, hessinv_estimate) dir_deriv = search_dir.inner(grad_x) if np.abs(dir_deriv) == 0: return # we found an optimum step = line_search(x, direction=search_dir, dir_derivative=dir_deriv) # Update x x_update = search_dir x_update *= step x += x_update grad_x, grad_diff = grad(x), grad_x # grad_diff = grad(x) - grad(x_old) grad_diff.lincomb(-1, grad_diff, 1, grad_x) y_inner_s = grad_diff.inner(x_update) # Test for convergence if np.abs(y_inner_s) < tol: if grad_x.norm() < tol: return else: # Reset if needed ys = [] ss = [] continue # Update Hessian ys.append(grad_diff) ss.append(x_update) if num_store is not None: # Throw away factors if they are too many. ss = ss[-num_store:] ys = ys[-num_store:] if callback is not None: callback(x)
python
def bfgs_method(f, x, line_search=1.0, maxiter=1000, tol=1e-15, num_store=None, hessinv_estimate=None, callback=None): r"""Quasi-Newton BFGS method to minimize a differentiable function. Can use either the regular BFGS method, or the limited memory BFGS method. Notes ----- This is a general and optimized implementation of a quasi-Newton method with BFGS update for solving a general unconstrained optimization problem .. math:: \min f(x) for a differentiable function :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X}. The QN method is an approximate Newton method, where the Hessian is approximated and gradually updated in each step. This implementation uses the rank-one BFGS update schema where the inverse of the Hessian is recalculated in each iteration. The algorithm is described in [GNS2009], Section 12.3 and in the `BFGS Wikipedia article <https://en.wikipedia.org/wiki/Broyden%E2%80%93Fletcher%E2%80%93\ Goldfarb%E2%80%93Shanno_algorithm>`_ Parameters ---------- f : `Functional` Functional with ``f.gradient``. x : ``f.domain`` element Starting point of the iteration line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, uses it as a fixed step length. maxiter : int, optional Maximum number of iterations. tol : float, optional Tolerance that should be used for terminating the iteration. num_store : int, optional Maximum number of correction factors to store. For ``None``, the method is the regular BFGS method. For an integer, the method becomes the Limited Memory BFGS method. hessinv_estimate : `Operator`, optional Initial estimate of the inverse of the Hessian operator. Needs to be an operator from ``f.domain`` to ``f.domain``. Default: Identity on ``f.domain`` callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. 
References ---------- [GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear optimization*. Siam, 2009. """ grad = f.gradient if x not in grad.domain: raise TypeError('`x` {!r} is not in the domain of `grad` {!r}' ''.format(x, grad.domain)) if not callable(line_search): line_search = ConstantLineSearch(line_search) ys = [] ss = [] grad_x = grad(x) for i in range(maxiter): # Determine a stepsize using line search search_dir = -_bfgs_direction(ss, ys, grad_x, hessinv_estimate) dir_deriv = search_dir.inner(grad_x) if np.abs(dir_deriv) == 0: return # we found an optimum step = line_search(x, direction=search_dir, dir_derivative=dir_deriv) # Update x x_update = search_dir x_update *= step x += x_update grad_x, grad_diff = grad(x), grad_x # grad_diff = grad(x) - grad(x_old) grad_diff.lincomb(-1, grad_diff, 1, grad_x) y_inner_s = grad_diff.inner(x_update) # Test for convergence if np.abs(y_inner_s) < tol: if grad_x.norm() < tol: return else: # Reset if needed ys = [] ss = [] continue # Update Hessian ys.append(grad_diff) ss.append(x_update) if num_store is not None: # Throw away factors if they are too many. ss = ss[-num_store:] ys = ys[-num_store:] if callback is not None: callback(x)
[ "def", "bfgs_method", "(", "f", ",", "x", ",", "line_search", "=", "1.0", ",", "maxiter", "=", "1000", ",", "tol", "=", "1e-15", ",", "num_store", "=", "None", ",", "hessinv_estimate", "=", "None", ",", "callback", "=", "None", ")", ":", "grad", "=",...
r"""Quasi-Newton BFGS method to minimize a differentiable function. Can use either the regular BFGS method, or the limited memory BFGS method. Notes ----- This is a general and optimized implementation of a quasi-Newton method with BFGS update for solving a general unconstrained optimization problem .. math:: \min f(x) for a differentiable function :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X}. The QN method is an approximate Newton method, where the Hessian is approximated and gradually updated in each step. This implementation uses the rank-one BFGS update schema where the inverse of the Hessian is recalculated in each iteration. The algorithm is described in [GNS2009], Section 12.3 and in the `BFGS Wikipedia article <https://en.wikipedia.org/wiki/Broyden%E2%80%93Fletcher%E2%80%93\ Goldfarb%E2%80%93Shanno_algorithm>`_ Parameters ---------- f : `Functional` Functional with ``f.gradient``. x : ``f.domain`` element Starting point of the iteration line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, uses it as a fixed step length. maxiter : int, optional Maximum number of iterations. tol : float, optional Tolerance that should be used for terminating the iteration. num_store : int, optional Maximum number of correction factors to store. For ``None``, the method is the regular BFGS method. For an integer, the method becomes the Limited Memory BFGS method. hessinv_estimate : `Operator`, optional Initial estimate of the inverse of the Hessian operator. Needs to be an operator from ``f.domain`` to ``f.domain``. Default: Identity on ``f.domain`` callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. References ---------- [GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear optimization*. Siam, 2009.
[ "r", "Quasi", "-", "Newton", "BFGS", "method", "to", "minimize", "a", "differentiable", "function", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/smooth/newton.py#L246-L357
231,756
odlgroup/odl
odl/solvers/smooth/newton.py
broydens_method
def broydens_method(f, x, line_search=1.0, impl='first', maxiter=1000, tol=1e-15, hessinv_estimate=None, callback=None): r"""Broyden's first method, a quasi-Newton scheme. Notes ----- This is a general and optimized implementation of Broyden's method, a quasi-Newton method for solving a general unconstrained optimization problem .. math:: \min f(x) for a differentiable function :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X} using a Newton-type update scheme with approximate Hessian. The algorithm is described in [Bro1965] and [Kva1991], and in a `Wikipedia article <https://en.wikipedia.org/wiki/Broyden's_method>`_. Parameters ---------- f : `Functional` Functional with ``f.gradient`` x : ``f.domain`` element Starting point of the iteration line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, uses it as a fixed step length. impl : {'first', 'second'}, optional What version of Broydens method to use. First is also known as Broydens 'good' method, while the second is known as Broydens 'bad' method. maxiter : int, optional Maximum number of iterations. ``tol``. tol : float, optional Tolerance that should be used for terminating the iteration. hessinv_estimate : `Operator`, optional Initial estimate of the inverse of the Hessian operator. Needs to be an operator from ``f.domain`` to ``f.domain``. Default: Identity on ``f.domain`` callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. References ---------- [Bro1965] Broyden, C G. *A class of methods for solving nonlinear simultaneous equations*. Mathematics of computation, 33 (1965), pp 577--593. [Kva1991] Kvaalen, E. *A faster Broyden method*. BIT Numerical Mathematics 31 (1991), pp 369--372. 
""" grad = f.gradient if x not in grad.domain: raise TypeError('`x` {!r} is not in the domain of `grad` {!r}' ''.format(x, grad.domain)) if not callable(line_search): line_search = ConstantLineSearch(line_search) impl, impl_in = str(impl).lower(), impl if impl not in ('first', 'second'): raise ValueError('`impl` {!r} not understood' ''.format(impl_in)) ss = [] ys = [] grad_x = grad(x) for i in range(maxiter): # find step size search_dir = -_broydens_direction(ss, ys, grad_x, hessinv_estimate, impl) dir_deriv = search_dir.inner(grad_x) if np.abs(dir_deriv) == 0: return # we found an optimum step = line_search(x, search_dir, dir_deriv) # update x x_update = step * search_dir x += x_update # compute new gradient grad_x, grad_x_old = grad(x), grad_x delta_grad = grad_x - grad_x_old # update hessian. # TODO: reuse from above v = _broydens_direction(ss, ys, delta_grad, hessinv_estimate, impl) if impl == 'first': divisor = x_update.inner(v) # Test for convergence if np.abs(divisor) < tol: if grad_x.norm() < tol: return else: # Reset if needed ys = [] ss = [] continue u = (x_update - v) / divisor ss.append(u) ys.append(x_update) elif impl == 'second': divisor = delta_grad.inner(delta_grad) # Test for convergence if np.abs(divisor) < tol: if grad_x.norm() < tol: return else: # Reset if needed ys = [] ss = [] continue u = (x_update - v) / divisor ss.append(u) ys.append(delta_grad) if callback is not None: callback(x)
python
def broydens_method(f, x, line_search=1.0, impl='first', maxiter=1000, tol=1e-15, hessinv_estimate=None, callback=None): r"""Broyden's first method, a quasi-Newton scheme. Notes ----- This is a general and optimized implementation of Broyden's method, a quasi-Newton method for solving a general unconstrained optimization problem .. math:: \min f(x) for a differentiable function :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X} using a Newton-type update scheme with approximate Hessian. The algorithm is described in [Bro1965] and [Kva1991], and in a `Wikipedia article <https://en.wikipedia.org/wiki/Broyden's_method>`_. Parameters ---------- f : `Functional` Functional with ``f.gradient`` x : ``f.domain`` element Starting point of the iteration line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, uses it as a fixed step length. impl : {'first', 'second'}, optional What version of Broydens method to use. First is also known as Broydens 'good' method, while the second is known as Broydens 'bad' method. maxiter : int, optional Maximum number of iterations. ``tol``. tol : float, optional Tolerance that should be used for terminating the iteration. hessinv_estimate : `Operator`, optional Initial estimate of the inverse of the Hessian operator. Needs to be an operator from ``f.domain`` to ``f.domain``. Default: Identity on ``f.domain`` callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. References ---------- [Bro1965] Broyden, C G. *A class of methods for solving nonlinear simultaneous equations*. Mathematics of computation, 33 (1965), pp 577--593. [Kva1991] Kvaalen, E. *A faster Broyden method*. BIT Numerical Mathematics 31 (1991), pp 369--372. 
""" grad = f.gradient if x not in grad.domain: raise TypeError('`x` {!r} is not in the domain of `grad` {!r}' ''.format(x, grad.domain)) if not callable(line_search): line_search = ConstantLineSearch(line_search) impl, impl_in = str(impl).lower(), impl if impl not in ('first', 'second'): raise ValueError('`impl` {!r} not understood' ''.format(impl_in)) ss = [] ys = [] grad_x = grad(x) for i in range(maxiter): # find step size search_dir = -_broydens_direction(ss, ys, grad_x, hessinv_estimate, impl) dir_deriv = search_dir.inner(grad_x) if np.abs(dir_deriv) == 0: return # we found an optimum step = line_search(x, search_dir, dir_deriv) # update x x_update = step * search_dir x += x_update # compute new gradient grad_x, grad_x_old = grad(x), grad_x delta_grad = grad_x - grad_x_old # update hessian. # TODO: reuse from above v = _broydens_direction(ss, ys, delta_grad, hessinv_estimate, impl) if impl == 'first': divisor = x_update.inner(v) # Test for convergence if np.abs(divisor) < tol: if grad_x.norm() < tol: return else: # Reset if needed ys = [] ss = [] continue u = (x_update - v) / divisor ss.append(u) ys.append(x_update) elif impl == 'second': divisor = delta_grad.inner(delta_grad) # Test for convergence if np.abs(divisor) < tol: if grad_x.norm() < tol: return else: # Reset if needed ys = [] ss = [] continue u = (x_update - v) / divisor ss.append(u) ys.append(delta_grad) if callback is not None: callback(x)
[ "def", "broydens_method", "(", "f", ",", "x", ",", "line_search", "=", "1.0", ",", "impl", "=", "'first'", ",", "maxiter", "=", "1000", ",", "tol", "=", "1e-15", ",", "hessinv_estimate", "=", "None", ",", "callback", "=", "None", ")", ":", "grad", "=...
r"""Broyden's first method, a quasi-Newton scheme. Notes ----- This is a general and optimized implementation of Broyden's method, a quasi-Newton method for solving a general unconstrained optimization problem .. math:: \min f(x) for a differentiable function :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space :math:`\mathcal{X}`. It does so by finding a zero of the gradient .. math:: \nabla f: \mathcal{X} \to \mathcal{X} using a Newton-type update scheme with approximate Hessian. The algorithm is described in [Bro1965] and [Kva1991], and in a `Wikipedia article <https://en.wikipedia.org/wiki/Broyden's_method>`_. Parameters ---------- f : `Functional` Functional with ``f.gradient`` x : ``f.domain`` element Starting point of the iteration line_search : float or `LineSearch`, optional Strategy to choose the step length. If a float is given, uses it as a fixed step length. impl : {'first', 'second'}, optional What version of Broydens method to use. First is also known as Broydens 'good' method, while the second is known as Broydens 'bad' method. maxiter : int, optional Maximum number of iterations. ``tol``. tol : float, optional Tolerance that should be used for terminating the iteration. hessinv_estimate : `Operator`, optional Initial estimate of the inverse of the Hessian operator. Needs to be an operator from ``f.domain`` to ``f.domain``. Default: Identity on ``f.domain`` callback : callable, optional Object executing code per iteration, e.g. plotting each iterate. References ---------- [Bro1965] Broyden, C G. *A class of methods for solving nonlinear simultaneous equations*. Mathematics of computation, 33 (1965), pp 577--593. [Kva1991] Kvaalen, E. *A faster Broyden method*. BIT Numerical Mathematics 31 (1991), pp 369--372.
[ "r", "Broyden", "s", "first", "method", "a", "quasi", "-", "Newton", "scheme", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/smooth/newton.py#L360-L491
231,757
odlgroup/odl
odl/tomo/analytic/filtered_back_projection.py
_axis_in_detector
def _axis_in_detector(geometry): """A vector in the detector plane that points along the rotation axis.""" du, dv = geometry.det_axes_init axis = geometry.axis c = np.array([np.vdot(axis, du), np.vdot(axis, dv)]) cnorm = np.linalg.norm(c) # Check for numerical errors assert cnorm != 0 return c / cnorm
python
def _axis_in_detector(geometry): du, dv = geometry.det_axes_init axis = geometry.axis c = np.array([np.vdot(axis, du), np.vdot(axis, dv)]) cnorm = np.linalg.norm(c) # Check for numerical errors assert cnorm != 0 return c / cnorm
[ "def", "_axis_in_detector", "(", "geometry", ")", ":", "du", ",", "dv", "=", "geometry", ".", "det_axes_init", "axis", "=", "geometry", ".", "axis", "c", "=", "np", ".", "array", "(", "[", "np", ".", "vdot", "(", "axis", ",", "du", ")", ",", "np", ...
A vector in the detector plane that points along the rotation axis.
[ "A", "vector", "in", "the", "detector", "plane", "that", "points", "along", "the", "rotation", "axis", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/analytic/filtered_back_projection.py#L21-L31
231,758
odlgroup/odl
odl/tomo/analytic/filtered_back_projection.py
_rotation_direction_in_detector
def _rotation_direction_in_detector(geometry): """A vector in the detector plane that points in the rotation direction.""" du, dv = geometry.det_axes_init axis = geometry.axis det_normal = np.cross(dv, du) rot_dir = np.cross(axis, det_normal) c = np.array([np.vdot(rot_dir, du), np.vdot(rot_dir, dv)]) cnorm = np.linalg.norm(c) # Check for numerical errors assert cnorm != 0 return c / cnorm
python
def _rotation_direction_in_detector(geometry): du, dv = geometry.det_axes_init axis = geometry.axis det_normal = np.cross(dv, du) rot_dir = np.cross(axis, det_normal) c = np.array([np.vdot(rot_dir, du), np.vdot(rot_dir, dv)]) cnorm = np.linalg.norm(c) # Check for numerical errors assert cnorm != 0 return c / cnorm
[ "def", "_rotation_direction_in_detector", "(", "geometry", ")", ":", "du", ",", "dv", "=", "geometry", ".", "det_axes_init", "axis", "=", "geometry", ".", "axis", "det_normal", "=", "np", ".", "cross", "(", "dv", ",", "du", ")", "rot_dir", "=", "np", "."...
A vector in the detector plane that points in the rotation direction.
[ "A", "vector", "in", "the", "detector", "plane", "that", "points", "in", "the", "rotation", "direction", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/analytic/filtered_back_projection.py#L34-L46
231,759
odlgroup/odl
odl/tomo/analytic/filtered_back_projection.py
_fbp_filter
def _fbp_filter(norm_freq, filter_type, frequency_scaling): """Create a smoothing filter for FBP. Parameters ---------- norm_freq : `array-like` Frequencies normalized to lie in the interval [0, 1]. filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann', callable} The type of filter to be used. If a string is given, use one of the standard filters with that name. A callable should take an array of values in [0, 1] and return the filter for these frequencies. frequency_scaling : float Scaling of the frequencies for the filter. All frequencies are scaled by this number, any relative frequency above ``frequency_scaling`` is set to 0. Returns ------- smoothing_filter : `numpy.ndarray` Examples -------- Create an FBP filter >>> norm_freq = np.linspace(0, 1, 10) >>> filt = _fbp_filter(norm_freq, ... filter_type='Hann', ... frequency_scaling=0.8) """ filter_type, filter_type_in = str(filter_type).lower(), filter_type if callable(filter_type): filt = filter_type(norm_freq) elif filter_type == 'ram-lak': filt = np.copy(norm_freq) elif filter_type == 'shepp-logan': filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling)) elif filter_type == 'cosine': filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling)) elif filter_type == 'hamming': filt = norm_freq * ( 0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling))) elif filter_type == 'hann': filt = norm_freq * ( np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2) else: raise ValueError('unknown `filter_type` ({})' ''.format(filter_type_in)) indicator = (norm_freq <= frequency_scaling) filt *= indicator return filt
python
def _fbp_filter(norm_freq, filter_type, frequency_scaling): filter_type, filter_type_in = str(filter_type).lower(), filter_type if callable(filter_type): filt = filter_type(norm_freq) elif filter_type == 'ram-lak': filt = np.copy(norm_freq) elif filter_type == 'shepp-logan': filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling)) elif filter_type == 'cosine': filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling)) elif filter_type == 'hamming': filt = norm_freq * ( 0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling))) elif filter_type == 'hann': filt = norm_freq * ( np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2) else: raise ValueError('unknown `filter_type` ({})' ''.format(filter_type_in)) indicator = (norm_freq <= frequency_scaling) filt *= indicator return filt
[ "def", "_fbp_filter", "(", "norm_freq", ",", "filter_type", ",", "frequency_scaling", ")", ":", "filter_type", ",", "filter_type_in", "=", "str", "(", "filter_type", ")", ".", "lower", "(", ")", ",", "filter_type", "if", "callable", "(", "filter_type", ")", ...
Create a smoothing filter for FBP. Parameters ---------- norm_freq : `array-like` Frequencies normalized to lie in the interval [0, 1]. filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann', callable} The type of filter to be used. If a string is given, use one of the standard filters with that name. A callable should take an array of values in [0, 1] and return the filter for these frequencies. frequency_scaling : float Scaling of the frequencies for the filter. All frequencies are scaled by this number, any relative frequency above ``frequency_scaling`` is set to 0. Returns ------- smoothing_filter : `numpy.ndarray` Examples -------- Create an FBP filter >>> norm_freq = np.linspace(0, 1, 10) >>> filt = _fbp_filter(norm_freq, ... filter_type='Hann', ... frequency_scaling=0.8)
[ "Create", "a", "smoothing", "filter", "for", "FBP", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/analytic/filtered_back_projection.py#L49-L101
231,760
odlgroup/odl
odl/tomo/analytic/filtered_back_projection.py
tam_danielson_window
def tam_danielson_window(ray_trafo, smoothing_width=0.05, n_pi=1): """Create Tam-Danielson window from a `RayTransform`. The Tam-Danielson window is an indicator function on the minimal set of data needed to reconstruct a volume from given data. It is useful in analytic reconstruction methods such as FBP to give a more accurate reconstruction. See [TAM1998] for more informationon the window. See [PKGT2000] for information on the ``n_pi`` parameter. Parameters ---------- ray_trafo : `RayTransform` The ray transform for which to compute the window. smoothing_width : positive float, optional Width of the smoothing applied to the window's edges given as a fraction of the width of the full window. n_pi : odd int, optional Total number of half rotations to include in the window. Values larger than 1 should be used if the pitch is much smaller than the detector height. Returns ------- tam_danielson_window : ``ray_trafo.range`` element See Also -------- fbp_op : Filtered back-projection operator from `RayTransform` tam_danielson_window : Weighting for short scan data odl.tomo.geometry.conebeam.ConeFlatGeometry : Primary use case for this window function. References ---------- [TSS1998] Tam, K C, Samarasekera, S and Sauer, F. *Exact cone beam CT with a spiral scan*. Physics in Medicine & Biology 4 (1998), p 1015. https://dx.doi.org/10.1088/0031-9155/43/4/028 [PKGT2000] Proksa R, Köhler T, Grass M, Timmer J. *The n-PI-method for helical cone-beam CT* IEEE Trans Med Imaging. 2000 Sep;19(9):848-63. 
https://www.ncbi.nlm.nih.gov/pubmed/11127600 """ # Extract parameters src_radius = ray_trafo.geometry.src_radius det_radius = ray_trafo.geometry.det_radius pitch = ray_trafo.geometry.pitch if pitch == 0: raise ValueError('Tam-Danielson window is only defined with ' '`pitch!=0`') smoothing_width = float(smoothing_width) if smoothing_width < 0: raise ValueError('`smoothing_width` should be a positive float') if n_pi % 2 != 1: raise ValueError('`n_pi` must be odd, got {}'.format(n_pi)) # Find projection of axis on detector axis_proj = _axis_in_detector(ray_trafo.geometry) rot_dir = _rotation_direction_in_detector(ray_trafo.geometry) # Find distance from projection of rotation axis for each pixel dx = (rot_dir[0] * ray_trafo.range.meshgrid[1] + rot_dir[1] * ray_trafo.range.meshgrid[2]) dx_axis = dx * src_radius / (src_radius + det_radius) def Vn(u): return (pitch / (2 * np.pi) * (1 + (u / src_radius) ** 2) * (n_pi * np.pi / 2.0 - np.arctan(u / src_radius))) lower_proj_axis = -Vn(dx_axis) upper_proj_axis = Vn(-dx_axis) lower_proj = lower_proj_axis * (src_radius + det_radius) / src_radius upper_proj = upper_proj_axis * (src_radius + det_radius) / src_radius # Compute a smoothed width interval = (upper_proj - lower_proj) width = interval * smoothing_width / np.sqrt(2) # Create window function def window_fcn(x): # Lazy import to improve `import odl` time import scipy.special x_along_axis = axis_proj[0] * x[1] + axis_proj[1] * x[2] if smoothing_width != 0: lower_wndw = 0.5 * ( 1 + scipy.special.erf((x_along_axis - lower_proj) / width)) upper_wndw = 0.5 * ( 1 + scipy.special.erf((upper_proj - x_along_axis) / width)) else: lower_wndw = (x_along_axis >= lower_proj) upper_wndw = (x_along_axis <= upper_proj) return lower_wndw * upper_wndw return ray_trafo.range.element(window_fcn) / n_pi
python
def tam_danielson_window(ray_trafo, smoothing_width=0.05, n_pi=1): # Extract parameters src_radius = ray_trafo.geometry.src_radius det_radius = ray_trafo.geometry.det_radius pitch = ray_trafo.geometry.pitch if pitch == 0: raise ValueError('Tam-Danielson window is only defined with ' '`pitch!=0`') smoothing_width = float(smoothing_width) if smoothing_width < 0: raise ValueError('`smoothing_width` should be a positive float') if n_pi % 2 != 1: raise ValueError('`n_pi` must be odd, got {}'.format(n_pi)) # Find projection of axis on detector axis_proj = _axis_in_detector(ray_trafo.geometry) rot_dir = _rotation_direction_in_detector(ray_trafo.geometry) # Find distance from projection of rotation axis for each pixel dx = (rot_dir[0] * ray_trafo.range.meshgrid[1] + rot_dir[1] * ray_trafo.range.meshgrid[2]) dx_axis = dx * src_radius / (src_radius + det_radius) def Vn(u): return (pitch / (2 * np.pi) * (1 + (u / src_radius) ** 2) * (n_pi * np.pi / 2.0 - np.arctan(u / src_radius))) lower_proj_axis = -Vn(dx_axis) upper_proj_axis = Vn(-dx_axis) lower_proj = lower_proj_axis * (src_radius + det_radius) / src_radius upper_proj = upper_proj_axis * (src_radius + det_radius) / src_radius # Compute a smoothed width interval = (upper_proj - lower_proj) width = interval * smoothing_width / np.sqrt(2) # Create window function def window_fcn(x): # Lazy import to improve `import odl` time import scipy.special x_along_axis = axis_proj[0] * x[1] + axis_proj[1] * x[2] if smoothing_width != 0: lower_wndw = 0.5 * ( 1 + scipy.special.erf((x_along_axis - lower_proj) / width)) upper_wndw = 0.5 * ( 1 + scipy.special.erf((upper_proj - x_along_axis) / width)) else: lower_wndw = (x_along_axis >= lower_proj) upper_wndw = (x_along_axis <= upper_proj) return lower_wndw * upper_wndw return ray_trafo.range.element(window_fcn) / n_pi
[ "def", "tam_danielson_window", "(", "ray_trafo", ",", "smoothing_width", "=", "0.05", ",", "n_pi", "=", "1", ")", ":", "# Extract parameters", "src_radius", "=", "ray_trafo", ".", "geometry", ".", "src_radius", "det_radius", "=", "ray_trafo", ".", "geometry", "....
Create Tam-Danielson window from a `RayTransform`. The Tam-Danielson window is an indicator function on the minimal set of data needed to reconstruct a volume from given data. It is useful in analytic reconstruction methods such as FBP to give a more accurate reconstruction. See [TAM1998] for more informationon the window. See [PKGT2000] for information on the ``n_pi`` parameter. Parameters ---------- ray_trafo : `RayTransform` The ray transform for which to compute the window. smoothing_width : positive float, optional Width of the smoothing applied to the window's edges given as a fraction of the width of the full window. n_pi : odd int, optional Total number of half rotations to include in the window. Values larger than 1 should be used if the pitch is much smaller than the detector height. Returns ------- tam_danielson_window : ``ray_trafo.range`` element See Also -------- fbp_op : Filtered back-projection operator from `RayTransform` tam_danielson_window : Weighting for short scan data odl.tomo.geometry.conebeam.ConeFlatGeometry : Primary use case for this window function. References ---------- [TSS1998] Tam, K C, Samarasekera, S and Sauer, F. *Exact cone beam CT with a spiral scan*. Physics in Medicine & Biology 4 (1998), p 1015. https://dx.doi.org/10.1088/0031-9155/43/4/028 [PKGT2000] Proksa R, Köhler T, Grass M, Timmer J. *The n-PI-method for helical cone-beam CT* IEEE Trans Med Imaging. 2000 Sep;19(9):848-63. https://www.ncbi.nlm.nih.gov/pubmed/11127600
[ "Create", "Tam", "-", "Danielson", "window", "from", "a", "RayTransform", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/analytic/filtered_back_projection.py#L104-L208
231,761
odlgroup/odl
odl/tomo/analytic/filtered_back_projection.py
parker_weighting
def parker_weighting(ray_trafo, q=0.25): """Create parker weighting for a `RayTransform`. Parker weighting is a weighting function that ensures that oversampled fan/cone beam data are weighted such that each line has unit weight. It is useful in analytic reconstruction methods such as FBP to give a more accurate result and can improve convergence rates for iterative methods. See the article `Parker weights revisited`_ for more information. Parameters ---------- ray_trafo : `RayTransform` The ray transform for which to compute the weights. q : float, optional Parameter controlling the speed of the roll-off at the edges of the weighting. 1.0 gives the classical Parker weighting, while smaller values in general lead to lower noise but stronger discretization artifacts. Returns ------- parker_weighting : ``ray_trafo.range`` element See Also -------- fbp_op : Filtered back-projection operator from `RayTransform` tam_danielson_window : Indicator function for helical data odl.tomo.geometry.conebeam.FanBeamGeometry : Use case in 2d odl.tomo.geometry.conebeam.ConeFlatGeometry : Use case in 3d (for pitch 0) References ---------- .. 
_Parker weights revisited: https://www.ncbi.nlm.nih.gov/pubmed/11929021 """ # Note: Parameter names taken from WES2002 # Extract parameters src_radius = ray_trafo.geometry.src_radius det_radius = ray_trafo.geometry.det_radius ndim = ray_trafo.geometry.ndim angles = ray_trafo.range.meshgrid[0] min_rot_angle = ray_trafo.geometry.motion_partition.min_pt alen = ray_trafo.geometry.motion_params.length # Parker weightings are not defined for helical geometries if ray_trafo.geometry.ndim != 2: pitch = ray_trafo.geometry.pitch if pitch != 0: raise ValueError('Parker weighting window is only defined with ' '`pitch==0`') # Find distance from projection of rotation axis for each pixel if ndim == 2: dx = ray_trafo.range.meshgrid[1] elif ndim == 3: # Find projection of axis on detector rot_dir = _rotation_direction_in_detector(ray_trafo.geometry) # If axis is aligned to a coordinate axis, save some memory and time by # using broadcasting if rot_dir[0] == 0: dx = rot_dir[1] * ray_trafo.range.meshgrid[2] elif rot_dir[1] == 0: dx = rot_dir[0] * ray_trafo.range.meshgrid[1] else: dx = (rot_dir[0] * ray_trafo.range.meshgrid[1] + rot_dir[1] * ray_trafo.range.meshgrid[2]) # Compute parameters dx_abs_max = np.max(np.abs(dx)) max_fan_angle = 2 * np.arctan2(dx_abs_max, src_radius + det_radius) delta = max_fan_angle / 2 epsilon = alen - np.pi - max_fan_angle if epsilon < 0: raise Exception('data not sufficiently sampled for parker weighting') # Define utility functions def S(betap): return (0.5 * (1.0 + np.sin(np.pi * betap)) * (np.abs(betap) < 0.5) + (betap >= 0.5)) def b(alpha): return q * (2 * delta - 2 * alpha + epsilon) # Create weighting function beta = np.asarray(angles - min_rot_angle, dtype=ray_trafo.range.dtype) # rotation angle alpha = np.asarray(np.arctan2(dx, src_radius + det_radius), dtype=ray_trafo.range.dtype) # Compute sum in place to save memory S_sum = S(beta / b(alpha) - 0.5) S_sum += S((beta - 2 * delta + 2 * alpha - epsilon) / b(alpha) + 0.5) S_sum -= S((beta - np.pi 
+ 2 * alpha) / b(-alpha) - 0.5) S_sum -= S((beta - np.pi - 2 * delta - epsilon) / b(-alpha) + 0.5) scale = 0.5 * alen / np.pi return ray_trafo.range.element( np.broadcast_to(S_sum * scale, ray_trafo.range.shape))
python
def parker_weighting(ray_trafo, q=0.25): # Note: Parameter names taken from WES2002 # Extract parameters src_radius = ray_trafo.geometry.src_radius det_radius = ray_trafo.geometry.det_radius ndim = ray_trafo.geometry.ndim angles = ray_trafo.range.meshgrid[0] min_rot_angle = ray_trafo.geometry.motion_partition.min_pt alen = ray_trafo.geometry.motion_params.length # Parker weightings are not defined for helical geometries if ray_trafo.geometry.ndim != 2: pitch = ray_trafo.geometry.pitch if pitch != 0: raise ValueError('Parker weighting window is only defined with ' '`pitch==0`') # Find distance from projection of rotation axis for each pixel if ndim == 2: dx = ray_trafo.range.meshgrid[1] elif ndim == 3: # Find projection of axis on detector rot_dir = _rotation_direction_in_detector(ray_trafo.geometry) # If axis is aligned to a coordinate axis, save some memory and time by # using broadcasting if rot_dir[0] == 0: dx = rot_dir[1] * ray_trafo.range.meshgrid[2] elif rot_dir[1] == 0: dx = rot_dir[0] * ray_trafo.range.meshgrid[1] else: dx = (rot_dir[0] * ray_trafo.range.meshgrid[1] + rot_dir[1] * ray_trafo.range.meshgrid[2]) # Compute parameters dx_abs_max = np.max(np.abs(dx)) max_fan_angle = 2 * np.arctan2(dx_abs_max, src_radius + det_radius) delta = max_fan_angle / 2 epsilon = alen - np.pi - max_fan_angle if epsilon < 0: raise Exception('data not sufficiently sampled for parker weighting') # Define utility functions def S(betap): return (0.5 * (1.0 + np.sin(np.pi * betap)) * (np.abs(betap) < 0.5) + (betap >= 0.5)) def b(alpha): return q * (2 * delta - 2 * alpha + epsilon) # Create weighting function beta = np.asarray(angles - min_rot_angle, dtype=ray_trafo.range.dtype) # rotation angle alpha = np.asarray(np.arctan2(dx, src_radius + det_radius), dtype=ray_trafo.range.dtype) # Compute sum in place to save memory S_sum = S(beta / b(alpha) - 0.5) S_sum += S((beta - 2 * delta + 2 * alpha - epsilon) / b(alpha) + 0.5) S_sum -= S((beta - np.pi + 2 * alpha) / b(-alpha) - 0.5) 
S_sum -= S((beta - np.pi - 2 * delta - epsilon) / b(-alpha) + 0.5) scale = 0.5 * alen / np.pi return ray_trafo.range.element( np.broadcast_to(S_sum * scale, ray_trafo.range.shape))
[ "def", "parker_weighting", "(", "ray_trafo", ",", "q", "=", "0.25", ")", ":", "# Note: Parameter names taken from WES2002", "# Extract parameters", "src_radius", "=", "ray_trafo", ".", "geometry", ".", "src_radius", "det_radius", "=", "ray_trafo", ".", "geometry", "."...
Create parker weighting for a `RayTransform`. Parker weighting is a weighting function that ensures that oversampled fan/cone beam data are weighted such that each line has unit weight. It is useful in analytic reconstruction methods such as FBP to give a more accurate result and can improve convergence rates for iterative methods. See the article `Parker weights revisited`_ for more information. Parameters ---------- ray_trafo : `RayTransform` The ray transform for which to compute the weights. q : float, optional Parameter controlling the speed of the roll-off at the edges of the weighting. 1.0 gives the classical Parker weighting, while smaller values in general lead to lower noise but stronger discretization artifacts. Returns ------- parker_weighting : ``ray_trafo.range`` element See Also -------- fbp_op : Filtered back-projection operator from `RayTransform` tam_danielson_window : Indicator function for helical data odl.tomo.geometry.conebeam.FanBeamGeometry : Use case in 2d odl.tomo.geometry.conebeam.ConeFlatGeometry : Use case in 3d (for pitch 0) References ---------- .. _Parker weights revisited: https://www.ncbi.nlm.nih.gov/pubmed/11929021
[ "Create", "parker", "weighting", "for", "a", "RayTransform", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/analytic/filtered_back_projection.py#L211-L310
231,762
odlgroup/odl
odl/tomo/analytic/filtered_back_projection.py
fbp_op
def fbp_op(ray_trafo, padding=True, filter_type='Ram-Lak', frequency_scaling=1.0): """Create filtered back-projection operator from a `RayTransform`. The filtered back-projection is an approximate inverse to the ray transform. Parameters ---------- ray_trafo : `RayTransform` The ray transform (forward operator) whose approximate inverse should be computed. Its geometry has to be any of the following `Parallel2dGeometry` : Exact reconstruction `Parallel3dAxisGeometry` : Exact reconstruction `FanBeamGeometry` : Approximate reconstruction, correct in limit of fan angle = 0. Only flat detectors are supported (det_curvature_radius is None). `ConeFlatGeometry`, pitch = 0 (circular) : Approximate reconstruction, correct in the limit of fan angle = 0 and cone angle = 0. `ConeFlatGeometry`, pitch > 0 (helical) : Very approximate unless a `tam_danielson_window` is used. Accurate with the window. Other geometries: Not supported padding : bool, optional If the data space should be zero padded. Without padding, the data may be corrupted due to the circular convolution used. Using padding makes the algorithm slower. filter_type : optional The type of filter to be used. The predefined options are, in approximate order from most noise senstive to least noise sensitive: ``'Ram-Lak'``, ``'Shepp-Logan'``, ``'Cosine'``, ``'Hamming'`` and ``'Hann'``. A callable can also be provided. It must take an array of values in [0, 1] and return the filter for these frequencies. frequency_scaling : float, optional Relative cutoff frequency for the filter. The normalized frequencies are rescaled so that they fit into the range [0, frequency_scaling]. Any frequency above ``frequency_scaling`` is set to zero. Returns ------- fbp_op : `Operator` Approximate inverse operator of ``ray_trafo``. See Also -------- tam_danielson_window : Windowing for helical data. parker_weighting : Windowing for overcomplete fan-beam data. 
""" return ray_trafo.adjoint * fbp_filter_op(ray_trafo, padding, filter_type, frequency_scaling)
python
def fbp_op(ray_trafo, padding=True, filter_type='Ram-Lak', frequency_scaling=1.0): return ray_trafo.adjoint * fbp_filter_op(ray_trafo, padding, filter_type, frequency_scaling)
[ "def", "fbp_op", "(", "ray_trafo", ",", "padding", "=", "True", ",", "filter_type", "=", "'Ram-Lak'", ",", "frequency_scaling", "=", "1.0", ")", ":", "return", "ray_trafo", ".", "adjoint", "*", "fbp_filter_op", "(", "ray_trafo", ",", "padding", ",", "filter_...
Create filtered back-projection operator from a `RayTransform`. The filtered back-projection is an approximate inverse to the ray transform. Parameters ---------- ray_trafo : `RayTransform` The ray transform (forward operator) whose approximate inverse should be computed. Its geometry has to be any of the following `Parallel2dGeometry` : Exact reconstruction `Parallel3dAxisGeometry` : Exact reconstruction `FanBeamGeometry` : Approximate reconstruction, correct in limit of fan angle = 0. Only flat detectors are supported (det_curvature_radius is None). `ConeFlatGeometry`, pitch = 0 (circular) : Approximate reconstruction, correct in the limit of fan angle = 0 and cone angle = 0. `ConeFlatGeometry`, pitch > 0 (helical) : Very approximate unless a `tam_danielson_window` is used. Accurate with the window. Other geometries: Not supported padding : bool, optional If the data space should be zero padded. Without padding, the data may be corrupted due to the circular convolution used. Using padding makes the algorithm slower. filter_type : optional The type of filter to be used. The predefined options are, in approximate order from most noise senstive to least noise sensitive: ``'Ram-Lak'``, ``'Shepp-Logan'``, ``'Cosine'``, ``'Hamming'`` and ``'Hann'``. A callable can also be provided. It must take an array of values in [0, 1] and return the filter for these frequencies. frequency_scaling : float, optional Relative cutoff frequency for the filter. The normalized frequencies are rescaled so that they fit into the range [0, frequency_scaling]. Any frequency above ``frequency_scaling`` is set to zero. Returns ------- fbp_op : `Operator` Approximate inverse operator of ``ray_trafo``. See Also -------- tam_danielson_window : Windowing for helical data. parker_weighting : Windowing for overcomplete fan-beam data.
[ "Create", "filtered", "back", "-", "projection", "operator", "from", "a", "RayTransform", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/analytic/filtered_back_projection.py#L477-L535
231,763
odlgroup/odl
odl/contrib/datasets/ct/fips.py
walnut_data
def walnut_data(): """Tomographic X-ray data of a walnut. Notes ----- See the article `Tomographic X-ray data of a walnut`_ for further information. See Also -------- walnut_geometry References ---------- .. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064 """ # TODO: Store data in some ODL controlled url url = 'http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat' dct = get_data('walnut.mat', subset=DATA_SUBSET, url=url) # Change axes to match ODL definitions data = np.swapaxes(dct['sinogram1200'], 0, 1)[::-1, ::-1] data = data.astype('float') # Very crude gain normalization data = -np.log(data / np.max(data, axis=1)[:, None]) return data
python
def walnut_data(): # TODO: Store data in some ODL controlled url url = 'http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat' dct = get_data('walnut.mat', subset=DATA_SUBSET, url=url) # Change axes to match ODL definitions data = np.swapaxes(dct['sinogram1200'], 0, 1)[::-1, ::-1] data = data.astype('float') # Very crude gain normalization data = -np.log(data / np.max(data, axis=1)[:, None]) return data
[ "def", "walnut_data", "(", ")", ":", "# TODO: Store data in some ODL controlled url", "url", "=", "'http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat'", "dct", "=", "get_data", "(", "'walnut.mat'", ",", "subset", "=", "DATA_SUBSET", ",", "url", "=", "url", ")"...
Tomographic X-ray data of a walnut. Notes ----- See the article `Tomographic X-ray data of a walnut`_ for further information. See Also -------- walnut_geometry References ---------- .. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
[ "Tomographic", "X", "-", "ray", "data", "of", "a", "walnut", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/ct/fips.py#L31-L57
231,764
odlgroup/odl
odl/contrib/datasets/ct/fips.py
lotus_root_data
def lotus_root_data(): """Tomographic X-ray data of a lotus root. Notes ----- See the article `Tomographic X-ray data of a lotus root filled with attenuating objects`_ for further information. See Also -------- lotus_root_geometry References ---------- .. _Tomographic X-ray data of a lotus root filled with attenuating objects: https://arxiv.org/abs/1609.07299 """ # TODO: Store data in some ODL controlled url url = 'http://www.fips.fi/dataset/CT_Lotus_v1/sinogram.mat' dct = get_data('lotus_root.mat', subset=DATA_SUBSET, url=url) # Change axes to match ODL definitions data = np.swapaxes(dct['sinogram'], 0, 1)[:, :] data = data.astype('float') return data
python
def lotus_root_data(): # TODO: Store data in some ODL controlled url url = 'http://www.fips.fi/dataset/CT_Lotus_v1/sinogram.mat' dct = get_data('lotus_root.mat', subset=DATA_SUBSET, url=url) # Change axes to match ODL definitions data = np.swapaxes(dct['sinogram'], 0, 1)[:, :] data = data.astype('float') return data
[ "def", "lotus_root_data", "(", ")", ":", "# TODO: Store data in some ODL controlled url", "url", "=", "'http://www.fips.fi/dataset/CT_Lotus_v1/sinogram.mat'", "dct", "=", "get_data", "(", "'lotus_root.mat'", ",", "subset", "=", "DATA_SUBSET", ",", "url", "=", "url", ")", ...
Tomographic X-ray data of a lotus root. Notes ----- See the article `Tomographic X-ray data of a lotus root filled with attenuating objects`_ for further information. See Also -------- lotus_root_geometry References ---------- .. _Tomographic X-ray data of a lotus root filled with attenuating objects: https://arxiv.org/abs/1609.07299
[ "Tomographic", "X", "-", "ray", "data", "of", "a", "lotus", "root", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/ct/fips.py#L90-L115
231,765
odlgroup/odl
odl/contrib/datasets/ct/fips.py
lotus_root_geometry
def lotus_root_geometry(): """Tomographic geometry for the lotus root dataset. Notes ----- See the article `Tomographic X-ray data of a lotus root filled with attenuating objects`_ for further information. See Also -------- lotus_root_geometry References ---------- .. _Tomographic X-ray data of a lotus root filled with attenuating objects: https://arxiv.org/abs/1609.07299 """ # To get the same rotation as in the reference article a_offset = np.pi / 2 apart = uniform_partition(a_offset, a_offset + 2 * np.pi * 366. / 360., 366) # TODO: Find exact value, determined experimentally d_offset = 0.35 dpart = uniform_partition(d_offset - 60, d_offset + 60, 2240) geometry = FanBeamGeometry(apart, dpart, src_radius=540, det_radius=90) return geometry
python
def lotus_root_geometry(): # To get the same rotation as in the reference article a_offset = np.pi / 2 apart = uniform_partition(a_offset, a_offset + 2 * np.pi * 366. / 360., 366) # TODO: Find exact value, determined experimentally d_offset = 0.35 dpart = uniform_partition(d_offset - 60, d_offset + 60, 2240) geometry = FanBeamGeometry(apart, dpart, src_radius=540, det_radius=90) return geometry
[ "def", "lotus_root_geometry", "(", ")", ":", "# To get the same rotation as in the reference article", "a_offset", "=", "np", ".", "pi", "/", "2", "apart", "=", "uniform_partition", "(", "a_offset", ",", "a_offset", "+", "2", "*", "np", ".", "pi", "*", "366.", ...
Tomographic geometry for the lotus root dataset. Notes ----- See the article `Tomographic X-ray data of a lotus root filled with attenuating objects`_ for further information. See Also -------- lotus_root_geometry References ---------- .. _Tomographic X-ray data of a lotus root filled with attenuating objects: https://arxiv.org/abs/1609.07299
[ "Tomographic", "geometry", "for", "the", "lotus", "root", "dataset", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/ct/fips.py#L118-L148
231,766
odlgroup/odl
odl/phantom/noise.py
poisson_noise
def poisson_noise(intensity, seed=None): r"""Poisson distributed noise with given intensity. Parameters ---------- intensity : `TensorSpace` or `ProductSpace` element The intensity (usually called lambda) parameter of the noise. Returns ------- poisson_noise : ``intensity.space`` element Poisson distributed random variable. seed : int, optional Random seed to use for generating the noise. For ``None``, use the current seed. Notes ----- For a Poisson distributed random variable :math:`X` with intensity :math:`\lambda`, the probability of it taking the value :math:`k \in \mathbb{N}_0` is given by .. math:: \frac{\lambda^k e^{-\lambda}}{k!} Note that the function only takes integer values. See Also -------- white_noise salt_pepper_noise uniform_noise numpy.random.poisson """ from odl.space import ProductSpace with NumpyRandomSeed(seed): if isinstance(intensity.space, ProductSpace): values = [poisson_noise(subintensity) for subintensity in intensity] else: values = np.random.poisson(intensity.asarray()) return intensity.space.element(values)
python
def poisson_noise(intensity, seed=None): r"""Poisson distributed noise with given intensity. Parameters ---------- intensity : `TensorSpace` or `ProductSpace` element The intensity (usually called lambda) parameter of the noise. Returns ------- poisson_noise : ``intensity.space`` element Poisson distributed random variable. seed : int, optional Random seed to use for generating the noise. For ``None``, use the current seed. Notes ----- For a Poisson distributed random variable :math:`X` with intensity :math:`\lambda`, the probability of it taking the value :math:`k \in \mathbb{N}_0` is given by .. math:: \frac{\lambda^k e^{-\lambda}}{k!} Note that the function only takes integer values. See Also -------- white_noise salt_pepper_noise uniform_noise numpy.random.poisson """ from odl.space import ProductSpace with NumpyRandomSeed(seed): if isinstance(intensity.space, ProductSpace): values = [poisson_noise(subintensity) for subintensity in intensity] else: values = np.random.poisson(intensity.asarray()) return intensity.space.element(values)
[ "def", "poisson_noise", "(", "intensity", ",", "seed", "=", "None", ")", ":", "from", "odl", ".", "space", "import", "ProductSpace", "with", "NumpyRandomSeed", "(", "seed", ")", ":", "if", "isinstance", "(", "intensity", ".", "space", ",", "ProductSpace", ...
r"""Poisson distributed noise with given intensity. Parameters ---------- intensity : `TensorSpace` or `ProductSpace` element The intensity (usually called lambda) parameter of the noise. Returns ------- poisson_noise : ``intensity.space`` element Poisson distributed random variable. seed : int, optional Random seed to use for generating the noise. For ``None``, use the current seed. Notes ----- For a Poisson distributed random variable :math:`X` with intensity :math:`\lambda`, the probability of it taking the value :math:`k \in \mathbb{N}_0` is given by .. math:: \frac{\lambda^k e^{-\lambda}}{k!} Note that the function only takes integer values. See Also -------- white_noise salt_pepper_noise uniform_noise numpy.random.poisson
[ "r", "Poisson", "distributed", "noise", "with", "given", "intensity", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/noise.py#L122-L165
231,767
odlgroup/odl
odl/phantom/noise.py
salt_pepper_noise
def salt_pepper_noise(vector, fraction=0.05, salt_vs_pepper=0.5, low_val=None, high_val=None, seed=None): """Add salt and pepper noise to vector. Salt and pepper noise replaces random elements in ``vector`` with ``low_val`` or ``high_val``. Parameters ---------- vector : element of `TensorSpace` or `ProductSpace` The vector that noise should be added to. fraction : float, optional The propotion of the elements in ``vector`` that should be converted to noise. salt_vs_pepper : float, optional Relative abundance of salt (high) vs pepper (low) noise. A high value means more salt than pepper noise. low_val : float, optional The "pepper" color in the noise. Default: minimum value of ``vector``. For product spaces the minimum value per subspace is taken. high_val : float, optional The "salt" value in the noise. Default: maximuim value of ``vector``. For product spaces the maximum value per subspace is taken. seed : int, optional Random seed to use for generating the noise. For ``None``, use the current seed. Returns ------- salt_pepper_noise : ``vector.space`` element ``vector`` with salt and pepper noise. 
See Also -------- white_noise poisson_noise uniform_noise """ from odl.space import ProductSpace # Validate input parameters fraction, fraction_in = float(fraction), fraction if not (0 <= fraction <= 1): raise ValueError('`fraction` ({}) should be a float in the interval ' '[0, 1]'.format(fraction_in)) salt_vs_pepper, salt_vs_pepper_in = float(salt_vs_pepper), salt_vs_pepper if not (0 <= salt_vs_pepper <= 1): raise ValueError('`salt_vs_pepper` ({}) should be a float in the ' 'interval [0, 1]'.format(salt_vs_pepper_in)) with NumpyRandomSeed(seed): if isinstance(vector.space, ProductSpace): values = [salt_pepper_noise(subintensity, fraction, salt_vs_pepper, low_val, high_val) for subintensity in vector] else: # Extract vector of values values = vector.asarray().flatten() # Determine fill-in values if not given if low_val is None: low_val = np.min(values) if high_val is None: high_val = np.max(values) # Create randomly selected points as a subset of image. a = np.arange(vector.size) np.random.shuffle(a) salt_indices = a[:int(fraction * vector.size * salt_vs_pepper)] pepper_indices = a[int(fraction * vector.size * salt_vs_pepper): int(fraction * vector.size)] values[salt_indices] = high_val values[pepper_indices] = -low_val values = values.reshape(vector.space.shape) return vector.space.element(values)
python
def salt_pepper_noise(vector, fraction=0.05, salt_vs_pepper=0.5, low_val=None, high_val=None, seed=None): from odl.space import ProductSpace # Validate input parameters fraction, fraction_in = float(fraction), fraction if not (0 <= fraction <= 1): raise ValueError('`fraction` ({}) should be a float in the interval ' '[0, 1]'.format(fraction_in)) salt_vs_pepper, salt_vs_pepper_in = float(salt_vs_pepper), salt_vs_pepper if not (0 <= salt_vs_pepper <= 1): raise ValueError('`salt_vs_pepper` ({}) should be a float in the ' 'interval [0, 1]'.format(salt_vs_pepper_in)) with NumpyRandomSeed(seed): if isinstance(vector.space, ProductSpace): values = [salt_pepper_noise(subintensity, fraction, salt_vs_pepper, low_val, high_val) for subintensity in vector] else: # Extract vector of values values = vector.asarray().flatten() # Determine fill-in values if not given if low_val is None: low_val = np.min(values) if high_val is None: high_val = np.max(values) # Create randomly selected points as a subset of image. a = np.arange(vector.size) np.random.shuffle(a) salt_indices = a[:int(fraction * vector.size * salt_vs_pepper)] pepper_indices = a[int(fraction * vector.size * salt_vs_pepper): int(fraction * vector.size)] values[salt_indices] = high_val values[pepper_indices] = -low_val values = values.reshape(vector.space.shape) return vector.space.element(values)
[ "def", "salt_pepper_noise", "(", "vector", ",", "fraction", "=", "0.05", ",", "salt_vs_pepper", "=", "0.5", ",", "low_val", "=", "None", ",", "high_val", "=", "None", ",", "seed", "=", "None", ")", ":", "from", "odl", ".", "space", "import", "ProductSpac...
Add salt and pepper noise to vector. Salt and pepper noise replaces random elements in ``vector`` with ``low_val`` or ``high_val``. Parameters ---------- vector : element of `TensorSpace` or `ProductSpace` The vector that noise should be added to. fraction : float, optional The propotion of the elements in ``vector`` that should be converted to noise. salt_vs_pepper : float, optional Relative abundance of salt (high) vs pepper (low) noise. A high value means more salt than pepper noise. low_val : float, optional The "pepper" color in the noise. Default: minimum value of ``vector``. For product spaces the minimum value per subspace is taken. high_val : float, optional The "salt" value in the noise. Default: maximuim value of ``vector``. For product spaces the maximum value per subspace is taken. seed : int, optional Random seed to use for generating the noise. For ``None``, use the current seed. Returns ------- salt_pepper_noise : ``vector.space`` element ``vector`` with salt and pepper noise. See Also -------- white_noise poisson_noise uniform_noise
[ "Add", "salt", "and", "pepper", "noise", "to", "vector", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/noise.py#L168-L247
231,768
odlgroup/odl
odl/discr/partition.py
uniform_partition_fromintv
def uniform_partition_fromintv(intv_prod, shape, nodes_on_bdry=False): """Return a partition of an interval product into equally sized cells. Parameters ---------- intv_prod : `IntervalProd` Interval product to be partitioned shape : int or sequence of ints Number of nodes per axis. For 1d intervals, a single integer can be specified. nodes_on_bdry : bool or sequence, optional If a sequence is provided, it determines per axis whether to place the last grid point on the boundary (``True``) or shift it by half a cell size into the interior (``False``). In each axis, an entry may consist in a single bool or a 2-tuple of bool. In the latter case, the first tuple entry decides for the left, the second for the right boundary. The length of the sequence must be ``intv_prod.ndim``. A single boolean is interpreted as a global choice for all boundaries. See Also -------- uniform_partition_fromgrid Examples -------- By default, no grid points are placed on the boundary: >>> interval = odl.IntervalProd(0, 1) >>> part = odl.uniform_partition_fromintv(interval, 4) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]),) >>> part.grid.coord_vectors (array([ 0.125, 0.375, 0.625, 0.875]),) This can be changed with the nodes_on_bdry parameter: >>> part = odl.uniform_partition_fromintv(interval, 3, ... nodes_on_bdry=True) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.75, 1. ]),) >>> part.grid.coord_vectors (array([ 0. , 0.5, 1. ]),) We can specify this per axis, too. In this case we choose both in the first axis and only the rightmost in the second: >>> rect = odl.IntervalProd([0, 0], [1, 1]) >>> part = odl.uniform_partition_fromintv( ... rect, (3, 3), nodes_on_bdry=(True, (False, True))) ... >>> part.cell_boundary_vecs[0] # first axis, as above array([ 0. , 0.25, 0.75, 1. ]) >>> part.grid.coord_vectors[0] array([ 0. , 0.5, 1. ]) >>> part.cell_boundary_vecs[1] # second, asymmetric axis array([ 0. , 0.4, 0.8, 1. ]) >>> part.grid.coord_vectors[1] array([ 0.2, 0.6, 1. 
]) """ grid = uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=nodes_on_bdry) return RectPartition(intv_prod, grid)
python
def uniform_partition_fromintv(intv_prod, shape, nodes_on_bdry=False): grid = uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=nodes_on_bdry) return RectPartition(intv_prod, grid)
[ "def", "uniform_partition_fromintv", "(", "intv_prod", ",", "shape", ",", "nodes_on_bdry", "=", "False", ")", ":", "grid", "=", "uniform_grid_fromintv", "(", "intv_prod", ",", "shape", ",", "nodes_on_bdry", "=", "nodes_on_bdry", ")", "return", "RectPartition", "("...
Return a partition of an interval product into equally sized cells. Parameters ---------- intv_prod : `IntervalProd` Interval product to be partitioned shape : int or sequence of ints Number of nodes per axis. For 1d intervals, a single integer can be specified. nodes_on_bdry : bool or sequence, optional If a sequence is provided, it determines per axis whether to place the last grid point on the boundary (``True``) or shift it by half a cell size into the interior (``False``). In each axis, an entry may consist in a single bool or a 2-tuple of bool. In the latter case, the first tuple entry decides for the left, the second for the right boundary. The length of the sequence must be ``intv_prod.ndim``. A single boolean is interpreted as a global choice for all boundaries. See Also -------- uniform_partition_fromgrid Examples -------- By default, no grid points are placed on the boundary: >>> interval = odl.IntervalProd(0, 1) >>> part = odl.uniform_partition_fromintv(interval, 4) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]),) >>> part.grid.coord_vectors (array([ 0.125, 0.375, 0.625, 0.875]),) This can be changed with the nodes_on_bdry parameter: >>> part = odl.uniform_partition_fromintv(interval, 3, ... nodes_on_bdry=True) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.75, 1. ]),) >>> part.grid.coord_vectors (array([ 0. , 0.5, 1. ]),) We can specify this per axis, too. In this case we choose both in the first axis and only the rightmost in the second: >>> rect = odl.IntervalProd([0, 0], [1, 1]) >>> part = odl.uniform_partition_fromintv( ... rect, (3, 3), nodes_on_bdry=(True, (False, True))) ... >>> part.cell_boundary_vecs[0] # first axis, as above array([ 0. , 0.25, 0.75, 1. ]) >>> part.grid.coord_vectors[0] array([ 0. , 0.5, 1. ]) >>> part.cell_boundary_vecs[1] # second, asymmetric axis array([ 0. , 0.4, 0.8, 1. ]) >>> part.grid.coord_vectors[1] array([ 0.2, 0.6, 1. ])
[ "Return", "a", "partition", "of", "an", "interval", "product", "into", "equally", "sized", "cells", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L943-L1006
231,769
odlgroup/odl
odl/discr/partition.py
uniform_partition_fromgrid
def uniform_partition_fromgrid(grid, min_pt=None, max_pt=None): """Return a partition of an interval product based on a given grid. This method is complementary to `uniform_partition_fromintv` in that it infers the set to be partitioned from a given grid and optional parameters for ``min_pt`` and ``max_pt`` of the set. Parameters ---------- grid : `RectGrid` Grid on which the partition is based min_pt, max_pt : float, sequence of floats, or dict, optional Spatial points defining the lower/upper limits of the intervals to be partitioned. The points can be specified in two ways: float or sequence: The values are used directly as ``min_pt`` and/or ``max_pt``. dict: Index-value pairs specifying an axis and a spatial coordinate to be used in that axis. In axes which are not a key in the dictionary, the coordinate for the vector is calculated as:: min_pt = x[0] - (x[1] - x[0]) / 2 max_pt = x[-1] + (x[-1] - x[-2]) / 2 See ``Examples`` below. In general, ``min_pt`` may not be larger than ``grid.min_pt``, and ``max_pt`` not smaller than ``grid.max_pt`` in any component. ``None`` is equivalent to an empty dictionary, i.e. the values are calculated in each dimension. See Also -------- uniform_partition_fromintv Examples -------- Have ``min_pt`` and ``max_pt`` of the bounding box automatically calculated: >>> grid = odl.uniform_grid(0, 1, 3) >>> grid.coord_vectors (array([ 0. , 0.5, 1. ]),) >>> part = odl.uniform_partition_fromgrid(grid) >>> part.cell_boundary_vecs (array([-0.25, 0.25, 0.75, 1.25]),) ``min_pt`` and ``max_pt`` can be given explicitly: >>> part = odl.uniform_partition_fromgrid(grid, min_pt=0, max_pt=1) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.75, 1. ]),) Using dictionaries, selective axes can be explicitly set. The keys refer to axes, the values to the coordinates to use: >>> grid = odl.uniform_grid([0, 0], [1, 1], (3, 3)) >>> part = odl.uniform_partition_fromgrid(grid, ... min_pt={0: -1}, max_pt={-1: 3}) >>> part.cell_boundary_vecs[0] array([-1. 
, 0.25, 0.75, 1.25]) >>> part.cell_boundary_vecs[1] array([-0.25, 0.25, 0.75, 3. ]) """ # Make dictionaries from `min_pt` and `max_pt` and fill with `None` where # no value is given (taking negative indices into account) if min_pt is None: min_pt = {i: None for i in range(grid.ndim)} elif not hasattr(min_pt, 'items'): # array-like min_pt = np.atleast_1d(min_pt) min_pt = {i: float(v) for i, v in enumerate(min_pt)} else: min_pt.update({i: None for i in range(grid.ndim) if i not in min_pt and i - grid.ndim not in min_pt}) if max_pt is None: max_pt = {i: None for i in range(grid.ndim)} elif not hasattr(max_pt, 'items'): max_pt = np.atleast_1d(max_pt) max_pt = {i: float(v) for i, v in enumerate(max_pt)} else: max_pt.update({i: None for i in range(grid.ndim) if i not in max_pt and i - grid.ndim not in max_pt}) # Set the values in the vectors by computing (None) or directly from the # given vectors (otherwise). min_pt_vec = np.empty(grid.ndim) for ax, xmin in min_pt.items(): if xmin is None: cvec = grid.coord_vectors[ax] if len(cvec) == 1: raise ValueError('in axis {}: cannot calculate `min_pt` with ' 'only 1 grid point'.format(ax)) min_pt_vec[ax] = cvec[0] - (cvec[1] - cvec[0]) / 2 else: min_pt_vec[ax] = xmin max_pt_vec = np.empty(grid.ndim) for ax, xmax in max_pt.items(): if xmax is None: cvec = grid.coord_vectors[ax] if len(cvec) == 1: raise ValueError('in axis {}: cannot calculate `max_pt` with ' 'only 1 grid point'.format(ax)) max_pt_vec[ax] = cvec[-1] + (cvec[-1] - cvec[-2]) / 2 else: max_pt_vec[ax] = xmax return RectPartition(IntervalProd(min_pt_vec, max_pt_vec), grid)
python
def uniform_partition_fromgrid(grid, min_pt=None, max_pt=None): # Make dictionaries from `min_pt` and `max_pt` and fill with `None` where # no value is given (taking negative indices into account) if min_pt is None: min_pt = {i: None for i in range(grid.ndim)} elif not hasattr(min_pt, 'items'): # array-like min_pt = np.atleast_1d(min_pt) min_pt = {i: float(v) for i, v in enumerate(min_pt)} else: min_pt.update({i: None for i in range(grid.ndim) if i not in min_pt and i - grid.ndim not in min_pt}) if max_pt is None: max_pt = {i: None for i in range(grid.ndim)} elif not hasattr(max_pt, 'items'): max_pt = np.atleast_1d(max_pt) max_pt = {i: float(v) for i, v in enumerate(max_pt)} else: max_pt.update({i: None for i in range(grid.ndim) if i not in max_pt and i - grid.ndim not in max_pt}) # Set the values in the vectors by computing (None) or directly from the # given vectors (otherwise). min_pt_vec = np.empty(grid.ndim) for ax, xmin in min_pt.items(): if xmin is None: cvec = grid.coord_vectors[ax] if len(cvec) == 1: raise ValueError('in axis {}: cannot calculate `min_pt` with ' 'only 1 grid point'.format(ax)) min_pt_vec[ax] = cvec[0] - (cvec[1] - cvec[0]) / 2 else: min_pt_vec[ax] = xmin max_pt_vec = np.empty(grid.ndim) for ax, xmax in max_pt.items(): if xmax is None: cvec = grid.coord_vectors[ax] if len(cvec) == 1: raise ValueError('in axis {}: cannot calculate `max_pt` with ' 'only 1 grid point'.format(ax)) max_pt_vec[ax] = cvec[-1] + (cvec[-1] - cvec[-2]) / 2 else: max_pt_vec[ax] = xmax return RectPartition(IntervalProd(min_pt_vec, max_pt_vec), grid)
[ "def", "uniform_partition_fromgrid", "(", "grid", ",", "min_pt", "=", "None", ",", "max_pt", "=", "None", ")", ":", "# Make dictionaries from `min_pt` and `max_pt` and fill with `None` where", "# no value is given (taking negative indices into account)", "if", "min_pt", "is", "...
Return a partition of an interval product based on a given grid. This method is complementary to `uniform_partition_fromintv` in that it infers the set to be partitioned from a given grid and optional parameters for ``min_pt`` and ``max_pt`` of the set. Parameters ---------- grid : `RectGrid` Grid on which the partition is based min_pt, max_pt : float, sequence of floats, or dict, optional Spatial points defining the lower/upper limits of the intervals to be partitioned. The points can be specified in two ways: float or sequence: The values are used directly as ``min_pt`` and/or ``max_pt``. dict: Index-value pairs specifying an axis and a spatial coordinate to be used in that axis. In axes which are not a key in the dictionary, the coordinate for the vector is calculated as:: min_pt = x[0] - (x[1] - x[0]) / 2 max_pt = x[-1] + (x[-1] - x[-2]) / 2 See ``Examples`` below. In general, ``min_pt`` may not be larger than ``grid.min_pt``, and ``max_pt`` not smaller than ``grid.max_pt`` in any component. ``None`` is equivalent to an empty dictionary, i.e. the values are calculated in each dimension. See Also -------- uniform_partition_fromintv Examples -------- Have ``min_pt`` and ``max_pt`` of the bounding box automatically calculated: >>> grid = odl.uniform_grid(0, 1, 3) >>> grid.coord_vectors (array([ 0. , 0.5, 1. ]),) >>> part = odl.uniform_partition_fromgrid(grid) >>> part.cell_boundary_vecs (array([-0.25, 0.25, 0.75, 1.25]),) ``min_pt`` and ``max_pt`` can be given explicitly: >>> part = odl.uniform_partition_fromgrid(grid, min_pt=0, max_pt=1) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.75, 1. ]),) Using dictionaries, selective axes can be explicitly set. The keys refer to axes, the values to the coordinates to use: >>> grid = odl.uniform_grid([0, 0], [1, 1], (3, 3)) >>> part = odl.uniform_partition_fromgrid(grid, ... min_pt={0: -1}, max_pt={-1: 3}) >>> part.cell_boundary_vecs[0] array([-1. 
, 0.25, 0.75, 1.25]) >>> part.cell_boundary_vecs[1] array([-0.25, 0.25, 0.75, 3. ])
[ "Return", "a", "partition", "of", "an", "interval", "product", "based", "on", "a", "given", "grid", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L1009-L1119
231,770
odlgroup/odl
odl/discr/partition.py
uniform_partition
def uniform_partition(min_pt=None, max_pt=None, shape=None, cell_sides=None, nodes_on_bdry=False): """Return a partition with equally sized cells. Parameters ---------- min_pt, max_pt : float or sequence of float, optional Vectors defining the lower/upper limits of the intervals in an `IntervalProd` (a rectangular box). ``None`` entries mean "compute the value". shape : int or sequence of ints, optional Number of nodes per axis. ``None`` entries mean "compute the value". cell_sides : float or sequence of floats, optional Side length of the partition cells per axis. ``None`` entries mean "compute the value". nodes_on_bdry : bool or sequence, optional If a sequence is provided, it determines per axis whether to place the last grid point on the boundary (``True``) or shift it by half a cell size into the interior (``False``). In each axis, an entry may consist in a single bool or a 2-tuple of bool. In the latter case, the first tuple entry decides for the left, the second for the right boundary. The length of the sequence must be ``array.ndim``. A single boolean is interpreted as a global choice for all boundaries. Notes ----- In each axis, 3 of the 4 possible parameters ``min_pt``, ``max_pt``, ``shape`` and ``cell_sides`` must be given. If all four are provided, they are checked for consistency. See Also -------- uniform_partition_fromintv : partition an existing set uniform_partition_fromgrid : use an existing grid as basis Examples -------- Any combination of three of the four parameters can be used for creation of a partition: >>> part = odl.uniform_partition(min_pt=0, max_pt=2, shape=4) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. , 1.5, 2. ]),) >>> part = odl.uniform_partition(min_pt=0, shape=4, cell_sides=0.5) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. , 1.5, 2. ]),) >>> part = odl.uniform_partition(max_pt=2, shape=4, cell_sides=0.5) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. , 1.5, 2. 
]),) >>> part = odl.uniform_partition(min_pt=0, max_pt=2, cell_sides=0.5) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. , 1.5, 2. ]),) In higher dimensions, the parameters can be given differently in each axis. Where ``None`` is given, the value will be computed: >>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2], ... shape=[4, 2]) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.])) >>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2], ... shape=[None, 2], cell_sides=[0.25, None]) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.])) >>> part = odl.uniform_partition(min_pt=[0, None], max_pt=[None, 2], ... shape=[4, 2], cell_sides=[0.25, 1]) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.])) By default, no grid points are placed on the boundary: >>> part = odl.uniform_partition(0, 1, 4) >>> part.nodes_on_bdry False >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]),) >>> part.grid.coord_vectors (array([ 0.125, 0.375, 0.625, 0.875]),) This can be changed with the nodes_on_bdry parameter: >>> part = odl.uniform_partition(0, 1, 3, nodes_on_bdry=True) >>> part.nodes_on_bdry True >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.75, 1. ]),) >>> part.grid.coord_vectors (array([ 0. , 0.5, 1. ]),) We can specify this per axis, too. In this case we choose both in the first axis and only the rightmost in the second: >>> part = odl.uniform_partition([0, 0], [1, 1], (3, 3), ... nodes_on_bdry=(True, (False, True))) ... >>> part.cell_boundary_vecs[0] # first axis, as above array([ 0. , 0.25, 0.75, 1. ]) >>> part.grid.coord_vectors[0] array([ 0. , 0.5, 1. ]) >>> part.cell_boundary_vecs[1] # second, asymmetric axis array([ 0. , 0.4, 0.8, 1. ]) >>> part.grid.coord_vectors[1] array([ 0.2, 0.6, 1. 
]) """ # Normalize partition parameters # np.size(None) == 1, so that would screw it for sizes 0 of the rest sizes = [np.size(p) for p in (min_pt, max_pt, shape, cell_sides) if p is not None] ndim = int(np.max(sizes)) min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float, keep_none=True) max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float, keep_none=True) shape = normalized_scalar_param_list(shape, ndim, param_conv=safe_int_conv, keep_none=True) cell_sides = normalized_scalar_param_list(cell_sides, ndim, param_conv=float, keep_none=True) nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim) # Calculate the missing parameters in min_pt, max_pt, shape for i, (xmin, xmax, n, dx, on_bdry) in enumerate( zip(min_pt, max_pt, shape, cell_sides, nodes_on_bdry)): num_params = sum(p is not None for p in (xmin, xmax, n, dx)) if num_params < 3: raise ValueError('in axis {}: expected at least 3 of the ' 'parameters `min_pt`, `max_pt`, `shape`, ' '`cell_sides`, got {}' ''.format(i, num_params)) # Unpack the tuple if possible, else use bool globally for this axis try: bdry_l, bdry_r = on_bdry except TypeError: bdry_l = bdry_r = on_bdry # For each node on the boundary, we subtract 1/2 from the number of # full cells between min_pt and max_pt. if xmin is None: min_pt[i] = xmax - (n - sum([bdry_l, bdry_r]) / 2.0) * dx elif xmax is None: max_pt[i] = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx elif n is None: # Here we add to n since (e-b)/s gives the reduced number of cells. 
n_calc = (xmax - xmin) / dx + sum([bdry_l, bdry_r]) / 2.0 n_round = int(round(n_calc)) if abs(n_calc - n_round) > 1e-5: raise ValueError('in axis {}: calculated number of nodes ' '{} = ({} - {}) / {} too far from integer' ''.format(i, n_calc, xmax, xmin, dx)) shape[i] = n_round elif dx is None: pass else: xmax_calc = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx if not np.isclose(xmax, xmax_calc): raise ValueError('in axis {}: calculated endpoint ' '{} = {} + {} * {} too far from given ' 'endpoint {}.' ''.format(i, xmax_calc, xmin, n, dx, xmax)) return uniform_partition_fromintv( IntervalProd(min_pt, max_pt), shape, nodes_on_bdry)
python
def uniform_partition(min_pt=None, max_pt=None, shape=None, cell_sides=None, nodes_on_bdry=False): # Normalize partition parameters # np.size(None) == 1, so that would screw it for sizes 0 of the rest sizes = [np.size(p) for p in (min_pt, max_pt, shape, cell_sides) if p is not None] ndim = int(np.max(sizes)) min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float, keep_none=True) max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float, keep_none=True) shape = normalized_scalar_param_list(shape, ndim, param_conv=safe_int_conv, keep_none=True) cell_sides = normalized_scalar_param_list(cell_sides, ndim, param_conv=float, keep_none=True) nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim) # Calculate the missing parameters in min_pt, max_pt, shape for i, (xmin, xmax, n, dx, on_bdry) in enumerate( zip(min_pt, max_pt, shape, cell_sides, nodes_on_bdry)): num_params = sum(p is not None for p in (xmin, xmax, n, dx)) if num_params < 3: raise ValueError('in axis {}: expected at least 3 of the ' 'parameters `min_pt`, `max_pt`, `shape`, ' '`cell_sides`, got {}' ''.format(i, num_params)) # Unpack the tuple if possible, else use bool globally for this axis try: bdry_l, bdry_r = on_bdry except TypeError: bdry_l = bdry_r = on_bdry # For each node on the boundary, we subtract 1/2 from the number of # full cells between min_pt and max_pt. if xmin is None: min_pt[i] = xmax - (n - sum([bdry_l, bdry_r]) / 2.0) * dx elif xmax is None: max_pt[i] = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx elif n is None: # Here we add to n since (e-b)/s gives the reduced number of cells. 
n_calc = (xmax - xmin) / dx + sum([bdry_l, bdry_r]) / 2.0 n_round = int(round(n_calc)) if abs(n_calc - n_round) > 1e-5: raise ValueError('in axis {}: calculated number of nodes ' '{} = ({} - {}) / {} too far from integer' ''.format(i, n_calc, xmax, xmin, dx)) shape[i] = n_round elif dx is None: pass else: xmax_calc = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx if not np.isclose(xmax, xmax_calc): raise ValueError('in axis {}: calculated endpoint ' '{} = {} + {} * {} too far from given ' 'endpoint {}.' ''.format(i, xmax_calc, xmin, n, dx, xmax)) return uniform_partition_fromintv( IntervalProd(min_pt, max_pt), shape, nodes_on_bdry)
[ "def", "uniform_partition", "(", "min_pt", "=", "None", ",", "max_pt", "=", "None", ",", "shape", "=", "None", ",", "cell_sides", "=", "None", ",", "nodes_on_bdry", "=", "False", ")", ":", "# Normalize partition parameters", "# np.size(None) == 1, so that would scre...
Return a partition with equally sized cells. Parameters ---------- min_pt, max_pt : float or sequence of float, optional Vectors defining the lower/upper limits of the intervals in an `IntervalProd` (a rectangular box). ``None`` entries mean "compute the value". shape : int or sequence of ints, optional Number of nodes per axis. ``None`` entries mean "compute the value". cell_sides : float or sequence of floats, optional Side length of the partition cells per axis. ``None`` entries mean "compute the value". nodes_on_bdry : bool or sequence, optional If a sequence is provided, it determines per axis whether to place the last grid point on the boundary (``True``) or shift it by half a cell size into the interior (``False``). In each axis, an entry may consist in a single bool or a 2-tuple of bool. In the latter case, the first tuple entry decides for the left, the second for the right boundary. The length of the sequence must be ``array.ndim``. A single boolean is interpreted as a global choice for all boundaries. Notes ----- In each axis, 3 of the 4 possible parameters ``min_pt``, ``max_pt``, ``shape`` and ``cell_sides`` must be given. If all four are provided, they are checked for consistency. See Also -------- uniform_partition_fromintv : partition an existing set uniform_partition_fromgrid : use an existing grid as basis Examples -------- Any combination of three of the four parameters can be used for creation of a partition: >>> part = odl.uniform_partition(min_pt=0, max_pt=2, shape=4) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. , 1.5, 2. ]),) >>> part = odl.uniform_partition(min_pt=0, shape=4, cell_sides=0.5) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. , 1.5, 2. ]),) >>> part = odl.uniform_partition(max_pt=2, shape=4, cell_sides=0.5) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. , 1.5, 2. ]),) >>> part = odl.uniform_partition(min_pt=0, max_pt=2, cell_sides=0.5) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. , 1.5, 2. 
]),) In higher dimensions, the parameters can be given differently in each axis. Where ``None`` is given, the value will be computed: >>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2], ... shape=[4, 2]) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.])) >>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2], ... shape=[None, 2], cell_sides=[0.25, None]) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.])) >>> part = odl.uniform_partition(min_pt=[0, None], max_pt=[None, 2], ... shape=[4, 2], cell_sides=[0.25, 1]) >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.])) By default, no grid points are placed on the boundary: >>> part = odl.uniform_partition(0, 1, 4) >>> part.nodes_on_bdry False >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.5 , 0.75, 1. ]),) >>> part.grid.coord_vectors (array([ 0.125, 0.375, 0.625, 0.875]),) This can be changed with the nodes_on_bdry parameter: >>> part = odl.uniform_partition(0, 1, 3, nodes_on_bdry=True) >>> part.nodes_on_bdry True >>> part.cell_boundary_vecs (array([ 0. , 0.25, 0.75, 1. ]),) >>> part.grid.coord_vectors (array([ 0. , 0.5, 1. ]),) We can specify this per axis, too. In this case we choose both in the first axis and only the rightmost in the second: >>> part = odl.uniform_partition([0, 0], [1, 1], (3, 3), ... nodes_on_bdry=(True, (False, True))) ... >>> part.cell_boundary_vecs[0] # first axis, as above array([ 0. , 0.25, 0.75, 1. ]) >>> part.grid.coord_vectors[0] array([ 0. , 0.5, 1. ]) >>> part.cell_boundary_vecs[1] # second, asymmetric axis array([ 0. , 0.4, 0.8, 1. ]) >>> part.grid.coord_vectors[1] array([ 0.2, 0.6, 1. ])
[ "Return", "a", "partition", "with", "equally", "sized", "cells", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L1122-L1290
231,771
odlgroup/odl
odl/discr/partition.py
nonuniform_partition
def nonuniform_partition(*coord_vecs, **kwargs): """Return a partition with un-equally sized cells. Parameters ---------- coord_vecs1, ... coord_vecsN : `array-like` Arrays of coordinates of the mid-points of the partition cells. min_pt, max_pt : float or sequence of floats, optional Vectors defining the lower/upper limits of the intervals in an `IntervalProd` (a rectangular box). ``None`` entries mean "compute the value". nodes_on_bdry : bool or sequence, optional If a sequence is provided, it determines per axis whether to place the last grid point on the boundary (``True``) or shift it by half a cell size into the interior (``False``). In each axis, an entry may consist in a single bool or a 2-tuple of bool. In the latter case, the first tuple entry decides for the left, the second for the right boundary. The length of the sequence must be ``array.ndim``. A single boolean is interpreted as a global choice for all boundaries. Cannot be given with both min_pt and max_pt since they determine the same thing. Default: ``False`` See Also -------- uniform_partition : uniformly spaced points uniform_partition_fromintv : partition an existing set uniform_partition_fromgrid : use an existing grid as basis Examples -------- With uniformly spaced points the result is the same as a uniform partition: >>> odl.nonuniform_partition([0, 1, 2, 3]) uniform_partition(-0.5, 3.5, 4) >>> odl.nonuniform_partition([0, 1, 2, 3], [1, 2]) uniform_partition([-0.5, 0.5], [ 3.5, 2.5], (4, 2)) If the points are not uniformly spaced, a nonuniform partition is created. Note that the containing interval is calculated by assuming that the points are in the middle of the sub-intervals: >>> odl.nonuniform_partition([0, 1, 3]) nonuniform_partition( [ 0., 1., 3.] ) Higher dimensional partitions are created by specifying the gridpoints along each dimension: >>> odl.nonuniform_partition([0, 1, 3], [1, 2]) nonuniform_partition( [ 0., 1., 3.], [ 1., 2.] 
) Partitions with a single element are by default degenerate >>> odl.nonuniform_partition(1) uniform_partition(1.0, 1.0, 1, nodes_on_bdry=True) If the endpoints should be on the boundary, the ``nodes_on_bdry`` parameter can be used: >>> odl.nonuniform_partition([0, 1, 3], nodes_on_bdry=True) nonuniform_partition( [ 0., 1., 3.], nodes_on_bdry=True ) Users can also manually specify the containing intervals dimensions by using the ``min_pt`` and ``max_pt`` arguments: >>> odl.nonuniform_partition([0, 1, 3], min_pt=-2, max_pt=3) nonuniform_partition( [ 0., 1., 3.], min_pt=-2.0, max_pt=3.0 ) """ # Get parameters from kwargs min_pt = kwargs.pop('min_pt', None) max_pt = kwargs.pop('max_pt', None) nodes_on_bdry = kwargs.pop('nodes_on_bdry', False) # np.size(None) == 1 sizes = [len(coord_vecs)] + [np.size(p) for p in (min_pt, max_pt)] ndim = int(np.max(sizes)) min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float, keep_none=True) max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float, keep_none=True) nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim) # Calculate the missing parameters in min_pt, max_pt for i, (xmin, xmax, (bdry_l, bdry_r), coords) in enumerate( zip(min_pt, max_pt, nodes_on_bdry, coord_vecs)): # Check input for redundancy if xmin is not None and bdry_l: raise ValueError('in axis {}: got both `min_pt` and ' '`nodes_on_bdry=True`'.format(i)) if xmax is not None and bdry_r: raise ValueError('in axis {}: got both `max_pt` and ' '`nodes_on_bdry=True`'.format(i)) # Handle length 1 inputs coords = np.array(coords, copy=False, ndmin=1) # Compute boundary position if not given by user if xmin is None: if bdry_l or len(coords) == 1: min_pt[i] = coords[0] else: min_pt[i] = coords[0] - (coords[1] - coords[0]) / 2.0 if xmax is None: if bdry_r or len(coords) == 1: max_pt[i] = coords[-1] else: max_pt[i] = coords[-1] + (coords[-1] - coords[-2]) / 2.0 interval = IntervalProd(min_pt, max_pt) grid = RectGrid(*coord_vecs) return 
RectPartition(interval, grid)
python
def nonuniform_partition(*coord_vecs, **kwargs): # Get parameters from kwargs min_pt = kwargs.pop('min_pt', None) max_pt = kwargs.pop('max_pt', None) nodes_on_bdry = kwargs.pop('nodes_on_bdry', False) # np.size(None) == 1 sizes = [len(coord_vecs)] + [np.size(p) for p in (min_pt, max_pt)] ndim = int(np.max(sizes)) min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float, keep_none=True) max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float, keep_none=True) nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim) # Calculate the missing parameters in min_pt, max_pt for i, (xmin, xmax, (bdry_l, bdry_r), coords) in enumerate( zip(min_pt, max_pt, nodes_on_bdry, coord_vecs)): # Check input for redundancy if xmin is not None and bdry_l: raise ValueError('in axis {}: got both `min_pt` and ' '`nodes_on_bdry=True`'.format(i)) if xmax is not None and bdry_r: raise ValueError('in axis {}: got both `max_pt` and ' '`nodes_on_bdry=True`'.format(i)) # Handle length 1 inputs coords = np.array(coords, copy=False, ndmin=1) # Compute boundary position if not given by user if xmin is None: if bdry_l or len(coords) == 1: min_pt[i] = coords[0] else: min_pt[i] = coords[0] - (coords[1] - coords[0]) / 2.0 if xmax is None: if bdry_r or len(coords) == 1: max_pt[i] = coords[-1] else: max_pt[i] = coords[-1] + (coords[-1] - coords[-2]) / 2.0 interval = IntervalProd(min_pt, max_pt) grid = RectGrid(*coord_vecs) return RectPartition(interval, grid)
[ "def", "nonuniform_partition", "(", "*", "coord_vecs", ",", "*", "*", "kwargs", ")", ":", "# Get parameters from kwargs", "min_pt", "=", "kwargs", ".", "pop", "(", "'min_pt'", ",", "None", ")", "max_pt", "=", "kwargs", ".", "pop", "(", "'max_pt'", ",", "No...
Return a partition with un-equally sized cells. Parameters ---------- coord_vecs1, ... coord_vecsN : `array-like` Arrays of coordinates of the mid-points of the partition cells. min_pt, max_pt : float or sequence of floats, optional Vectors defining the lower/upper limits of the intervals in an `IntervalProd` (a rectangular box). ``None`` entries mean "compute the value". nodes_on_bdry : bool or sequence, optional If a sequence is provided, it determines per axis whether to place the last grid point on the boundary (``True``) or shift it by half a cell size into the interior (``False``). In each axis, an entry may consist in a single bool or a 2-tuple of bool. In the latter case, the first tuple entry decides for the left, the second for the right boundary. The length of the sequence must be ``array.ndim``. A single boolean is interpreted as a global choice for all boundaries. Cannot be given with both min_pt and max_pt since they determine the same thing. Default: ``False`` See Also -------- uniform_partition : uniformly spaced points uniform_partition_fromintv : partition an existing set uniform_partition_fromgrid : use an existing grid as basis Examples -------- With uniformly spaced points the result is the same as a uniform partition: >>> odl.nonuniform_partition([0, 1, 2, 3]) uniform_partition(-0.5, 3.5, 4) >>> odl.nonuniform_partition([0, 1, 2, 3], [1, 2]) uniform_partition([-0.5, 0.5], [ 3.5, 2.5], (4, 2)) If the points are not uniformly spaced, a nonuniform partition is created. Note that the containing interval is calculated by assuming that the points are in the middle of the sub-intervals: >>> odl.nonuniform_partition([0, 1, 3]) nonuniform_partition( [ 0., 1., 3.] ) Higher dimensional partitions are created by specifying the gridpoints along each dimension: >>> odl.nonuniform_partition([0, 1, 3], [1, 2]) nonuniform_partition( [ 0., 1., 3.], [ 1., 2.] 
) Partitions with a single element are by default degenerate >>> odl.nonuniform_partition(1) uniform_partition(1.0, 1.0, 1, nodes_on_bdry=True) If the endpoints should be on the boundary, the ``nodes_on_bdry`` parameter can be used: >>> odl.nonuniform_partition([0, 1, 3], nodes_on_bdry=True) nonuniform_partition( [ 0., 1., 3.], nodes_on_bdry=True ) Users can also manually specify the containing intervals dimensions by using the ``min_pt`` and ``max_pt`` arguments: >>> odl.nonuniform_partition([0, 1, 3], min_pt=-2, max_pt=3) nonuniform_partition( [ 0., 1., 3.], min_pt=-2.0, max_pt=3.0 )
[ "Return", "a", "partition", "with", "un", "-", "equally", "sized", "cells", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L1293-L1421
231,772
odlgroup/odl
odl/discr/partition.py
RectPartition.nodes_on_bdry
def nodes_on_bdry(self): """Encoding of grid points lying on the boundary. Examples -------- Using global option (default ``False``): >>> part = odl.nonuniform_partition([0, 2, 3], [1, 3]) >>> part.nodes_on_bdry False >>> part = odl.nonuniform_partition([0, 2, 3], [1, 3], ... nodes_on_bdry=True) >>> part.nodes_on_bdry True ``False`` in axis 0, ``True`` in axis 1: >>> part = odl.nonuniform_partition([0, 2, 3], [1, 3], ... nodes_on_bdry=[False, True]) >>> part.nodes_on_bdry (False, True) In axis 0, ``False`` left and ``True`` right, in axis 1 ``False``: >>> part = odl.nonuniform_partition([0, 2, 3], [1, 3], ... nodes_on_bdry=[[False, True], ... False]) >>> part.nodes_on_bdry ((False, True), False) """ if self.size == 0: return True nodes_on_bdry = [] for on_bdry in self.nodes_on_bdry_byaxis: left, right = on_bdry if left == right: nodes_on_bdry.append(left) else: nodes_on_bdry.append((left, right)) if all(on_bdry == nodes_on_bdry[0] for on_bdry in nodes_on_bdry[1:]): return nodes_on_bdry[0] else: return tuple(nodes_on_bdry)
python
def nodes_on_bdry(self): if self.size == 0: return True nodes_on_bdry = [] for on_bdry in self.nodes_on_bdry_byaxis: left, right = on_bdry if left == right: nodes_on_bdry.append(left) else: nodes_on_bdry.append((left, right)) if all(on_bdry == nodes_on_bdry[0] for on_bdry in nodes_on_bdry[1:]): return nodes_on_bdry[0] else: return tuple(nodes_on_bdry)
[ "def", "nodes_on_bdry", "(", "self", ")", ":", "if", "self", ".", "size", "==", "0", ":", "return", "True", "nodes_on_bdry", "=", "[", "]", "for", "on_bdry", "in", "self", ".", "nodes_on_bdry_byaxis", ":", "left", ",", "right", "=", "on_bdry", "if", "l...
Encoding of grid points lying on the boundary. Examples -------- Using global option (default ``False``): >>> part = odl.nonuniform_partition([0, 2, 3], [1, 3]) >>> part.nodes_on_bdry False >>> part = odl.nonuniform_partition([0, 2, 3], [1, 3], ... nodes_on_bdry=True) >>> part.nodes_on_bdry True ``False`` in axis 0, ``True`` in axis 1: >>> part = odl.nonuniform_partition([0, 2, 3], [1, 3], ... nodes_on_bdry=[False, True]) >>> part.nodes_on_bdry (False, True) In axis 0, ``False`` left and ``True`` right, in axis 1 ``False``: >>> part = odl.nonuniform_partition([0, 2, 3], [1, 3], ... nodes_on_bdry=[[False, True], ... False]) >>> part.nodes_on_bdry ((False, True), False)
[ "Encoding", "of", "grid", "points", "lying", "on", "the", "boundary", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L118-L161
231,773
odlgroup/odl
odl/discr/partition.py
RectPartition.has_isotropic_cells
def has_isotropic_cells(self): """``True`` if `grid` is uniform and `cell_sides` are all equal. Always ``True`` for 1D partitions. Examples -------- >>> part = uniform_partition([0, -1], [1, 1], (5, 10)) >>> part.has_isotropic_cells True >>> part = uniform_partition([0, -1], [1, 1], (5, 5)) >>> part.has_isotropic_cells False """ return self.is_uniform and np.allclose(self.cell_sides[:-1], self.cell_sides[1:])
python
def has_isotropic_cells(self): return self.is_uniform and np.allclose(self.cell_sides[:-1], self.cell_sides[1:])
[ "def", "has_isotropic_cells", "(", "self", ")", ":", "return", "self", ".", "is_uniform", "and", "np", ".", "allclose", "(", "self", ".", "cell_sides", "[", ":", "-", "1", "]", ",", "self", ".", "cell_sides", "[", "1", ":", "]", ")" ]
``True`` if `grid` is uniform and `cell_sides` are all equal. Always ``True`` for 1D partitions. Examples -------- >>> part = uniform_partition([0, -1], [1, 1], (5, 10)) >>> part.has_isotropic_cells True >>> part = uniform_partition([0, -1], [1, 1], (5, 5)) >>> part.has_isotropic_cells False
[ "True", "if", "grid", "is", "uniform", "and", "cell_sides", "are", "all", "equal", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L236-L251
231,774
odlgroup/odl
odl/discr/partition.py
RectPartition.boundary_cell_fractions
def boundary_cell_fractions(self): """Return a tuple of contained fractions of boundary cells. Since the outermost grid points can have any distance to the boundary of the partitioned set, the "natural" outermost cell around these points can either be cropped or extended. This property is a tuple of (float, float) tuples, one entry per dimension, where the fractions of the left- and rightmost cells inside the set are stored. If a grid point lies exactly on the boundary, the value is 1/2 since the cell is cut in half. Otherwise, any value larger than 1/2 is possible. Returns ------- on_bdry : tuple of 2-tuples of floats Each 2-tuple contains the fraction of the leftmost (first entry) and rightmost (second entry) cell in the partitioned set in the corresponding dimension. See Also -------- cell_boundary_vecs Examples -------- We create a partition of the rectangle [0, 1.5] x [-2, 2] with the grid points [0, 1] x [-1, 0, 2]. The "natural" cells at the boundary would be: [-0.5, 0.5] and [0.5, 1.5] in the first axis [-1.5, -0.5] and [1, 3] in the second axis Thus, in the first axis, the fractions contained in [0, 1.5] are 0.5 and 1, and in the second axis, [-2, 2] contains the fractions 1.5 and 0.5. >>> rect = odl.IntervalProd([0, -2], [1.5, 2]) >>> grid = odl.RectGrid([0, 1], [-1, 0, 2]) >>> part = odl.RectPartition(rect, grid) >>> part.boundary_cell_fractions ((0.5, 1.0), (1.5, 0.5)) """ frac_list = [] for ax, (cvec, bmin, bmax) in enumerate(zip( self.grid.coord_vectors, self.set.min_pt, self.set.max_pt)): # Degenerate axes have a value of 1.0 (this is used as weight # in integration formulas later) if len(cvec) == 1: frac_list.append((1.0, 1.0)) else: left_frac = 0.5 + (cvec[0] - bmin) / (cvec[1] - cvec[0]) right_frac = 0.5 + (bmax - cvec[-1]) / (cvec[-1] - cvec[-2]) frac_list.append((left_frac, right_frac)) return tuple(frac_list)
python
def boundary_cell_fractions(self): frac_list = [] for ax, (cvec, bmin, bmax) in enumerate(zip( self.grid.coord_vectors, self.set.min_pt, self.set.max_pt)): # Degenerate axes have a value of 1.0 (this is used as weight # in integration formulas later) if len(cvec) == 1: frac_list.append((1.0, 1.0)) else: left_frac = 0.5 + (cvec[0] - bmin) / (cvec[1] - cvec[0]) right_frac = 0.5 + (bmax - cvec[-1]) / (cvec[-1] - cvec[-2]) frac_list.append((left_frac, right_frac)) return tuple(frac_list)
[ "def", "boundary_cell_fractions", "(", "self", ")", ":", "frac_list", "=", "[", "]", "for", "ax", ",", "(", "cvec", ",", "bmin", ",", "bmax", ")", "in", "enumerate", "(", "zip", "(", "self", ".", "grid", ".", "coord_vectors", ",", "self", ".", "set",...
Return a tuple of contained fractions of boundary cells. Since the outermost grid points can have any distance to the boundary of the partitioned set, the "natural" outermost cell around these points can either be cropped or extended. This property is a tuple of (float, float) tuples, one entry per dimension, where the fractions of the left- and rightmost cells inside the set are stored. If a grid point lies exactly on the boundary, the value is 1/2 since the cell is cut in half. Otherwise, any value larger than 1/2 is possible. Returns ------- on_bdry : tuple of 2-tuples of floats Each 2-tuple contains the fraction of the leftmost (first entry) and rightmost (second entry) cell in the partitioned set in the corresponding dimension. See Also -------- cell_boundary_vecs Examples -------- We create a partition of the rectangle [0, 1.5] x [-2, 2] with the grid points [0, 1] x [-1, 0, 2]. The "natural" cells at the boundary would be: [-0.5, 0.5] and [0.5, 1.5] in the first axis [-1.5, -0.5] and [1, 3] in the second axis Thus, in the first axis, the fractions contained in [0, 1.5] are 0.5 and 1, and in the second axis, [-2, 2] contains the fractions 1.5 and 0.5. >>> rect = odl.IntervalProd([0, -2], [1.5, 2]) >>> grid = odl.RectGrid([0, 1], [-1, 0, 2]) >>> part = odl.RectPartition(rect, grid) >>> part.boundary_cell_fractions ((0.5, 1.0), (1.5, 0.5))
[ "Return", "a", "tuple", "of", "contained", "fractions", "of", "boundary", "cells", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L308-L363
231,775
odlgroup/odl
odl/discr/partition.py
RectPartition.cell_sizes_vecs
def cell_sizes_vecs(self): """Return the cell sizes as coordinate vectors. Returns ------- csizes : tuple of `numpy.ndarray`'s The cell sizes per axis. The length of the vectors is the same as the corresponding ``grid.coord_vectors``. For axes with 1 grid point, cell size is set to 0.0. Examples -------- We create a partition of the rectangle [0, 1] x [-1, 2] into 2 x 3 cells with the grid points [0, 1] x [-1, 0, 2]. This implies that the cell boundaries are given as [0, 0.5, 1] x [-1, -0.5, 1, 2], hence the cell size vectors are [0.5, 0.5] x [0.5, 1.5, 1]: >>> rect = odl.IntervalProd([0, -1], [1, 2]) >>> grid = odl.RectGrid([0, 1], [-1, 0, 2]) >>> part = odl.RectPartition(rect, grid) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. ]), array([-1. , -0.5, 1. , 2. ])) >>> part.cell_sizes_vecs (array([ 0.5, 0.5]), array([ 0.5, 1.5, 1. ])) """ csizes = [] for ax, cvec in enumerate(self.grid.coord_vectors): if len(cvec) == 1: csizes.append(np.array([0.0])) else: csize = np.empty_like(cvec) csize[1:-1] = (cvec[2:] - cvec[:-2]) / 2.0 csize[0] = (cvec[0] + cvec[1]) / 2 - self.min()[ax] csize[-1] = self.max()[ax] - (cvec[-2] + cvec[-1]) / 2 csizes.append(csize) return tuple(csizes)
python
def cell_sizes_vecs(self): csizes = [] for ax, cvec in enumerate(self.grid.coord_vectors): if len(cvec) == 1: csizes.append(np.array([0.0])) else: csize = np.empty_like(cvec) csize[1:-1] = (cvec[2:] - cvec[:-2]) / 2.0 csize[0] = (cvec[0] + cvec[1]) / 2 - self.min()[ax] csize[-1] = self.max()[ax] - (cvec[-2] + cvec[-1]) / 2 csizes.append(csize) return tuple(csizes)
[ "def", "cell_sizes_vecs", "(", "self", ")", ":", "csizes", "=", "[", "]", "for", "ax", ",", "cvec", "in", "enumerate", "(", "self", ".", "grid", ".", "coord_vectors", ")", ":", "if", "len", "(", "cvec", ")", "==", "1", ":", "csizes", ".", "append",...
Return the cell sizes as coordinate vectors. Returns ------- csizes : tuple of `numpy.ndarray`'s The cell sizes per axis. The length of the vectors is the same as the corresponding ``grid.coord_vectors``. For axes with 1 grid point, cell size is set to 0.0. Examples -------- We create a partition of the rectangle [0, 1] x [-1, 2] into 2 x 3 cells with the grid points [0, 1] x [-1, 0, 2]. This implies that the cell boundaries are given as [0, 0.5, 1] x [-1, -0.5, 1, 2], hence the cell size vectors are [0.5, 0.5] x [0.5, 1.5, 1]: >>> rect = odl.IntervalProd([0, -1], [1, 2]) >>> grid = odl.RectGrid([0, 1], [-1, 0, 2]) >>> part = odl.RectPartition(rect, grid) >>> part.cell_boundary_vecs (array([ 0. , 0.5, 1. ]), array([-1. , -0.5, 1. , 2. ])) >>> part.cell_sizes_vecs (array([ 0.5, 0.5]), array([ 0.5, 1.5, 1. ]))
[ "Return", "the", "cell", "sizes", "as", "coordinate", "vectors", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L366-L403
231,776
odlgroup/odl
odl/discr/partition.py
RectPartition.cell_sides
def cell_sides(self): """Side lengths of all 'inner' cells of a uniform partition. Only defined if ``self.grid`` is uniform. Examples -------- We create a partition of the rectangle [0, 1] x [-1, 2] into 3 x 3 cells, where the grid points lie on the boundary. This means that the grid points are [0, 0.5, 1] x [-1, 0.5, 2], i.e. the inner cell has side lengths 0.5 x 1.5: >>> rect = odl.IntervalProd([0, -1], [1, 2]) >>> grid = odl.uniform_grid([0, -1], [1, 2], (3, 3)) >>> part = odl.RectPartition(rect, grid) >>> part.cell_sides array([ 0.5, 1.5]) """ sides = self.grid.stride sides[sides == 0] = self.extent[sides == 0] return sides
python
def cell_sides(self): sides = self.grid.stride sides[sides == 0] = self.extent[sides == 0] return sides
[ "def", "cell_sides", "(", "self", ")", ":", "sides", "=", "self", ".", "grid", ".", "stride", "sides", "[", "sides", "==", "0", "]", "=", "self", ".", "extent", "[", "sides", "==", "0", "]", "return", "sides" ]
Side lengths of all 'inner' cells of a uniform partition. Only defined if ``self.grid`` is uniform. Examples -------- We create a partition of the rectangle [0, 1] x [-1, 2] into 3 x 3 cells, where the grid points lie on the boundary. This means that the grid points are [0, 0.5, 1] x [-1, 0.5, 2], i.e. the inner cell has side lengths 0.5 x 1.5: >>> rect = odl.IntervalProd([0, -1], [1, 2]) >>> grid = odl.uniform_grid([0, -1], [1, 2], (3, 3)) >>> part = odl.RectPartition(rect, grid) >>> part.cell_sides array([ 0.5, 1.5])
[ "Side", "lengths", "of", "all", "inner", "cells", "of", "a", "uniform", "partition", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L406-L426
231,777
odlgroup/odl
odl/discr/partition.py
RectPartition.approx_equals
def approx_equals(self, other, atol): """Return ``True`` in case of approximate equality. Returns ------- approx_eq : bool ``True`` if ``other`` is a `RectPartition` instance with ``self.set == other.set`` up to ``atol`` and ``self.grid == other.other`` up to ``atol``, ``False`` otherwise. """ if other is self: return True elif not isinstance(other, RectPartition): return False else: return (self.set.approx_equals(other.set, atol=atol) and self.grid.approx_equals(other.grid, atol=atol))
python
def approx_equals(self, other, atol): if other is self: return True elif not isinstance(other, RectPartition): return False else: return (self.set.approx_equals(other.set, atol=atol) and self.grid.approx_equals(other.grid, atol=atol))
[ "def", "approx_equals", "(", "self", ",", "other", ",", "atol", ")", ":", "if", "other", "is", "self", ":", "return", "True", "elif", "not", "isinstance", "(", "other", ",", "RectPartition", ")", ":", "return", "False", "else", ":", "return", "(", "sel...
Return ``True`` in case of approximate equality. Returns ------- approx_eq : bool ``True`` if ``other`` is a `RectPartition` instance with ``self.set == other.set`` up to ``atol`` and ``self.grid == other.other`` up to ``atol``, ``False`` otherwise.
[ "Return", "True", "in", "case", "of", "approximate", "equality", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L451-L467
231,778
odlgroup/odl
odl/discr/partition.py
RectPartition.insert
def insert(self, index, *parts): """Return a copy with ``parts`` inserted before ``index``. The given partitions are inserted (as a block) into ``self``, yielding a new partition whose number of dimensions is the sum of the numbers of dimensions of all involved partitions. Note that no changes are made in-place. Parameters ---------- index : int Index of the dimension before which ``other`` is to be inserted. Negative indices count backwards from ``self.ndim``. part1, ..., partN : `RectPartition` Partitions to be inserted into ``self``. Returns ------- newpart : `RectPartition` The enlarged partition. Examples -------- >>> part1 = odl.uniform_partition([0, -1], [1, 2], (3, 3)) >>> part2 = odl.uniform_partition(0, 1, 5) >>> part1.insert(1, part2) uniform_partition([ 0., 0., -1.], [ 1., 1., 2.], (3, 5, 3)) See Also -------- append """ if not all(isinstance(p, RectPartition) for p in parts): raise TypeError('`parts` must all be `RectPartition` instances, ' 'got ({})' ''.format(', '.join(repr(p) for p in parts))) newgrid = self.grid.insert(index, *(p.grid for p in parts)) newset = self.set.insert(index, *(p.set for p in parts)) return RectPartition(newset, newgrid)
python
def insert(self, index, *parts): if not all(isinstance(p, RectPartition) for p in parts): raise TypeError('`parts` must all be `RectPartition` instances, ' 'got ({})' ''.format(', '.join(repr(p) for p in parts))) newgrid = self.grid.insert(index, *(p.grid for p in parts)) newset = self.set.insert(index, *(p.set for p in parts)) return RectPartition(newset, newgrid)
[ "def", "insert", "(", "self", ",", "index", ",", "*", "parts", ")", ":", "if", "not", "all", "(", "isinstance", "(", "p", ",", "RectPartition", ")", "for", "p", "in", "parts", ")", ":", "raise", "TypeError", "(", "'`parts` must all be `RectPartition` insta...
Return a copy with ``parts`` inserted before ``index``. The given partitions are inserted (as a block) into ``self``, yielding a new partition whose number of dimensions is the sum of the numbers of dimensions of all involved partitions. Note that no changes are made in-place. Parameters ---------- index : int Index of the dimension before which ``other`` is to be inserted. Negative indices count backwards from ``self.ndim``. part1, ..., partN : `RectPartition` Partitions to be inserted into ``self``. Returns ------- newpart : `RectPartition` The enlarged partition. Examples -------- >>> part1 = odl.uniform_partition([0, -1], [1, 2], (3, 3)) >>> part2 = odl.uniform_partition(0, 1, 5) >>> part1.insert(1, part2) uniform_partition([ 0., 0., -1.], [ 1., 1., 2.], (3, 5, 3)) See Also -------- append
[ "Return", "a", "copy", "with", "parts", "inserted", "before", "index", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L591-L631
231,779
odlgroup/odl
odl/discr/partition.py
RectPartition.index
def index(self, value, floating=False): """Return the index of a value in the domain. Parameters ---------- value : ``self.set`` element Point whose index to find. floating : bool, optional If True, then the index should also give the position inside the voxel. This is given by returning the integer valued index of the voxel plus the distance from the left cell boundary as a fraction of the full cell size. Returns ------- index : int, float, tuple of int or tuple of float Index of the value, as counted from the left. If ``self.ndim > 1`` the result is a tuple, else a scalar. If ``floating=True`` the scalar is a float, else an int. Examples -------- Get the indices of start and end: >>> p = odl.uniform_partition(0, 2, 5) >>> p.index(0) 0 >>> p.index(2) 4 For points inside voxels, the index of the containing cell is returned: >>> p.index(0.2) 0 By using the ``floating`` argument, partial positions inside the voxels can instead be determined: >>> p.index(0.2, floating=True) 0.5 These indices work with indexing, extracting the voxel in which the point lies: >>> p[p.index(0.1)] uniform_partition(0.0, 0.4, 1) The same principle also works in higher dimensions: >>> p = uniform_partition([0, -1], [1, 2], (4, 1)) >>> p.index([0.5, 2]) (2, 0) >>> p[p.index([0.5, 2])] uniform_partition([ 0.5, -1. ], [ 0.75, 2. ], (1, 1)) """ value = np.atleast_1d(self.set.element(value)) result = [] for val, cell_bdry_vec in zip(value, self.cell_boundary_vecs): ind = np.searchsorted(cell_bdry_vec, val) if floating: if cell_bdry_vec[ind] == val: # Value is on top of edge result.append(float(ind)) else: # interpolate between csize = float(cell_bdry_vec[ind] - cell_bdry_vec[ind - 1]) result.append(ind - (cell_bdry_vec[ind] - val) / csize) else: if cell_bdry_vec[ind] == val and ind != len(cell_bdry_vec) - 1: # Value is on top of edge, but not last edge result.append(ind) else: result.append(ind - 1) if self.ndim == 1: result = result[0] else: result = tuple(result) return result
python
def index(self, value, floating=False): value = np.atleast_1d(self.set.element(value)) result = [] for val, cell_bdry_vec in zip(value, self.cell_boundary_vecs): ind = np.searchsorted(cell_bdry_vec, val) if floating: if cell_bdry_vec[ind] == val: # Value is on top of edge result.append(float(ind)) else: # interpolate between csize = float(cell_bdry_vec[ind] - cell_bdry_vec[ind - 1]) result.append(ind - (cell_bdry_vec[ind] - val) / csize) else: if cell_bdry_vec[ind] == val and ind != len(cell_bdry_vec) - 1: # Value is on top of edge, but not last edge result.append(ind) else: result.append(ind - 1) if self.ndim == 1: result = result[0] else: result = tuple(result) return result
[ "def", "index", "(", "self", ",", "value", ",", "floating", "=", "False", ")", ":", "value", "=", "np", ".", "atleast_1d", "(", "self", ".", "set", ".", "element", "(", "value", ")", ")", "result", "=", "[", "]", "for", "val", ",", "cell_bdry_vec",...
Return the index of a value in the domain. Parameters ---------- value : ``self.set`` element Point whose index to find. floating : bool, optional If True, then the index should also give the position inside the voxel. This is given by returning the integer valued index of the voxel plus the distance from the left cell boundary as a fraction of the full cell size. Returns ------- index : int, float, tuple of int or tuple of float Index of the value, as counted from the left. If ``self.ndim > 1`` the result is a tuple, else a scalar. If ``floating=True`` the scalar is a float, else an int. Examples -------- Get the indices of start and end: >>> p = odl.uniform_partition(0, 2, 5) >>> p.index(0) 0 >>> p.index(2) 4 For points inside voxels, the index of the containing cell is returned: >>> p.index(0.2) 0 By using the ``floating`` argument, partial positions inside the voxels can instead be determined: >>> p.index(0.2, floating=True) 0.5 These indices work with indexing, extracting the voxel in which the point lies: >>> p[p.index(0.1)] uniform_partition(0.0, 0.4, 1) The same principle also works in higher dimensions: >>> p = uniform_partition([0, -1], [1, 2], (4, 1)) >>> p.index([0.5, 2]) (2, 0) >>> p[p.index([0.5, 2])] uniform_partition([ 0.5, -1. ], [ 0.75, 2. ], (1, 1))
[ "Return", "the", "index", "of", "a", "value", "in", "the", "domain", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L708-L787
231,780
odlgroup/odl
odl/discr/partition.py
RectPartition.byaxis
def byaxis(self): """Object to index ``self`` along axes. Examples -------- Indexing with integers or slices: >>> p = odl.uniform_partition([0, 1, 2], [1, 3, 5], (3, 5, 6)) >>> p.byaxis[0] uniform_partition(0.0, 1.0, 3) >>> p.byaxis[1] uniform_partition(1.0, 3.0, 5) >>> p.byaxis[2] uniform_partition(2.0, 5.0, 6) >>> p.byaxis[:] == p True >>> p.byaxis[1:] uniform_partition([ 1., 2.], [ 3., 5.], (5, 6)) Lists can be used to stack subpartitions arbitrarily: >>> p.byaxis[[0, 2, 0]] uniform_partition([ 0., 2., 0.], [ 1., 5., 1.], (3, 6, 3)) """ partition = self class RectPartitionByAxis(object): """Helper class for accessing `RectPartition` by axis.""" def __getitem__(self, indices): """Return ``self[indices]``.""" try: iter(indices) except TypeError: # Slice or integer slc = np.zeros(partition.ndim, dtype=object) slc[indices] = slice(None) squeeze_axes = np.where(slc == 0)[0] newpart = partition[tuple(slc)].squeeze(squeeze_axes) else: # Sequence, stack together from single-integer indexing indices = [int(i) for i in indices] byaxis = partition.byaxis parts = [byaxis[i] for i in indices] if not parts: newpart = uniform_partition([], [], ()) else: newpart = parts[0].append(*(parts[1:])) return newpart def __repr__(self): """Return ``repr(self)``. Examples -------- >>> p = odl.uniform_partition(0, 1, 5) >>> p.byaxis uniform_partition(0, 1, 5).byaxis """ return '{!r}.byaxis'.format(partition) return RectPartitionByAxis()
python
def byaxis(self): partition = self class RectPartitionByAxis(object): """Helper class for accessing `RectPartition` by axis.""" def __getitem__(self, indices): """Return ``self[indices]``.""" try: iter(indices) except TypeError: # Slice or integer slc = np.zeros(partition.ndim, dtype=object) slc[indices] = slice(None) squeeze_axes = np.where(slc == 0)[0] newpart = partition[tuple(slc)].squeeze(squeeze_axes) else: # Sequence, stack together from single-integer indexing indices = [int(i) for i in indices] byaxis = partition.byaxis parts = [byaxis[i] for i in indices] if not parts: newpart = uniform_partition([], [], ()) else: newpart = parts[0].append(*(parts[1:])) return newpart def __repr__(self): """Return ``repr(self)``. Examples -------- >>> p = odl.uniform_partition(0, 1, 5) >>> p.byaxis uniform_partition(0, 1, 5).byaxis """ return '{!r}.byaxis'.format(partition) return RectPartitionByAxis()
[ "def", "byaxis", "(", "self", ")", ":", "partition", "=", "self", "class", "RectPartitionByAxis", "(", "object", ")", ":", "\"\"\"Helper class for accessing `RectPartition` by axis.\"\"\"", "def", "__getitem__", "(", "self", ",", "indices", ")", ":", "\"\"\"Return ``s...
Object to index ``self`` along axes. Examples -------- Indexing with integers or slices: >>> p = odl.uniform_partition([0, 1, 2], [1, 3, 5], (3, 5, 6)) >>> p.byaxis[0] uniform_partition(0.0, 1.0, 3) >>> p.byaxis[1] uniform_partition(1.0, 3.0, 5) >>> p.byaxis[2] uniform_partition(2.0, 5.0, 6) >>> p.byaxis[:] == p True >>> p.byaxis[1:] uniform_partition([ 1., 2.], [ 3., 5.], (5, 6)) Lists can be used to stack subpartitions arbitrarily: >>> p.byaxis[[0, 2, 0]] uniform_partition([ 0., 2., 0.], [ 1., 5., 1.], (3, 6, 3))
[ "Object", "to", "index", "self", "along", "axes", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/partition.py#L790-L853
231,781
odlgroup/odl
odl/tomo/geometry/geometry.py
DivergentBeamGeometry.det_to_src
def det_to_src(self, angle, dparam, normalized=True): """Vector or direction from a detector location to the source. The unnormalized version of this vector is computed as follows:: vec = src_position(angle) - det_point_position(angle, dparam) Parameters ---------- angle : `array-like` or sequence One or several (Euler) angles in radians at which to evaluate. If ``motion_params.ndim >= 2``, a sequence of that length must be provided. dparam : `array-like` or sequence Detector parameter(s) at which to evaluate. If ``det_params.ndim >= 2``, a sequence of that length must be provided. Returns ------- det_to_src : `numpy.ndarray` Vector(s) pointing from a detector point to the source (at infinity). The shape of the returned array is obtained from the (broadcast) shapes of ``angle`` and ``dparam``, and broadcasting is supported within both parameters and between them. The precise definition of the shape is ``broadcast(bcast_angle, bcast_dparam).shape + (ndim,)``, where ``bcast_angle`` is - ``angle`` if `motion_params` is 1D, - ``broadcast(*angle)`` otherwise, and ``bcast_dparam`` defined analogously. Examples -------- The method works with single parameter values, in which case a single vector is returned: >>> apart = odl.uniform_partition(0, 2 * np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) >>> geom = odl.tomo.FanBeamGeometry(apart, dpart, src_radius=2, ... det_radius=3) >>> geom.det_to_src(0, 0) array([ 0., -1.]) >>> geom.det_to_src(0, 0, normalized=False) array([ 0., -5.]) >>> vec = geom.det_to_src(0, 1, normalized=False) >>> np.allclose(geom.det_point_position(0, 1) + vec, ... geom.src_position(0)) True >>> dir = geom.det_to_src(np.pi / 2, 0) >>> np.allclose(dir, [1, 0]) True >>> vec = geom.det_to_src(np.pi / 2, 0, normalized=False) >>> np.allclose(vec, [5, 0]) True Both variables support vectorized calls, i.e., stacks of parameters can be provided. 
The order of axes in the output (left of the ``ndim`` axis for the vector dimension) corresponds to the order of arguments: >>> dirs = geom.det_to_src(0, [-1, 0, 0.5, 1]) >>> dirs[1] array([ 0., -1.]) >>> dirs.shape # (num_dparams, ndim) (4, 2) >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], 0) >>> np.allclose(dirs, [[0, -1], ... [1, 0], ... [0, 1]]) True >>> dirs.shape # (num_angles, ndim) (3, 2) >>> # Providing 3 pairs of parameters, resulting in 3 vectors >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], [0, -1, 1]) >>> dirs[0] # Corresponds to angle = 0, dparam = 0 array([ 0., -1.]) >>> dirs.shape (3, 2) >>> # Pairs of parameters arranged in arrays of same size >>> geom.det_to_src(np.zeros((4, 5)), np.zeros((4, 5))).shape (4, 5, 2) >>> # "Outer product" type evaluation using broadcasting >>> geom.det_to_src(np.zeros((4, 1)), np.zeros((1, 5))).shape (4, 5, 2) """ # Always call the downstream methods with vectorized arguments # to be able to reliably manipulate the final axes of the result if self.motion_params.ndim == 1: squeeze_angle = (np.shape(angle) == ()) angle = np.array(angle, dtype=float, copy=False, ndmin=1) else: squeeze_angle = (np.broadcast(*angle).shape == ()) angle = tuple(np.array(a, dtype=float, copy=False, ndmin=1) for a in angle) if self.det_params.ndim == 1: squeeze_dparam = (np.shape(dparam) == ()) dparam = np.array(dparam, dtype=float, copy=False, ndmin=1) else: squeeze_dparam = (np.broadcast(*dparam).shape == ()) dparam = tuple(np.array(p, dtype=float, copy=False, ndmin=1) for p in dparam) det_to_src = (self.src_position(angle) - self.det_point_position(angle, dparam)) if normalized: det_to_src /= np.linalg.norm(det_to_src, axis=-1, keepdims=True) if squeeze_angle and squeeze_dparam: det_to_src = det_to_src.squeeze() return det_to_src
python
def det_to_src(self, angle, dparam, normalized=True): # Always call the downstream methods with vectorized arguments # to be able to reliably manipulate the final axes of the result if self.motion_params.ndim == 1: squeeze_angle = (np.shape(angle) == ()) angle = np.array(angle, dtype=float, copy=False, ndmin=1) else: squeeze_angle = (np.broadcast(*angle).shape == ()) angle = tuple(np.array(a, dtype=float, copy=False, ndmin=1) for a in angle) if self.det_params.ndim == 1: squeeze_dparam = (np.shape(dparam) == ()) dparam = np.array(dparam, dtype=float, copy=False, ndmin=1) else: squeeze_dparam = (np.broadcast(*dparam).shape == ()) dparam = tuple(np.array(p, dtype=float, copy=False, ndmin=1) for p in dparam) det_to_src = (self.src_position(angle) - self.det_point_position(angle, dparam)) if normalized: det_to_src /= np.linalg.norm(det_to_src, axis=-1, keepdims=True) if squeeze_angle and squeeze_dparam: det_to_src = det_to_src.squeeze() return det_to_src
[ "def", "det_to_src", "(", "self", ",", "angle", ",", "dparam", ",", "normalized", "=", "True", ")", ":", "# Always call the downstream methods with vectorized arguments", "# to be able to reliably manipulate the final axes of the result", "if", "self", ".", "motion_params", "...
Vector or direction from a detector location to the source. The unnormalized version of this vector is computed as follows:: vec = src_position(angle) - det_point_position(angle, dparam) Parameters ---------- angle : `array-like` or sequence One or several (Euler) angles in radians at which to evaluate. If ``motion_params.ndim >= 2``, a sequence of that length must be provided. dparam : `array-like` or sequence Detector parameter(s) at which to evaluate. If ``det_params.ndim >= 2``, a sequence of that length must be provided. Returns ------- det_to_src : `numpy.ndarray` Vector(s) pointing from a detector point to the source (at infinity). The shape of the returned array is obtained from the (broadcast) shapes of ``angle`` and ``dparam``, and broadcasting is supported within both parameters and between them. The precise definition of the shape is ``broadcast(bcast_angle, bcast_dparam).shape + (ndim,)``, where ``bcast_angle`` is - ``angle`` if `motion_params` is 1D, - ``broadcast(*angle)`` otherwise, and ``bcast_dparam`` defined analogously. Examples -------- The method works with single parameter values, in which case a single vector is returned: >>> apart = odl.uniform_partition(0, 2 * np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) >>> geom = odl.tomo.FanBeamGeometry(apart, dpart, src_radius=2, ... det_radius=3) >>> geom.det_to_src(0, 0) array([ 0., -1.]) >>> geom.det_to_src(0, 0, normalized=False) array([ 0., -5.]) >>> vec = geom.det_to_src(0, 1, normalized=False) >>> np.allclose(geom.det_point_position(0, 1) + vec, ... geom.src_position(0)) True >>> dir = geom.det_to_src(np.pi / 2, 0) >>> np.allclose(dir, [1, 0]) True >>> vec = geom.det_to_src(np.pi / 2, 0, normalized=False) >>> np.allclose(vec, [5, 0]) True Both variables support vectorized calls, i.e., stacks of parameters can be provided. 
The order of axes in the output (left of the ``ndim`` axis for the vector dimension) corresponds to the order of arguments: >>> dirs = geom.det_to_src(0, [-1, 0, 0.5, 1]) >>> dirs[1] array([ 0., -1.]) >>> dirs.shape # (num_dparams, ndim) (4, 2) >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], 0) >>> np.allclose(dirs, [[0, -1], ... [1, 0], ... [0, 1]]) True >>> dirs.shape # (num_angles, ndim) (3, 2) >>> # Providing 3 pairs of parameters, resulting in 3 vectors >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], [0, -1, 1]) >>> dirs[0] # Corresponds to angle = 0, dparam = 0 array([ 0., -1.]) >>> dirs.shape (3, 2) >>> # Pairs of parameters arranged in arrays of same size >>> geom.det_to_src(np.zeros((4, 5)), np.zeros((4, 5))).shape (4, 5, 2) >>> # "Outer product" type evaluation using broadcasting >>> geom.det_to_src(np.zeros((4, 1)), np.zeros((1, 5))).shape (4, 5, 2)
[ "Vector", "or", "direction", "from", "a", "detector", "location", "to", "the", "source", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/geometry.py#L439-L555
231,782
odlgroup/odl
odl/tomo/geometry/geometry.py
AxisOrientedGeometry.rotation_matrix
def rotation_matrix(self, angle): """Return the rotation matrix to the system state at ``angle``. The matrix is computed according to `Rodrigues' rotation formula <https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula>`_. Parameters ---------- angle : float or `array-like` Angle(s) in radians describing the counter-clockwise rotation of the system around `axis`. Returns ------- rot : `numpy.ndarray` The rotation matrix (or matrices) mapping vectors at the initial state to the ones in the state defined by ``angle``. The rotation is extrinsic, i.e., defined in the "world" coordinate system. If ``angle`` is a single parameter, the returned array has shape ``(3, 3)``, otherwise ``angle.shape + (3, 3)``. """ squeeze_out = (np.shape(angle) == ()) angle = np.array(angle, dtype=float, copy=False, ndmin=1) if (self.check_bounds and not is_inside_bounds(angle, self.motion_params)): raise ValueError('`angle` {} not in the valid range {}' ''.format(angle, self.motion_params)) matrix = axis_rotation_matrix(self.axis, angle) if squeeze_out: matrix = matrix.squeeze() return matrix
python
def rotation_matrix(self, angle): squeeze_out = (np.shape(angle) == ()) angle = np.array(angle, dtype=float, copy=False, ndmin=1) if (self.check_bounds and not is_inside_bounds(angle, self.motion_params)): raise ValueError('`angle` {} not in the valid range {}' ''.format(angle, self.motion_params)) matrix = axis_rotation_matrix(self.axis, angle) if squeeze_out: matrix = matrix.squeeze() return matrix
[ "def", "rotation_matrix", "(", "self", ",", "angle", ")", ":", "squeeze_out", "=", "(", "np", ".", "shape", "(", "angle", ")", "==", "(", ")", ")", "angle", "=", "np", ".", "array", "(", "angle", ",", "dtype", "=", "float", ",", "copy", "=", "Fal...
Return the rotation matrix to the system state at ``angle``. The matrix is computed according to `Rodrigues' rotation formula <https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula>`_. Parameters ---------- angle : float or `array-like` Angle(s) in radians describing the counter-clockwise rotation of the system around `axis`. Returns ------- rot : `numpy.ndarray` The rotation matrix (or matrices) mapping vectors at the initial state to the ones in the state defined by ``angle``. The rotation is extrinsic, i.e., defined in the "world" coordinate system. If ``angle`` is a single parameter, the returned array has shape ``(3, 3)``, otherwise ``angle.shape + (3, 3)``.
[ "Return", "the", "rotation", "matrix", "to", "the", "system", "state", "at", "angle", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/geometry.py#L585-L619
231,783
odlgroup/odl
odl/contrib/tomo/elekta.py
elekta_icon_geometry
def elekta_icon_geometry(sad=780.0, sdd=1000.0, piercing_point=(390.0, 0.0), angles=None, num_angles=None, detector_shape=(780, 720)): """Tomographic geometry of the Elekta Icon CBCT system. See the [whitepaper]_ for specific descriptions of each parameter. All measurments are given in millimeters unless otherwise stated. Parameters ---------- sad : float, optional Source to Axis distance. sdd : float, optional Source to Detector distance. piercing_point : sequence of foat, optional Position in the detector (in pixel coordinates) that a beam from the source, passing through the axis of rotation perpendicularly, hits. angles : array-like, optional List of angles given in radians that the projection images were taken at. Exclusive with num_angles. Default: np.linspace(1.2, 5.0, 332) num_angles : int, optional Number of angles. Exclusive with angles. Default: 332 detector_shape : sequence of int, optional Shape of the detector (in pixels). Useful if a sub-sampled system should be studied. Returns ------- elekta_icon_geometry : `ConeFlatGeometry` Examples -------- Create default geometry: >>> from odl.contrib import tomo >>> geometry = tomo.elekta_icon_geometry() Use a smaller detector (improves efficiency): >>> small_geometry = tomo.elekta_icon_geometry(detector_shape=[100, 100]) See Also -------- elekta_icon_space : Default reconstruction space for the Elekta Icon CBCT. elekta_icon_fbp: Default reconstruction method for the Elekta Icon CBCT. References ---------- .. 
[whitepaper] *Design and performance characteristics of a Cone Beam CT system for Leksell Gamma Knife Icon* """ sad = float(sad) assert sad > 0 sdd = float(sdd) assert sdd > sad piercing_point = np.array(piercing_point, dtype=float) assert piercing_point.shape == (2,) if angles is not None and num_angles is not None: raise ValueError('cannot provide both `angles` and `num_angles`') elif angles is not None: angles = odl.nonuniform_partition(angles) assert angles.ndim == 1 elif num_angles is not None: angles = odl.uniform_partition(1.2, 5.0, num_angles) else: angles = odl.uniform_partition(1.2, 5.0, 332) detector_shape = np.array(detector_shape, dtype=int) # Constant system parameters pixel_size = 0.368 det_extent_mm = np.array([287.04, 264.96]) # Compute the detector partition piercing_point_mm = pixel_size * piercing_point det_min_pt = -piercing_point_mm det_max_pt = det_min_pt + det_extent_mm detector_partition = odl.uniform_partition(min_pt=det_min_pt, max_pt=det_max_pt, shape=detector_shape) # Create the geometry geometry = odl.tomo.ConeFlatGeometry( angles, detector_partition, src_radius=sad, det_radius=sdd - sad) return geometry
python
def elekta_icon_geometry(sad=780.0, sdd=1000.0, piercing_point=(390.0, 0.0), angles=None, num_angles=None, detector_shape=(780, 720)): sad = float(sad) assert sad > 0 sdd = float(sdd) assert sdd > sad piercing_point = np.array(piercing_point, dtype=float) assert piercing_point.shape == (2,) if angles is not None and num_angles is not None: raise ValueError('cannot provide both `angles` and `num_angles`') elif angles is not None: angles = odl.nonuniform_partition(angles) assert angles.ndim == 1 elif num_angles is not None: angles = odl.uniform_partition(1.2, 5.0, num_angles) else: angles = odl.uniform_partition(1.2, 5.0, 332) detector_shape = np.array(detector_shape, dtype=int) # Constant system parameters pixel_size = 0.368 det_extent_mm = np.array([287.04, 264.96]) # Compute the detector partition piercing_point_mm = pixel_size * piercing_point det_min_pt = -piercing_point_mm det_max_pt = det_min_pt + det_extent_mm detector_partition = odl.uniform_partition(min_pt=det_min_pt, max_pt=det_max_pt, shape=detector_shape) # Create the geometry geometry = odl.tomo.ConeFlatGeometry( angles, detector_partition, src_radius=sad, det_radius=sdd - sad) return geometry
[ "def", "elekta_icon_geometry", "(", "sad", "=", "780.0", ",", "sdd", "=", "1000.0", ",", "piercing_point", "=", "(", "390.0", ",", "0.0", ")", ",", "angles", "=", "None", ",", "num_angles", "=", "None", ",", "detector_shape", "=", "(", "780", ",", "720...
Tomographic geometry of the Elekta Icon CBCT system. See the [whitepaper]_ for specific descriptions of each parameter. All measurments are given in millimeters unless otherwise stated. Parameters ---------- sad : float, optional Source to Axis distance. sdd : float, optional Source to Detector distance. piercing_point : sequence of foat, optional Position in the detector (in pixel coordinates) that a beam from the source, passing through the axis of rotation perpendicularly, hits. angles : array-like, optional List of angles given in radians that the projection images were taken at. Exclusive with num_angles. Default: np.linspace(1.2, 5.0, 332) num_angles : int, optional Number of angles. Exclusive with angles. Default: 332 detector_shape : sequence of int, optional Shape of the detector (in pixels). Useful if a sub-sampled system should be studied. Returns ------- elekta_icon_geometry : `ConeFlatGeometry` Examples -------- Create default geometry: >>> from odl.contrib import tomo >>> geometry = tomo.elekta_icon_geometry() Use a smaller detector (improves efficiency): >>> small_geometry = tomo.elekta_icon_geometry(detector_shape=[100, 100]) See Also -------- elekta_icon_space : Default reconstruction space for the Elekta Icon CBCT. elekta_icon_fbp: Default reconstruction method for the Elekta Icon CBCT. References ---------- .. [whitepaper] *Design and performance characteristics of a Cone Beam CT system for Leksell Gamma Knife Icon*
[ "Tomographic", "geometry", "of", "the", "Elekta", "Icon", "CBCT", "system", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/tomo/elekta.py#L23-L114
231,784
odlgroup/odl
odl/contrib/tomo/elekta.py
elekta_icon_space
def elekta_icon_space(shape=(448, 448, 448), **kwargs): """Default reconstruction space for the Elekta Icon CBCT. See the [whitepaper]_ for further information. Parameters ---------- shape : sequence of int, optional Shape of the space, in voxels. kwargs : Keyword arguments to pass to `uniform_discr` to modify the space, e.g. use another backend. By default, the dtype is set to float32. Returns ------- elekta_icon_space : `DiscreteLp` Examples -------- Create default space: >>> from odl.contrib import tomo >>> space = tomo.elekta_icon_space() Create sub-sampled space: >>> space = tomo.elekta_icon_space(shape=(100, 100, 100)) See Also -------- elekta_icon_geometry: Geometry for the Elekta Icon CBCT. elekta_icon_fbp: Default reconstruction method for the Elekta Icon CBCT. References ---------- .. [whitepaper] *Design and performance characteristics of a Cone Beam CT system for Leksell Gamma Knife Icon* """ if 'dtype' not in kwargs: kwargs['dtype'] = 'float32' return odl.uniform_discr(min_pt=[-112.0, -112.0, 0.0], max_pt=[112.0, 112.0, 224.0], shape=shape, **kwargs)
python
def elekta_icon_space(shape=(448, 448, 448), **kwargs): if 'dtype' not in kwargs: kwargs['dtype'] = 'float32' return odl.uniform_discr(min_pt=[-112.0, -112.0, 0.0], max_pt=[112.0, 112.0, 224.0], shape=shape, **kwargs)
[ "def", "elekta_icon_space", "(", "shape", "=", "(", "448", ",", "448", ",", "448", ")", ",", "*", "*", "kwargs", ")", ":", "if", "'dtype'", "not", "in", "kwargs", ":", "kwargs", "[", "'dtype'", "]", "=", "'float32'", "return", "odl", ".", "uniform_di...
Default reconstruction space for the Elekta Icon CBCT. See the [whitepaper]_ for further information. Parameters ---------- shape : sequence of int, optional Shape of the space, in voxels. kwargs : Keyword arguments to pass to `uniform_discr` to modify the space, e.g. use another backend. By default, the dtype is set to float32. Returns ------- elekta_icon_space : `DiscreteLp` Examples -------- Create default space: >>> from odl.contrib import tomo >>> space = tomo.elekta_icon_space() Create sub-sampled space: >>> space = tomo.elekta_icon_space(shape=(100, 100, 100)) See Also -------- elekta_icon_geometry: Geometry for the Elekta Icon CBCT. elekta_icon_fbp: Default reconstruction method for the Elekta Icon CBCT. References ---------- .. [whitepaper] *Design and performance characteristics of a Cone Beam CT system for Leksell Gamma Knife Icon*
[ "Default", "reconstruction", "space", "for", "the", "Elekta", "Icon", "CBCT", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/tomo/elekta.py#L117-L160
231,785
odlgroup/odl
odl/contrib/tomo/elekta.py
elekta_icon_fbp
def elekta_icon_fbp(ray_transform, padding=False, filter_type='Hann', frequency_scaling=0.6, parker_weighting=True): """Approximation of the FDK reconstruction used in the Elekta Icon. Parameters ---------- ray_transform : `RayTransform` The ray transform to be used, should have an Elekta Icon geometry. padding : bool, optional Whether the FBP filter should use padding, increases memory use significantly. filter_type : str, optional Type of filter to apply in the FBP filter. frequency_scaling : float, optional Frequency scaling for FBP filter. parker_weighting : bool, optional Whether Parker weighting should be applied to compensate for partial scan. Returns ------- elekta_icon_fbp : `DiscreteLp` Examples -------- Create default FBP for default geometry: >>> from odl.contrib import tomo >>> geometry = tomo.elekta_icon_geometry() >>> space = tomo.elekta_icon_space() >>> ray_transform = odl.tomo.RayTransform(space, geometry) >>> fbp_op = tomo.elekta_icon_fbp(ray_transform) """ fbp_op = odl.tomo.fbp_op(ray_transform, padding=padding, filter_type=filter_type, frequency_scaling=frequency_scaling) if parker_weighting: parker_weighting = odl.tomo.parker_weighting(ray_transform) fbp_op = fbp_op * parker_weighting return fbp_op
python
def elekta_icon_fbp(ray_transform, padding=False, filter_type='Hann', frequency_scaling=0.6, parker_weighting=True): fbp_op = odl.tomo.fbp_op(ray_transform, padding=padding, filter_type=filter_type, frequency_scaling=frequency_scaling) if parker_weighting: parker_weighting = odl.tomo.parker_weighting(ray_transform) fbp_op = fbp_op * parker_weighting return fbp_op
[ "def", "elekta_icon_fbp", "(", "ray_transform", ",", "padding", "=", "False", ",", "filter_type", "=", "'Hann'", ",", "frequency_scaling", "=", "0.6", ",", "parker_weighting", "=", "True", ")", ":", "fbp_op", "=", "odl", ".", "tomo", ".", "fbp_op", "(", "r...
Approximation of the FDK reconstruction used in the Elekta Icon. Parameters ---------- ray_transform : `RayTransform` The ray transform to be used, should have an Elekta Icon geometry. padding : bool, optional Whether the FBP filter should use padding, increases memory use significantly. filter_type : str, optional Type of filter to apply in the FBP filter. frequency_scaling : float, optional Frequency scaling for FBP filter. parker_weighting : bool, optional Whether Parker weighting should be applied to compensate for partial scan. Returns ------- elekta_icon_fbp : `DiscreteLp` Examples -------- Create default FBP for default geometry: >>> from odl.contrib import tomo >>> geometry = tomo.elekta_icon_geometry() >>> space = tomo.elekta_icon_space() >>> ray_transform = odl.tomo.RayTransform(space, geometry) >>> fbp_op = tomo.elekta_icon_fbp(ray_transform)
[ "Approximation", "of", "the", "FDK", "reconstruction", "used", "in", "the", "Elekta", "Icon", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/tomo/elekta.py#L163-L205
231,786
odlgroup/odl
odl/contrib/tomo/elekta.py
elekta_xvi_space
def elekta_xvi_space(shape=(512, 512, 512), **kwargs): """Default reconstruction space for the Elekta XVI CBCT. Parameters ---------- shape : sequence of int, optional Shape of the space, in voxels. kwargs : Keyword arguments to pass to `uniform_discr` to modify the space, e.g. use another backend. By default, the dtype is set to float32. Returns ------- elekta_xvi_space : `DiscreteLp` Examples -------- Create default space: >>> from odl.contrib import tomo >>> space = tomo.elekta_xvi_space() Create sub-sampled space: >>> space = tomo.elekta_xvi_space(shape=(100, 100, 100)) See Also -------- elekta_xvi_geometry: Geometry for the Elekta XVI CBCT. elekta_xvi_fbp: Default reconstruction method for the Elekta XVI CBCT. """ if 'dtype' not in kwargs: kwargs['dtype'] = 'float32' return odl.uniform_discr(min_pt=[-128.0, -128, -128.0], max_pt=[128.0, 128.0, 128.0], shape=shape, **kwargs)
python
def elekta_xvi_space(shape=(512, 512, 512), **kwargs): if 'dtype' not in kwargs: kwargs['dtype'] = 'float32' return odl.uniform_discr(min_pt=[-128.0, -128, -128.0], max_pt=[128.0, 128.0, 128.0], shape=shape, **kwargs)
[ "def", "elekta_xvi_space", "(", "shape", "=", "(", "512", ",", "512", ",", "512", ")", ",", "*", "*", "kwargs", ")", ":", "if", "'dtype'", "not", "in", "kwargs", ":", "kwargs", "[", "'dtype'", "]", "=", "'float32'", "return", "odl", ".", "uniform_dis...
Default reconstruction space for the Elekta XVI CBCT. Parameters ---------- shape : sequence of int, optional Shape of the space, in voxels. kwargs : Keyword arguments to pass to `uniform_discr` to modify the space, e.g. use another backend. By default, the dtype is set to float32. Returns ------- elekta_xvi_space : `DiscreteLp` Examples -------- Create default space: >>> from odl.contrib import tomo >>> space = tomo.elekta_xvi_space() Create sub-sampled space: >>> space = tomo.elekta_xvi_space(shape=(100, 100, 100)) See Also -------- elekta_xvi_geometry: Geometry for the Elekta XVI CBCT. elekta_xvi_fbp: Default reconstruction method for the Elekta XVI CBCT.
[ "Default", "reconstruction", "space", "for", "the", "Elekta", "XVI", "CBCT", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/tomo/elekta.py#L295-L331
231,787
odlgroup/odl
odl/contrib/tomo/elekta.py
elekta_xvi_fbp
def elekta_xvi_fbp(ray_transform, padding=False, filter_type='Hann', frequency_scaling=0.6): """Approximation of the FDK reconstruction used in the Elekta XVI. Parameters ---------- ray_transform : `RayTransform` The ray transform to be used, should have an Elekta XVI geometry. padding : bool, optional Whether the FBP filter should use padding, increases memory use significantly. filter_type : str, optional Type of filter to apply in the FBP filter. frequency_scaling : float, optional Frequency scaling for FBP filter. Returns ------- elekta_xvi_fbp : `DiscreteLp` Examples -------- Create default FBP for default geometry: >>> from odl.contrib import tomo >>> geometry = tomo.elekta_xvi_geometry() >>> space = tomo.elekta_xvi_space() >>> ray_transform = odl.tomo.RayTransform(space, geometry) >>> fbp_op = tomo.elekta_xvi_fbp(ray_transform) """ fbp_op = odl.tomo.fbp_op(ray_transform, padding=padding, filter_type=filter_type, frequency_scaling=frequency_scaling) return fbp_op
python
def elekta_xvi_fbp(ray_transform, padding=False, filter_type='Hann', frequency_scaling=0.6): fbp_op = odl.tomo.fbp_op(ray_transform, padding=padding, filter_type=filter_type, frequency_scaling=frequency_scaling) return fbp_op
[ "def", "elekta_xvi_fbp", "(", "ray_transform", ",", "padding", "=", "False", ",", "filter_type", "=", "'Hann'", ",", "frequency_scaling", "=", "0.6", ")", ":", "fbp_op", "=", "odl", ".", "tomo", ".", "fbp_op", "(", "ray_transform", ",", "padding", "=", "pa...
Approximation of the FDK reconstruction used in the Elekta XVI. Parameters ---------- ray_transform : `RayTransform` The ray transform to be used, should have an Elekta XVI geometry. padding : bool, optional Whether the FBP filter should use padding, increases memory use significantly. filter_type : str, optional Type of filter to apply in the FBP filter. frequency_scaling : float, optional Frequency scaling for FBP filter. Returns ------- elekta_xvi_fbp : `DiscreteLp` Examples -------- Create default FBP for default geometry: >>> from odl.contrib import tomo >>> geometry = tomo.elekta_xvi_geometry() >>> space = tomo.elekta_xvi_space() >>> ray_transform = odl.tomo.RayTransform(space, geometry) >>> fbp_op = tomo.elekta_xvi_fbp(ray_transform)
[ "Approximation", "of", "the", "FDK", "reconstruction", "used", "in", "the", "Elekta", "XVI", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/tomo/elekta.py#L334-L369
231,788
odlgroup/odl
odl/phantom/transmission.py
_modified_shepp_logan_ellipsoids
def _modified_shepp_logan_ellipsoids(ellipsoids): """Modify ellipsoids to give the modified Shepp-Logan phantom. Works for both 2d and 3d. """ intensities = [1.0, -0.8, -0.2, -0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] # Add minimal numbers to ensure that the result is nowhere negative. # This is needed due to numerical issues. intensities[2] += 5e-17 intensities[3] += 5e-17 assert len(ellipsoids) == len(intensities) for ellipsoid, intensity in zip(ellipsoids, intensities): ellipsoid[0] = intensity
python
def _modified_shepp_logan_ellipsoids(ellipsoids): intensities = [1.0, -0.8, -0.2, -0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1] # Add minimal numbers to ensure that the result is nowhere negative. # This is needed due to numerical issues. intensities[2] += 5e-17 intensities[3] += 5e-17 assert len(ellipsoids) == len(intensities) for ellipsoid, intensity in zip(ellipsoids, intensities): ellipsoid[0] = intensity
[ "def", "_modified_shepp_logan_ellipsoids", "(", "ellipsoids", ")", ":", "intensities", "=", "[", "1.0", ",", "-", "0.8", ",", "-", "0.2", ",", "-", "0.2", ",", "0.1", ",", "0.1", ",", "0.1", ",", "0.1", ",", "0.1", ",", "0.1", "]", "# Add minimal numbe...
Modify ellipsoids to give the modified Shepp-Logan phantom. Works for both 2d and 3d.
[ "Modify", "ellipsoids", "to", "give", "the", "modified", "Shepp", "-", "Logan", "phantom", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/transmission.py#L61-L76
231,789
odlgroup/odl
odl/phantom/transmission.py
shepp_logan_ellipsoids
def shepp_logan_ellipsoids(ndim, modified=False): """Ellipsoids for the standard Shepp-Logan phantom in 2 or 3 dimensions. Parameters ---------- ndim : {2, 3} Dimension of the space the ellipsoids should be in. modified : bool, optional True if the modified Shepp-Logan phantom should be given. The modified phantom has greatly amplified contrast to aid visualization. See Also -------- odl.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoids phantoms shepp_logan : Create a phantom with these ellipsoids References ---------- .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom """ if ndim == 2: ellipsoids = _shepp_logan_ellipse_2d() elif ndim == 3: ellipsoids = _shepp_logan_ellipsoids_3d() else: raise ValueError('dimension not 2 or 3, no phantom available') if modified: _modified_shepp_logan_ellipsoids(ellipsoids) return ellipsoids
python
def shepp_logan_ellipsoids(ndim, modified=False): if ndim == 2: ellipsoids = _shepp_logan_ellipse_2d() elif ndim == 3: ellipsoids = _shepp_logan_ellipsoids_3d() else: raise ValueError('dimension not 2 or 3, no phantom available') if modified: _modified_shepp_logan_ellipsoids(ellipsoids) return ellipsoids
[ "def", "shepp_logan_ellipsoids", "(", "ndim", ",", "modified", "=", "False", ")", ":", "if", "ndim", "==", "2", ":", "ellipsoids", "=", "_shepp_logan_ellipse_2d", "(", ")", "elif", "ndim", "==", "3", ":", "ellipsoids", "=", "_shepp_logan_ellipsoids_3d", "(", ...
Ellipsoids for the standard Shepp-Logan phantom in 2 or 3 dimensions. Parameters ---------- ndim : {2, 3} Dimension of the space the ellipsoids should be in. modified : bool, optional True if the modified Shepp-Logan phantom should be given. The modified phantom has greatly amplified contrast to aid visualization. See Also -------- odl.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoids phantoms shepp_logan : Create a phantom with these ellipsoids References ---------- .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom
[ "Ellipsoids", "for", "the", "standard", "Shepp", "-", "Logan", "phantom", "in", "2", "or", "3", "dimensions", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/transmission.py#L79-L111
231,790
odlgroup/odl
odl/phantom/transmission.py
shepp_logan
def shepp_logan(space, modified=False, min_pt=None, max_pt=None): """Standard Shepp-Logan phantom in 2 or 3 dimensions. Parameters ---------- space : `DiscreteLp` Space in which the phantom is created, must be 2- or 3-dimensional. If ``space.shape`` is 1 in an axis, a corresponding slice of the phantom is created. modified : `bool`, optional True if the modified Shepp-Logan phantom should be given. The modified phantom has greatly amplified contrast to aid visualization. min_pt, max_pt : array-like, optional If provided, use these vectors to determine the bounding box of the phantom instead of ``space.min_pt`` and ``space.max_pt``. It is currently required that ``min_pt >= space.min_pt`` and ``max_pt <= space.max_pt``, i.e., shifting or scaling outside the original space is not allowed. Providing one of them results in a shift, e.g., for ``min_pt``:: new_min_pt = min_pt new_max_pt = space.max_pt + (min_pt - space.min_pt) Providing both results in a scaled version of the phantom. See Also -------- forbild : Similar phantom but with more complexity. Only supports 2d. odl.phantom.geometric.defrise : Geometry test phantom shepp_logan_ellipsoids : Get the parameters that define this phantom odl.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoid phantoms References ---------- .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom """ ellipsoids = shepp_logan_ellipsoids(space.ndim, modified) return ellipsoid_phantom(space, ellipsoids, min_pt, max_pt)
python
def shepp_logan(space, modified=False, min_pt=None, max_pt=None): ellipsoids = shepp_logan_ellipsoids(space.ndim, modified) return ellipsoid_phantom(space, ellipsoids, min_pt, max_pt)
[ "def", "shepp_logan", "(", "space", ",", "modified", "=", "False", ",", "min_pt", "=", "None", ",", "max_pt", "=", "None", ")", ":", "ellipsoids", "=", "shepp_logan_ellipsoids", "(", "space", ".", "ndim", ",", "modified", ")", "return", "ellipsoid_phantom", ...
Standard Shepp-Logan phantom in 2 or 3 dimensions. Parameters ---------- space : `DiscreteLp` Space in which the phantom is created, must be 2- or 3-dimensional. If ``space.shape`` is 1 in an axis, a corresponding slice of the phantom is created. modified : `bool`, optional True if the modified Shepp-Logan phantom should be given. The modified phantom has greatly amplified contrast to aid visualization. min_pt, max_pt : array-like, optional If provided, use these vectors to determine the bounding box of the phantom instead of ``space.min_pt`` and ``space.max_pt``. It is currently required that ``min_pt >= space.min_pt`` and ``max_pt <= space.max_pt``, i.e., shifting or scaling outside the original space is not allowed. Providing one of them results in a shift, e.g., for ``min_pt``:: new_min_pt = min_pt new_max_pt = space.max_pt + (min_pt - space.min_pt) Providing both results in a scaled version of the phantom. See Also -------- forbild : Similar phantom but with more complexity. Only supports 2d. odl.phantom.geometric.defrise : Geometry test phantom shepp_logan_ellipsoids : Get the parameters that define this phantom odl.phantom.geometric.ellipsoid_phantom : Function for creating arbitrary ellipsoid phantoms References ---------- .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom
[ "Standard", "Shepp", "-", "Logan", "phantom", "in", "2", "or", "3", "dimensions", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/transmission.py#L114-L154
231,791
odlgroup/odl
odl/discr/lp_discr.py
_scaling_func_list
def _scaling_func_list(bdry_fracs, exponent): """Return a list of lists of scaling functions for the boundary.""" def scaling(factor): def scaling_func(x): return x * factor return scaling_func func_list = [] for frac_l, frac_r in bdry_fracs: func_list_entry = [] if np.isclose(frac_l, 1.0): func_list_entry.append(None) else: func_list_entry.append(scaling(frac_l ** (1 / exponent))) if np.isclose(frac_r, 1.0): func_list_entry.append(None) else: func_list_entry.append(scaling(frac_r ** (1 / exponent))) func_list.append(func_list_entry) return func_list
python
def _scaling_func_list(bdry_fracs, exponent): def scaling(factor): def scaling_func(x): return x * factor return scaling_func func_list = [] for frac_l, frac_r in bdry_fracs: func_list_entry = [] if np.isclose(frac_l, 1.0): func_list_entry.append(None) else: func_list_entry.append(scaling(frac_l ** (1 / exponent))) if np.isclose(frac_r, 1.0): func_list_entry.append(None) else: func_list_entry.append(scaling(frac_r ** (1 / exponent))) func_list.append(func_list_entry) return func_list
[ "def", "_scaling_func_list", "(", "bdry_fracs", ",", "exponent", ")", ":", "def", "scaling", "(", "factor", ")", ":", "def", "scaling_func", "(", "x", ")", ":", "return", "x", "*", "factor", "return", "scaling_func", "func_list", "=", "[", "]", "for", "f...
Return a list of lists of scaling functions for the boundary.
[ "Return", "a", "list", "of", "lists", "of", "scaling", "functions", "for", "the", "boundary", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/lp_discr.py#L1893-L1914
231,792
odlgroup/odl
odl/discr/lp_discr.py
DiscreteLp.interp
def interp(self): """Interpolation type of this discretization.""" if self.ndim == 0: return 'nearest' elif all(interp == self.interp_byaxis[0] for interp in self.interp_byaxis): return self.interp_byaxis[0] else: return self.interp_byaxis
python
def interp(self): if self.ndim == 0: return 'nearest' elif all(interp == self.interp_byaxis[0] for interp in self.interp_byaxis): return self.interp_byaxis[0] else: return self.interp_byaxis
[ "def", "interp", "(", "self", ")", ":", "if", "self", ".", "ndim", "==", "0", ":", "return", "'nearest'", "elif", "all", "(", "interp", "==", "self", ".", "interp_byaxis", "[", "0", "]", "for", "interp", "in", "self", ".", "interp_byaxis", ")", ":", ...
Interpolation type of this discretization.
[ "Interpolation", "type", "of", "this", "discretization", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/lp_discr.py#L142-L150
231,793
odlgroup/odl
odl/discr/lp_discr.py
DiscreteLp.tangent_bundle
def tangent_bundle(self): """The tangent bundle associated with `domain` using `partition`. The tangent bundle of a space ``X`` of functions ``R^d --> F`` can be interpreted as the space of vector-valued functions ``R^d --> F^d``. This space can be identified with the power space ``X^d`` as used in this implementation. """ if self.ndim == 0: return ProductSpace(field=self.field) else: return ProductSpace(self, self.ndim)
python
def tangent_bundle(self): if self.ndim == 0: return ProductSpace(field=self.field) else: return ProductSpace(self, self.ndim)
[ "def", "tangent_bundle", "(", "self", ")", ":", "if", "self", ".", "ndim", "==", "0", ":", "return", "ProductSpace", "(", "field", "=", "self", ".", "field", ")", "else", ":", "return", "ProductSpace", "(", "self", ",", "self", ".", "ndim", ")" ]
The tangent bundle associated with `domain` using `partition`. The tangent bundle of a space ``X`` of functions ``R^d --> F`` can be interpreted as the space of vector-valued functions ``R^d --> F^d``. This space can be identified with the power space ``X^d`` as used in this implementation.
[ "The", "tangent", "bundle", "associated", "with", "domain", "using", "partition", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/lp_discr.py#L252-L263
231,794
odlgroup/odl
odl/discr/lp_discr.py
DiscreteLp.is_uniformly_weighted
def is_uniformly_weighted(self): """``True`` if the weighting is the same for all space points.""" try: is_uniformly_weighted = self.__is_uniformly_weighted except AttributeError: bdry_fracs = self.partition.boundary_cell_fractions is_uniformly_weighted = ( np.allclose(bdry_fracs, 1.0) or self.exponent == float('inf') or not getattr(self.tspace, 'is_weighted', False)) self.__is_uniformly_weighted = is_uniformly_weighted return is_uniformly_weighted
python
def is_uniformly_weighted(self): try: is_uniformly_weighted = self.__is_uniformly_weighted except AttributeError: bdry_fracs = self.partition.boundary_cell_fractions is_uniformly_weighted = ( np.allclose(bdry_fracs, 1.0) or self.exponent == float('inf') or not getattr(self.tspace, 'is_weighted', False)) self.__is_uniformly_weighted = is_uniformly_weighted return is_uniformly_weighted
[ "def", "is_uniformly_weighted", "(", "self", ")", ":", "try", ":", "is_uniformly_weighted", "=", "self", ".", "__is_uniformly_weighted", "except", "AttributeError", ":", "bdry_fracs", "=", "self", ".", "partition", ".", "boundary_cell_fractions", "is_uniformly_weighted"...
``True`` if the weighting is the same for all space points.
[ "True", "if", "the", "weighting", "is", "the", "same", "for", "all", "space", "points", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/lp_discr.py#L266-L279
231,795
odlgroup/odl
odl/discr/lp_discr.py
DiscreteLpElement.imag
def imag(self, newimag): """Set the imaginary part of this element to ``newimag``. This method is invoked by ``x.imag = other``. Parameters ---------- newimag : array-like or scalar Values to be assigned to the imaginary part of this element. Raises ------ ValueError If the space is real, i.e., no imagninary part can be set. """ if self.space.is_real: raise ValueError('cannot set imaginary part in real spaces') self.tensor.imag = newimag
python
def imag(self, newimag): if self.space.is_real: raise ValueError('cannot set imaginary part in real spaces') self.tensor.imag = newimag
[ "def", "imag", "(", "self", ",", "newimag", ")", ":", "if", "self", ".", "space", ".", "is_real", ":", "raise", "ValueError", "(", "'cannot set imaginary part in real spaces'", ")", "self", ".", "tensor", ".", "imag", "=", "newimag" ]
Set the imaginary part of this element to ``newimag``. This method is invoked by ``x.imag = other``. Parameters ---------- newimag : array-like or scalar Values to be assigned to the imaginary part of this element. Raises ------ ValueError If the space is real, i.e., no imagninary part can be set.
[ "Set", "the", "imaginary", "part", "of", "this", "element", "to", "newimag", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/lp_discr.py#L697-L714
231,796
odlgroup/odl
odl/discr/lp_discr.py
DiscreteLpElement.conj
def conj(self, out=None): """Complex conjugate of this element. Parameters ---------- out : `DiscreteLpElement`, optional Element to which the complex conjugate is written. Must be an element of this element's space. Returns ------- out : `DiscreteLpElement` The complex conjugate element. If ``out`` is provided, the returned object is a reference to it. Examples -------- >>> discr = uniform_discr(0, 1, 4, dtype=complex) >>> x = discr.element([5+1j, 3, 2-2j, 1j]) >>> y = x.conj() >>> print(y) [ 5.-1.j, 3.-0.j, 2.+2.j, 0.-1.j] The out parameter allows you to avoid a copy: >>> z = discr.element() >>> z_out = x.conj(out=z) >>> print(z) [ 5.-1.j, 3.-0.j, 2.+2.j, 0.-1.j] >>> z_out is z True It can also be used for in-place conjugation: >>> x_out = x.conj(out=x) >>> print(x) [ 5.-1.j, 3.-0.j, 2.+2.j, 0.-1.j] >>> x_out is x True """ if out is None: return self.space.element(self.tensor.conj()) else: self.tensor.conj(out=out.tensor) return out
python
def conj(self, out=None): if out is None: return self.space.element(self.tensor.conj()) else: self.tensor.conj(out=out.tensor) return out
[ "def", "conj", "(", "self", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "return", "self", ".", "space", ".", "element", "(", "self", ".", "tensor", ".", "conj", "(", ")", ")", "else", ":", "self", ".", "tensor", ".", "con...
Complex conjugate of this element. Parameters ---------- out : `DiscreteLpElement`, optional Element to which the complex conjugate is written. Must be an element of this element's space. Returns ------- out : `DiscreteLpElement` The complex conjugate element. If ``out`` is provided, the returned object is a reference to it. Examples -------- >>> discr = uniform_discr(0, 1, 4, dtype=complex) >>> x = discr.element([5+1j, 3, 2-2j, 1j]) >>> y = x.conj() >>> print(y) [ 5.-1.j, 3.-0.j, 2.+2.j, 0.-1.j] The out parameter allows you to avoid a copy: >>> z = discr.element() >>> z_out = x.conj(out=z) >>> print(z) [ 5.-1.j, 3.-0.j, 2.+2.j, 0.-1.j] >>> z_out is z True It can also be used for in-place conjugation: >>> x_out = x.conj(out=x) >>> print(x) [ 5.-1.j, 3.-0.j, 2.+2.j, 0.-1.j] >>> x_out is x True
[ "Complex", "conjugate", "of", "this", "element", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/discr/lp_discr.py#L716-L760
231,797
odlgroup/odl
odl/solvers/nonsmooth/douglas_rachford.py
_operator_norms
def _operator_norms(L): """Get operator norms if needed. Parameters ---------- L : sequence of `Operator` or float The operators or the norms of the operators that are used in the `douglas_rachford_pd` method. For `Operator` entries, the norm is computed with ``Operator.norm(estimate=True)``. """ L_norms = [] for Li in L: if np.isscalar(Li): L_norms.append(float(Li)) elif isinstance(Li, Operator): L_norms.append(Li.norm(estimate=True)) else: raise TypeError('invalid entry {!r} in `L`'.format(Li)) return L_norms
python
def _operator_norms(L): L_norms = [] for Li in L: if np.isscalar(Li): L_norms.append(float(Li)) elif isinstance(Li, Operator): L_norms.append(Li.norm(estimate=True)) else: raise TypeError('invalid entry {!r} in `L`'.format(Li)) return L_norms
[ "def", "_operator_norms", "(", "L", ")", ":", "L_norms", "=", "[", "]", "for", "Li", "in", "L", ":", "if", "np", ".", "isscalar", "(", "Li", ")", ":", "L_norms", ".", "append", "(", "float", "(", "Li", ")", ")", "elif", "isinstance", "(", "Li", ...
Get operator norms if needed. Parameters ---------- L : sequence of `Operator` or float The operators or the norms of the operators that are used in the `douglas_rachford_pd` method. For `Operator` entries, the norm is computed with ``Operator.norm(estimate=True)``.
[ "Get", "operator", "norms", "if", "needed", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/douglas_rachford.py#L253-L271
231,798
odlgroup/odl
odl/solvers/nonsmooth/douglas_rachford.py
douglas_rachford_pd_stepsize
def douglas_rachford_pd_stepsize(L, tau=None, sigma=None): r"""Default step sizes for `douglas_rachford_pd`. Parameters ---------- L : sequence of `Operator` or float The operators or the norms of the operators that are used in the `douglas_rachford_pd` method. For `Operator` entries, the norm is computed with ``Operator.norm(estimate=True)``. tau : positive float, optional Use this value for ``tau`` instead of computing it from the operator norms, see Notes. sigma : tuple of float, optional The ``sigma`` step size parameters for the dual update. Returns ------- tau : float The ``tau`` step size parameter for the primal update. sigma : tuple of float The ``sigma`` step size parameters for the dual update. Notes ----- To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma_i` and :math:`L_i` need to satisfy .. math:: \tau \sum_{i=1}^n \sigma_i \|L_i\|^2 < 4. This function has 4 options, :math:`\tau`/:math:`\sigma` given or not given. - If neither :math:`\tau` nor :math:`\sigma` are given, they are chosen as: .. math:: \tau = \frac{1}{\sum_{i=1}^n \|L_i\|}, \quad \sigma_i = \frac{2}{n \tau \|L_i\|^2} - If only :math:`\sigma` is given, :math:`\tau` is set to: .. math:: \tau = \frac{2}{\sum_{i=1}^n \sigma_i \|L_i\|^2} - If only :math:`\tau` is given, :math:`\sigma` is set to: .. math:: \sigma_i = \frac{2}{n \tau \|L_i\|^2} - If both are given, they are returned as-is without further validation. """ if tau is None and sigma is None: L_norms = _operator_norms(L) tau = 1 / sum(L_norms) sigma = [2.0 / (len(L_norms) * tau * Li_norm ** 2) for Li_norm in L_norms] return tau, tuple(sigma) elif tau is None: L_norms = _operator_norms(L) tau = 2 / sum(si * Li_norm ** 2 for si, Li_norm in zip(sigma, L_norms)) return tau, tuple(sigma) elif sigma is None: L_norms = _operator_norms(L) tau = float(tau) sigma = [2.0 / (len(L_norms) * tau * Li_norm ** 2) for Li_norm in L_norms] return tau, tuple(sigma) else: return float(tau), tuple(sigma)
python
def douglas_rachford_pd_stepsize(L, tau=None, sigma=None): r"""Default step sizes for `douglas_rachford_pd`. Parameters ---------- L : sequence of `Operator` or float The operators or the norms of the operators that are used in the `douglas_rachford_pd` method. For `Operator` entries, the norm is computed with ``Operator.norm(estimate=True)``. tau : positive float, optional Use this value for ``tau`` instead of computing it from the operator norms, see Notes. sigma : tuple of float, optional The ``sigma`` step size parameters for the dual update. Returns ------- tau : float The ``tau`` step size parameter for the primal update. sigma : tuple of float The ``sigma`` step size parameters for the dual update. Notes ----- To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma_i` and :math:`L_i` need to satisfy .. math:: \tau \sum_{i=1}^n \sigma_i \|L_i\|^2 < 4. This function has 4 options, :math:`\tau`/:math:`\sigma` given or not given. - If neither :math:`\tau` nor :math:`\sigma` are given, they are chosen as: .. math:: \tau = \frac{1}{\sum_{i=1}^n \|L_i\|}, \quad \sigma_i = \frac{2}{n \tau \|L_i\|^2} - If only :math:`\sigma` is given, :math:`\tau` is set to: .. math:: \tau = \frac{2}{\sum_{i=1}^n \sigma_i \|L_i\|^2} - If only :math:`\tau` is given, :math:`\sigma` is set to: .. math:: \sigma_i = \frac{2}{n \tau \|L_i\|^2} - If both are given, they are returned as-is without further validation. """ if tau is None and sigma is None: L_norms = _operator_norms(L) tau = 1 / sum(L_norms) sigma = [2.0 / (len(L_norms) * tau * Li_norm ** 2) for Li_norm in L_norms] return tau, tuple(sigma) elif tau is None: L_norms = _operator_norms(L) tau = 2 / sum(si * Li_norm ** 2 for si, Li_norm in zip(sigma, L_norms)) return tau, tuple(sigma) elif sigma is None: L_norms = _operator_norms(L) tau = float(tau) sigma = [2.0 / (len(L_norms) * tau * Li_norm ** 2) for Li_norm in L_norms] return tau, tuple(sigma) else: return float(tau), tuple(sigma)
[ "def", "douglas_rachford_pd_stepsize", "(", "L", ",", "tau", "=", "None", ",", "sigma", "=", "None", ")", ":", "if", "tau", "is", "None", "and", "sigma", "is", "None", ":", "L_norms", "=", "_operator_norms", "(", "L", ")", "tau", "=", "1", "/", "sum"...
r"""Default step sizes for `douglas_rachford_pd`. Parameters ---------- L : sequence of `Operator` or float The operators or the norms of the operators that are used in the `douglas_rachford_pd` method. For `Operator` entries, the norm is computed with ``Operator.norm(estimate=True)``. tau : positive float, optional Use this value for ``tau`` instead of computing it from the operator norms, see Notes. sigma : tuple of float, optional The ``sigma`` step size parameters for the dual update. Returns ------- tau : float The ``tau`` step size parameter for the primal update. sigma : tuple of float The ``sigma`` step size parameters for the dual update. Notes ----- To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma_i` and :math:`L_i` need to satisfy .. math:: \tau \sum_{i=1}^n \sigma_i \|L_i\|^2 < 4. This function has 4 options, :math:`\tau`/:math:`\sigma` given or not given. - If neither :math:`\tau` nor :math:`\sigma` are given, they are chosen as: .. math:: \tau = \frac{1}{\sum_{i=1}^n \|L_i\|}, \quad \sigma_i = \frac{2}{n \tau \|L_i\|^2} - If only :math:`\sigma` is given, :math:`\tau` is set to: .. math:: \tau = \frac{2}{\sum_{i=1}^n \sigma_i \|L_i\|^2} - If only :math:`\tau` is given, :math:`\sigma` is set to: .. math:: \sigma_i = \frac{2}{n \tau \|L_i\|^2} - If both are given, they are returned as-is without further validation.
[ "r", "Default", "step", "sizes", "for", "douglas_rachford_pd", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/nonsmooth/douglas_rachford.py#L274-L350
231,799
odlgroup/odl
odl/tomo/geometry/parallel.py
parallel_beam_geometry
def parallel_beam_geometry(space, num_angles=None, det_shape=None): r"""Create default parallel beam geometry from ``space``. This is intended for simple test cases where users do not need the full flexibility of the geometries, but simply want a geometry that works. This default geometry gives a fully sampled sinogram according to the Nyquist criterion, which in general results in a very large number of samples. In particular, a ``space`` that is not centered at the origin can result in very large detectors. Parameters ---------- space : `DiscreteLp` Reconstruction space, the space of the volumetric data to be projected. Needs to be 2d or 3d. num_angles : int, optional Number of angles. Default: Enough to fully sample the data, see Notes. det_shape : int or sequence of int, optional Number of detector pixels. Default: Enough to fully sample the data, see Notes. Returns ------- geometry : `ParallelBeamGeometry` If ``space`` is 2d, return a `Parallel2dGeometry`. If ``space`` is 3d, return a `Parallel3dAxisGeometry`. Examples -------- Create a parallel beam geometry from a 2d space: >>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20)) >>> geometry = parallel_beam_geometry(space) >>> geometry.angles.size 45 >>> geometry.detector.size 31 Notes ----- According to [NW2001]_, pages 72--74, a function :math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support .. math:: \| x \| > \rho \implies f(x) = 0, and is essentially bandlimited .. math:: \| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0, can be fully reconstructed from a parallel beam ray transform if (1) the projection angles are sampled with a spacing of :math:`\Delta \psi` such that .. math:: \Delta \psi \leq \frac{\pi}{\rho \Omega}, and (2) the detector is sampled with an interval :math:`\Delta s` that satisfies .. math:: \Delta s \leq \frac{\pi}{\Omega}. The geometry returned by this function satisfies these conditions exactly. 
If the domain is 3-dimensional, the geometry is "separable", in that each slice along the z-dimension of the data is treated as independed 2d data. References ---------- .. [NW2001] Natterer, F and Wuebbeling, F. *Mathematical Methods in Image Reconstruction*. SIAM, 2001. https://dx.doi.org/10.1137/1.9780898718324 """ # Find maximum distance from rotation axis corners = space.domain.corners()[:, :2] rho = np.max(np.linalg.norm(corners, axis=1)) # Find default values according to Nyquist criterion. # We assume that the function is bandlimited by a wave along the x or y # axis. The highest frequency we can measure is then a standing wave with # period of twice the inter-node distance. min_side = min(space.partition.cell_sides[:2]) omega = np.pi / min_side num_px_horiz = 2 * int(np.ceil(rho * omega / np.pi)) + 1 if space.ndim == 2: det_min_pt = -rho det_max_pt = rho if det_shape is None: det_shape = num_px_horiz elif space.ndim == 3: num_px_vert = space.shape[2] min_h = space.domain.min_pt[2] max_h = space.domain.max_pt[2] det_min_pt = [-rho, min_h] det_max_pt = [rho, max_h] if det_shape is None: det_shape = [num_px_horiz, num_px_vert] if num_angles is None: num_angles = int(np.ceil(omega * rho)) angle_partition = uniform_partition(0, np.pi, num_angles) det_partition = uniform_partition(det_min_pt, det_max_pt, det_shape) if space.ndim == 2: return Parallel2dGeometry(angle_partition, det_partition) elif space.ndim == 3: return Parallel3dAxisGeometry(angle_partition, det_partition) else: raise ValueError('``space.ndim`` must be 2 or 3.')
python
def parallel_beam_geometry(space, num_angles=None, det_shape=None): r"""Create default parallel beam geometry from ``space``. This is intended for simple test cases where users do not need the full flexibility of the geometries, but simply want a geometry that works. This default geometry gives a fully sampled sinogram according to the Nyquist criterion, which in general results in a very large number of samples. In particular, a ``space`` that is not centered at the origin can result in very large detectors. Parameters ---------- space : `DiscreteLp` Reconstruction space, the space of the volumetric data to be projected. Needs to be 2d or 3d. num_angles : int, optional Number of angles. Default: Enough to fully sample the data, see Notes. det_shape : int or sequence of int, optional Number of detector pixels. Default: Enough to fully sample the data, see Notes. Returns ------- geometry : `ParallelBeamGeometry` If ``space`` is 2d, return a `Parallel2dGeometry`. If ``space`` is 3d, return a `Parallel3dAxisGeometry`. Examples -------- Create a parallel beam geometry from a 2d space: >>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20)) >>> geometry = parallel_beam_geometry(space) >>> geometry.angles.size 45 >>> geometry.detector.size 31 Notes ----- According to [NW2001]_, pages 72--74, a function :math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support .. math:: \| x \| > \rho \implies f(x) = 0, and is essentially bandlimited .. math:: \| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0, can be fully reconstructed from a parallel beam ray transform if (1) the projection angles are sampled with a spacing of :math:`\Delta \psi` such that .. math:: \Delta \psi \leq \frac{\pi}{\rho \Omega}, and (2) the detector is sampled with an interval :math:`\Delta s` that satisfies .. math:: \Delta s \leq \frac{\pi}{\Omega}. The geometry returned by this function satisfies these conditions exactly. 
If the domain is 3-dimensional, the geometry is "separable", in that each slice along the z-dimension of the data is treated as independed 2d data. References ---------- .. [NW2001] Natterer, F and Wuebbeling, F. *Mathematical Methods in Image Reconstruction*. SIAM, 2001. https://dx.doi.org/10.1137/1.9780898718324 """ # Find maximum distance from rotation axis corners = space.domain.corners()[:, :2] rho = np.max(np.linalg.norm(corners, axis=1)) # Find default values according to Nyquist criterion. # We assume that the function is bandlimited by a wave along the x or y # axis. The highest frequency we can measure is then a standing wave with # period of twice the inter-node distance. min_side = min(space.partition.cell_sides[:2]) omega = np.pi / min_side num_px_horiz = 2 * int(np.ceil(rho * omega / np.pi)) + 1 if space.ndim == 2: det_min_pt = -rho det_max_pt = rho if det_shape is None: det_shape = num_px_horiz elif space.ndim == 3: num_px_vert = space.shape[2] min_h = space.domain.min_pt[2] max_h = space.domain.max_pt[2] det_min_pt = [-rho, min_h] det_max_pt = [rho, max_h] if det_shape is None: det_shape = [num_px_horiz, num_px_vert] if num_angles is None: num_angles = int(np.ceil(omega * rho)) angle_partition = uniform_partition(0, np.pi, num_angles) det_partition = uniform_partition(det_min_pt, det_max_pt, det_shape) if space.ndim == 2: return Parallel2dGeometry(angle_partition, det_partition) elif space.ndim == 3: return Parallel3dAxisGeometry(angle_partition, det_partition) else: raise ValueError('``space.ndim`` must be 2 or 3.')
[ "def", "parallel_beam_geometry", "(", "space", ",", "num_angles", "=", "None", ",", "det_shape", "=", "None", ")", ":", "# Find maximum distance from rotation axis", "corners", "=", "space", ".", "domain", ".", "corners", "(", ")", "[", ":", ",", ":", "2", "...
r"""Create default parallel beam geometry from ``space``. This is intended for simple test cases where users do not need the full flexibility of the geometries, but simply want a geometry that works. This default geometry gives a fully sampled sinogram according to the Nyquist criterion, which in general results in a very large number of samples. In particular, a ``space`` that is not centered at the origin can result in very large detectors. Parameters ---------- space : `DiscreteLp` Reconstruction space, the space of the volumetric data to be projected. Needs to be 2d or 3d. num_angles : int, optional Number of angles. Default: Enough to fully sample the data, see Notes. det_shape : int or sequence of int, optional Number of detector pixels. Default: Enough to fully sample the data, see Notes. Returns ------- geometry : `ParallelBeamGeometry` If ``space`` is 2d, return a `Parallel2dGeometry`. If ``space`` is 3d, return a `Parallel3dAxisGeometry`. Examples -------- Create a parallel beam geometry from a 2d space: >>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20)) >>> geometry = parallel_beam_geometry(space) >>> geometry.angles.size 45 >>> geometry.detector.size 31 Notes ----- According to [NW2001]_, pages 72--74, a function :math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support .. math:: \| x \| > \rho \implies f(x) = 0, and is essentially bandlimited .. math:: \| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0, can be fully reconstructed from a parallel beam ray transform if (1) the projection angles are sampled with a spacing of :math:`\Delta \psi` such that .. math:: \Delta \psi \leq \frac{\pi}{\rho \Omega}, and (2) the detector is sampled with an interval :math:`\Delta s` that satisfies .. math:: \Delta s \leq \frac{\pi}{\Omega}. The geometry returned by this function satisfies these conditions exactly. 
If the domain is 3-dimensional, the geometry is "separable", in that each slice along the z-dimension of the data is treated as independed 2d data. References ---------- .. [NW2001] Natterer, F and Wuebbeling, F. *Mathematical Methods in Image Reconstruction*. SIAM, 2001. https://dx.doi.org/10.1137/1.9780898718324
[ "r", "Create", "default", "parallel", "beam", "geometry", "from", "space", "." ]
b8443f6aca90e191ba36c91d32253c5a36249a6c
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/tomo/geometry/parallel.py#L1471-L1587