after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def array2string(
a,
max_line_width=None,
precision=None,
suppress_small=None,
separator=" ",
prefix="",
style=np._NoValue,
formatter=None,
threshold=None,
edgeitems=None,
sign=None,
floatmode=None,
suffix="",
**kwarg,
):
"""
Return a string representation of an array.
Parameters
----------
a : array_like
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters splits the string appropriately after array elements.
precision : int or None, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero. A number is "very small" if it
is smaller than the current printing precision.
separator : str, optional
Inserted between elements.
prefix : str, optional
suffix: str, optional
The length of the prefix and suffix strings are used to respectively
align and wrap the output. An array is typically printed as::
prefix + array2string(a) + suffix
The output is left-padded by the length of the prefix string, and
wrapping is forced at the column ``max_line_width - len(suffix)``.
style : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are::
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'void' : type `numpy.void`
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are::
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values.
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values:
- 'fixed' : Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
- 'unique : Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
- 'maxprec' : Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
- 'maxprec_equal' : Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. If set to
`False`, disables legacy mode. Unrecognized strings will be ignored
with a warning for forward compatibility.
.. versionadded:: 1.14.0
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> print(np.array2string(x, precision=2, separator=',',
... suppress_small=True))
[ 0., 1., 2., 3.]
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0L 0x1L 0x2L]'
"""
legacy = kwarg.pop("legacy", None)
if kwarg:
msg = "array2string() got unexpected keyword argument '{}'"
raise TypeError(msg.format(kwarg.popitem()[0]))
overrides = _make_options_dict(
precision,
threshold,
edgeitems,
max_line_width,
suppress_small,
None,
None,
sign,
formatter,
floatmode,
legacy,
)
options = _format_options.copy()
options.update(overrides)
if options["legacy"] == "1.13":
if style is np._NoValue:
style = repr
if a.shape == () and not a.dtype.names:
return style(a.item())
elif style is not np._NoValue:
# Deprecation 11-9-2017 v1.14
warnings.warn(
"'style' argument is deprecated and no longer functional"
" except in 1.13 'legacy' mode",
DeprecationWarning,
stacklevel=3,
)
if options["legacy"] != "1.13":
options["linewidth"] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
|
def array2string(
a,
max_line_width=None,
precision=None,
suppress_small=None,
separator=" ",
prefix="",
style=np._NoValue,
formatter=None,
threshold=None,
edgeitems=None,
sign=None,
floatmode=None,
suffix="",
**kwarg,
):
"""
Return a string representation of an array.
Parameters
----------
a : array_like
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters splits the string appropriately after array elements.
precision : int or None, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero. A number is "very small" if it
is smaller than the current printing precision.
separator : str, optional
Inserted between elements.
prefix : str, optional
suffix: str, optional
The length of the prefix and suffix strings are used to respectively
align and wrap the output. An array is typically printed as::
prefix + array2string(a) + suffix
The output is left-padded by the length of the prefix string, and
wrapping is forced at the column ``max_line_width - len(suffix)``.
style : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are::
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'void' : type `numpy.void`
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are::
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values.
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values:
- 'fixed' : Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
- 'unique : Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
- 'maxprec' : Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
- 'maxprec_equal' : Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. If set to
`False`, disables legacy mode. Unrecognized strings will be ignored
with a warning for forward compatibility.
.. versionadded:: 1.14.0
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> print(np.array2string(x, precision=2, separator=',',
... suppress_small=True))
[ 0., 1., 2., 3.]
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0L 0x1L 0x2L]'
"""
legacy = kwarg.pop("legacy", None)
if kwarg:
msg = "array2string() got unexpected keyword argument '{}'"
raise TypeError(msg.format(kwarg.popitem()[0]))
overrides = _make_options_dict(
precision,
threshold,
edgeitems,
max_line_width,
suppress_small,
None,
None,
sign,
formatter,
floatmode,
legacy,
)
options = _format_options.copy()
options.update(overrides)
if options["legacy"] == "1.13":
if a.shape == () and not a.dtype.names:
return style(a.item())
elif style is not np._NoValue:
# Deprecation 11-9-2017 v1.14
warnings.warn(
"'style' argument is deprecated and no longer functional"
" except in 1.13 'legacy' mode",
DeprecationWarning,
stacklevel=3,
)
if options["legacy"] != "1.13":
options["linewidth"] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
|
https://github.com/numpy/numpy/issues/10934
|
import numpy as np
np.__version__
'1.14.2'
np.array2string(np.array(1.0))
'1.'
np.set_printoptions(legacy='1.13')
np.array2string(np.array(1.0))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "[...]/numpy/core/arrayprint.py", line 616, in array2string
return style(a.item())
TypeError: object() takes no parameters
|
TypeError
|
def array2string(
a,
max_line_width=None,
precision=None,
suppress_small=None,
separator=" ",
prefix="",
style=np._NoValue,
formatter=None,
threshold=None,
edgeitems=None,
sign=None,
floatmode=None,
suffix="",
**kwarg,
):
"""
Return a string representation of an array.
Parameters
----------
a : array_like
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters splits the string appropriately after array elements.
precision : int or None, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero. A number is "very small" if it
is smaller than the current printing precision.
separator : str, optional
Inserted between elements.
prefix : str, optional
suffix: str, optional
The length of the prefix and suffix strings are used to respectively
align and wrap the output. An array is typically printed as::
prefix + array2string(a) + suffix
The output is left-padded by the length of the prefix string, and
wrapping is forced at the column ``max_line_width - len(suffix)``.
style : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are:
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'void' : type `numpy.void`
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are:
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values.
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values:
- 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
- 'unique': Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
- 'maxprec': Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
- 'maxprec_equal': Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. If set to
`False`, disables legacy mode. Unrecognized strings will be ignored
with a warning for forward compatibility.
.. versionadded:: 1.14.0
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> print(np.array2string(x, precision=2, separator=',',
... suppress_small=True))
[ 0., 1., 2., 3.]
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0L 0x1L 0x2L]'
"""
legacy = kwarg.pop("legacy", None)
if kwarg:
msg = "array2string() got unexpected keyword argument '{}'"
raise TypeError(msg.format(kwarg.popitem()[0]))
overrides = _make_options_dict(
precision,
threshold,
edgeitems,
max_line_width,
suppress_small,
None,
None,
sign,
formatter,
floatmode,
legacy,
)
options = _format_options.copy()
options.update(overrides)
if options["legacy"] == "1.13":
if style is np._NoValue:
style = repr
if a.shape == () and not a.dtype.names:
return style(a.item())
elif style is not np._NoValue:
# Deprecation 11-9-2017 v1.14
warnings.warn(
"'style' argument is deprecated and no longer functional"
" except in 1.13 'legacy' mode",
DeprecationWarning,
stacklevel=3,
)
if options["legacy"] != "1.13":
options["linewidth"] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
|
def array2string(
a,
max_line_width=None,
precision=None,
suppress_small=None,
separator=" ",
prefix="",
style=np._NoValue,
formatter=None,
threshold=None,
edgeitems=None,
sign=None,
floatmode=None,
suffix="",
**kwarg,
):
"""
Return a string representation of an array.
Parameters
----------
a : array_like
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters splits the string appropriately after array elements.
precision : int or None, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero. A number is "very small" if it
is smaller than the current printing precision.
separator : str, optional
Inserted between elements.
prefix : str, optional
suffix: str, optional
The length of the prefix and suffix strings are used to respectively
align and wrap the output. An array is typically printed as::
prefix + array2string(a) + suffix
The output is left-padded by the length of the prefix string, and
wrapping is forced at the column ``max_line_width - len(suffix)``.
style : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are:
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'void' : type `numpy.void`
- 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are:
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values.
floatmode : str, optional
Controls the interpretation of the `precision` option for
floating-point types. Can take the following values:
- 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
necessary to specify the value uniquely.
- 'unique': Print the minimum number of fractional digits necessary
to represent each value uniquely. Different elements may
have a different number of digits. The value of the
`precision` option is ignored.
- 'maxprec': Print at most `precision` fractional digits, but if
an element can be uniquely represented with fewer digits
only print it with that many.
- 'maxprec_equal': Print at most `precision` fractional digits,
but if every element in the array can be uniquely
represented with an equal number of fewer digits, use that
many digits for all elements.
legacy : string or `False`, optional
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
approximates numpy 1.13 print output by including a space in the sign
position of floats and different behavior for 0d arrays. If set to
`False`, disables legacy mode. Unrecognized strings will be ignored
with a warning for forward compatibility.
.. versionadded:: 1.14.0
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> print(np.array2string(x, precision=2, separator=',',
... suppress_small=True))
[ 0., 1., 2., 3.]
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0L 0x1L 0x2L]'
"""
legacy = kwarg.pop("legacy", None)
if kwarg:
msg = "array2string() got unexpected keyword argument '{}'"
raise TypeError(msg.format(kwarg.popitem()[0]))
overrides = _make_options_dict(
precision,
threshold,
edgeitems,
max_line_width,
suppress_small,
None,
None,
sign,
formatter,
floatmode,
legacy,
)
options = _format_options.copy()
options.update(overrides)
if options["legacy"] == "1.13":
if a.shape == () and not a.dtype.names:
return style(a.item())
elif style is not np._NoValue:
# Deprecation 11-9-2017 v1.14
warnings.warn(
"'style' argument is deprecated and no longer functional"
" except in 1.13 'legacy' mode",
DeprecationWarning,
stacklevel=3,
)
if options["legacy"] != "1.13":
options["linewidth"] -= len(suffix)
# treat as a null array if any of shape elements == 0
if a.size == 0:
return "[]"
return _array2string(a, options, separator, prefix)
|
https://github.com/numpy/numpy/issues/10934
|
import numpy as np
np.__version__
'1.14.2'
np.array2string(np.array(1.0))
'1.'
np.set_printoptions(legacy='1.13')
np.array2string(np.array(1.0))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "[...]/numpy/core/arrayprint.py", line 616, in array2string
return style(a.item())
TypeError: object() takes no parameters
|
TypeError
|
def _get_format_function(data, **options):
"""
find the right formatting function for the dtype_
"""
dtype_ = data.dtype
dtypeobj = dtype_.type
formatdict = _get_formatdict(data, **options)
if issubclass(dtypeobj, _nt.bool_):
return formatdict["bool"]()
elif issubclass(dtypeobj, _nt.integer):
if issubclass(dtypeobj, _nt.timedelta64):
return formatdict["timedelta"]()
else:
return formatdict["int"]()
elif issubclass(dtypeobj, _nt.floating):
if issubclass(dtypeobj, _nt.longfloat):
return formatdict["longfloat"]()
else:
return formatdict["float"]()
elif issubclass(dtypeobj, _nt.complexfloating):
if issubclass(dtypeobj, _nt.clongfloat):
return formatdict["longcomplexfloat"]()
else:
return formatdict["complexfloat"]()
elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
return formatdict["numpystr"]()
elif issubclass(dtypeobj, _nt.datetime64):
return formatdict["datetime"]()
elif issubclass(dtypeobj, _nt.object_):
return formatdict["object"]()
elif issubclass(dtypeobj, _nt.void):
if dtype_.names is not None:
return StructuredVoidFormat.from_data(data, **options)
else:
return formatdict["void"]()
else:
return formatdict["numpystr"]()
|
def _get_format_function(data, **options):
"""
find the right formatting function for the dtype_
"""
dtype_ = data.dtype
if dtype_.fields is not None:
return StructureFormat.from_data(data, **options)
dtypeobj = dtype_.type
formatdict = _get_formatdict(data, **options)
if issubclass(dtypeobj, _nt.bool_):
return formatdict["bool"]()
elif issubclass(dtypeobj, _nt.integer):
if issubclass(dtypeobj, _nt.timedelta64):
return formatdict["timedelta"]()
else:
return formatdict["int"]()
elif issubclass(dtypeobj, _nt.floating):
if issubclass(dtypeobj, _nt.longfloat):
return formatdict["longfloat"]()
else:
return formatdict["float"]()
elif issubclass(dtypeobj, _nt.complexfloating):
if issubclass(dtypeobj, _nt.clongfloat):
return formatdict["longcomplexfloat"]()
else:
return formatdict["complexfloat"]()
elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
return formatdict["numpystr"]()
elif issubclass(dtypeobj, _nt.datetime64):
return formatdict["datetime"]()
elif issubclass(dtypeobj, _nt.object_):
return formatdict["object"]()
elif issubclass(dtypeobj, _nt.void):
return formatdict["void"]()
else:
return formatdict["numpystr"]()
|
https://github.com/numpy/numpy/issues/9821
|
import numpy as np
x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
x
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 955, in array_repr
', ', class_name + "(")
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 521, in array2string
lst = _array2string(a, options, separator, prefix)
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 363, in wrapper
return f(self, *args, **kwargs)
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 392, in _array2string
options['edgeitems'], summary_insert)[:-1]
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 565, in _formatArray
word = format_function(a[-i]) + separator
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 886, in __call__
for field, format_function in zip(x, self.format_functions):
TypeError: zip argument #1 must support iteration
|
TypeError
|
def __init__(self, *args, **kwargs):
# NumPy 1.14, 2018-02-14
warnings.warn(
"StructureFormat has been replaced by StructuredVoidFormat",
DeprecationWarning,
stacklevel=2,
)
super(StructureFormat, self).__init__(*args, **kwargs)
|
def __init__(self, format_functions):
self.format_functions = format_functions
|
https://github.com/numpy/numpy/issues/9821
|
import numpy as np
x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
x
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 955, in array_repr
', ', class_name + "(")
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 521, in array2string
lst = _array2string(a, options, separator, prefix)
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 363, in wrapper
return f(self, *args, **kwargs)
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 392, in _array2string
options['edgeitems'], summary_insert)[:-1]
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 565, in _formatArray
word = format_function(a[-i]) + separator
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 886, in __call__
for field, format_function in zip(x, self.format_functions):
TypeError: zip argument #1 must support iteration
|
TypeError
|
def _void_scalar_repr(x):
"""
Implements the repr for structured-void scalars. It is called from the
scalartypes.c.src code, and is placed here because it uses the elementwise
formatters defined above.
"""
return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
|
def _void_scalar_repr(x):
"""
Implements the repr for structured-void scalars. It is called from the
scalartypes.c.src code, and is placed here because it uses the elementwise
formatters defined above.
"""
return StructureFormat.from_data(array(x), **_format_options)(x)
|
https://github.com/numpy/numpy/issues/9821
|
import numpy as np
x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
x
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 955, in array_repr
', ', class_name + "(")
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 521, in array2string
lst = _array2string(a, options, separator, prefix)
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 363, in wrapper
return f(self, *args, **kwargs)
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 392, in _array2string
options['edgeitems'], summary_insert)[:-1]
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 565, in _formatArray
word = format_function(a[-i]) + separator
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 886, in __call__
for field, format_function in zip(x, self.format_functions):
TypeError: zip argument #1 must support iteration
|
TypeError
|
def from_data(cls, data, **options):
"""
This is a second way to initialize StructuredVoidFormat, using the raw data
as input. Added to avoid changing the signature of __init__.
"""
format_functions = []
for field_name in data.dtype.names:
format_function = _get_format_function(data[field_name], **options)
if data.dtype[field_name].shape != ():
format_function = SubArrayFormat(format_function)
format_functions.append(format_function)
return cls(format_functions)
|
def from_data(cls, data, **options):
"""
This is a second way to initialize StructureFormat, using the raw data
as input. Added to avoid changing the signature of __init__.
"""
format_functions = []
for field_name in data.dtype.names:
format_function = _get_format_function(data[field_name], **options)
if data.dtype[field_name].shape != ():
format_function = SubArrayFormat(format_function)
format_functions.append(format_function)
return cls(format_functions)
|
https://github.com/numpy/numpy/issues/9821
|
import numpy as np
x = np.zeros(3, dtype=('i4',[('r','u1'), ('g','u1'), ('b','u1'), ('a','u1')]))
x
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 955, in array_repr
', ', class_name + "(")
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 521, in array2string
lst = _array2string(a, options, separator, prefix)
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 363, in wrapper
return f(self, *args, **kwargs)
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 392, in _array2string
options['edgeitems'], summary_insert)[:-1]
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 565, in _formatArray
word = format_function(a[-i]) + separator
File "/Users/tos/py3npy/lib/python3.6/site-packages/numpy-1.14.0.dev0+1f4ed32-py3.6-macosx-10.12-x86_64.egg/numpy/core/arrayprint.py", line 886, in __call__
for field, format_function in zip(x, self.format_functions):
TypeError: zip argument #1 must support iteration
|
TypeError
|
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
Parameters
==========
a : ndarray
Ravelled data array
bins, range
Forwarded arguments from `histogram`.
weights : ndarray, optional
Ravelled weights array, or None
Returns
=======
bin_edges : ndarray
Array of bin edges
uniform_bins : (Number, Number, int):
The upper bound, lowerbound, and number of bins, used in the optimized
implementation of `histogram` that works on uniform bins.
"""
# parse the overloaded bins argument
n_equal_bins = None
bin_edges = None
if isinstance(bins, basestring):
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bin_name not in _hist_bin_selectors:
raise ValueError(
"{!r} is not a valid estimator for `bins`".format(bin_name)
)
if weights is not None:
raise TypeError(
"Automated estimation of the number of "
"bins is not supported for weighted data"
)
first_edge, last_edge = _get_outer_edges(a, range)
# truncate the range if needed
if range is not None:
keep = a >= first_edge
keep &= a <= last_edge
if not np.logical_and.reduce(keep):
a = a[keep]
if a.size == 0:
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a)
if width:
n_equal_bins = int(np.ceil((last_edge - first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
n_equal_bins = 1
elif np.ndim(bins) == 0:
try:
n_equal_bins = operator.index(bins)
except TypeError:
raise TypeError("`bins` must be an integer, a string, or an array")
if n_equal_bins < 1:
raise ValueError("`bins` must be positive, when an integer")
first_edge, last_edge = _get_outer_edges(a, range)
elif np.ndim(bins) == 1:
bin_edges = np.asarray(bins)
if np.any(bin_edges[:-1] > bin_edges[1:]):
raise ValueError("`bins` must increase monotonically, when an array")
else:
raise ValueError("`bins` must be 1d, when an array")
if n_equal_bins is not None:
# gh-10322 means that type resolution rules are dependent on array
# shapes. To avoid this causing problems, we pick a type now and stick
# with it throughout.
bin_type = np.result_type(first_edge, last_edge, a)
if np.issubdtype(bin_type, np.integer):
bin_type = np.result_type(bin_type, float)
# bin edges must be computed
bin_edges = np.linspace(
first_edge, last_edge, n_equal_bins + 1, endpoint=True, dtype=bin_type
)
return bin_edges, (first_edge, last_edge, n_equal_bins)
else:
return bin_edges, None
|
def _get_bin_edges(a, bins, range, weights):
"""
Computes the bins used internally by `histogram`.
Parameters
==========
a : ndarray
Ravelled data array
bins, range
Forwarded arguments from `histogram`.
weights : ndarray, optional
Ravelled weights array, or None
Returns
=======
bin_edges : ndarray
Array of bin edges
uniform_bins : (Number, Number, int):
The upper bound, lowerbound, and number of bins, used in the optimized
implementation of `histogram` that works on uniform bins.
"""
# parse the overloaded bins argument
n_equal_bins = None
bin_edges = None
if isinstance(bins, basestring):
bin_name = bins
# if `bins` is a string for an automatic method,
# this will replace it with the number of bins calculated
if bin_name not in _hist_bin_selectors:
raise ValueError(
"{!r} is not a valid estimator for `bins`".format(bin_name)
)
if weights is not None:
raise TypeError(
"Automated estimation of the number of "
"bins is not supported for weighted data"
)
first_edge, last_edge = _get_outer_edges(a, range)
# truncate the range if needed
if range is not None:
keep = a >= first_edge
keep &= a <= last_edge
if not np.logical_and.reduce(keep):
a = a[keep]
if a.size == 0:
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
width = _hist_bin_selectors[bin_name](a)
if width:
n_equal_bins = int(np.ceil((last_edge - first_edge) / width))
else:
# Width can be zero for some estimators, e.g. FD when
# the IQR of the data is zero.
n_equal_bins = 1
elif np.ndim(bins) == 0:
try:
n_equal_bins = operator.index(bins)
except TypeError:
raise TypeError("`bins` must be an integer, a string, or an array")
if n_equal_bins < 1:
raise ValueError("`bins` must be positive, when an integer")
first_edge, last_edge = _get_outer_edges(a, range)
elif np.ndim(bins) == 1:
bin_edges = np.asarray(bins)
if np.any(bin_edges[:-1] > bin_edges[1:]):
raise ValueError("`bins` must increase monotonically, when an array")
else:
raise ValueError("`bins` must be 1d, when an array")
if n_equal_bins is not None:
# bin edges must be computed
bin_edges = np.linspace(first_edge, last_edge, n_equal_bins + 1, endpoint=True)
return bin_edges, (first_edge, last_edge, n_equal_bins)
else:
return bin_edges, None
|
https://github.com/numpy/numpy/issues/8123
|
In [142]: print(sys.version)
3.5.0 (default, Nov 20 2015, 16:20:41)
[GCC 4.4.7 20120313 (Red Hat 4.4.7-16)]
In [143]: print(numpy.__version__)
1.11.2
In [155]: histogram(array([-24.35791367], dtype=float32), range=(-24.3579135, 0))
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-155-57aa993972c6> in <module>()
----> 1 histogram(array([-24.35791367], dtype=float32), range=(-24.3579135, 0))
/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/numpy/lib/function_base.py in histogram(a, bins, range, normed, weights, density)
609 n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
610 else:
--> 611 n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
612
613 # Rename the bin edges for return.
ValueError: The first argument of bincount must be non-negative
|
ValueError
|
def histogram(a, bins=10, range=None, normed=False, weights=None, density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
Less robust estimator that that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
behavior. It will be removed in NumPy 2.0.0. Use the ``density``
keyword instead. If ``False``, the result will contain the
number of samples in each bin. If ``True``, the result is the
value of the probability *density* function at the bin,
normalized such that the *integral* over the range is 1. Note
that this latter behavior is known to be buggy with unequal bin
widths; use ``density`` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
.. versionadded:: 1.11.0
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))`.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'Rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'Sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'Doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'Sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
# The fast path uses bincount, but that only works for certain types
# of weight
simple_weights = (
weights is None
or np.can_cast(weights.dtype, np.double)
or np.can_cast(weights.dtype, complex)
)
if uniform_bins is not None and simple_weights:
# Fast algorithm for equal bins
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
first_edge, last_edge, n_equal_bins = uniform_bins
# Initialize empty histogram
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
norm = n_equal_bins / (last_edge - first_edge)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in np.arange(0, len(a), BLOCK):
tmp_a = a[i : i + BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i : i + BLOCK]
# Only include values in the right range
keep = tmp_a >= first_edge
keep &= tmp_a <= last_edge
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
# This cast ensures no type promotions occur below, which gh-10322
# make unpredictable. Getting it wrong leads to precision errors
# like gh-8123.
tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
f_indices = (tmp_a - first_edge) * norm
indices = f_indices.astype(np.intp)
indices[indices == n_equal_bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = (tmp_a >= bin_edges[indices + 1]) & (
indices != n_equal_bins - 1
)
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == "c":
n.real += np.bincount(
indices, weights=tmp_w.real, minlength=n_equal_bins
)
n.imag += np.bincount(
indices, weights=tmp_w.imag, minlength=n_equal_bins
)
else:
n += np.bincount(indices, weights=tmp_w, minlength=n_equal_bins).astype(
ntype
)
else:
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
for i in np.arange(0, len(a), BLOCK):
sa = np.sort(a[i : i + BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
for i in np.arange(0, len(a), BLOCK):
tmp_a = a[i : i + BLOCK]
tmp_w = weights[i : i + BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate((zero, sw.cumsum()))
bin_index = _search_sorted_inclusive(sa, bin_edges)
cum_n += cw[bin_index]
n = np.diff(cum_n)
# density overrides the normed keyword
if density is not None:
normed = False
if density:
db = np.array(np.diff(bin_edges), float)
return n / db / n.sum(), bin_edges
elif normed:
# deprecated, buggy behavior. Remove for NumPy 2.0.0
db = np.array(np.diff(bin_edges), float)
return n / (n * db).sum(), bin_edges
else:
return n, bin_edges
|
def histogram(a, bins=10, range=None, normed=False, weights=None, density=None):
r"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars or str, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines the bin edges, including the rightmost
edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string from the list below, `histogram` will use
the method chosen to calculate the optimal bin width and
consequently the number of bins (see `Notes` for more detail on
the estimators) from the data that falls within the requested
range. While the bin width will be optimal for the actual data
in the range, the number of bins will be computed to fill the
entire range, including the empty portions. For visualisation,
using the 'auto' option is suggested. Weighted data is not
supported for automated bin size selection.
'auto'
Maximum of the 'sturges' and 'fd' estimators. Provides good
all around performance.
'fd' (Freedman Diaconis Estimator)
Robust (resilient to outliers) estimator that takes into
account data variability and data size.
'doane'
An improved version of Sturges' estimator that works better
with non-normal datasets.
'scott'
Less robust estimator that that takes into account data
variability and data size.
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
'sturges'
R's default method, only accounts for data size. Only
optimal for gaussian data and underestimates number of bins
for large non-gaussian datasets.
'sqrt'
Square root (of data size) estimator, used by Excel and
other programs for its speed and simplicity.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored. The first element of the range must be less than or
equal to the second. `range` affects the automatic bin
computation as well. While bin width is computed to be optimal
based on the actual data within `range`, the bin count will fill
the entire range including portions containing no data.
normed : bool, optional
This keyword is deprecated in NumPy 1.6.0 due to confusing/buggy
behavior. It will be removed in NumPy 2.0.0. Use the ``density``
keyword instead. If ``False``, the result will contain the
number of samples in each bin. If ``True``, the result is the
value of the probability *density* function at the bin,
normalized such that the *integral* over the range is 1. Note
that this latter behavior is known to be buggy with unequal bin
widths; use ``density`` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in
`a` only contributes its associated weight towards the bin count
(instead of 1). If `density` is True, the weights are
normalized, so that the integral of the density over the range
remains 1.
density : bool, optional
If ``False``, the result will contain the number of samples in
each bin. If ``True``, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the ``normed`` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words,
if `bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
*includes* 4.
.. versionadded:: 1.11.0
The methods to estimate the optimal number of bins are well founded
in literature, and are inspired by the choices R provides for
histogram visualisation. Note that having the number of bins
proportional to :math:`n^{1/3}` is asymptotically optimal, which is
why it appears in most estimators. These are simply plug-in methods
that give good starting points for number of bins. In the equations
below, :math:`h` is the binwidth and :math:`n_h` is the number of
bins. All estimators that compute bin counts are recast to bin width
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))`.
'Auto' (maximum of the 'Sturges' and 'FD' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
'FD' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
'Scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
data and inversely proportional to cube root of ``x.size``. Can
be too conservative for small datasets, but is quite good for
large datasets. The standard deviation is not very robust to
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
'Rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
'Sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
estimator assumes normality of data and is too conservative for
larger, non-normal datasets. This is the default method in R's
``hist`` method.
'Doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
g_1 = mean[(\frac{x - \mu}{\sigma})^3]
\sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
An improved version of Sturges' formula that produces better
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
'Sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
data size.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
Automated Bin Selection Methods example, using 2 peak random data
with 2000 points:
>>> import matplotlib.pyplot as plt
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
>>> plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
>>> plt.show()
"""
a, weights = _ravel_and_check_weights(a, weights)
bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = np.dtype(np.intp)
else:
ntype = weights.dtype
# We set a block size, as this allows us to iterate over chunks when
# computing histograms, to minimize memory usage.
BLOCK = 65536
# The fast path uses bincount, but that only works for certain types
# of weight
simple_weights = (
weights is None
or np.can_cast(weights.dtype, np.double)
or np.can_cast(weights.dtype, complex)
)
if uniform_bins is not None and simple_weights:
# Fast algorithm for equal bins
# We now convert values of a to bin indices, under the assumption of
# equal bin widths (which is valid here).
first_edge, last_edge, n_equal_bins = uniform_bins
# Initialize empty histogram
n = np.zeros(n_equal_bins, ntype)
# Pre-compute histogram scaling factor
norm = n_equal_bins / (last_edge - first_edge)
# We iterate over blocks here for two reasons: the first is that for
# large arrays, it is actually faster (for example for a 10^8 array it
# is 2x as fast) and it results in a memory footprint 3x lower in the
# limit of large arrays.
for i in np.arange(0, len(a), BLOCK):
tmp_a = a[i : i + BLOCK]
if weights is None:
tmp_w = None
else:
tmp_w = weights[i : i + BLOCK]
# Only include values in the right range
keep = tmp_a >= first_edge
keep &= tmp_a <= last_edge
if not np.logical_and.reduce(keep):
tmp_a = tmp_a[keep]
if tmp_w is not None:
tmp_w = tmp_w[keep]
tmp_a_data = tmp_a.astype(float)
tmp_a = tmp_a_data - first_edge
tmp_a *= norm
# Compute the bin indices, and for values that lie exactly on
# last_edge we need to subtract one
indices = tmp_a.astype(np.intp)
indices[indices == n_equal_bins] -= 1
# The index computation is not guaranteed to give exactly
# consistent results within ~1 ULP of the bin edges.
decrement = tmp_a_data < bin_edges[indices]
indices[decrement] -= 1
# The last bin includes the right edge. The other bins do not.
increment = (tmp_a_data >= bin_edges[indices + 1]) & (
indices != n_equal_bins - 1
)
indices[increment] += 1
# We now compute the histogram using bincount
if ntype.kind == "c":
n.real += np.bincount(
indices, weights=tmp_w.real, minlength=n_equal_bins
)
n.imag += np.bincount(
indices, weights=tmp_w.imag, minlength=n_equal_bins
)
else:
n += np.bincount(indices, weights=tmp_w, minlength=n_equal_bins).astype(
ntype
)
else:
# Compute via cumulative histogram
cum_n = np.zeros(bin_edges.shape, ntype)
if weights is None:
for i in np.arange(0, len(a), BLOCK):
sa = np.sort(a[i : i + BLOCK])
cum_n += _search_sorted_inclusive(sa, bin_edges)
else:
zero = np.zeros(1, dtype=ntype)
for i in np.arange(0, len(a), BLOCK):
tmp_a = a[i : i + BLOCK]
tmp_w = weights[i : i + BLOCK]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate((zero, sw.cumsum()))
bin_index = _search_sorted_inclusive(sa, bin_edges)
cum_n += cw[bin_index]
n = np.diff(cum_n)
# density overrides the normed keyword
if density is not None:
normed = False
if density:
db = np.array(np.diff(bin_edges), float)
return n / db / n.sum(), bin_edges
elif normed:
# deprecated, buggy behavior. Remove for NumPy 2.0.0
db = np.array(np.diff(bin_edges), float)
return n / (n * db).sum(), bin_edges
else:
return n, bin_edges
|
https://github.com/numpy/numpy/issues/8123
|
In [142]: print(sys.version)
3.5.0 (default, Nov 20 2015, 16:20:41)
[GCC 4.4.7 20120313 (Red Hat 4.4.7-16)]
In [143]: print(numpy.__version__)
1.11.2
In [155]: histogram(array([-24.35791367], dtype=float32), range=(-24.3579135, 0))
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-155-57aa993972c6> in <module>()
----> 1 histogram(array([-24.35791367], dtype=float32), range=(-24.3579135, 0))
/dev/shm/gerrit/venv/stable-3.5/lib/python3.5/site-packages/numpy/lib/function_base.py in histogram(a, bins, range, normed, weights, density)
609 n.imag += np.bincount(indices, weights=tmp_w.imag, minlength=bins)
610 else:
--> 611 n += np.bincount(indices, weights=tmp_w, minlength=bins).astype(ntype)
612
613 # Rename the bin edges for return.
ValueError: The first argument of bincount must be non-negative
|
ValueError
|
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, "w")
f.write(
"# This file is generated by numpy's %s\n" % (os.path.basename(sys.argv[0]))
)
f.write("# It contains system_info results at the time of building this package.\n")
f.write('__all__ = ["get_info","show"]\n\n')
# For gfortran+msvc combination, extra shared libraries may exist
f.write("""
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if os.path.isdir(extra_dll_dir) and sys.platform == 'win32':
try:
from ctypes import windll, c_wchar_p
_AddDllDirectory = windll.kernel32.AddDllDirectory
_AddDllDirectory.argtypes = [c_wchar_p]
# Needed to initialize AddDllDirectory modifications
windll.kernel32.SetDefaultDllDirectories(0x1000)
except AttributeError:
def _AddDllDirectory(dll_directory):
os.environ.setdefault('PATH', '')
os.environ['PATH'] += os.pathsep + dll_directory
_AddDllDirectory(extra_dll_dir)
""")
for k, i in system_info.saved_results.items():
f.write("%s=%r\n" % (k, i))
f.write(r"""
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
""")
f.close()
return target
|
def generate_config_py(target):
"""Generate config.py file containing system_info information
used during building the package.
Usage:
config['py_modules'].append((packagename, '__config__',generate_config_py))
"""
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, "w")
f.write(
"# This file is generated by numpy's %s\n" % (os.path.basename(sys.argv[0]))
)
f.write("# It contains system_info results at the time of building this package.\n")
f.write('__all__ = ["get_info","show"]\n\n')
# For gfortran+msvc combination, extra shared libraries may exist
f.write("""
import os
import sys
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if os.path.isdir(extra_dll_dir) and sys.platform == 'win32':
try:
from ctypes import windll, c_wchar_p
_AddDllDirectory = windll.kernel32.AddDllDirectory
_AddDllDirectory.argtypes = [c_wchar_p]
# Needed to initialize AddDllDirectory modifications
windll.kernel32.SetDefaultDllDirectories(0x1000)
except AttributeError:
def _AddDllDirectory(dll_directory):
os.environ["PATH"] += os.pathsep + dll_directory
_AddDllDirectory(extra_dll_dir)
""")
for k, i in system_info.saved_results.items():
f.write("%s=%r\n" % (k, i))
f.write(r"""
def get_info(name):
g = globals()
return g.get(name, g.get(name + "_info", {}))
def show():
for name,info_dict in globals().items():
if name[0] == "_" or type(info_dict) is not type({}): continue
print(name + ":")
if not info_dict:
print(" NOT AVAILABLE")
for k,v in info_dict.items():
v = str(v)
if k == "sources" and len(v) > 200:
v = v[:60] + " ...\n... " + v[-60:]
print(" %s = %s" % (k,v))
""")
f.close()
return target
|
https://github.com/numpy/numpy/issues/10338
|
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/django/utils/autoreload.py", line 228, in wrapper
fn(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/django/core/management/commands/runserver.py", line 117, in inner_run
autoreload.raise_last_exception()
File "/usr/local/lib/python3.6/site-packages/django/utils/autoreload.py", line 251, in raise_last_exception
six.reraise(*_exception)
File "/usr/local/lib/python3.6/site-packages/django/utils/six.py", line 685, in reraise
raise value.with_traceback(tb)
File "/usr/local/lib/python3.6/site-packages/django/utils/autoreload.py", line 228, in wrapper
fn(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/django/__init__.py", line 27, in setup
apps.populate(settings.INSTALLED_APPS)
File "/usr/local/lib/python3.6/site-packages/django/apps/registry.py", line 108, in populate
app_config.import_models()
File "/usr/local/lib/python3.6/site-packages/django/apps/config.py", line 202, in import_models
self.models_module = import_module(models_module_name)
File "/usr/local/lib/python3.6/importlib/__init__.py", line 126, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 994, in _gcd_import
File "<frozen importlib._bootstrap>", line 971, in _find_and_load
File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 678, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/code/analytics/models.py", line 6, in <module>
import pandas as pd
File "/usr/local/lib/python3.6/site-packages/pandas/__init__.py", line 13, in <module>
__import__(dependency)
File "/usr/local/lib/python3.6/site-packages/numpy/__init__.py", line 126, in <module>
from numpy.__config__ import show as show_config
File "/usr/local/lib/python3.6/site-packages/numpy/__config__.py", line 9, in <module>
os.environ["PATH"] += os.pathsep + extra_dll_dir
File "/usr/local/lib/python3.6/os.py", line 669, in __getitem__
raise KeyError(key) from None
KeyError: 'PATH'
|
KeyError
|
def __new__(cls):
if cls.__singleton is None:
# We define the masked singleton as a float for higher precedence.
# Note that it can be tricky sometimes w/ type comparison
data = np.array(0.0)
mask = np.array(True)
# prevent any modifications
data.flags.writeable = False
mask.flags.writeable = False
# don't fall back on MaskedArray.__new__(MaskedConstant), since
# that might confuse it - this way, the construction is entirely
# within our control
cls.__singleton = MaskedArray(data, mask=mask).view(cls)
return cls.__singleton
|
def __new__(self):
return self._data.view(self)
|
https://github.com/numpy/numpy/issues/4595
|
np.ma.masked.fill_value
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "numpy/ma/core.py", line 3353, in get_fill_value
if self._fill_value is None:
AttributeError: 'MaskedConstant' object has no attribute '_fill_value'
|
AttributeError
|
def __array_finalize__(self, obj):
if self.__singleton is None:
# this handles the `.view` in __new__, which we want to copy across
# properties normally
return super(MaskedConstant, self).__array_finalize__(obj)
elif self is self.__singleton:
# not clear how this can happen, play it safe
pass
else:
# everywhere else, we want to downcast to MaskedArray, to prevent a
# duplicate maskedconstant.
self.__class__ = MaskedArray
MaskedArray.__array_finalize__(self, obj)
|
def __array_finalize__(self, obj):
return
|
https://github.com/numpy/numpy/issues/4595
|
np.ma.masked.fill_value
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "numpy/ma/core.py", line 3353, in get_fill_value
if self._fill_value is None:
AttributeError: 'MaskedConstant' object has no attribute '_fill_value'
|
AttributeError
|
def __repr__(self):
if self is self.__singleton:
return "masked"
else:
# it's a subclass, or something is wrong, make it obvious
return object.__repr__(self)
|
def __repr__(self):
return "masked"
|
https://github.com/numpy/numpy/issues/4595
|
np.ma.masked.fill_value
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "numpy/ma/core.py", line 3353, in get_fill_value
if self._fill_value is None:
AttributeError: 'MaskedConstant' object has no attribute '_fill_value'
|
AttributeError
|
def svd(a, full_matrices=True, compute_uv=True):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = "D->DdD" if isComplexType(t) else "d->ddd"
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = "D->d" if isComplexType(t) else "d->d"
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
|
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = "D->DdD" if isComplexType(t) else "d->ddd"
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = "D->d" if isComplexType(t) else "d->d"
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
|
https://github.com/numpy/numpy/issues/8826
|
a = np.stack((np.eye(3),)*4, axis=0)
ai = np.linalg.inv(a)
assert (a == ai).all()
api = np.linalg.pinv(a)
Traceback (most recent call last):
File "<pyshell#9>", line 1, in <module>
np.linalg.pinv(a)
File "C:\Program Files\Python 3.5\lib\site-packages\numpy\linalg\linalg.py", line 1668, in pinv
if s[i] > cutoff:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
tol : (...) array_like, float, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
.. versionchanged:: 1.14
Broadcasted against the stack of matrices
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documention, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M == 0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
else:
tol = asarray(tol)[..., newaxis]
return (S > tol).sum(axis=-1)
|
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
.. [1] MATLAB reference documention, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M == 0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
return (S > tol).sum(axis=-1)
|
https://github.com/numpy/numpy/issues/8826
|
a = np.stack((np.eye(3),)*4, axis=0)
ai = np.linalg.inv(a)
assert (a == ai).all()
api = np.linalg.pinv(a)
Traceback (most recent call last):
File "<pyshell#9>", line 1, in <module>
np.linalg.pinv(a)
File "C:\Program Files\Python 3.5\lib\site-packages\numpy\linalg\linalg.py", line 1668, in pinv
if s[i] > cutoff:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def pinv(a, rcond=1e-15):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
a : (..., M, N) array_like
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero. Broadcasts against the stack of matrices
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
large = s > cutoff
s = divide(1, s, where=large, out=s)
s[~large] = 0
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
|
def pinv(a, rcond=1e-15):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond * maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1.0 / s[i]
else:
s[i] = 0.0
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
|
https://github.com/numpy/numpy/issues/8826
|
a = np.stack((np.eye(3),)*4, axis=0)
ai = np.linalg.inv(a)
assert (a == ai).all()
api = np.linalg.pinv(a)
Traceback (most recent call last):
File "<pyshell#9>", line 1, in <module>
np.linalg.pinv(a)
File "C:\Program Files\Python 3.5\lib\site-packages\numpy\linalg\linalg.py", line 1668, in pinv
if s[i] > cutoff:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def lstsq(a, b, rcond="warn"):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
.. versionchanged:: 1.14.0
If not set, a FutureWarning is given. The previous default
of ``-1`` will use the machine precision as `rcond` parameter,
the new default will use the machine precision times `max(M, N)`.
To silence the warning and use the new default, use ``rcond=None``,
to keep using the old behavior, use ``rcond=-1``.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError("Incompatible dimensions")
t, result_t = _commonType(a, b)
# Determine default rcond value
if rcond == "warn":
# 2017-08-19, 1.14.0
warnings.warn(
"`rcond` parameter will change to the default of "
"machine precision times ``max(M, N)`` where M and N "
"are the input matrix dimensions.\n"
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
FutureWarning,
stacklevel=2,
)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * ldb
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[: b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
# This line:
# * is incorrect, according to the LAPACK documentation
# * raises a ValueError if min(m,n) == 0
# * should not be calculated here anyway, as LAPACK should calculate
# `liwork` for us. But that only works if our version of lapack does
# not have this bug:
# http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
# Lapack_lite does have that bug...
nlvl = max(0, int(math.log(float(min(m, n)) / 2.0)) + 1)
iwork = zeros((3 * min(m, n) * nlvl + 11 * min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(
m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, -1, rwork, iwork, 0
)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros(
(
ldb,
n_rhs,
),
real_t,
)
results = lapack_lite.dgelsd(
m, n, n_rhs, a_real, m, bstar_real, ldb, s, rcond, 0, rwork, -1, iwork, 0
)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(
m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, lwork, rwork, iwork, 0
)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(
m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, -1, iwork, 0
)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(
m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, lwork, iwork, 0
)
if results["info"] > 0:
raise LinAlgError("SVD did not converge in Linear Least Squares")
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results["rank"] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:]) ** 2)], dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:]) ** 2)], dtype=result_real_t)
else:
x = array(bstar.T[:n, :], dtype=result_t, copy=True)
if results["rank"] == n and m > n:
if isComplexType(t):
resids = sum(abs(bstar.T[n:, :]) ** 2, axis=0).astype(
result_real_t, copy=False
)
else:
resids = sum((bstar.T[n:, :]) ** 2, axis=0).astype(
result_real_t, copy=False
)
st = s[: min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results["rank"], st
|
def lstsq(a, b, rcond="warn"):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
.. versionchanged:: 1.14.0
If not set, a FutureWarning is given. The previous default
of ``-1`` will use the machine precision as `rcond` parameter,
the new default will use the machine precision times `max(M, N)`.
To silence the warning and use the new default, use ``rcond=None``,
to keep using the old behavior, use ``rcond=-1``.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError("Incompatible dimensions")
t, result_t = _commonType(a, b)
# Determine default rcond value
if rcond == "warn":
# 2017-08-19, 1.14.0
warnings.warn(
"`rcond` parameter will change to the default of "
"machine precision times ``max(M, N)`` where M and N "
"are the input matrix dimensions.\n"
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
FutureWarning,
stacklevel=2,
)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * ldb
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[: b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
# This line:
# * is incorrect, according to the LAPACK documentation
# * raises a ValueError if min(m,n) == 0
# * should not be calculated here anyway, as LAPACK should calculate
# `liwork` for us. But that only works if our version of lapack does
# not have this bug:
# http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
# Lapack_lite does have that bug...
nlvl = max(0, int(math.log(float(min(m, n)) / 2.0)) + 1)
iwork = zeros((3 * min(m, n) * nlvl + 11 * min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(
m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, -1, rwork, iwork, 0
)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros(
(
ldb,
n_rhs,
),
real_t,
)
results = lapack_lite.dgelsd(
m, n, n_rhs, a_real, m, bstar_real, ldb, s, rcond, 0, rwork, -1, iwork, 0
)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(
m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, lwork, rwork, iwork, 0
)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(
m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, -1, iwork, 0
)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(
m, n, n_rhs, a, m, bstar, ldb, s, rcond, 0, work, lwork, iwork, 0
)
if results["info"] > 0:
raise LinAlgError("SVD did not converge in Linear Least Squares")
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results["rank"] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:]) ** 2)], dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:]) ** 2)], dtype=result_real_t)
else:
x = array(transpose(bstar)[:n, :], dtype=result_t, copy=True)
if results["rank"] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:, :]) ** 2, axis=0).astype(
result_real_t, copy=False
)
else:
resids = sum((transpose(bstar)[n:, :]) ** 2, axis=0).astype(
result_real_t, copy=False
)
st = s[: min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results["rank"], st
|
https://github.com/numpy/numpy/issues/8826
|
a = np.stack((np.eye(3),)*4, axis=0)
ai = np.linalg.inv(a)
assert (a == ai).all()
api = np.linalg.pinv(a)
Traceback (most recent call last):
File "<pyshell#9>", line 1, in <module>
np.linalg.pinv(a)
File "C:\Program Files\Python 3.5\lib\site-packages\numpy\linalg\linalg.py", line 1668, in pinv
if s[i] > cutoff:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def pinv(a, rcond=1e-15):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
.. versionchanged:: 1.14
Can now operate on stacks of matrices
Parameters
----------
a : (..., M, N) array_like
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero. Broadcasts against the stack of matrices
Returns
-------
B : (..., N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
large = s > cutoff
s = divide(1, s, where=large, out=s)
s[~large] = 0
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
|
def pinv(a, rcond=1e-15):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, full_matrices=False)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond * maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1.0 / s[i]
else:
s[i] = 0.0
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
|
https://github.com/numpy/numpy/issues/8826
|
a = np.stack((np.eye(3),)*4, axis=0)
ai = np.linalg.inv(a)
assert (a == ai).all()
api = np.linalg.pinv(a)
Traceback (most recent call last):
File "<pyshell#9>", line 1, in <module>
np.linalg.pinv(a)
File "C:\Program Files\Python 3.5\lib\site-packages\numpy\linalg\linalg.py", line 1668, in pinv
if s[i] > cutoff:
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. Does not require that contents must be 0s and 1s, values
of 0 are interepreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has a
dtype of MaskType (bool). If the dtype is flexible, each field has
a boolean dtype. This is ignored when `m` is ``nomask``, in which
case ``nomask`` is always returned.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([ 0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False], dtype=bool)
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
'formats':[int, int]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i4'), ('mouse', '<i4')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
"""
if m is nomask:
return nomask
# Make sure the input dtype is valid.
dtype = make_mask_descr(dtype)
# legacy boolean special case: "existence of fields implies true"
if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
return np.ones(m.shape, dtype=dtype)
# Fill the mask in case there are missing data; turn it into an ndarray.
result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
# Bas les masques !
if shrink and (not result.dtype.names) and (not result.any()):
return nomask
else:
return result
|
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. Does not require that contents must be 0s and 1s, values
of 0 are interepreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has a
dtype of MaskType (bool). If the dtype is flexible, each field has
a boolean dtype. This is ignored when `m` is ``nomask``, in which
case ``nomask`` is always returned.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True], dtype=bool)
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([ 0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False], dtype=bool)
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
'formats':[int, int]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i4'), ('mouse', '<i4')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
"""
if m is nomask:
return nomask
# Make sure the input dtype is valid.
dtype = make_mask_descr(dtype)
# Fill the mask in case there are missing data; turn it into an ndarray.
result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
# Bas les masques !
if shrink and (not result.dtype.names) and (not result.any()):
return nomask
else:
return result
|
https://github.com/numpy/numpy/issues/2346
|
class TestAppendFieldsObj(TestCase):
"""
Test append_fields with arrays containing objects
"""
def setUp(self):
from datetime import date
self.data = dict(obj=date(2000, 1, 1))
def test_append_to_objects(self):
"Test append_fields when the base array contains objects"
obj = self.data['obj']
x = np.array([(obj, 1.), (obj, 2.)], dtype=[('A', object), ('B', float)])
y = np.array([10, 20], dtype=int)
test = append_fields(x, 'C', data=y, usemask=False)
control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
dtype=[('A', object), ('B', float), ('C', int)])
assert_equal(test, control)
#
def test_append_with_objects(self):
"Test append_fields when the appended data contains objects"
obj = self.data['obj']
x = np.array([(10, 1.), (20, 2.)], dtype=[('A', int), ('B', float)])
y = np.array([obj, obj], dtype=object)
test = append_fields(y, 'C', data=y, dtypes=object, usemask=False)
control = np.array([(10, 1.0, obj), (20, 2.0, obj)],
dtype=[('A', int), ('B', float), ('C', object)])
assert_equal(test, control)
======================================================================
ERROR: Test append_fields when the base array contains objects
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/kolpakov/lib/python2.6/site-packages/numpy/lib/tests/test_recfunctions.py", line 406, in test_append_to_objects
test = append_fields(x, 'C', data=y, usemask=False)
File "/home/kolpakov/lib/python2.6/site-packages/numpy/lib/recfunctions.py", line 629, in append_fields
base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
File "/home/kolpakov/lib/python2.6/site-packages/numpy/lib/recfunctions.py", line 399, in merge_arrays
return seqarrays.view(dtype=seqdtype, type=seqtype)
TypeError: Cannot change data-type for object array.
======================================================================
ERROR: Test append_fields when the appended data contains objects
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/kolpakov/lib/python2.6/site-packages/numpy/lib/tests/test_recfunctions.py", line 416, in test_append_with_objects
test = append_fields(y, 'C', data=y, dtypes=object, usemask=False)
File "/home/kolpakov/lib/python2.6/site-packages/numpy/lib/recfunctions.py", line 627, in append_fields
for (a, n, d) in zip(data, names, dtypes)]
TypeError: Cannot change data-type for object array.
----------------------------------------------------------------------
Ran 33 tests in 0.062s
|
TypeError
|
def matrix_power(M, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
Parameters
----------
M : ndarray or matrix object
Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
with `m` a positive integer.
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
M**n : ndarray or matrix object
The return value is the same shape and type as `M`;
if the exponent is positive or zero then the type of the
elements is the same as those of `M`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
If the matrix is not numerically invertible.
See Also
--------
matrix
Provides an equivalent function as the exponentiation operator
(``**``, not ``^``).
Examples
--------
>>> from numpy import linalg as LA
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> LA.matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
matrix([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> LA.matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
M = asanyarray(M)
if M.ndim != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
if not issubdtype(type(n), N.integer):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
if n == 0:
M = M.copy()
M[:] = identity(M.shape[0])
return M
elif n < 0:
M = inv(M)
n *= -1
result = M
if n <= 3:
for _ in range(n - 1):
result = N.dot(result, M)
return result
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
beta = binary_repr(n)
Z, q, t = M, 0, len(beta)
while beta[t - q - 1] == "0":
Z = N.dot(Z, Z)
q += 1
result = Z
for k in range(q + 1, t):
Z = N.dot(Z, Z)
if beta[t - k - 1] == "1":
result = N.dot(result, Z)
return result
|
def matrix_power(M, n):
"""
Raise a square matrix to the (integer) power `n`.
For positive integers `n`, the power is computed by repeated matrix
squarings and matrix multiplications. If ``n == 0``, the identity matrix
of the same shape as M is returned. If ``n < 0``, the inverse
is computed and then raised to the ``abs(n)``.
Parameters
----------
M : ndarray or matrix object
Matrix to be "powered." Must be square, i.e. ``M.shape == (m, m)``,
with `m` a positive integer.
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
Returns
-------
M**n : ndarray or matrix object
The return value is the same shape and type as `M`;
if the exponent is positive or zero then the type of the
elements is the same as those of `M`. If the exponent is
negative the elements are floating-point.
Raises
------
LinAlgError
If the matrix is not numerically invertible.
See Also
--------
matrix
Provides an equivalent function as the exponentiation operator
(``**``, not ``^``).
Examples
--------
>>> from numpy import linalg as LA
>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit
>>> LA.matrix_power(i, 3) # should = -i
array([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(np.matrix(i), 3) # matrix arg returns matrix
matrix([[ 0, -1],
[ 1, 0]])
>>> LA.matrix_power(i, 0)
array([[1, 0],
[0, 1]])
>>> LA.matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements
array([[ 0., 1.],
[-1., 0.]])
Somewhat more sophisticated example
>>> q = np.zeros((4, 4))
>>> q[0:2, 0:2] = -i
>>> q[2:4, 2:4] = i
>>> q # one of the three quaternion units not equal to 1
array([[ 0., -1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 0., 0., 1.],
[ 0., 0., -1., 0.]])
>>> LA.matrix_power(q, 2) # = -np.eye(4)
array([[-1., 0., 0., 0.],
[ 0., -1., 0., 0.],
[ 0., 0., -1., 0.],
[ 0., 0., 0., -1.]])
"""
M = asanyarray(M)
if M.ndim != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
if not issubdtype(type(n), int):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
if n == 0:
M = M.copy()
M[:] = identity(M.shape[0])
return M
elif n < 0:
M = inv(M)
n *= -1
result = M
if n <= 3:
for _ in range(n - 1):
result = N.dot(result, M)
return result
# binary decomposition to reduce the number of Matrix
# multiplications for n > 3.
beta = binary_repr(n)
Z, q, t = M, 0, len(beta)
while beta[t - q - 1] == "0":
Z = N.dot(Z, Z)
q += 1
result = Z
for k in range(q + 1, t):
Z = N.dot(Z, Z)
if beta[t - k - 1] == "1":
result = N.dot(result, Z)
return result
|
https://github.com/numpy/numpy/issues/9506
|
np.matrix(np.eye(3)) ** np.uint8(1)
Traceback (most recent call last):
File "<pyshell#53>", line 1, in <module>
np.matrix(np.eye(3)) ** np.uint8(1)
File "C:\Users\wiese\AppData\Roaming\Python\Python35\site-packages\numpy\matrixlib\defmatrix.py", line 356, in __pow__
return matrix_power(self, other)
File "C:\Users\wiese\AppData\Roaming\Python\Python35\site-packages\numpy\matrixlib\defmatrix.py", line 175, in matrix_power
raise TypeError("exponent must be an integer")
TypeError: exponent must be an integer
|
TypeError
|
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
The default filling value depends on the datatype of the input
array or the type of the input scalar:
======== ========
datatype default
======== ========
bool True
int 999999
float 1.e20
complex 1.e20+0j
object '?'
string 'N/A'
======== ========
For structured types, a structured scalar is returned, with each field the
default fill value for its type.
For subarray types, the fill value is an array of the same size containing
the default scalar fill value.
Parameters
----------
obj : ndarray, dtype or scalar
The array data-type or scalar for which the default fill value
is returned.
Returns
-------
fill_value : scalar
The default fill value.
Examples
--------
>>> np.ma.default_fill_value(1)
999999
>>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
1e+20
>>> np.ma.default_fill_value(np.dtype(complex))
(1e+20+0j)
"""
def _scalar_fill_value(dtype):
if dtype.kind in "Mm":
return default_filler.get(dtype.str[1:], "?")
else:
return default_filler.get(dtype.kind, "?")
dtype = _get_dtype_of(obj)
return _recursive_fill_value(dtype, _scalar_fill_value)
|
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
The default filling value depends on the datatype of the input
array or the type of the input scalar:
======== ========
datatype default
======== ========
bool True
int 999999
float 1.e20
complex 1.e20+0j
object '?'
string 'N/A'
======== ========
Parameters
----------
obj : ndarray, dtype or scalar
The array data-type or scalar for which the default fill value
is returned.
Returns
-------
fill_value : scalar
The default fill value.
Examples
--------
>>> np.ma.default_fill_value(1)
999999
>>> np.ma.default_fill_value(np.array([1.1, 2., np.pi]))
1e+20
>>> np.ma.default_fill_value(np.dtype(complex))
(1e+20+0j)
"""
if hasattr(obj, "dtype"):
defval = _check_fill_value(None, obj.dtype)
elif isinstance(obj, np.dtype):
if obj.subdtype:
defval = default_filler.get(obj.subdtype[0].kind, "?")
elif obj.kind in "Mm":
defval = default_filler.get(obj.str[1:], "?")
else:
defval = default_filler.get(obj.kind, "?")
elif isinstance(obj, float):
defval = default_filler["f"]
elif isinstance(obj, int) or isinstance(obj, long):
defval = default_filler["i"]
elif isinstance(obj, bytes):
defval = default_filler["S"]
elif isinstance(obj, unicode):
defval = default_filler["U"]
elif isinstance(obj, complex):
defval = default_filler["c"]
else:
defval = default_filler["O"]
return defval
|
https://github.com/numpy/numpy/issues/8069
|
In [32]: A = np.arange(100, dtype='i8').view("(4)i4,(4)i4")
In [33]: Am = np.ma.MaskedArray(A, np.zeros_like(A))
In [34]: Am.sort()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-9b8a457bb910> in <module>()
----> 1 Am.sort()
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in sort(self, axis, kind, order, endwith, fill_value)
5417 filler = fill_value
5418
-> 5419 sidx = self.filled(filler).argsort(axis=axis, kind=kind,
5420 order=order)
5421 # save meshgrid memory for 1d arrays
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in filled(self, fill_value)
3625 fill_value = self.fill_value
3626 else:
-> 3627 fill_value = _check_fill_value(fill_value, self.dtype)
3628
3629 if self is masked_singleton:
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in _check_fill_value(fill_value, ndtype)
428 descr = ndtype.descr
429 fill_value = np.asarray(fill_value, dtype=object)
--> 430 fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
431 dtype=ndtype)
432 else:
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in _recursive_set_fill_value(fillvalue, dtypedescr)
392 output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
393 else:
--> 394 output_value.append(np.array(fval, dtype=cdtype).item())
395 return tuple(output_value)
396
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
|
TypeError
|
def minimum_fill_value(obj):
"""
Return the maximum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the minimum of an array with a given dtype.
Parameters
----------
obj : ndarray, dtype or scalar
An object that can be queried for it's numeric type.
Returns
-------
val : scalar
The maximum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
maximum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.minimum_fill_value(a)
127
>>> a = np.int32()
>>> ma.minimum_fill_value(a)
2147483647
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.minimum_fill_value(a)
127
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.minimum_fill_value(a)
inf
"""
return _extremum_fill_value(obj, min_filler, "minimum")
|
def minimum_fill_value(obj):
"""
Return the maximum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the minimum of an array with a given dtype.
Parameters
----------
obj : ndarray or dtype
An object that can be queried for it's numeric type.
Returns
-------
val : scalar
The maximum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
maximum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.minimum_fill_value(a)
127
>>> a = np.int32()
>>> ma.minimum_fill_value(a)
2147483647
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.minimum_fill_value(a)
127
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.minimum_fill_value(a)
inf
"""
errmsg = "Unsuitable type for calculating minimum."
if hasattr(obj, "dtype"):
return _recursive_extremum_fill_value(obj.dtype, min_filler)
elif isinstance(obj, float):
return min_filler[ntypes.typeDict["float_"]]
elif isinstance(obj, int):
return min_filler[ntypes.typeDict["int_"]]
elif isinstance(obj, long):
return min_filler[ntypes.typeDict["uint"]]
elif isinstance(obj, np.dtype):
return min_filler[obj]
else:
raise TypeError(errmsg)
|
https://github.com/numpy/numpy/issues/8069
|
In [32]: A = np.arange(100, dtype='i8').view("(4)i4,(4)i4")
In [33]: Am = np.ma.MaskedArray(A, np.zeros_like(A))
In [34]: Am.sort()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-9b8a457bb910> in <module>()
----> 1 Am.sort()
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in sort(self, axis, kind, order, endwith, fill_value)
5417 filler = fill_value
5418
-> 5419 sidx = self.filled(filler).argsort(axis=axis, kind=kind,
5420 order=order)
5421 # save meshgrid memory for 1d arrays
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in filled(self, fill_value)
3625 fill_value = self.fill_value
3626 else:
-> 3627 fill_value = _check_fill_value(fill_value, self.dtype)
3628
3629 if self is masked_singleton:
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in _check_fill_value(fill_value, ndtype)
428 descr = ndtype.descr
429 fill_value = np.asarray(fill_value, dtype=object)
--> 430 fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
431 dtype=ndtype)
432 else:
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in _recursive_set_fill_value(fillvalue, dtypedescr)
392 output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
393 else:
--> 394 output_value.append(np.array(fval, dtype=cdtype).item())
395 return tuple(output_value)
396
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
|
TypeError
|
def maximum_fill_value(obj):
"""
Return the minimum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the maximum of an array with a given dtype.
Parameters
----------
obj : ndarray, dtype or scalar
An object that can be queried for it's numeric type.
Returns
-------
val : scalar
The minimum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
minimum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.maximum_fill_value(a)
-128
>>> a = np.int32()
>>> ma.maximum_fill_value(a)
-2147483648
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.maximum_fill_value(a)
-128
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.maximum_fill_value(a)
-inf
"""
return _extremum_fill_value(obj, max_filler, "maximum")
|
def maximum_fill_value(obj):
"""
Return the minimum value that can be represented by the dtype of an object.
This function is useful for calculating a fill value suitable for
taking the maximum of an array with a given dtype.
Parameters
----------
obj : {ndarray, dtype}
An object that can be queried for it's numeric type.
Returns
-------
val : scalar
The minimum representable value.
Raises
------
TypeError
If `obj` isn't a suitable numeric type.
See Also
--------
minimum_fill_value : The inverse function.
set_fill_value : Set the filling value of a masked array.
MaskedArray.fill_value : Return current fill value.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.int8()
>>> ma.maximum_fill_value(a)
-128
>>> a = np.int32()
>>> ma.maximum_fill_value(a)
-2147483648
An array of numeric data can also be passed.
>>> a = np.array([1, 2, 3], dtype=np.int8)
>>> ma.maximum_fill_value(a)
-128
>>> a = np.array([1, 2, 3], dtype=np.float32)
>>> ma.maximum_fill_value(a)
-inf
"""
errmsg = "Unsuitable type for calculating maximum."
if hasattr(obj, "dtype"):
return _recursive_extremum_fill_value(obj.dtype, max_filler)
elif isinstance(obj, float):
return max_filler[ntypes.typeDict["float_"]]
elif isinstance(obj, int):
return max_filler[ntypes.typeDict["int_"]]
elif isinstance(obj, long):
return max_filler[ntypes.typeDict["uint"]]
elif isinstance(obj, np.dtype):
return max_filler[obj]
else:
raise TypeError(errmsg)
|
https://github.com/numpy/numpy/issues/8069
|
In [32]: A = np.arange(100, dtype='i8').view("(4)i4,(4)i4")
In [33]: Am = np.ma.MaskedArray(A, np.zeros_like(A))
In [34]: Am.sort()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-9b8a457bb910> in <module>()
----> 1 Am.sort()
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in sort(self, axis, kind, order, endwith, fill_value)
5417 filler = fill_value
5418
-> 5419 sidx = self.filled(filler).argsort(axis=axis, kind=kind,
5420 order=order)
5421 # save meshgrid memory for 1d arrays
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in filled(self, fill_value)
3625 fill_value = self.fill_value
3626 else:
-> 3627 fill_value = _check_fill_value(fill_value, self.dtype)
3628
3629 if self is masked_singleton:
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in _check_fill_value(fill_value, ndtype)
428 descr = ndtype.descr
429 fill_value = np.asarray(fill_value, dtype=object)
--> 430 fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
431 dtype=ndtype)
432 else:
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in _recursive_set_fill_value(fillvalue, dtypedescr)
392 output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
393 else:
--> 394 output_value.append(np.array(fval, dtype=cdtype).item())
395 return tuple(output_value)
396
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
|
TypeError
|
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype.
If fill_value is not None, its value is forced to the given dtype.
The result is always a 0d array.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=fdtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, fdtype))
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(
_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype
)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in "OSVU"):
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int.
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except OverflowError:
# Raise TypeError instead of OverflowError. OverflowError
# is seldom used, and the real problem here is that the
# passed fill_value is not compatible with the ndtype.
err_msg = "Fill value %s overflows dtype %s"
raise TypeError(err_msg % (fill_value, ndtype))
return np.array(fill_value)
|
def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
If fill_value is None, it is set to the default corresponding to the dtype
if this latter is standard (no fields). If the datatype is flexible (named
fields), fill_value is set to a tuple whose elements are the default fill
values corresponding to each field.
If fill_value is not None, its value is forced to the given dtype.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
if fields:
fill_value = np.array(
_recursive_set_default_fill_value(ndtype), dtype=ndtype
)
else:
fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=fdtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
raise ValueError(err_msg % (fill_value, fdtype))
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(
_recursive_set_fill_value(fill_value, ndtype), dtype=ndtype
)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in "OSVU"):
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int.
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except OverflowError:
# Raise TypeError instead of OverflowError. OverflowError
# is seldom used, and the real problem here is that the
# passed fill_value is not compatible with the ndtype.
err_msg = "Fill value %s overflows dtype %s"
raise TypeError(err_msg % (fill_value, ndtype))
return np.array(fill_value)
|
https://github.com/numpy/numpy/issues/8069
|
In [32]: A = np.arange(100, dtype='i8').view("(4)i4,(4)i4")
In [33]: Am = np.ma.MaskedArray(A, np.zeros_like(A))
In [34]: Am.sort()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-34-9b8a457bb910> in <module>()
----> 1 Am.sort()
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in sort(self, axis, kind, order, endwith, fill_value)
5417 filler = fill_value
5418
-> 5419 sidx = self.filled(filler).argsort(axis=axis, kind=kind,
5420 order=order)
5421 # save meshgrid memory for 1d arrays
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in filled(self, fill_value)
3625 fill_value = self.fill_value
3626 else:
-> 3627 fill_value = _check_fill_value(fill_value, self.dtype)
3628
3629 if self is masked_singleton:
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in _check_fill_value(fill_value, ndtype)
428 descr = ndtype.descr
429 fill_value = np.asarray(fill_value, dtype=object)
--> 430 fill_value = np.array(_recursive_set_fill_value(fill_value, descr),
431 dtype=ndtype)
432 else:
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in _recursive_set_fill_value(fillvalue, dtypedescr)
392 output_value.append(tuple(_recursive_set_fill_value(fval, cdtype)))
393 else:
--> 394 output_value.append(np.array(fval, dtype=cdtype).item())
395 return tuple(output_value)
396
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
|
TypeError
|
def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
self.axis = axis
self.matrix = matrix
self.trans1d = trans1d
self.ndmin = ndmin
|
def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
self._axis = axis
self._matrix = matrix
self.axis = axis
self.matrix = matrix
self.col = 0
self.trans1d = trans1d
self.ndmin = ndmin
|
https://github.com/numpy/numpy/issues/8815
|
Traceback (most recent call last):
File "test.py", line 13, in <module>
print(numpy.r_[a, b.reshape((3, 3, 1))])
File "/usr/local/lib/python2.7/dist-packages/numpy/lib/index_tricks.py", line 338, in __getitem__
res = _nx.concatenate(tuple(objs), axis=self.axis)
ValueError: all the input array dimensions except for the concatenation axis must match exactly
|
ValueError
|
def __getitem__(self, key):
# handle matrix builder syntax
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
# copy attributes, since they can be overriden in the first argument
trans1d = self.trans1d
ndmin = self.ndmin
matrix = self.matrix
axis = self.axis
objs = []
scalars = []
arraytypes = []
scalartypes = []
for k, item in enumerate(key):
scalar = False
if isinstance(item, slice):
step = item.step
start = item.start
stop = item.stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = function_base.linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(item, str):
if k != 0:
raise ValueError("special directives must be the first entry.")
if item in ("r", "c"):
matrix = True
col = item == "c"
continue
if "," in item:
vec = item.split(",")
try:
axis, ndmin = [int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except:
raise ValueError("unknown special directive")
try:
axis = int(item)
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
elif type(item) in ScalarType:
newobj = array(item, ndmin=ndmin)
scalars.append(len(objs))
scalar = True
scalartypes.append(newobj.dtype)
else:
newobj = item
if ndmin > 1:
tempobj = array(newobj, copy=False, subok=True)
newobj = array(newobj, copy=False, subok=True, ndmin=ndmin)
if trans1d != -1 and tempobj.ndim < ndmin:
k2 = ndmin - tempobj.ndim
if trans1d < 0:
trans1d += k2 + 1
defaxes = list(range(ndmin))
k1 = trans1d
axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
newobj = newobj.transpose(axes)
del tempobj
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
# Ensure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtype)
res = self.concatenate(tuple(objs), axis=axis)
if matrix:
oldndim = res.ndim
res = self.makemat(res)
if oldndim == 1 and col:
res = res.T
return res
|
def __getitem__(self, key):
trans1d = self.trans1d
ndmin = self.ndmin
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrix.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
arraytypes = []
scalartypes = []
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = function_base.linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(key[k], str):
if k != 0:
raise ValueError("special directives must be the first entry.")
key0 = key[0]
if key0 in "rc":
self.matrix = True
self.col = key0 == "c"
continue
if "," in key0:
vec = key0.split(",")
try:
self.axis, ndmin = [int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except:
raise ValueError("unknown special directive")
try:
self.axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
elif type(key[k]) in ScalarType:
newobj = array(key[k], ndmin=ndmin)
scalars.append(k)
scalar = True
scalartypes.append(newobj.dtype)
else:
newobj = key[k]
if ndmin > 1:
tempobj = array(newobj, copy=False, subok=True)
newobj = array(newobj, copy=False, subok=True, ndmin=ndmin)
if trans1d != -1 and tempobj.ndim < ndmin:
k2 = ndmin - tempobj.ndim
if trans1d < 0:
trans1d += k2 + 1
defaxes = list(range(ndmin))
k1 = trans1d
axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
newobj = newobj.transpose(axes)
del tempobj
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
# Esure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtype)
res = _nx.concatenate(tuple(objs), axis=self.axis)
return self._retval(res)
|
https://github.com/numpy/numpy/issues/8815
|
Traceback (most recent call last):
File "test.py", line 13, in <module>
print(numpy.r_[a, b.reshape((3, 3, 1))])
File "/usr/local/lib/python2.7/dist-packages/numpy/lib/index_tricks.py", line 338, in __getitem__
res = _nx.concatenate(tuple(objs), axis=self.axis)
ValueError: all the input array dimensions except for the concatenation axis must match exactly
|
ValueError
|
def __getitem__(self, key):
# matrix builder syntax, like 'a, b; c, d'
if isinstance(key, str):
raise MAError("Unavailable for masked array.")
return super(MAxisConcatenator, self).__getitem__(key)
|
def __getitem__(self, key):
if isinstance(key, str):
raise MAError("Unavailable for masked array.")
if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
final_dtypedescr = None
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = np.linspace(start, stop, num=size)
else:
newobj = np.arange(start, stop, step)
elif isinstance(key[k], str):
if key[k] in "rc":
self.matrix = True
self.col = key[k] == "c"
continue
try:
self.axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("Unknown special directive")
elif type(key[k]) in np.ScalarType:
newobj = asarray([key[k]])
scalars.append(k)
scalar = True
else:
newobj = key[k]
objs.append(newobj)
if isinstance(newobj, ndarray) and not scalar:
if final_dtypedescr is None:
final_dtypedescr = newobj.dtype
elif newobj.dtype > final_dtypedescr:
final_dtypedescr = newobj.dtype
if final_dtypedescr is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtypedescr)
res = concatenate(tuple(objs), axis=self.axis)
return self._retval(res)
|
https://github.com/numpy/numpy/issues/8815
|
Traceback (most recent call last):
File "test.py", line 13, in <module>
print(numpy.r_[a, b.reshape((3, 3, 1))])
File "/usr/local/lib/python2.7/dist-packages/numpy/lib/index_tricks.py", line 338, in __getitem__
res = _nx.concatenate(tuple(objs), axis=self.axis)
ValueError: all the input array dimensions except for the concatenation axis must match exactly
|
ValueError
|
def __getitem__(self, key):
# handle matrix builder syntax
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
# copy attributes, since they can be overriden in the first argument
trans1d = self.trans1d
ndmin = self.ndmin
matrix = self.matrix
axis = self.axis
objs = []
scalars = []
arraytypes = []
scalartypes = []
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = function_base.linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(key[k], str):
if k != 0:
raise ValueError("special directives must be the first entry.")
key0 = key[0]
if key0 in "rc":
matrix = True
col = key0 == "c"
continue
if "," in key0:
vec = key0.split(",")
try:
axis, ndmin = [int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except:
raise ValueError("unknown special directive")
try:
axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
elif type(key[k]) in ScalarType:
newobj = array(key[k], ndmin=ndmin)
scalars.append(k)
scalar = True
scalartypes.append(newobj.dtype)
else:
newobj = key[k]
if ndmin > 1:
tempobj = array(newobj, copy=False, subok=True)
newobj = array(newobj, copy=False, subok=True, ndmin=ndmin)
if trans1d != -1 and tempobj.ndim < ndmin:
k2 = ndmin - tempobj.ndim
if trans1d < 0:
trans1d += k2 + 1
defaxes = list(range(ndmin))
k1 = trans1d
axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
newobj = newobj.transpose(axes)
del tempobj
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
# Ensure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtype)
res = self.concatenate(tuple(objs), axis=axis)
if matrix:
oldndim = res.ndim
res = makemat(res)
if oldndim == 1 and col:
res = res.T
return res
|
def __getitem__(self, key):
trans1d = self.trans1d
ndmin = self.ndmin
if isinstance(key, str):
frame = sys._getframe().f_back
mymat = matrix.bmat(key, frame.f_globals, frame.f_locals)
return mymat
if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
arraytypes = []
scalartypes = []
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = function_base.linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
newobj = array(newobj, copy=False, ndmin=ndmin)
if trans1d != -1:
newobj = newobj.swapaxes(-1, trans1d)
elif isinstance(key[k], str):
if k != 0:
raise ValueError("special directives must be the first entry.")
key0 = key[0]
if key0 in "rc":
self.matrix = True
self.col = key0 == "c"
continue
if "," in key0:
vec = key0.split(",")
try:
self.axis, ndmin = [int(x) for x in vec[:2]]
if len(vec) == 3:
trans1d = int(vec[2])
continue
except:
raise ValueError("unknown special directive")
try:
self.axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("unknown special directive")
elif type(key[k]) in ScalarType:
newobj = array(key[k], ndmin=ndmin)
scalars.append(k)
scalar = True
scalartypes.append(newobj.dtype)
else:
newobj = key[k]
if ndmin > 1:
tempobj = array(newobj, copy=False, subok=True)
newobj = array(newobj, copy=False, subok=True, ndmin=ndmin)
if trans1d != -1 and tempobj.ndim < ndmin:
k2 = ndmin - tempobj.ndim
if trans1d < 0:
trans1d += k2 + 1
defaxes = list(range(ndmin))
k1 = trans1d
axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
newobj = newobj.transpose(axes)
del tempobj
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
# Esure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtype)
res = self.concatenate(tuple(objs), axis=self.axis)
return self._retval(res)
|
https://github.com/numpy/numpy/issues/8815
|
Traceback (most recent call last):
File "test.py", line 13, in <module>
print(numpy.r_[a, b.reshape((3, 3, 1))])
File "/usr/local/lib/python2.7/dist-packages/numpy/lib/index_tricks.py", line 338, in __getitem__
res = _nx.concatenate(tuple(objs), axis=self.axis)
ValueError: all the input array dimensions except for the concatenation axis must match exactly
|
ValueError
|
def __getitem__(self, indx):
"""
x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
dout = self.data[indx]
# We could directly use ndarray.__getitem__ on self.
# But then we would have to modify __array_finalize__ to prevent the
# mask of being reshaped if it hasn't been set up properly yet
# So it's easier to stick to the current version
_mask = self._mask
# Did we extract a single item?
if not getattr(dout, "ndim", False):
# A record
if isinstance(dout, np.void):
mask = _mask[indx]
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
dout = mvoid(dout, mask=mask, hardmask=self._hardmask)
# Just a scalar
elif _mask is not nomask and _mask[indx]:
return masked
elif self.dtype.type is np.object_ and self.dtype is not dout.dtype:
# self contains an object array of arrays (yes, that happens).
# If masked, turn into a MaskedArray, with everything masked.
if _mask is not nomask and _mask[indx]:
return MaskedArray(dout, mask=True)
else:
# Force dout to MA
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value
if isinstance(indx, basestring):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
# If we're indexing a multidimensional field in a
# structured array (such as dtype("(2,)i2,(2,)i1")),
# dimensionality goes up (M[field].ndim == M.ndim +
# len(M.dtype[field].shape)). That's fine for
# M[field] but problematic for M[field].fill_value
# which should have shape () to avoid breaking several
# methods. There is no great way out, so set to
# first element. See issue #6723.
if dout._fill_value.ndim > 0:
if not (dout._fill_value == dout._fill_value.flat[0]).all():
warnings.warn(
"Upon accessing multidimensional field "
"{indx:s}, need to keep dimensionality "
"of fill_value at 0. Discarding "
"heterogeneous fill_value and setting "
"all to {fv!s}.".format(indx=indx, fv=dout._fill_value[0])
)
dout._fill_value = dout._fill_value.flat[0]
dout._isfield = True
# Update the mask if needed
if _mask is not nomask:
dout._mask = _mask[indx]
# set shape to match that of data; this is needed for matrices
dout._mask.shape = dout.shape
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long
return dout
|
def __getitem__(self, indx):
"""
x.__getitem__(y) <==> x[y]
Return the item described by i, as a masked array.
"""
dout = self.data[indx]
# We could directly use ndarray.__getitem__ on self.
# But then we would have to modify __array_finalize__ to prevent the
# mask of being reshaped if it hasn't been set up properly yet
# So it's easier to stick to the current version
_mask = self._mask
# Did we extract a single item?
if not getattr(dout, "ndim", False):
# A record
if isinstance(dout, np.void):
mask = _mask[indx]
# We should always re-cast to mvoid, otherwise users can
# change masks on rows that already have masked values, but not
# on rows that have no masked values, which is inconsistent.
dout = mvoid(dout, mask=mask, hardmask=self._hardmask)
# Just a scalar
elif _mask is not nomask and _mask[indx]:
return masked
elif self.dtype.type is np.object_ and self.dtype is not dout.dtype:
# self contains an object array of arrays (yes, that happens).
# If masked, turn into a MaskedArray, with everything masked.
if _mask is not nomask and _mask[indx]:
return MaskedArray(dout, mask=True)
else:
# Force dout to MA
dout = dout.view(type(self))
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value
if isinstance(indx, basestring):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
dout._isfield = True
# Update the mask if needed
if _mask is not nomask:
dout._mask = _mask[indx]
# set shape to match that of data; this is needed for matrices
dout._mask.shape = dout.shape
dout._sharedmask = True
# Note: Don't try to check for m.any(), that'll take too long
return dout
|
https://github.com/numpy/numpy/issues/6723
|
In [332]: A = ma.masked_array(data=[([0,1,2],), ([3,4,5],)], mask=[([True, False, False],), ([False, True, False],)], dtype=[("A", ">i2", (3,))])
In [339]: A["A"][:, 0].filled()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-339-ddde509d73bf> in <module>()
----> 1 A["A"][:, 0].filled()
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in filled(self, fill_value)
3573 result = self._data.copy('K')
3574 try:
-> 3575 np.copyto(result, fill_value, where=m)
3576 except (TypeError, AttributeError):
3577 fill_value = narray(fill_value, dtype=object)
ValueError: could not broadcast input array from shape (3) into shape (2)
In [341]: A.shape, A.fill_value.shape, A["A"].shape, A["A"].fill_value.shape, A["A"][:, 0].shape, A["A"][:, 0].fill_value.shape
Out[341]: ((2,), (), (2, 3), (3,), (2,), (3,))
|
ValueError
|
def where(condition, x=_NoValue, y=_NoValue):
"""
Return a masked array with elements from x or y, depending on condition.
Returns a masked array, shaped like condition, where the elements
are from `x` when `condition` is True, and from `y` otherwise.
If neither `x` nor `y` are given, the function returns a tuple of
indices where `condition` is True (the result of
``condition.nonzero()``).
Parameters
----------
condition : array_like, bool
The condition to meet. For each True element, yield the corresponding
element from `x`, otherwise from `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same shape
as condition, or be broadcast-able to that shape.
Returns
-------
out : MaskedArray or tuple of ndarrays
The resulting masked array if `x` and `y` were given, otherwise
the result of ``condition.nonzero()``.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
Examples
--------
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
>>> print(x)
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
>>> np.ma.where(x > 5) # return the indices where x > 5
(array([2, 2]), array([0, 2]))
>>> print(np.ma.where(x > 5, x, -3.1416))
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
[6.0 -- 8.0]]
"""
# handle the single-argument case
missing = (x is _NoValue, y is _NoValue).count(True)
if missing == 1:
raise ValueError("Must provide both 'x' and 'y' or neither.")
if missing == 2:
return nonzero(condition)
# we only care if the condition is true - false or masked pick y
cf = filled(condition, False)
xd = getdata(x)
yd = getdata(y)
# we need the full arrays here for correct final dimensions
cm = getmaskarray(condition)
xm = getmaskarray(x)
ym = getmaskarray(y)
# deal with the fact that masked.dtype == float64, but we don't actually
# want to treat it as that.
if x is masked and y is not masked:
xd = np.zeros((), dtype=yd.dtype)
xm = np.ones((), dtype=ym.dtype)
elif y is masked and x is not masked:
yd = np.zeros((), dtype=xd.dtype)
ym = np.ones((), dtype=xm.dtype)
data = np.where(cf, xd, yd)
mask = np.where(cf, xm, ym)
mask = np.where(cm, np.ones((), dtype=mask.dtype), mask)
# collapse the mask, for backwards compatibility
if mask.dtype == np.bool_ and not mask.any():
mask = nomask
return masked_array(data, mask=mask)
|
def where(condition, x=_NoValue, y=_NoValue):
"""
Return a masked array with elements from x or y, depending on condition.
Returns a masked array, shaped like condition, where the elements
are from `x` when `condition` is True, and from `y` otherwise.
If neither `x` nor `y` are given, the function returns a tuple of
indices where `condition` is True (the result of
``condition.nonzero()``).
Parameters
----------
condition : array_like, bool
The condition to meet. For each True element, yield the corresponding
element from `x`, otherwise from `y`.
x, y : array_like, optional
Values from which to choose. `x` and `y` need to have the same shape
as condition, or be broadcast-able to that shape.
Returns
-------
out : MaskedArray or tuple of ndarrays
The resulting masked array if `x` and `y` were given, otherwise
the result of ``condition.nonzero()``.
See Also
--------
numpy.where : Equivalent function in the top-level NumPy module.
Examples
--------
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
>>> print(x)
[[0.0 -- 2.0]
[-- 4.0 --]
[6.0 -- 8.0]]
>>> np.ma.where(x > 5) # return the indices where x > 5
(array([2, 2]), array([0, 2]))
>>> print(np.ma.where(x > 5, x, -3.1416))
[[-3.1416 -- -3.1416]
[-- -3.1416 --]
[6.0 -- 8.0]]
"""
missing = (x is _NoValue, y is _NoValue).count(True)
if missing == 1:
raise ValueError("Must provide both 'x' and 'y' or neither.")
if missing == 2:
return filled(condition, 0).nonzero()
# Both x and y are provided
# Get the condition
fc = filled(condition, 0).astype(MaskType)
notfc = np.logical_not(fc)
# Get the data
xv = getdata(x)
yv = getdata(y)
if x is masked:
ndtype = yv.dtype
elif y is masked:
ndtype = xv.dtype
else:
ndtype = np.find_common_type([xv.dtype, yv.dtype], [])
# Construct an empty array and fill it
d = np.empty(fc.shape, dtype=ndtype).view(MaskedArray)
np.copyto(d._data, xv.astype(ndtype), where=fc)
np.copyto(d._data, yv.astype(ndtype), where=notfc)
# Create an empty mask and fill it
mask = np.zeros(fc.shape, dtype=MaskType)
np.copyto(mask, getmask(x), where=fc)
np.copyto(mask, getmask(y), where=notfc)
mask |= getmaskarray(condition)
# Use d._mask instead of d.mask to avoid copies
d._mask = mask if mask.any() else nomask
return d
|
https://github.com/numpy/numpy/issues/8599
|
x = np.eye(3)
y = np.eye(3)
np.where([0, 1, 0], x, y)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
np.ma.where([0, 1, 0], x, y)
Traceback (most recent call last):
File "<pyshell#24>", line 1, in <module>
np.ma.where([0, 1, 0], x, y)
File "C:\Program Files\Python 3.5\lib\site-packages\numpy\ma\core.py", line 6964, in where
np.copyto(d._data, xv.astype(ndtype), where=fc)
ValueError: could not broadcast input array from shape (3,3) into shape (3)
|
ValueError
|
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
Take a sequence of arrays and stack them horizontally to make
a single array. Rebuild arrays divided by `hsplit`.
This function continues to be supported for backward compatibility, but
you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack``
function was added in NumPy 1.10.
Parameters
----------
tup : sequence of ndarrays
All arrays must have the same shape along all but the second axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
concatenate : Join a sequence of arrays along an existing axis.
hsplit : Split array along second axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=1)``
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1, 2, 3, 2, 3, 4])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrs = [atleast_1d(_m) for _m in tup]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
|
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
Take a sequence of arrays and stack them horizontally to make
a single array. Rebuild arrays divided by `hsplit`.
This function continues to be supported for backward compatibility, but
you should prefer ``np.concatenate`` or ``np.stack``. The ``np.stack``
function was added in NumPy 1.10.
Parameters
----------
tup : sequence of ndarrays
All arrays must have the same shape along all but the second axis.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
concatenate : Join a sequence of arrays along an existing axis.
hsplit : Split array along second axis.
Notes
-----
Equivalent to ``np.concatenate(tup, axis=1)``
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1, 2, 3, 2, 3, 4])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
arrs = [atleast_1d(_m) for _m in tup]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
|
https://github.com/numpy/numpy/issues/8790
|
import numpy as np
np.hstack([ ])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python3/dist-packages/numpy/core/shape_base.py", line 277, in hstack
if arrs[0].ndim == 1:
IndexError: list index out of range
|
IndexError
|
def roots(self):
"""The roots of the polynomial, where self(x) == 0"""
return roots(self._coeffs)
|
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]) : int(non_zero[-1]) + 1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N - 2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
|
https://github.com/numpy/numpy/issues/8760
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/python3.5/inspect.py", line 2987, in signature
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
File "/python3.5/inspect.py", line 2737, in from_callable
follow_wrapper_chains=follow_wrapped)
File "/python3.5/inspect.py", line 2228, in _signature_from_callable
if _signature_is_builtin(obj):
File "/python3.5/inspect.py", line 1785, in _signature_is_builtin
obj in (type, object))
File "/python3.5/site-packages/numpy/lib/polynomial.py", line 1203, in __eq__
if self.coeffs.shape != other.coeffs.shape:
AttributeError: type object 'type' has no attribute 'coeffs'
|
AttributeError
|
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = (
"In the future extra properties will not be copied "
"across when constructing one poly1d from another"
)
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim="f")
if len(c_or_r) == 0:
c_or_r = NX.array([0.0])
self._coeffs = c_or_r
if variable is None:
variable = "x"
self._variable = variable
|
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__["variable"] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim="f")
if len(c_or_r) == 0:
c_or_r = NX.array([0.0])
self.__dict__["coeffs"] = c_or_r
self.__dict__["order"] = len(c_or_r) - 1
if variable is None:
variable = "x"
self.__dict__["variable"] = variable
|
https://github.com/numpy/numpy/issues/8760
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/python3.5/inspect.py", line 2987, in signature
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
File "/python3.5/inspect.py", line 2737, in from_callable
follow_wrapper_chains=follow_wrapped)
File "/python3.5/inspect.py", line 2228, in _signature_from_callable
if _signature_is_builtin(obj):
File "/python3.5/inspect.py", line 1785, in _signature_is_builtin
obj in (type, object))
File "/python3.5/site-packages/numpy/lib/polynomial.py", line 1203, in __eq__
if self.coeffs.shape != other.coeffs.shape:
AttributeError: type object 'type' has no attribute 'coeffs'
|
AttributeError
|
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
|
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
|
https://github.com/numpy/numpy/issues/8760
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/python3.5/inspect.py", line 2987, in signature
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
File "/python3.5/inspect.py", line 2737, in from_callable
follow_wrapper_chains=follow_wrapped)
File "/python3.5/inspect.py", line 2228, in _signature_from_callable
if _signature_is_builtin(obj):
File "/python3.5/inspect.py", line 1785, in _signature_is_builtin
obj in (type, object))
File "/python3.5/site-packages/numpy/lib/polynomial.py", line 1203, in __eq__
if self.coeffs.shape != other.coeffs.shape:
AttributeError: type object 'type' has no attribute 'coeffs'
|
AttributeError
|
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
|
def __ne__(self, other):
return not self.__eq__(other)
|
https://github.com/numpy/numpy/issues/8760
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/python3.5/inspect.py", line 2987, in signature
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
File "/python3.5/inspect.py", line 2737, in from_callable
follow_wrapper_chains=follow_wrapped)
File "/python3.5/inspect.py", line 2228, in _signature_from_callable
if _signature_is_builtin(obj):
File "/python3.5/inspect.py", line 1785, in _signature_is_builtin
obj in (type, object))
File "/python3.5/site-packages/numpy/lib/polynomial.py", line 1203, in __eq__
if self.coeffs.shape != other.coeffs.shape:
AttributeError: type object 'type' has no attribute 'coeffs'
|
AttributeError
|
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key - self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
|
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key - self.order, self.coeffs.dtype)
self.__dict__["coeffs"] = NX.concatenate((zr, self.coeffs))
self.__dict__["order"] = key
ind = 0
self.__dict__["coeffs"][ind] = val
return
|
https://github.com/numpy/numpy/issues/8760
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/python3.5/inspect.py", line 2987, in signature
return Signature.from_callable(obj, follow_wrapped=follow_wrapped)
File "/python3.5/inspect.py", line 2737, in from_callable
follow_wrapper_chains=follow_wrapped)
File "/python3.5/inspect.py", line 2228, in _signature_from_callable
if _signature_is_builtin(obj):
File "/python3.5/inspect.py", line 1785, in _signature_is_builtin
obj in (type, object))
File "/python3.5/site-packages/numpy/lib/polynomial.py", line 1203, in __eq__
if self.coeffs.shape != other.coeffs.shape:
AttributeError: type object 'type' has no attribute 'coeffs'
|
AttributeError
|
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior points and either first or second order accurate one-sides
(forward or backwards) differences at the boundaries.
The returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar or array, optional
Spacing between f values. Default unitary spacing for all dimensions.
Spacing can be specified using:
1. single scalar to specify a sample distance for all dimensions.
2. N scalars to specify a constant sample distance for each dimension.
i.e. `dx`, `dy`, `dz`, ...
3. N arrays to specify the coordinates of the values along each
dimension of F. The length of the array must match the size of
the corresponding dimension
4. Any combination of N scalars/arrays with the meaning of 2. and 3.
If `axis` is given, the number of varargs must equal the number of axes.
Default: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N-th order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
The default (axis = None) is to calculate the gradient for all the axes
of the input array. axis may be negative, in which case it counts from
the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : ndarray or list of ndarray
A set of ndarrays (or a single ndarray if there is only one dimension)
corresponding to the derivatives of f with respect to each dimension.
Each derivative has the same shape as f.
Examples
--------
>>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(f)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(f, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
Spacing can be also specified with an array that represents the coordinates
of the values F along the dimensions.
For instance a uniform spacing:
>>> x = np.arange(f.size)
>>> np.gradient(f, x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
Or a non uniform one:
>>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float)
>>> np.gradient(f, x)
array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
In this example the spacing is also specified:
uniform for axis=0 and non uniform for axis=1
>>> dx = 2.
>>> y = [1., 1.5, 3.5]
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y)
[array([[ 1. , 1. , -0.5],
[ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ],
[ 2. , 1.7, 0.5]])]
It is possible to specify how boundaries are treated using `edge_order`
>>> x = np.array([0, 1, 2, 3, 4])
>>> f = x**2
>>> np.gradient(f, edge_order=1)
array([ 1., 2., 4., 6., 7.])
>>> np.gradient(f, edge_order=2)
array([-0., 2., 4., 6., 8.])
The `axis` keyword can be used to specify a subset of axes of which the
gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
Notes
-----
Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continous
derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the
spacing the finite difference coefficients are computed by minimising
the consistency error :math:`\\eta_{i}`:
.. math::
\\eta_{i} = f_{i}^{\\left(1\\right)} -
\\left[ \\alpha f\\left(x_{i}\\right) +
\\beta f\\left(x_{i} + h_{d}\\right) +
\\gamma f\\left(x_{i}-h_{s}\\right)
\\right]
By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
with their Taylor series expansion, this translates into solving
the following the linear system:
.. math::
\\left\\{
\\begin{array}{r}
\\alpha+\\beta+\\gamma=0 \\\\
-\\beta h_{d}+\\gamma h_{s}=1 \\\\
\\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
\\end{array}
\\right.
The resulting approximation of :math:`f_{i}^{(1)}` is the following:
.. math::
\\hat f_{i}^{(1)} =
\\frac{
h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+ \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
- h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
{ h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
+ \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+ h_{s}h_{d}^{2}}{h_{d}
+ h_{s}}\\right)
It is worth noting that if :math:`h_{s}=h_{d}`
(i.e., data are evenly spaced)
we find the standard second order approximation:
.. math::
\\hat f_{i}^{(1)}=
\\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+ \\mathcal{O}\\left(h^{2}\\right)
With a similar procedure the forward/backward approximations used for
boundaries can be derived.
References
----------
.. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
(Texts in Applied Mathematics). New York: Springer.
.. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
in Geophysical Fluid Dynamics. New York: Springer.
.. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids,
Mathematics of Computation 51, no. 184 : 699-706.
`PDF <http://www.ams.org/journals/mcom/1988-51-184/
S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
axes = kwargs.pop("axis", None)
if axes is None:
axes = tuple(range(N))
# check axes to have correct type and no duplicate entries
if isinstance(axes, int):
axes = (axes,)
if not isinstance(axes, tuple):
raise TypeError("A tuple of integers or a single integer is required")
# normalize axis values:
axes = tuple(x + N if x < 0 else x for x in axes)
if max(axes) >= N or min(axes) < 0:
raise ValueError("'axis' entry is out of bounds")
len_axes = len(axes)
if len(set(axes)) != len_axes:
raise ValueError("duplicate value in 'axis'")
n = len(varargs)
if n == 0:
dx = [1.0] * len_axes
elif n == len_axes or (n == 1 and np.isscalar(varargs[0])):
dx = list(varargs)
for i, distances in enumerate(dx):
if np.isscalar(distances):
continue
if len(distances) != f.shape[axes[i]]:
raise ValueError(
"distances must be either scalars or match "
"the length of the corresponding dimension"
)
diffx = np.diff(dx[i])
# if distances are constant reduce to the scalar case
# since it brings a consistent speedup
if (diffx == diffx[0]).all():
diffx = diffx[0]
dx[i] = diffx
if len(dx) == 1:
dx *= len_axes
else:
raise TypeError("invalid number of arguments")
edge_order = kwargs.pop("edge_order", 1)
if kwargs:
raise TypeError(
'"{}" are not valid keyword arguments.'.format('", "'.join(kwargs.keys()))
)
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)] * N
slice2 = [slice(None)] * N
slice3 = [slice(None)] * N
slice4 = [slice(None)] * N
otype = f.dtype.char
if otype not in ["f", "d", "F", "D", "m", "M"]:
otype = "d"
# Difference of datetime64 elements results in timedelta64
if otype == "M":
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace("datetime", "timedelta")
elif otype == "m":
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view("int64")
else:
y = f
for i, axis in enumerate(axes):
if y.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least (edge_order + 1) elements are required."
)
# result allocation
out = np.empty_like(y, dtype=otype)
uniform_spacing = np.isscalar(dx[i])
# Numerical differentiation: 2nd order interior
slice1[axis] = slice(1, -1)
slice2[axis] = slice(None, -2)
slice3[axis] = slice(1, -1)
slice4[axis] = slice(2, None)
if uniform_spacing:
out[slice1] = (f[slice4] - f[slice2]) / (2.0 * dx[i])
else:
dx1 = dx[i][0:-1]
dx2 = dx[i][1:]
a = -(dx2) / (dx1 * (dx1 + dx2))
b = (dx2 - dx1) / (dx1 * dx2)
c = dx1 / (dx2 * (dx1 + dx2))
# fix the shape for broadcasting
shape = np.ones(N, dtype=int)
shape[axis] = -1
a.shape = b.shape = c.shape = shape
# 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
# Numerical differentiation: 1st order edges
if edge_order == 1:
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
dx_0 = dx[i] if uniform_spacing else dx[i][0]
# 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0])
out[slice1] = (y[slice2] - y[slice3]) / dx_0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
dx_n = dx[i] if uniform_spacing else dx[i][-1]
# 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
out[slice1] = (y[slice2] - y[slice3]) / dx_n
# Numerical differentiation: 2nd order edges
else:
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
if uniform_spacing:
a = -1.5 / dx[i]
b = 2.0 / dx[i]
c = -0.5 / dx[i]
else:
dx1 = dx[i][0]
dx2 = dx[i][1]
a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2))
b = (dx1 + dx2) / (dx1 * dx2)
c = -dx1 / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2]
out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
slice1[axis] = -1
slice2[axis] = -3
slice3[axis] = -2
slice4[axis] = -1
if uniform_spacing:
a = 0.5 / dx[i]
b = -2.0 / dx[i]
c = 1.5 / dx[i]
else:
dx1 = dx[i][-2]
dx2 = dx[i][-1]
a = (dx2) / (dx1 * (dx1 + dx2))
b = -(dx2 + dx1) / (dx1 * dx2)
c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len_axes == 1:
return outvals[0]
else:
return outvals
|
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
in the interior points and either first or second order accurate one-sides
(forward or backwards) differences at the boundaries.
The returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
varargs : list of scalar or array, optional
Spacing between f values. Default unitary spacing for all dimensions.
Spacing can be specified using:
1. single scalar to specify a sample distance for all dimensions.
2. N scalars to specify a constant sample distance for each dimension.
i.e. `dx`, `dy`, `dz`, ...
3. N arrays to specify the coordinates of the values along each
dimension of F. The length of the array must match the size of
the corresponding dimension
4. Any combination of N scalars/arrays with the meaning of 2. and 3.
If `axis` is given, the number of varargs must equal the number of axes.
Default: 1.
edge_order : {1, 2}, optional
Gradient is calculated using N-th order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
axis : None or int or tuple of ints, optional
Gradient is calculated only along the given axis or axes
The default (axis = None) is to calculate the gradient for all the axes
of the input array. axis may be negative, in which case it counts from
the last to the first axis.
.. versionadded:: 1.11.0
Returns
-------
gradient : ndarray or list of ndarray
A set of ndarrays (or a single ndarray if there is only one dimension)
corresponding to the derivatives of f with respect to each dimension.
Each derivative has the same shape as f.
Examples
--------
>>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(f)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(f, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
Spacing can be also specified with an array that represents the coordinates
of the values F along the dimensions.
For instance a uniform spacing:
>>> x = np.arange(f.size)
>>> np.gradient(f, x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
Or a non uniform one:
>>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float)
>>> np.gradient(f, x)
array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
In this example the spacing is also specified:
uniform for axis=0 and non uniform for axis=1
>>> dx = 2.
>>> y = [1., 1.5, 3.5]
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y)
[array([[ 1. , 1. , -0.5],
[ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ],
[ 2. , 1.7, 0.5]])]
It is possible to specify how boundaries are treated using `edge_order`
>>> x = np.array([0, 1, 2, 3, 4])
>>> f = x**2
>>> np.gradient(f, edge_order=1)
array([ 1., 2., 4., 6., 7.])
>>> np.gradient(f, edge_order=2)
array([-0., 2., 4., 6., 8.])
The `axis` keyword can be used to specify a subset of axes of which the
gradient is calculated
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
Notes
-----
Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continous
derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the
spacing the finite difference coefficients are computed by minimising
the consistency error :math:`\\eta_{i}`:
.. math::
\\eta_{i} = f_{i}^{\\left(1\\right)} -
\\left[ \\alpha f\\left(x_{i}\\right) +
\\beta f\\left(x_{i} + h_{d}\\right) +
\\gamma f\\left(x_{i}-h_{s}\\right)
\\right]
By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
with their Taylor series expansion, this translates into solving
the following the linear system:
.. math::
\\left\\{
\\begin{array}{r}
\\alpha+\\beta+\\gamma=0 \\\\
-\\beta h_{d}+\\gamma h_{s}=1 \\\\
\\beta h_{d}^{2}+\\gamma h_{s}^{2}=0
\\end{array}
\\right.
The resulting approximation of :math:`f_{i}^{(1)}` is the following:
.. math::
\\hat f_{i}^{(1)} =
\\frac{
h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+ \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
- h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
{ h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
+ \mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+ h_{s}h_{d}^{2}}{h_{d}
+ h_{s}}\\right)
It is worth noting that if :math:`h_{s}=h_{d}`
(i.e., data are evenly spaced)
we find the standard second order approximation:
.. math::
\\hat f_{i}^{(1)}=
\\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+ \mathcal{O}\\left(h^{2}\\right)
With a similar procedure the forward/backward approximations used for
boundaries can be derived.
References
----------
.. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
(Texts in Applied Mathematics). New York: Springer.
.. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
in Geophysical Fluid Dynamics. New York: Springer.
.. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
Arbitrarily Spaced Grids,
Mathematics of Computation 51, no. 184 : 699-706.
`PDF <http://www.ams.org/journals/mcom/1988-51-184/
S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
axes = kwargs.pop("axis", None)
if axes is None:
axes = tuple(range(N))
# check axes to have correct type and no duplicate entries
if isinstance(axes, int):
axes = (axes,)
if not isinstance(axes, tuple):
raise TypeError("A tuple of integers or a single integer is required")
# normalize axis values:
axes = tuple(x + N if x < 0 else x for x in axes)
if max(axes) >= N or min(axes) < 0:
raise ValueError("'axis' entry is out of bounds")
len_axes = len(axes)
if len(set(axes)) != len_axes:
raise ValueError("duplicate value in 'axis'")
n = len(varargs)
if n == 0:
dx = [1.0] * len_axes
elif n == len_axes or (n == 1 and np.isscalar(varargs[0])):
dx = list(varargs)
for i, distances in enumerate(dx):
if np.isscalar(distances):
continue
if len(distances) != f.shape[axes[i]]:
raise ValueError(
"distances must be either scalars or match "
"the length of the corresponding dimension"
)
diffx = np.diff(dx[i])
# if distances are constant reduce to the scalar case
# since it brings a consistent speedup
if (diffx == diffx[0]).all():
diffx = diffx[0]
dx[i] = diffx
if len(dx) == 1:
dx *= len_axes
else:
raise TypeError("invalid number of arguments")
edge_order = kwargs.pop("edge_order", 1)
if kwargs:
raise TypeError(
'"{}" are not valid keyword arguments.'.format('", "'.join(kwargs.keys()))
)
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)] * N
slice2 = [slice(None)] * N
slice3 = [slice(None)] * N
slice4 = [slice(None)] * N
otype = f.dtype.char
if otype not in ["f", "d", "F", "D", "m", "M"]:
otype = "d"
# Difference of datetime64 elements results in timedelta64
if otype == "M":
# Need to use the full dtype name because it contains unit information
otype = f.dtype.name.replace("datetime", "timedelta")
elif otype == "m":
# Needs to keep the specific units, can't be a general unit
otype = f.dtype
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
# just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view("int64")
else:
y = f
for i, axis in enumerate(axes):
if y.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least (edge_order + 1) elements are required."
)
# result allocation
out = np.empty_like(y, dtype=otype)
uniform_spacing = np.isscalar(dx[i])
# Numerical differentiation: 2nd order interior
slice1[axis] = slice(1, -1)
slice2[axis] = slice(None, -2)
slice3[axis] = slice(1, -1)
slice4[axis] = slice(2, None)
if uniform_spacing:
out[slice1] = (f[slice4] - f[slice2]) / (2.0 * dx[i])
else:
dx1 = dx[i][0:-1]
dx2 = dx[i][1:]
a = -(dx2) / (dx1 * (dx1 + dx2))
b = (dx2 - dx1) / (dx1 * dx2)
c = dx1 / (dx2 * (dx1 + dx2))
# fix the shape for broadcasting
shape = np.ones(N, dtype=int)
shape[axis] = -1
a.shape = b.shape = c.shape = shape
# 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
# Numerical differentiation: 1st order edges
if edge_order == 1:
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
dx_0 = dx[i] if uniform_spacing else dx[i][0]
# 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0])
out[slice1] = (y[slice2] - y[slice3]) / dx_0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
dx_n = dx[i] if uniform_spacing else dx[i][-1]
# 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
out[slice1] = (y[slice2] - y[slice3]) / dx_n
# Numerical differentiation: 2nd order edges
else:
slice1[axis] = 0
slice2[axis] = 0
slice3[axis] = 1
slice4[axis] = 2
if uniform_spacing:
a = -1.5 / dx[i]
b = 2.0 / dx[i]
c = -0.5 / dx[i]
else:
dx1 = dx[i][0]
dx2 = dx[i][1]
a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2))
b = (dx1 + dx2) / (dx1 * dx2)
c = -dx1 / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2]
out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
slice1[axis] = -1
slice2[axis] = -3
slice3[axis] = -2
slice4[axis] = -1
if uniform_spacing:
a = 0.5 / dx[i]
b = -2.0 / dx[i]
c = 1.5 / dx[i]
else:
dx1 = dx[i][-2]
dx2 = dx[i][-1]
a = (dx2) / (dx1 * (dx1 + dx2))
b = -(dx2 + dx1) / (dx1 * dx2)
c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len_axes == 1:
return outvals[0]
else:
return outvals
|
https://github.com/numpy/numpy/issues/8687
|
======================================================================
ERROR: test_warnings.test_warning_calls
----------------------------------------------------------------------
Traceback (most recent call last):
File "/venv/lib/python3.6/site-packages/nose/case.py", line 198, in runTest
self.test(*self.arg)
File "/venv/lib/python3.6/site-packages/numpy/tests/test_warnings.py", line 81, in test_warning_calls
tree = ast.parse(file.read())
File "/opt/cp36m/lib/python3.6/ast.py", line 35, in parse
return compile(source, filename, mode, PyCF_ONLY_AST)
File "<unknown>", line 1675
SyntaxError: invalid escape sequence \m
|
SyntaxError
|
def __call__(self, a, *args, **params):
if self.reversed:
args = list(args)
a, args[0] = args[0], a
marr = asanyarray(a)
method_name = self.__name__
method = getattr(type(marr), method_name, None)
if method is None:
# use the corresponding np function
method = getattr(np, method_name)
return method(marr, *args, **params)
|
def __call__(self, a, *args, **params):
if self.reversed:
args = list(args)
arr = args[0]
args[0] = a
a = arr
# Get the method from the array (if possible)
method_name = self.__name__
method = getattr(a, method_name, None)
if method is not None:
return method(*args, **params)
# Still here ? Then a is not a MaskedArray
method = getattr(MaskedArray, method_name, None)
if method is not None:
return method(MaskedArray(a), *args, **params)
# Still here ? OK, let's call the corresponding np function
method = getattr(np, method_name)
return method(a, *args, **params)
|
https://github.com/numpy/numpy/issues/8019
|
np.ma.copy([1,2,3]) # unexpected behaviour
[1, 2, 3]
np.copy([1,2,3]) # expected behaviour
array([1, 2, 3])
np.ma.count([1,2,3])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Programming\Anaconda\envs\test\lib\site-packages\numpy\ma\core.py", line 6389, in __call__
return method(*args, **params)
TypeError: count() takes exactly one argument (0 given)
|
TypeError
|
def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
# workaround for #8666, to preserve identity. Ideally the bottom line
# would handle this for us.
if isinstance(a, MaskedArray) and (dtype is None or dtype == a.dtype):
return a
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
|
def asanyarray(a, dtype=None):
"""
Convert the input to a masked array, conserving subclasses.
If `a` is a subclass of `MaskedArray`, its class is conserved.
No copy is performed if the input is already an `ndarray`.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array.
dtype : dtype, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Default is 'C'.
Returns
-------
out : MaskedArray
MaskedArray interpretation of `a`.
See Also
--------
asarray : Similar to `asanyarray`, but does not conserve subclass.
Examples
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
array([[ 0., 1., 2., 3., 4.],
[ 5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
masked_array(data =
[[ 0. 1. 2. 3. 4.]
[ 5. 6. 7. 8. 9.]],
mask =
False,
fill_value = 1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
"""
return masked_array(a, dtype=dtype, copy=False, keep_mask=True, subok=True)
|
https://github.com/numpy/numpy/issues/8019
|
np.ma.copy([1,2,3]) # unexpected behaviour
[1, 2, 3]
np.copy([1,2,3]) # expected behaviour
array([1, 2, 3])
np.ma.count([1,2,3])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Programming\Anaconda\envs\test\lib\site-packages\numpy\ma\core.py", line 6389, in __call__
return method(*args, **params)
TypeError: count() takes exactly one argument (0 given)
|
TypeError
|
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
is a 1-D slice of `arr` along `axis`.
Parameters
----------
func1d : function
This function should accept 1-D arrays. It is applied to 1-D
slices of `arr` along the specified axis.
axis : integer
Axis along which `arr` is sliced.
arr : ndarray
Input array.
args : any
Additional arguments to `func1d`.
kwargs : any
Additional named arguments to `func1d`.
.. versionadded:: 1.9.0
Returns
-------
apply_along_axis : ndarray
The output array. The shape of `outarr` is identical to the shape of
`arr`, except along the `axis` dimension. This axis is removed, and
replaced with new dimensions equal to the shape of the return value
of `func1d`. So if `func1d` returns a scalar `outarr` will have one
fewer dimensions than `arr`.
See Also
--------
apply_over_axes : Apply a function repeatedly over multiple axes.
Examples
--------
>>> def my_func(a):
... \"\"\"Average first and last element of a 1-D array\"\"\"
... return (a[0] + a[-1]) * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(my_func, 0, b)
array([ 4., 5., 6.])
>>> np.apply_along_axis(my_func, 1, b)
array([ 2., 5., 8.])
For a function that returns a 1D array, the number of dimensions in
`outarr` is the same as `arr`.
>>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
>>> np.apply_along_axis(sorted, 1, b)
array([[1, 7, 8],
[3, 4, 9],
[2, 5, 6]])
For a function that returns a higher dimensional array, those dimensions
are inserted in place of the `axis` dimension.
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(np.diag, -1, b)
array([[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]],
[[7, 0, 0],
[0, 8, 0],
[0, 0, 9]]])
"""
# handle negative axes
arr = asanyarray(arr)
nd = arr.ndim
if not (-nd <= axis < nd):
raise IndexError("axis {0} out of bounds [-{1}, {1})".format(axis, nd))
if axis < 0:
axis += nd
# arr, with the iteration axis at the end
in_dims = list(range(nd))
inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1 :] + [axis])
# compute indices for the iteration axes
inds = ndindex(inarr_view.shape[:-1])
# invoke the function on the first item
try:
ind0 = next(inds)
except StopIteration:
raise ValueError("Cannot apply_along_axis when any iteration dimensions are 0")
res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
# build a buffer for storing evaluations of func1d.
# remove the requested axis, and add the new ones on the end.
# laid out so that each write is contiguous.
# for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)
# permutation of axes such that out = buff.transpose(buff_permute)
buff_dims = list(range(buff.ndim))
buff_permute = (
buff_dims[0:axis]
+ buff_dims[buff.ndim - res.ndim : buff.ndim]
+ buff_dims[axis : buff.ndim - res.ndim]
)
# matrices have a nasty __array_prepare__ and __array_wrap__
if not isinstance(res, matrix):
buff = res.__array_prepare__(buff)
# save the first result, then compute and save all remaining results
buff[ind0] = res
for ind in inds:
buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
if not isinstance(res, matrix):
# wrap the array, to preserve subclasses
buff = res.__array_wrap__(buff)
# finally, rotate the inserted axes back to where they belong
return transpose(buff, buff_permute)
else:
# matrices have to be transposed first, because they collapse dimensions!
out_arr = transpose(buff, buff_permute)
return res.__array_wrap__(out_arr)
|
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
is a 1-D slice of `arr` along `axis`.
Parameters
----------
func1d : function
This function should accept 1-D arrays. It is applied to 1-D
slices of `arr` along the specified axis.
axis : integer
Axis along which `arr` is sliced.
arr : ndarray
Input array.
args : any
Additional arguments to `func1d`.
kwargs : any
Additional named arguments to `func1d`.
.. versionadded:: 1.9.0
Returns
-------
apply_along_axis : ndarray
The output array. The shape of `outarr` is identical to the shape of
`arr`, except along the `axis` dimension. This axis is removed, and
replaced with new dimensions equal to the shape of the return value
of `func1d`. So if `func1d` returns a scalar `outarr` will have one
fewer dimensions than `arr`.
See Also
--------
apply_over_axes : Apply a function repeatedly over multiple axes.
Examples
--------
>>> def my_func(a):
... \"\"\"Average first and last element of a 1-D array\"\"\"
... return (a[0] + a[-1]) * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(my_func, 0, b)
array([ 4., 5., 6.])
>>> np.apply_along_axis(my_func, 1, b)
array([ 2., 5., 8.])
For a function that returns a 1D array, the number of dimensions in
`outarr` is the same as `arr`.
>>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
>>> np.apply_along_axis(sorted, 1, b)
array([[1, 7, 8],
[3, 4, 9],
[2, 5, 6]])
For a function that returns a higher dimensional array, those dimensions
are inserted in place of the `axis` dimension.
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(np.diag, -1, b)
array([[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]],
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]],
[[7, 0, 0],
[0, 8, 0],
[0, 0, 9]]])
"""
# handle negative axes
arr = asanyarray(arr)
nd = arr.ndim
if not (-nd <= axis < nd):
raise IndexError("axis {0} out of bounds [-{1}, {1})".format(axis, nd))
if axis < 0:
axis += nd
# arr, with the iteration axis at the end
in_dims = list(range(nd))
inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1 :] + [axis])
# compute indices for the iteration axes
inds = ndindex(inarr_view.shape[:-1])
# invoke the function on the first item
ind0 = next(inds)
res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
# build a buffer for storing evaluations of func1d.
# remove the requested axis, and add the new ones on the end.
# laid out so that each write is contiguous.
# for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)
# permutation of axes such that out = buff.transpose(buff_permute)
buff_dims = list(range(buff.ndim))
buff_permute = (
buff_dims[0:axis]
+ buff_dims[buff.ndim - res.ndim : buff.ndim]
+ buff_dims[axis : buff.ndim - res.ndim]
)
# matrices have a nasty __array_prepare__ and __array_wrap__
if not isinstance(res, matrix):
buff = res.__array_prepare__(buff)
# save the first result, then compute and save all remaining results
buff[ind0] = res
for ind in inds:
buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
if not isinstance(res, matrix):
# wrap the array, to preserve subclasses
buff = res.__array_wrap__(buff)
# finally, rotate the inserted axes back to where they belong
return transpose(buff, buff_permute)
else:
# matrices have to be transposed first, because they collapse dimensions!
out_arr = transpose(buff, buff_permute)
return res.__array_wrap__(out_arr)
|
https://github.com/numpy/numpy/issues/6927
|
In [47]: a = np.ones((0, 2))
In [48]: np.sum(a, axis=0)
Out[48]: array([ 0., 0.])
In [49]: np.sum(a, axis=1)
Out[49]: array([], dtype=float64)
In [50]: np.apply_along_axis(np.sum, axis=0, arr=a)
Out[50]: array([ 0., 0.])
In [51]: np.apply_along_axis(np.sum, axis=1, arr=a)
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-51-c8275650df94> in <module>()
----> 1 np.apply_along_axis(np.sum, axis=1, arr=a)
/home/rgommers/.local/lib/python2.7/site-packages/numpy/lib/shape_base.pyc in apply_along_axis(func1d, axis, arr, *args, **kwargs)
89 outshape = asarray(arr.shape).take(indlist)
90 i.put(indlist, ind)
---> 91 res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
92 # if res is a number, then we have a smaller output array
93 if isscalar(res):
IndexError: index 0 is out of bounds for ax
|
IndexError
|
def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Returns the average of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.mean` for full documentation.
See Also
--------
ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
kwargs = {} if keepdims is np._NoValue else {"keepdims": keepdims}
if self._mask is nomask:
result = super(MaskedArray, self).mean(axis=axis, dtype=dtype, **kwargs)[()]
else:
dsum = self.sum(axis=axis, dtype=dtype, **kwargs)
cnt = self.count(axis=axis, **kwargs)
if cnt.shape == () and (cnt == 0):
result = masked
else:
result = dsum * 1.0 / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getattr(out, "_mask", nomask)
if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getattr(result, "_mask", nomask)
return out
return result
|
def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Returns the average of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.mean` for full documentation.
See Also
--------
ndarray.mean : corresponding function for ndarrays
numpy.mean : Equivalent function
numpy.ma.average: Weighted average.
Examples
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
masked_array(data = [1 2 --],
mask = [False False True],
fill_value = 999999)
>>> a.mean()
1.5
"""
kwargs = {} if keepdims is np._NoValue else {"keepdims": keepdims}
if self._mask is nomask:
result = super(MaskedArray, self).mean(axis=axis, dtype=dtype, **kwargs)
else:
dsum = self.sum(axis=axis, dtype=dtype, **kwargs)
cnt = self.count(axis=axis, **kwargs)
if cnt.shape == () and (cnt == 0):
result = masked
else:
result = dsum * 1.0 / cnt
if out is not None:
out.flat = result
if isinstance(out, MaskedArray):
outmask = getattr(out, "_mask", nomask)
if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getattr(result, "_mask", nomask)
return out
return result
|
https://github.com/numpy/numpy/issues/5769
|
In [7]: foo.mean() == bar.mean()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-7-3b824b0972e3> in <module>()
----> 1 foo.mean() == bar.mean()
/users/___/.local/lib/python2.7/site-packages/numpy/ma/core.pyc in __eq__(self, other)
3705 mask = np.all([[f[n].all() for n in mask.dtype.names]
3706 for f in mask], axis=axis)
-> 3707 check._mask = mask
3708 return check
3709 #
AttributeError: 'numpy.bool_' object has no attribute '_mask'
In [8]: foo.mean() != bar.mean()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-8-0947fa5da1ed> in <module>()
----> 1 foo.mean() != bar.mean()
/users/___/.local/lib/python2.7/site-packages/numpy/ma/core.pyc in __ne__(self, other)
3738 mask = np.all([[f[n].all() for n in mask.dtype.names]
3739 for f in mask], axis=axis)
-> 3740 check._mask = mask
3741 return check
3742 #
|
AttributeError
|
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Returns the variance of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.var` for full documentation.
See Also
--------
ndarray.var : corresponding function for ndarrays
numpy.var : Equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {"keepdims": keepdims}
# Easy case: nomask, business as usual
if self._mask is nomask:
ret = super(MaskedArray, self).var(
axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs
)[()]
if out is not None:
if isinstance(out, MaskedArray):
out.__setmask__(nomask)
return out
return ret
# Some data are masked, yay!
cnt = self.count(axis=axis, **kwargs) - ddof
danom = self - self.mean(axis, dtype, keepdims=True)
if iscomplexobj(self):
danom = umath.absolute(danom) ** 2
else:
danom *= danom
dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
# Apply the mask if it's not a scalar
if dvar.ndim:
dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
dvar._update_from(self)
elif getattr(dvar, "_mask", False):
# Make sure that masked is returned when the scalar is masked.
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
out.flat = 0
out.__setmask__(True)
elif out.dtype.kind in "biu":
errmsg = (
"Masked data information would be lost in one or more location."
)
raise MaskError(errmsg)
else:
out.flat = np.nan
return out
# In case with have an explicit output
if out is not None:
# Set the data
out.flat = dvar
# Set the mask if needed
if isinstance(out, MaskedArray):
out.__setmask__(dvar.mask)
return out
return dvar
|
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Returns the variance of the array elements along given axis.
Masked entries are ignored, and result elements which are not
finite will be masked.
Refer to `numpy.var` for full documentation.
See Also
--------
ndarray.var : corresponding function for ndarrays
numpy.var : Equivalent function
"""
kwargs = {} if keepdims is np._NoValue else {"keepdims": keepdims}
# Easy case: nomask, business as usual
if self._mask is nomask:
return self._data.var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
# Some data are masked, yay!
cnt = self.count(axis=axis, **kwargs) - ddof
danom = self - self.mean(axis, dtype, keepdims=True)
if iscomplexobj(self):
danom = umath.absolute(danom) ** 2
else:
danom *= danom
dvar = divide(danom.sum(axis, **kwargs), cnt).view(type(self))
# Apply the mask if it's not a scalar
if dvar.ndim:
dvar._mask = mask_or(self._mask.all(axis, **kwargs), (cnt <= 0))
dvar._update_from(self)
elif getattr(dvar, "_mask", False):
# Make sure that masked is returned when the scalar is masked.
dvar = masked
if out is not None:
if isinstance(out, MaskedArray):
out.flat = 0
out.__setmask__(True)
elif out.dtype.kind in "biu":
errmsg = (
"Masked data information would be lost in one or more location."
)
raise MaskError(errmsg)
else:
out.flat = np.nan
return out
# In case with have an explicit output
if out is not None:
# Set the data
out.flat = dvar
# Set the mask if needed
if isinstance(out, MaskedArray):
out.__setmask__(dvar.mask)
return out
return dvar
|
https://github.com/numpy/numpy/issues/5769
|
In [7]: foo.mean() == bar.mean()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-7-3b824b0972e3> in <module>()
----> 1 foo.mean() == bar.mean()
/users/___/.local/lib/python2.7/site-packages/numpy/ma/core.pyc in __eq__(self, other)
3705 mask = np.all([[f[n].all() for n in mask.dtype.names]
3706 for f in mask], axis=axis)
-> 3707 check._mask = mask
3708 return check
3709 #
AttributeError: 'numpy.bool_' object has no attribute '_mask'
In [8]: foo.mean() != bar.mean()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-8-0947fa5da1ed> in <module>()
----> 1 foo.mean() != bar.mean()
/users/___/.local/lib/python2.7/site-packages/numpy/ma/core.pyc in __ne__(self, other)
3738 mask = np.all([[f[n].all() for n in mask.dtype.names]
3739 for f in mask], axis=axis)
-> 3740 check._mask = mask
3741 return check
3742 #
|
AttributeError
|
def _median(a, axis=None, out=None, overwrite_input=False):
if overwrite_input:
if axis is None:
asorted = a.ravel()
asorted.sort()
else:
a.sort(axis=axis)
asorted = a
else:
asorted = sort(a, axis=axis)
if axis is None:
axis = 0
elif axis < 0:
axis += asorted.ndim
if asorted.ndim == 1:
idx, odd = divmod(count(asorted), 2)
return asorted[idx + odd - 1 : idx + 1].mean(out=out)
counts = count(asorted, axis=axis)
h = counts // 2
# create indexing mesh grid for all but reduced axis
axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape) if i != axis]
ind = np.meshgrid(*axes_grid, sparse=True, indexing="ij")
# insert indices of low and high median
ind.insert(axis, h - 1)
low = asorted[tuple(ind)]
low._sharedmask = False
ind[axis] = h
high = asorted[tuple(ind)]
# duplicate high if odd number of elements so mean does nothing
odd = counts % 2 == 1
if asorted.ndim > 1:
np.copyto(low, high, where=odd)
elif odd:
low = high
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
s = np.ma.sum([low, high], axis=0, out=out)
np.true_divide(s.data, 2.0, casting="unsafe", out=s.data)
else:
s = np.ma.mean([low, high], axis=0, out=out)
return s
|
def _median(a, axis=None, out=None, overwrite_input=False):
if overwrite_input:
if axis is None:
asorted = a.ravel()
asorted.sort()
else:
a.sort(axis=axis)
asorted = a
else:
asorted = sort(a, axis=axis)
if axis is None:
axis = 0
elif axis < 0:
axis += a.ndim
if asorted.ndim == 1:
idx, odd = divmod(count(asorted), 2)
return asorted[idx - (not odd) : idx + 1].mean()
counts = asorted.shape[axis] - (asorted.mask).sum(axis=axis)
h = counts // 2
# create indexing mesh grid for all but reduced axis
axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape) if i != axis]
ind = np.meshgrid(*axes_grid, sparse=True, indexing="ij")
# insert indices of low and high median
ind.insert(axis, h - 1)
low = asorted[tuple(ind)]
low._sharedmask = False
ind[axis] = h
high = asorted[tuple(ind)]
# duplicate high if odd number of elements so mean does nothing
odd = counts % 2 == 1
if asorted.ndim == 1:
if odd:
low = high
else:
low[odd] = high[odd]
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
s = np.ma.sum([low, high], axis=0, out=out)
np.true_divide(s.data, 2.0, casting="unsafe", out=s.data)
else:
s = np.ma.mean([low, high], axis=0, out=out)
return s
|
https://github.com/numpy/numpy/issues/8015
|
IndexError Traceback (most recent call last)
<ipython-input-26-b5117c1f3918> in <module>()
----> 1 np.ma.median(np.ma.array(np.ones((5, 5))), axis=0)
C:\-\lib\site-packages\numpy\ma\extras.py in median(a, axis, out, overwrite_input, keepdims)
693
694 r, k = _ureduce(a, func=_median, axis=axis, out=out,
--> 695 overwrite_input=overwrite_input)
696 if keepdims:
697 return r.reshape(k)
C:\-\lib\site-packages\numpy\lib\function_base.py in _ureduce(a, func, **kwargs)
3631 keepdim = [1] * a.ndim
3632
-> 3633 r = func(a, **kwargs)
3634 return r, keepdim
3635
C:\-\lib\site-packages\numpy\ma\extras.py in _median(a, axis, out, overwrite_input)
736 low = high
737 else:
--> 738 low[odd] = high[odd]
739
740 if np.issubdtype(asorted.dtype, np.inexact):
C:\-\lib\site-packages\numpy\ma\core.py in __getitem__(self, indx)
3161
3162 """
-> 3163 dout = self.data[indx]
3164 # We could directly use ndarray.__getitem__ on self.
3165 # But then we would have to modify __array_finalize__ to prevent the
IndexError: in the future, 0-d boolean arrays will be interpreted as a valid boolean index
|
IndexError
|
def clean_up_temporary_directory():
if _tmpdirs is not None:
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
|
def clean_up_temporary_directory():
for d in _tmpdirs:
try:
shutil.rmtree(d)
except OSError:
pass
|
https://github.com/numpy/numpy/issues/7809
|
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/usr/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 27, in clean_up_temporary_directory
for d in _tmpdirs:
TypeError: 'NoneType' object is not iterable
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/usr/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 27, in clean_up_temporary_directory
for d in _tmpdirs:
TypeError: 'NoneType' object is not iterable
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/usr/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 27, in clean_up_temporary_directory
for d in _tmpdirs:
TypeError: 'NoneType' object is not iterable
Error in sys.exitfunc:
Traceback (most recent call last):
File "/usr/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/usr/lib/python2.7/dist-packages/numpy/distutils/misc_util.py", line 27, in clean_up_temporary_directory
for d in _tmpdirs:
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def __str__(self):
"""
String representation.
"""
if masked_print_option.enabled():
f = masked_print_option
if self is masked:
return str(f)
m = self._mask
if m is nomask:
res = self._data
else:
if m.shape == ():
if m.dtype.names:
m = m.view((bool, len(m.dtype)))
if m.any():
return str(
tuple(
(f if _m else _d)
for _d, _m in zip(self._data.tolist(), m)
)
)
else:
return str(self._data)
elif m:
return str(f)
else:
return str(self._data)
# convert to object array to make filled work
names = self.dtype.names
if names is None:
data = self._data
mask = m
nval = 50
# For big arrays, to avoid a costly conversion to the
# object dtype, extract the corners before the conversion.
for axis in range(self.ndim):
if data.shape[axis] > 2 * nval:
arr = np.split(data, (nval, -nval), axis=axis)
data = np.concatenate((arr[0], arr[2]), axis=axis)
arr = np.split(mask, (nval, -nval), axis=axis)
mask = np.concatenate((arr[0], arr[2]), axis=axis)
res = data.astype("O")
res.view(ndarray)[mask] = f
else:
rdtype = _recursive_make_descr(self.dtype, "O")
res = self._data.astype(rdtype)
_recursive_printoption(res, m, f)
else:
res = self.filled(self.fill_value)
return str(res)
|
def __str__(self):
"""
String representation.
"""
if masked_print_option.enabled():
f = masked_print_option
if self is masked:
return str(f)
m = self._mask
if m is nomask:
res = self._data
else:
if m.shape == ():
if m.dtype.names:
m = m.view((bool, len(m.dtype)))
if m.any():
return str(
tuple(
(f if _m else _d)
for _d, _m in zip(self._data.tolist(), m)
)
)
else:
return str(self._data)
elif m:
return str(f)
else:
return str(self._data)
# convert to object array to make filled work
names = self.dtype.names
if names is None:
res = self._data.astype("O")
res.view(ndarray)[m] = f
else:
rdtype = _recursive_make_descr(self.dtype, "O")
res = self._data.astype(rdtype)
_recursive_printoption(res, m, f)
else:
res = self.filled(self.fill_value)
return str(res)
|
https://github.com/numpy/numpy/issues/3544
|
arr = np.zeros([43200, 21600], dtype=np.int8)
z = np.ma.masked_values(arr, 0)
print z
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/sci/lib/python2.7/site-packages/numpy/ma/core.py", line 3530, in __str__
res = self._data.astype("|O8")
MemoryError
|
MemoryError
|
def _nanpercentile(
a,
q,
axis=None,
out=None,
overwrite_input=False,
interpolation="linear",
keepdims=False,
):
"""
Private function that doesn't support extended axis or keepdims.
These methods are extended to this function using _ureduce
See nanpercentile for parameter usage
"""
if axis is None:
part = a.ravel()
result = _nanpercentile1d(part, q, overwrite_input, interpolation)
else:
result = np.apply_along_axis(
_nanpercentile1d, axis, a, q, overwrite_input, interpolation
)
# apply_along_axis fills in collapsed axis with results.
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
result = np.swapaxes(result, 0, axis)
if out is not None:
out[...] = result
return result
|
def _nanpercentile(
a,
q,
axis=None,
out=None,
overwrite_input=False,
interpolation="linear",
keepdims=False,
):
"""
Private function that doesn't support extended axis or keepdims.
These methods are extended to this function using _ureduce
See nanpercentile for parameter usage
"""
if axis is None:
part = a.ravel()
result = _nanpercentile1d(part, q, overwrite_input, interpolation)
else:
result = np.apply_along_axis(
_nanpercentile1d, axis, a, q, overwrite_input, interpolation
)
if out is not None:
out[...] = result
return result
|
https://github.com/numpy/numpy/issues/5760
|
In [9]: np.nanpercentile([[np.nan, np.nan], [np.nan, 2]], [50, 100], axis=0)
/usr/lib/python3.4/site-packages/numpy/lib/nanfunctions.py:914: RuntimeWarning: All-NaN slice encountered
warnings.warn("All-NaN slice encountered", RuntimeWarning)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-9-f425a58093df> in <module>()
----> 1 np.nanpercentile([[np.nan, np.nan], [np.nan, 2]], [50, 100], axis=0)
/usr/lib/python3.4/site-packages/numpy/lib/nanfunctions.py in nanpercentile(a, q, axis, out, overwrite_input, interpolation, keepdims)
873 r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out,
874 overwrite_input=overwrite_input,
--> 875 interpolation=interpolation)
876 if keepdims:
877 if q.ndim == 0:
/usr/lib/python3.4/site-packages/numpy/lib/function_base.py in _ureduce(a, func, **kwargs)
2801 keepdim = [1] * a.ndim
2802
-> 2803 r = func(a, **kwargs)
2804 return r, keepdim
2805
/usr/lib/python3.4/site-packages/numpy/lib/nanfunctions.py in _nanpercentile(a, q, axis, out, overwrite_input, interpolation, keepdims)
896 else:
897 result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
--> 898 overwrite_input, interpolation)
899
900 if out is not None:
/usr/lib/python3.4/site-packages/numpy/lib/shape_base.py in apply_along_axis(func1d, axis, arr, *args, **kwargs)
106 i.put(indlist, ind)
107 res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
--> 108 outarr[tuple(ind)] = res
109 k += 1
110 return outarr
ValueError: setting an array element with a sequence.
|
ValueError
|
def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation="linear"):
"""
Private function for rank 1 arrays. Compute percentile ignoring NaNs.
See nanpercentile for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
if q.ndim == 0:
return np.nan
else:
return np.nan * np.ones((len(q),))
elif s.size == 0:
return np.percentile(
arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation
)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size :][~c[-s.size :]]
# fill nans in beginning of array with non-nans of end
x[s[: enonan.size]] = enonan
# slice nans away
return np.percentile(
x[: -s.size], q, overwrite_input=True, interpolation=interpolation
)
|
def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation="linear"):
"""
Private function for rank 1 arrays. Compute percentile ignoring NaNs.
See nanpercentile for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
if s.size == arr1d.size:
warnings.warn("All-NaN slice encountered", RuntimeWarning)
return np.nan
elif s.size == 0:
return np.percentile(
arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation
)
else:
if overwrite_input:
x = arr1d
else:
x = arr1d.copy()
# select non-nans at end of array
enonan = arr1d[-s.size :][~c[-s.size :]]
# fill nans in beginning of array with non-nans of end
x[s[: enonan.size]] = enonan
# slice nans away
return np.percentile(
x[: -s.size], q, overwrite_input=True, interpolation=interpolation
)
|
https://github.com/numpy/numpy/issues/5760
|
In [9]: np.nanpercentile([[np.nan, np.nan], [np.nan, 2]], [50, 100], axis=0)
/usr/lib/python3.4/site-packages/numpy/lib/nanfunctions.py:914: RuntimeWarning: All-NaN slice encountered
warnings.warn("All-NaN slice encountered", RuntimeWarning)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-9-f425a58093df> in <module>()
----> 1 np.nanpercentile([[np.nan, np.nan], [np.nan, 2]], [50, 100], axis=0)
/usr/lib/python3.4/site-packages/numpy/lib/nanfunctions.py in nanpercentile(a, q, axis, out, overwrite_input, interpolation, keepdims)
873 r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out,
874 overwrite_input=overwrite_input,
--> 875 interpolation=interpolation)
876 if keepdims:
877 if q.ndim == 0:
/usr/lib/python3.4/site-packages/numpy/lib/function_base.py in _ureduce(a, func, **kwargs)
2801 keepdim = [1] * a.ndim
2802
-> 2803 r = func(a, **kwargs)
2804 return r, keepdim
2805
/usr/lib/python3.4/site-packages/numpy/lib/nanfunctions.py in _nanpercentile(a, q, axis, out, overwrite_input, interpolation, keepdims)
896 else:
897 result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
--> 898 overwrite_input, interpolation)
899
900 if out is not None:
/usr/lib/python3.4/site-packages/numpy/lib/shape_base.py in apply_along_axis(func1d, axis, arr, *args, **kwargs)
106 i.put(indlist, ind)
107 res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
--> 108 outarr[tuple(ind)] = res
109 k += 1
110 return outarr
ValueError: setting an array element with a sequence.
|
ValueError
|
def __getitem__(self, indx):
"""
Get the index.
"""
m = self._mask
if isinstance(m[indx], ndarray):
# Can happen when indx is a multi-dimensional field:
# A = ma.masked_array(data=[([0,1],)], mask=[([True,
# False],)], dtype=[("A", ">i2", (2,))])
# x = A[0]; y = x["A"]; then y.mask["A"].size==2
# and we can not say masked/unmasked.
# The result is no longer mvoid!
# See also issue #6724.
return masked_array(
data=self._data[indx],
mask=m[indx],
fill_value=self._fill_value[indx],
hard_mask=self._hardmask,
)
if m is not nomask and m[indx]:
return masked
return self._data[indx]
|
def __getitem__(self, indx):
"""
Get the index.
"""
m = self._mask
if m is not nomask and m[indx]:
return masked
return self._data[indx]
|
https://github.com/numpy/numpy/issues/6724
|
In [17]: numpy.version.version
Out[17]: '1.11.0.dev0+e711c95'
In [18]: A = ma.masked_array(data=[([0,1,2],), ([3,4,5],)], mask=[([True, False, False],), ([False, True, False],)], dtype=[("A", ">i2", (3,))])
In [19]: x = A[0]
In [20]: y = x["A"]
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-795ad5dbbe43> in <module>()
----> 1 y = x["A"]
/home/users/gholl/venv/stable-3.5/lib/python3.5/site-packages/numpy/ma/core.py in __getitem__(self, indx)
5851 """
5852 m = self._mask
-> 5853 if m is not nomask and m[indx]:
5854 return masked
5855 return self._data[indx]
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
This function takes N 1-D sequences and returns N outputs with N
dimensions each, such that the shape is 1 in all but one dimension
and the dimension with the non-unit shape value cycles through all
N dimensions.
Using `ix_` one can quickly construct index arrays that will index
the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
Parameters
----------
args : 1-D sequences
Returns
-------
out : tuple of ndarrays
N arrays with N dimensions each, with N the number of input
sequences. Together these arrays form an open mesh.
See Also
--------
ogrid, mgrid, meshgrid
Examples
--------
>>> a = np.arange(10).reshape(2, 5)
>>> a
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> ixgrid = np.ix_([0,1], [2,4])
>>> ixgrid
(array([[0],
[1]]), array([[2, 4]]))
>>> ixgrid[0].shape, ixgrid[1].shape
((2, 1), (1, 2))
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
"""
out = []
nd = len(args)
for k, new in enumerate(args):
new = asarray(new)
if new.ndim != 1:
raise ValueError("Cross index must be 1 dimensional")
if new.size == 0:
# Explicitly type empty arrays to avoid float default
new = new.astype(_nx.intp)
if issubdtype(new.dtype, _nx.bool_):
(new,) = new.nonzero()
new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1))
out.append(new)
return tuple(out)
|
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
This function takes N 1-D sequences and returns N outputs with N
dimensions each, such that the shape is 1 in all but one dimension
and the dimension with the non-unit shape value cycles through all
N dimensions.
Using `ix_` one can quickly construct index arrays that will index
the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
Parameters
----------
args : 1-D sequences
Returns
-------
out : tuple of ndarrays
N arrays with N dimensions each, with N the number of input
sequences. Together these arrays form an open mesh.
See Also
--------
ogrid, mgrid, meshgrid
Examples
--------
>>> a = np.arange(10).reshape(2, 5)
>>> a
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> ixgrid = np.ix_([0,1], [2,4])
>>> ixgrid
(array([[0],
[1]]), array([[2, 4]]))
>>> ixgrid[0].shape, ixgrid[1].shape
((2, 1), (1, 2))
>>> a[ixgrid]
array([[2, 4],
[7, 9]])
"""
out = []
nd = len(args)
for k, new in enumerate(args):
new = asarray(new)
if new.ndim != 1:
raise ValueError("Cross index must be 1 dimensional")
if new.size == 0:
# Explicitly type empty arrays to avoid float default
new = new.astype(_nx.intp)
if issubdtype(new.dtype, _nx.bool_):
(new,) = new.nonzero()
new.shape = (1,) * k + (new.size,) + (1,) * (nd - k - 1)
out.append(new)
return tuple(out)
|
https://github.com/numpy/numpy/issues/6062
|
======================================================================
ERROR: test_qhull.TestUtilities.test_more_barycentric_transforms
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/nose/case.py", line 197, in runTest
self.test(*self.arg)
File "/Users/rgommers/Code/bldscipy/scipy/spatial/tests/test_qhull.py", line 310, in test_more_barycentric_transforms
grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T
File "/Users/rgommers/Code/numpy/numpy/lib/index_tricks.py", line 77, in ix_
raise ValueError("Cross index must be 1 dimensional")
ValueError: Cross index must be 1 dimensional
|
ValueError
|
def masked_where(condition, a, copy=True):
"""
Mask an array where a condition is met.
Return `a` as an array masked where `condition` is True.
Any masked values of `a` or `condition` are also masked in the output.
Parameters
----------
condition : array_like
Masking condition. When `condition` tests floating point values for
equality, consider using ``masked_values`` instead.
a : array_like
Array to mask.
copy : bool
If True (default) make a copy of `a` in the result. If False modify
`a` in place and return a view.
Returns
-------
result : MaskedArray
The result of masking `a` where `condition` is True.
See Also
--------
masked_values : Mask using floating point equality.
masked_equal : Mask where equal to a given value.
masked_not_equal : Mask where `not` equal to a given value.
masked_less_equal : Mask where less than or equal to a given value.
masked_greater_equal : Mask where greater than or equal to a given value.
masked_less : Mask where less than a given value.
masked_greater : Mask where greater than a given value.
masked_inside : Mask inside a given interval.
masked_outside : Mask outside a given interval.
masked_invalid : Mask invalid values (NaNs or infs).
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
masked_array(data = [a b -- d],
mask = [False False True False],
fill_value=N/A)
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([99, 1, 2, 3])
When `condition` or `a` contain masked values.
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
masked_array(data = [-- 1 2 3],
mask = [ True False False False],
fill_value=999999)
>>> ma.masked_where(a == 3, b)
masked_array(data = [-- 1 -- --],
mask = [ True False True True],
fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
cond = make_mask(condition)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
if cshape and cshape != ashape:
raise IndexError(
"Inconsistant shape between the condition and the input"
" (got %s and %s)" % (cshape, ashape)
)
if hasattr(a, "_mask"):
cond = mask_or(cond, a._mask)
cls = type(a)
else:
cls = MaskedArray
result = a.view(cls)
# Assign to *.mask so that structured masks are handled correctly.
result.mask = cond
return result
|
def masked_where(condition, a, copy=True):
"""
Mask an array where a condition is met.
Return `a` as an array masked where `condition` is True.
Any masked values of `a` or `condition` are also masked in the output.
Parameters
----------
condition : array_like
Masking condition. When `condition` tests floating point values for
equality, consider using ``masked_values`` instead.
a : array_like
Array to mask.
copy : bool
If True (default) make a copy of `a` in the result. If False modify
`a` in place and return a view.
Returns
-------
result : MaskedArray
The result of masking `a` where `condition` is True.
See Also
--------
masked_values : Mask using floating point equality.
masked_equal : Mask where equal to a given value.
masked_not_equal : Mask where `not` equal to a given value.
masked_less_equal : Mask where less than or equal to a given value.
masked_greater_equal : Mask where greater than or equal to a given value.
masked_less : Mask where less than a given value.
masked_greater : Mask where greater than a given value.
masked_inside : Mask inside a given interval.
masked_outside : Mask outside a given interval.
masked_invalid : Mask invalid values (NaNs or infs).
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
masked_array(data = [a b -- d],
mask = [False False True False],
fill_value=N/A)
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
masked_array(data = [-- -- -- 3],
mask = [ True True True False],
fill_value=999999)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
masked_array(data = [99 -- -- 3],
mask = [False True True False],
fill_value=999999)
>>> a
array([99, 1, 2, 3])
When `condition` or `a` contain masked values.
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
masked_array(data = [0 1 -- 3],
mask = [False False True False],
fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
masked_array(data = [-- 1 2 3],
mask = [ True False False False],
fill_value=999999)
>>> ma.masked_where(a == 3, b)
masked_array(data = [-- 1 -- --],
mask = [ True False True True],
fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
cond = make_mask(condition)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
if cshape and cshape != ashape:
raise IndexError(
"Inconsistant shape between the condition and the input"
" (got %s and %s)" % (cshape, ashape)
)
if hasattr(a, "_mask"):
cond = mask_or(cond, a._mask)
cls = type(a)
else:
cls = MaskedArray
result = a.view(cls)
result._mask = cond
return result
|
https://github.com/numpy/numpy/issues/2972
|
R = numpy.empty(10, dtype=[("A", "<f2"), ("B", "<f4")])
Rm = numpy.ma.masked_where(R["A"]<0.1, R)
Rm["A"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/local/gerrit/python3.2-bleed/lib/python3.2/site-packages/numpy/ma/core.py", line 3014, in __getitem__
dout._mask = _mask[indx]
ValueError: field named A not found.
print(Rm)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/local/gerrit/python3.2-bleed/lib/python3.2/site-packages/numpy/ma/core.py", line 3583, in __str__
_recursive_printoption(res, m, f)
File "/local/gerrit/python3.2-bleed/lib/python3.2/site-packages/numpy/ma/core.py", line 2294, in _recursive_printoption
(curdata, curmask) = (result[name], mask[name])
ValueError: field named A not found.
print(numpy.version.version)
1.8.0.dev-b8bfcd0
print(numpy.version.git_revision)
b8bfcd02a2f246a9c23675e1650c3d316d733306
|
ValueError
|
def nan_to_num(x):
"""
Replace nan with zero and inf with finite numbers.
Returns an array or scalar replacing Not a Number (NaN) with zero,
(positive) infinity with a very large number and negative infinity
with a very small (or negative) number.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray, float
Array with the same shape as `x` and dtype of the element in `x` with
the greatest precision. NaN is replaced by zero, and infinity
(-infinity) is replaced by the largest (smallest or most negative)
floating point value that fits in the output dtype. All finite numbers
are upcast to the output dtype (default float64).
See Also
--------
isinf : Shows which elements are negative or negative infinity.
isneginf : Shows which elements are negative infinity.
isposinf : Shows which elements are positive infinity.
isnan : Shows which elements are Not a Number (NaN).
isfinite : Shows which elements are finite (not NaN, not infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.set_printoptions(precision=8)
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000,
-1.28000000e+002, 1.28000000e+002])
"""
try:
t = x.dtype.type
except AttributeError:
t = obj2sctype(type(x))
if issubclass(t, _nx.complexfloating):
return nan_to_num(x.real) + 1j * nan_to_num(x.imag)
else:
try:
y = x.copy()
except AttributeError:
y = array(x)
t = y.dtype.type
if not issubclass(t, _nx.integer):
if not y.shape:
y = array([x])
scalar = True
else:
scalar = False
are_inf = isposinf(y)
are_neg_inf = isneginf(y)
are_nan = isnan(y)
maxf, minf = _getmaxmin(y.dtype.type)
y[are_nan] = 0
y[are_inf] = maxf
y[are_neg_inf] = minf
if scalar:
y = y[0]
return y
|
def nan_to_num(x):
"""
Replace nan with zero and inf with finite numbers.
Returns an array or scalar replacing Not a Number (NaN) with zero,
(positive) infinity with a very large number and negative infinity
with a very small (or negative) number.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray, float
Array with the same shape as `x` and dtype of the element in `x` with
the greatest precision. NaN is replaced by zero, and infinity
(-infinity) is replaced by the largest (smallest or most negative)
floating point value that fits in the output dtype. All finite numbers
are upcast to the output dtype (default float64).
See Also
--------
isinf : Shows which elements are negative or negative infinity.
isneginf : Shows which elements are negative infinity.
isposinf : Shows which elements are positive infinity.
isnan : Shows which elements are Not a Number (NaN).
isfinite : Shows which elements are finite (not NaN, not infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.set_printoptions(precision=8)
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000,
-1.28000000e+002, 1.28000000e+002])
"""
try:
t = x.dtype.type
except AttributeError:
t = obj2sctype(type(x))
if issubclass(t, _nx.complexfloating):
return nan_to_num(x.real) + 1j * nan_to_num(x.imag)
else:
try:
y = x.copy()
except AttributeError:
y = array(x)
if not issubclass(t, _nx.integer):
if not y.shape:
y = array([x])
scalar = True
else:
scalar = False
are_inf = isposinf(y)
are_neg_inf = isneginf(y)
are_nan = isnan(y)
maxf, minf = _getmaxmin(y.dtype.type)
y[are_nan] = 0
y[are_inf] = maxf
y[are_neg_inf] = minf
if scalar:
y = y[0]
return y
|
https://github.com/numpy/numpy/issues/1478
|
np.nan_to_num([1.0,3]) # returns: array([ 1., 3.])
n=np.array([1,3])
np.nan_to_num(n) # returns: array([1, 3])
np.nan_to_num([1,3])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python2.5/site-packages/numpy/lib/type_check.py", line 135, in nan_to_num
maxf, minf = _getmaxmin(y.dtype.type)
File "/usr/lib64/python2.5/site-packages/numpy/lib/type_check.py", line 103, in _getmaxmin
f = getlimits.finfo(t)
File "/usr/lib64/python2.5/site-packages/numpy/lib/getlimits.py", line 46, in __new__
raise ValueError, "data type %r not inexact" % (dtype)
ValueError: data type <type 'numpy.int64'> not inexact
|
ValueError
|
def __add__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._add(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
|
def __add__(self, other):
if isinstance(other, ABCPolyBase):
if not self.has_sametype(other):
raise TypeError("Polynomial types differ")
elif not self.has_samedomain(other):
raise TypeError("Domains differ")
elif not self.has_samewindow(other):
raise TypeError("Windows differ")
else:
coef = self._add(self.coef, other.coef)
else:
try:
coef = self._add(self.coef, other)
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __sub__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._sub(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
|
def __sub__(self, other):
if isinstance(other, ABCPolyBase):
if not self.has_sametype(other):
raise TypeError("Polynomial types differ")
elif not self.has_samedomain(other):
raise TypeError("Domains differ")
elif not self.has_samewindow(other):
raise TypeError("Windows differ")
else:
coef = self._sub(self.coef, other.coef)
else:
try:
coef = self._sub(self.coef, other)
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __mul__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._mul(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
|
def __mul__(self, other):
if isinstance(other, ABCPolyBase):
if not self.has_sametype(other):
raise TypeError("Polynomial types differ")
elif not self.has_samedomain(other):
raise TypeError("Domains differ")
elif not self.has_samewindow(other):
raise TypeError("Windows differ")
else:
coef = self._mul(self.coef, other.coef)
else:
try:
coef = self._mul(self.coef, other)
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __truediv__(self, other):
# there is no true divide if the rhs is not a Number, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if not isinstance(other, Number) or isinstance(other, bool):
form = "unsupported types for true division: '%s', '%s'"
raise TypeError(form % (type(self), type(other)))
return self.__floordiv__(other)
|
def __truediv__(self, other):
# there is no true divide if the rhs is not a scalar, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if np.isscalar(other):
# this might be overly restrictive
coef = self.coef / other
return self.__class__(coef, self.domain, self.window)
else:
return NotImplemented
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __floordiv__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[0]
|
def __floordiv__(self, other):
if isinstance(other, ABCPolyBase):
if not self.has_sametype(other):
raise TypeError("Polynomial types differ")
elif not self.has_samedomain(other):
raise TypeError("Domains differ")
elif not self.has_samewindow(other):
raise TypeError("Windows differ")
else:
quo, rem = self._div(self.coef, other.coef)
else:
try:
quo, rem = self._div(self.coef, other)
except:
return NotImplemented
return self.__class__(quo, self.domain, self.window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __mod__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[1]
|
def __mod__(self, other):
if isinstance(other, ABCPolyBase):
if not self.has_sametype(other):
raise TypeError("Polynomial types differ")
elif not self.has_samedomain(other):
raise TypeError("Domains differ")
elif not self.has_samewindow(other):
raise TypeError("Windows differ")
else:
quo, rem = self._div(self.coef, other.coef)
else:
try:
quo, rem = self._div(self.coef, other)
except:
return NotImplemented
return self.__class__(rem, self.domain, self.window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __divmod__(self, other):
try:
othercoef = self._get_coefficients(other)
quo, rem = self._div(self.coef, othercoef)
except (TypeError, ZeroDivisionError) as e:
raise e
except:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
|
def __divmod__(self, other):
if isinstance(other, self.__class__):
if not self.has_samedomain(other):
raise TypeError("Domains are not equal")
elif not self.has_samewindow(other):
raise TypeError("Windows are not equal")
else:
quo, rem = self._div(self.coef, other.coef)
else:
try:
quo, rem = self._div(self.coef, other)
except:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __pow__(self, other):
coef = self._pow(self.coef, other, maxpower=self.maxpower)
res = self.__class__(coef, self.domain, self.window)
return res
|
def __pow__(self, other):
try:
coef = self._pow(self.coef, other, maxpower=self.maxpower)
except:
raise
return self.__class__(coef, self.domain, self.window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __rtruediv__(self, other):
# An instance of ABCPolyBase is not considered a
# Number.
return NotImplemented
|
def __rtruediv__(self, other):
# there is no true divide if the rhs is not a scalar, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if len(self.coef) == 1:
try:
quo, rem = self._div(other, self.coef[0])
except:
return NotImplemented
return self.__class__(quo, self.domain, self.window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __rfloordiv__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[0]
|
def __rfloordiv__(self, other):
try:
quo, rem = self._div(other, self.coef)
except:
return NotImplemented
return self.__class__(quo, self.domain, self.window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __rmod__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[1]
|
def __rmod__(self, other):
try:
quo, rem = self._div(other, self.coef)
except:
return NotImplemented
return self.__class__(rem, self.domain, self.window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __rdivmod__(self, other):
try:
quo, rem = self._div(other, self.coef)
except ZeroDivisionError as e:
raise e
except:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
|
def __rdivmod__(self, other):
try:
quo, rem = self._div(other, self.coef)
except:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def __eq__(self, other):
res = (
isinstance(other, self.__class__)
and np.all(self.domain == other.domain)
and np.all(self.window == other.window)
and np.all(self.coef == other.coef)
)
return res
|
def __eq__(self, other):
res = (
isinstance(other, self.__class__)
and self.has_samecoef(other)
and self.has_samedomain(other)
and self.has_samewindow(other)
)
return res
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, window=None):
"""Least squares fit to data.
Return a series instance that is the least squares fit to the data
`y` sampled at `x`. The domain of the returned instance can be
specified and this will often result in a superior fit with less
chance of ill conditioning.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial.
domain : {None, [beg, end], []}, optional
Domain to use for the returned series. If ``None``,
then a minimal domain that covers the points `x` is chosen. If
``[]`` the class domain is used. The default value was the
class domain in NumPy 1.4 and ``None`` in later versions.
The ``[]`` option was added in numpy 1.5.0.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than this relative to the largest singular value will be
ignored. The default value is len(x)*eps, where eps is the
relative precision of the float type, about 2e-16 in most
cases.
full : bool, optional
Switch determining nature of return value. When it is False
(the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
Weights. If not None the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products
``w[i]*y[i]`` all have the same variance. The default value is
None.
.. versionadded:: 1.5.0
window : {[beg, end]}, optional
Window to use for the returned series. The default
value is the default class domain
.. versionadded:: 1.6.0
Returns
-------
new_series : series
A series that represents the least squares fit to the data and
has the domain specified in the call.
[resid, rank, sv, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
"""
if domain is None:
domain = pu.getdomain(x)
elif isinstance(domain, list) and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
xnew = pu.mapdomain(x, domain, window)
res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
if full:
[coef, status] = res
return cls(coef, domain=domain, window=window), status
else:
coef = res
return cls(coef, domain=domain, window=window)
|
def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None, window=None):
"""Least squares fit to data.
Return a series instance that is the least squares fit to the data
`y` sampled at `x`. The domain of the returned instance can be
specified and this will often result in a superior fit with less
chance of ill conditioning.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial.
domain : {None, [beg, end], []}, optional
Domain to use for the returned series. If ``None``,
then a minimal domain that covers the points `x` is chosen. If
``[]`` the class domain is used. The default value was the
class domain in NumPy 1.4 and ``None`` in later versions.
The ``[]`` option was added in numpy 1.5.0.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than this relative to the largest singular value will be
ignored. The default value is len(x)*eps, where eps is the
relative precision of the float type, about 2e-16 in most
cases.
full : bool, optional
Switch determining nature of return value. When it is False
(the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
Weights. If not None the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products
``w[i]*y[i]`` all have the same variance. The default value is
None.
.. versionadded:: 1.5.0
window : {[beg, end]}, optional
Window to use for the returned series. The default
value is the default class domain
.. versionadded:: 1.6.0
Returns
-------
new_series : series
A series that represents the least squares fit to the data and
has the domain specified in the call.
[resid, rank, sv, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
"""
if domain is None:
domain = pu.getdomain(x)
elif domain == []:
domain = cls.domain
if window is None:
window = cls.window
xnew = pu.mapdomain(x, domain, window)
res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
if full:
[coef, status] = res
return cls(coef, domain=domain, window=window), status
else:
coef = res
return cls(coef, domain=domain, window=window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def fromroots(cls, roots, domain=[], window=None):
"""Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
Returns
-------
new_series : series
Series with the specified roots.
"""
[roots] = pu.as_series([roots], trim=False)
if domain is None:
domain = pu.getdomain(roots)
elif isinstance(domain, list) and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
deg = len(roots)
off, scl = pu.mapparms(domain, window)
rnew = off + scl * roots
coef = cls._fromroots(rnew) / scl**deg
return cls(coef, domain=domain, window=window)
|
def fromroots(cls, roots, domain=[], window=None):
"""Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
Returns
-------
new_series : series
Series with the specified roots.
"""
[roots] = pu.as_series([roots], trim=False)
if domain is None:
domain = pu.getdomain(roots)
elif domain == []:
domain = cls.domain
if window is None:
window = cls.window
deg = len(roots)
off, scl = pu.mapparms(domain, window)
rnew = off + scl * roots
coef = cls._fromroots(rnew) / scl**deg
return cls(coef, domain=domain, window=window)
|
https://github.com/numpy/numpy/issues/4631
|
In [1]: import numpy.polynomial.polynomial as npp
In [2]: p1 = npp.Polynomial([1,2])
In [3]: p2 = npp.Polynomial([3,4])
In [4]: npp.polydiv(p1, p2)
---------------------------------------------------------------------------
UnboundLocalError Traceback (most recent call last)
<ipython-input-4-00c25f56fb20> in <module>()
----> 1 npp.polydiv(p1, p2)
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in polydiv(c1, c2)
406 len2 = len(c2)
407 if len2 == 1 :
--> 408 return c1/c2[-1], c1[:1]*0
409 elif len1 < len2 :
410 return c1[:1]*0, c1
/Users/warren/anaconda/lib/python2.7/site-packages/numpy/polynomial/polynomial.pyc in __rtruediv__(self, other)
UnboundLocalError: local variable 'quo' referenced before assignment
|
UnboundLocalError
|
def loadtxt(
fname,
dtype=float,
comments="#",
delimiter=None,
converters=None,
skiprows=0,
usecols=None,
unpack=False,
ndmin=0,
):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith(".gz"):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith(".bz2"):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, "U"))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError("fname must be a string, file handle, or generator")
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim * packing[0][0], packing * dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start : start + length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes("\r\n"))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ""
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d" % line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if not ndmin in [0, 1, 2]:
raise ValueError("Illegal value of ndmin keyword: %s" % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
|
def loadtxt(
fname,
dtype=float,
comments="#",
delimiter=None,
converters=None,
skiprows=0,
usecols=None,
unpack=False,
ndmin=0,
):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith(".gz"):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith(".bz2"):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, "U"))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError("fname must be a string, file handle, or generator")
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim * packing[0][0], packing * dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start : start + length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes("\r\n"))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ""
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if not ndmin in [0, 1, 2]:
raise ValueError("Illegal value of ndmin keyword: %s" % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
|
https://github.com/numpy/numpy/issues/2591
|
Traceback (most recent call last):
File "./np_lt.py", line 5, in <module>
numpy.loadtxt(sys.argv[1])
File "/usr/lib/python2.7/site-packages/numpy/lib/npyio.py", line 804, in loadtxt
X = np.array(X, dtype)
ValueError: setting an array element with a sequence.
|
ValueError
|
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
if len(condlist) != len(choicelist):
raise ValueError("list of cases must be same length as list of conditions")
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
warnings.warn(
"select with an empty condition list is not possibleand will be deprecated",
DeprecationWarning,
)
return np.asarray(default)[()]
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
# need to get the result type before broadcasting for correct scalar
# behaviour
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
# as the shape is needed for the result. Doing it seperatly optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
if np.issubdtype(cond.dtype, np.integer):
# A previous implementation accepted int ndarrays accidentally.
# Supported here deliberately, but deprecated.
condlist[i] = condlist[i].astype(bool)
deprecated_ints = True
else:
raise ValueError(
"invalid entry in choicelist: should be boolean ndarray"
)
if deprecated_ints:
msg = (
"select condlists containing integer ndarrays is deprecated "
"and will be removed in the future. Use `.astype(bool)` to "
"convert to bools."
)
warnings.warn(msg, DeprecationWarning)
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
result_shape = condlist[0].shape
else:
result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
result = np.full(result_shape, choicelist[-1], dtype)
# Use np.copyto to burn each choicelist array onto result, using the
# corresponding condlist as a boolean mask. This is done in reverse
# order since the first choice should take precedence.
choicelist = choicelist[-2::-1]
condlist = condlist[::-1]
for choice, cond in zip(choicelist, condlist):
np.copyto(result, choice, where=cond)
return result
|
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
n2 = len(choicelist)
if n2 != n:
raise ValueError("list of cases must be same length as list of conditions")
choicelist = [default] + choicelist
S = 0
pfac = 1
for k in range(1, n + 1):
S += k * pfac * asarray(condlist[k - 1])
if k < n:
pfac *= 1 - asarray(condlist[k - 1])
# handle special case of a 1-element condition but
# a multi-element choice
if type(S) in ScalarType or max(asarray(S).shape) == 1:
pfac = asarray(1)
for k in range(n2 + 1):
pfac = pfac + asarray(choicelist[k])
if type(S) in ScalarType:
S = S * ones(asarray(pfac).shape, type(S))
else:
S = S * ones(asarray(pfac).shape, S.dtype)
return choose(S, tuple(choicelist))
|
https://github.com/numpy/numpy/issues/3254
|
1
Traceback (most recent call last):
File "numpy5.py", line 11, in <module>
np.select(choices,results)
File "/Library/Python/2.7/site-packages/numpy-override/numpy/lib/function_base.py", line 779, in select
return choose(S, tuple(choicelist))
File "/Library/Python/2.7/site-packages/numpy-override/numpy/core/fromnumeric.py", line 297, in choose
return choose(choices, out=out, mode=mode)
ValueError: Need between 2 and (32) array objects (inclusive).
|
ValueError
|
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode="full")
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(NX.compress(roots.imag < 0, roots)))
if len(pos_roots) == len(neg_roots) and NX.alltrue(neg_roots == pos_roots):
a = a.real.copy()
return a
|
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
pass
else:
raise ValueError("input must be 1d or square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
a = [1]
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, [1, -seq_of_zeros[k]], mode="full")
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(NX.compress(roots.imag < 0, roots)))
if len(pos_roots) == len(neg_roots) and NX.alltrue(neg_roots == pos_roots):
a = a.real.copy()
return a
|
https://github.com/numpy/numpy/issues/2092
|
In [1]: np.poly(np.zeros((0,0)))
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/charris/<ipython console> in <module>()
/usr/local/lib/python2.6/dist-packages/numpy/lib/polynomial.pyc in poly(seq_of_zeros)
126 pass
127 else:
--> 128 raise ValueError, "input must be 1d or square 2d array."
129
130 if len(seq_of_zeros) == 0:
ValueError: input must be 1d or square 2d array.
|
ValueError
|
def __init__(self, pyfunc, otypes="", doc=None, excluded=None, cache=False):
self.pyfunc = pyfunc
self.cache = cache
self._ufunc = None # Caching to improve default performance
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes["All"]:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = "".join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError("Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
|
def __init__(self, pyfunc, otypes="", doc=None, excluded=None, cache=False):
self.pyfunc = pyfunc
self.cache = cache
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes["All"]:
raise ValueError("Invalid otype specified: %s" % (char,))
elif iterable(otypes):
self.otypes = "".join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError("Invalid otype specification")
# Excluded variable support
if excluded is None:
excluded = set()
self.excluded = set(excluded)
if self.otypes and not self.excluded:
self._ufunc = None # Caching to improve default performance
|
https://github.com/numpy/numpy/issues/3285
|
v = numpy.vectorize( lambda x: x )
v.otypes='i'
v( [1,2] )
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python2.7/site-packages/numpy/lib/function_base.py", line 1873, in __call__
return self._vectorize_call(func=func, args=vargs)
File "/usr/lib64/python2.7/site-packages/numpy/lib/function_base.py", line 1933, in _vectorize_call
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
File "/usr/lib64/python2.7/site-packages/numpy/lib/function_base.py", line 1886, in _get_ufunc_and_otypes
if func is self.pyfunc and self._ufunc is not None:
AttributeError: 'vectorize' object has no attribute '_ufunc'
|
AttributeError
|
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
if not args:
raise ValueError("args can not be empty")
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = "".join([asarray(outputs[_k]).dtype.char for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
|
def _get_ufunc_and_otypes(self, func, args):
"""Return (ufunc, otypes)."""
# frompyfunc will fail if args is empty
assert args
if self.otypes:
otypes = self.otypes
nout = len(otypes)
# Note logic here: We only *use* self._ufunc if func is self.pyfunc
# even though we set self._ufunc regardless.
if func is self.pyfunc and self._ufunc is not None:
ufunc = self._ufunc
else:
ufunc = self._ufunc = frompyfunc(func, len(args), nout)
else:
# Get number of outputs and output types by calling the function on
# the first entries of args. We also cache the result to prevent
# the subsequent call when the ufunc is evaluated.
# Assumes that ufunc first evaluates the 0th elements in the input
# arrays (the input values are not checked to ensure this)
inputs = [asarray(_a).flat[0] for _a in args]
outputs = func(*inputs)
# Performance note: profiling indicates that -- for simple
# functions at least -- this wrapping can almost double the
# execution time.
# Hence we make it optional.
if self.cache:
_cache = [outputs]
def _func(*vargs):
if _cache:
return _cache.pop()
else:
return func(*vargs)
else:
_func = func
if isinstance(outputs, tuple):
nout = len(outputs)
else:
nout = 1
outputs = (outputs,)
otypes = "".join([asarray(outputs[_k]).dtype.char for _k in range(nout)])
# Performance note: profiling indicates that creating the ufunc is
# not a significant cost compared with wrapping so it seems not
# worth trying to cache this.
ufunc = frompyfunc(_func, len(args), nout)
return ufunc, otypes
|
https://github.com/numpy/numpy/issues/3285
|
v = numpy.vectorize( lambda x: x )
v.otypes='i'
v( [1,2] )
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib64/python2.7/site-packages/numpy/lib/function_base.py", line 1873, in __call__
return self._vectorize_call(func=func, args=vargs)
File "/usr/lib64/python2.7/site-packages/numpy/lib/function_base.py", line 1933, in _vectorize_call
ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)
File "/usr/lib64/python2.7/site-packages/numpy/lib/function_base.py", line 1886, in _get_ufunc_and_otypes
if func is self.pyfunc and self._ufunc is not None:
AttributeError: 'vectorize' object has no attribute '_ufunc'
|
AttributeError
|
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second integer is
just past the last byte of the array. If `a` is not contiguous it
will not use every byte between the (`low`, `high`) values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2, dtype='G'); I.dtype
dtype('complex192')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai["data"][0]
astrides = ai["strides"]
ashape = ai["shape"]
bytes_a = asarray(a).dtype.itemsize
a_low = a_high = a_data
if astrides is None: # contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape - 1) * stride
else:
a_high += (shape - 1) * stride
a_high += bytes_a
return a_low, a_high
|
def byte_bounds(a):
"""
Returns pointers to the end-points of an array.
Parameters
----------
a : ndarray
Input array. It must conform to the Python-side of the array interface.
Returns
-------
(low, high) : tuple of 2 integers
The first integer is the first byte of the array, the second integer is
just past the last byte of the array. If `a` is not contiguous it
will not use every byte between the (`low`, `high`) values.
Examples
--------
>>> I = np.eye(2, dtype='f'); I.dtype
dtype('float32')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
>>> I = np.eye(2, dtype='G'); I.dtype
dtype('complex192')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
"""
ai = a.__array_interface__
a_data = ai["data"][0]
astrides = ai["strides"]
ashape = ai["shape"]
bytes_a = int(ai["typestr"][2:])
a_low = a_high = a_data
if astrides is None: # contiguous case
a_high += a.size * bytes_a
else:
for shape, stride in zip(ashape, astrides):
if stride < 0:
a_low += (shape - 1) * stride
else:
a_high += (shape - 1) * stride
a_high += bytes_a
return a_low, a_high
|
https://github.com/numpy/numpy/issues/4354
|
In [52]: from datetime import datetime
In [53]: dts = [datetime(2005, 1, i) for i in range(1, 11)]
In [54]: arr = np.array(dts).astype('M8[ns]')
In [55]: byte_bounds(arr)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-55-084f96ed2076> in <module>()
----> 1 byte_bounds(arr)
/home/charris/.local/lib/python2.7/site-packages/numpy/lib/utils.pyc in byte_bounds(a)
211 astrides = ai['strides']
212 ashape = ai['shape']
--> 213 bytes_a = int(ai['typestr'][2:])
214
215 a_low = a_high = a_data
ValueError: invalid literal for int() with base 10: '8[ns]'
|
ValueError
|
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). By default, the
last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Notes
-----
.. versionadded:: 1.9.0
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
-3
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
a = asarray(a)
b = asarray(b)
# Move working axis to the end of the shape
a = rollaxis(a, axisa, a.ndim)
b = rollaxis(b, axisb, b.ndim)
msg = "incompatible dimensions for cross product\n(dimension must be 2 or 3)"
if a.shape[-1] not in [2, 3] or b.shape[-1] not in [2, 3]:
raise ValueError(msg)
# Create the output array
shape = broadcast(a[..., 0], b[..., 0]).shape
if a.shape[-1] == 3 or b.shape[-1] == 3:
shape += (3,)
dtype = promote_types(a.dtype, b.dtype)
cp = empty(shape, dtype)
if a.shape[-1] == 2:
if b.shape[-1] == 2:
# cp = a[..., 0]*b[..., 1] - a[..., 1]*b[..., 0]
multiply(a[..., 0], b[..., 1], out=cp)
cp -= a[..., 1] * b[..., 0]
if cp.ndim == 0:
return cp
else:
# This works because we are moving the last axis
return rollaxis(cp, -1, axisc)
else:
# cp[..., 0] = a[..., 1]*b[..., 2]
multiply(a[..., 1], b[..., 2], out=cp[..., 0])
# cp[..., 1] = -a[..., 0]*b[..., 2]
multiply(a[..., 0], b[..., 2], out=cp[..., 1])
cp[..., 1] *= -1
# cp[..., 2] = a[..., 0]*b[..., 1] - a[..., 1]*b[..., 0]
multiply(a[..., 0], b[..., 1], out=cp[..., 2])
cp[..., 2] -= a[..., 1] * b[..., 0]
elif a.shape[-1] == 3:
if b.shape[-1] == 3:
# cp[..., 0] = a[..., 1]*b[..., 2] - a[..., 2]*b[..., 1]
multiply(a[..., 1], b[..., 2], out=cp[..., 0])
cp[..., 0] -= a[..., 2] * b[..., 1]
# cp[..., 1] = a[..., 2]*b[..., 0] - a[..., 0]*b[..., 2]
multiply(a[..., 2], b[..., 0], out=cp[..., 1])
cp[..., 1] -= a[..., 0] * b[..., 2]
# cp[..., 2] = a[..., 0]*b[..., 1] - a[..., 1]*b[..., 0]
multiply(a[..., 0], b[..., 1], out=cp[..., 2])
cp[..., 2] -= a[..., 1] * b[..., 0]
else:
# cp[..., 0] = -a[..., 2]*b[..., 1]
multiply(a[..., 2], b[..., 1], out=cp[..., 0])
cp[..., 0] *= -1
# cp[..., 1] = a[..., 2]*b[..., 0]
multiply(a[..., 2], b[..., 0], out=cp[..., 1])
# cp[..., 2] = a[..., 0]*b[..., 1] - a[..., 1]*b[..., 0]
multiply(a[..., 0], b[..., 1], out=cp[..., 2])
cp[..., 2] -= a[..., 1] * b[..., 0]
if cp.ndim == 1:
return cp
else:
# This works because we are moving the last axis
return rollaxis(cp, -1, axisc)
|
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). By default, the
last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
-3
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
a = asarray(a).swapaxes(axisa, 0)
b = asarray(b).swapaxes(axisb, 0)
msg = "incompatible dimensions for cross product\n(dimension must be 2 or 3)"
if (a.shape[0] not in [2, 3]) or (b.shape[0] not in [2, 3]):
raise ValueError(msg)
if a.shape[0] == 2:
if b.shape[0] == 2:
cp = a[0] * b[1] - a[1] * b[0]
if cp.ndim == 0:
return cp
else:
return cp.swapaxes(0, axisc)
else:
x = a[1] * b[2]
y = -a[0] * b[2]
z = a[0] * b[1] - a[1] * b[0]
elif a.shape[0] == 3:
if b.shape[0] == 3:
x = a[1] * b[2] - a[2] * b[1]
y = a[2] * b[0] - a[0] * b[2]
z = a[0] * b[1] - a[1] * b[0]
else:
x = -a[2] * b[1]
y = a[2] * b[0]
z = a[0] * b[1] - a[1] * b[0]
cp = array([x, y, z])
if cp.ndim == 1:
return cp
else:
return cp.swapaxes(0, axisc)
|
https://github.com/numpy/numpy/issues/2624
|
cross(random.randn(2,1,3),random.randn(5,3))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/local/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/site-packages/numpy/core/numeric.py", line 1377, in cross
x = a[1]*b[2] - a[2]*b[1]
ValueError: operands could not be broadcast together with shapes (1,2) (5)
|
ValueError
|
def __init__(self, methodname, reversed=False):
self.__name__ = methodname
self.__doc__ = self.getdoc()
self.reversed = reversed
|
def __init__(self, methodname):
self.__name__ = methodname
self.__doc__ = self.getdoc()
|
https://github.com/numpy/numpy/issues/2495
|
In [14]: arr = np.arange(8)
In [15]: arr.shape = 4,2
In [16]: cond = np.array([True, False, True, True])
In [17]: np.ma.compress(cond, arr, axis=0)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/efiring/<ipython-input-17-50a13b754f15> in <module>()
----> 1 np.ma.compress(cond, arr, axis=0)
/usr/local/lib/python2.7/dist-packages/numpy/ma/core.pyc in __call__(self, a, *args, **params)
5931 method = getattr(a, method_name, None)
5932 if method is not None:
-> 5933 return method(*args, **params)
5934 # Still here ? Then a is not a MaskedArray
5935 method = getattr(MaskedArray, method_name, None)
ValueError: condition must be 1-d array
|
ValueError
|
def __call__(self, a, *args, **params):
if self.reversed:
args = list(args)
arr = args[0]
args[0] = a
a = arr
# Get the method from the array (if possible)
method_name = self.__name__
method = getattr(a, method_name, None)
if method is not None:
return method(*args, **params)
# Still here ? Then a is not a MaskedArray
method = getattr(MaskedArray, method_name, None)
if method is not None:
return method(MaskedArray(a), *args, **params)
# Still here ? OK, let's call the corresponding np function
method = getattr(np, method_name)
return method(a, *args, **params)
|
def __call__(self, a, *args, **params):
# Get the method from the array (if possible)
method_name = self.__name__
method = getattr(a, method_name, None)
if method is not None:
return method(*args, **params)
# Still here ? Then a is not a MaskedArray
method = getattr(MaskedArray, method_name, None)
if method is not None:
return method(MaskedArray(a), *args, **params)
# Still here ? OK, let's call the corresponding np function
method = getattr(np, method_name)
return method(a, *args, **params)
|
https://github.com/numpy/numpy/issues/2495
|
In [14]: arr = np.arange(8)
In [15]: arr.shape = 4,2
In [16]: cond = np.array([True, False, True, True])
In [17]: np.ma.compress(cond, arr, axis=0)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/home/efiring/<ipython-input-17-50a13b754f15> in <module>()
----> 1 np.ma.compress(cond, arr, axis=0)
/usr/local/lib/python2.7/dist-packages/numpy/ma/core.pyc in __call__(self, a, *args, **params)
5931 method = getattr(a, method_name, None)
5932 if method is not None:
-> 5933 return method(*args, **params)
5934 # Still here ? Then a is not a MaskedArray
5935 method = getattr(MaskedArray, method_name, None)
ValueError: condition must be 1-d array
|
ValueError
|
def CCompiler_spawn(self, cmd, display=None):
"""
Execute a command in a sub-process.
Parameters
----------
cmd : str
The command to execute.
display : str or sequence of str, optional
The text to add to the log file kept by `numpy.distutils`.
If not given, `display` is equal to `cmd`.
Returns
-------
None
Raises
------
DistutilsExecError
If the command failed, i.e. the exit status was not 0.
"""
if display is None:
display = cmd
if is_sequence(display):
display = " ".join(list(display))
log.info(display)
s, o = exec_command(cmd)
if s:
if is_sequence(cmd):
cmd = " ".join(list(cmd))
try:
print(o)
except UnicodeError:
# When installing through pip, `o` can contain non-ascii chars
pass
if re.search("Too many open files", o):
msg = "\nTry rerunning setup command until build succeeds."
else:
msg = ""
raise DistutilsExecError(
'Command "%s" failed with exit status %d%s' % (cmd, s, msg)
)
|
def CCompiler_spawn(self, cmd, display=None):
"""
Execute a command in a sub-process.
Parameters
----------
cmd : str
The command to execute.
display : str or sequence of str, optional
The text to add to the log file kept by `numpy.distutils`.
If not given, `display` is equal to `cmd`.
Returns
-------
None
Raises
------
DistutilsExecError
If the command failed, i.e. the exit status was not 0.
"""
if display is None:
display = cmd
if is_sequence(display):
display = " ".join(list(display))
log.info(display)
s, o = exec_command(cmd)
if s:
if is_sequence(cmd):
cmd = " ".join(list(cmd))
print(o)
if re.search("Too many open files", o):
msg = "\nTry rerunning setup command until build succeeds."
else:
msg = ""
raise DistutilsExecError(
'Command "%s" failed with exit status %d%s' % (cmd, s, msg)
)
|
https://github.com/numpy/numpy/issues/1857
|
x = np.array([-1j], '<c8')
>>> x.tostring().encode('hex')
'00000000000080bf'
# This is a little-endian representation, in the order (real, imag)
# When I swap the whole array, it swaps each of the (real, imag) parts
separately
>>> y = x.byteswap()
>>> y.tostring().encode('hex')
'00000000bf800000'
# and this round-trips fine
>>> z = np.fromstring(y.tostring(), dtype='>c8')
>>> assert z[0] == -1j
>>>
# When I swap the scalar, it seems to swap the entire 8 bytes
>>> y = x[0].byteswap()
>>> y.tostring().encode('hex')
'bf80000000000000'
# ...and this doesn't round-trip
>>> z = np.fromstring(y.tostring(), dtype='>c8')
>>> assert z[0] == -1j
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AssertionError
>>>
|
AssertionError
|
def setup_package():
# Perform 2to3 if needed
local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
src_path = local_path
if sys.version_info[0] == 3:
src_path = os.path.join(local_path, "build", "py3k")
sys.path.insert(0, os.path.join(local_path, "tools"))
import py3tool
print("Converting to Python3 via 2to3...")
py3tool.sync_2to3("numpy", os.path.join(src_path, "numpy"))
site_cfg = os.path.join(local_path, "site.cfg")
if os.path.isfile(site_cfg):
shutil.copy(site_cfg, src_path)
# Ugly hack to make pip work with Python 3, see #1857.
# Explanation: pip messes with __file__ which interacts badly with the
# change in directory due to the 2to3 conversion. Therefore we restore
# __file__ to what it would have been otherwise.
global __file__
__file__ = os.path.join(os.curdir, os.path.basename(__file__))
if "--egg-base" in sys.argv:
# Change pip-egg-info entry to absolute path, so pip can find it
# after changing directory.
idx = sys.argv.index("--egg-base")
if sys.argv[idx + 1] == "pip-egg-info":
sys.argv[idx + 1] = os.path.join(local_path, "pip-egg-info")
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
# Rewrite the version file everytime
write_version_py()
# Run build
from numpy.distutils.core import setup
try:
setup(
name=NAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
configuration=configuration,
)
finally:
del sys.path[0]
os.chdir(old_path)
return
|
def setup_package():
# Perform 2to3 if needed
local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
src_path = local_path
if sys.version_info[0] == 3:
src_path = os.path.join(local_path, "build", "py3k")
sys.path.insert(0, os.path.join(local_path, "tools"))
import py3tool
print("Converting to Python3 via 2to3...")
py3tool.sync_2to3("numpy", os.path.join(src_path, "numpy"))
site_cfg = os.path.join(local_path, "site.cfg")
if os.path.isfile(site_cfg):
shutil.copy(site_cfg, src_path)
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
# Rewrite the version file everytime
write_version_py()
# Run build
from numpy.distutils.core import setup
try:
setup(
name=NAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
classifiers=CLASSIFIERS,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
platforms=PLATFORMS,
configuration=configuration,
)
finally:
del sys.path[0]
os.chdir(old_path)
return
|
https://github.com/numpy/numpy/issues/1857
|
x = np.array([-1j], '<c8')
>>> x.tostring().encode('hex')
'00000000000080bf'
# This is a little-endian representation, in the order (real, imag)
# When I swap the whole array, it swaps each of the (real, imag) parts
separately
>>> y = x.byteswap()
>>> y.tostring().encode('hex')
'00000000bf800000'
# and this round-trips fine
>>> z = np.fromstring(y.tostring(), dtype='>c8')
>>> assert z[0] == -1j
>>>
# When I swap the scalar, it seems to swap the entire 8 bytes
>>> y = x[0].byteswap()
>>> y.tostring().encode('hex')
'bf80000000000000'
# ...and this doesn't round-trip
>>> z = np.fromstring(y.tostring(), dtype='>c8')
>>> assert z[0] == -1j
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AssertionError
>>>
|
AssertionError
|
def savetxt(
fname,
X,
fmt="%.18e",
delimiter=" ",
newline="\n",
header="",
footer="",
comments="# ",
):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
Character separating columns.
newline : str, optional
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 2.0.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 2.0.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to preceed result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith(".gz"):
import gzip
fh = gzip.open(fname, "wb")
else:
if sys.version_info[0] >= 3:
fh = open(fname, "wb")
else:
fh = open(fname, "w")
elif hasattr(fname, "seek"):
fh = fname
else:
raise ValueError("fname must be a string or file handle")
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
# list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError("fmt has wrong shape. %s" % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
n_fmt_chars = fmt.count("%")
error = ValueError("fmt has wrong number of %% formats: %s" % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [
" (%s+%sj)" % (fmt, fmt),
] * ncol
else:
fmt = [
fmt,
] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif (not iscomplex_X) and n_fmt_chars != ncol:
raise error
else:
format = fmt
if len(header) > 0:
header = header.replace("\n", "\n" + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
if len(footer) > 0:
footer = footer.replace("\n", "\n" + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
|
def savetxt(
fname,
X,
fmt="%.18e",
delimiter=" ",
newline="\n",
header="",
footer="",
comments="# ",
):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str, optional
Character separating columns.
newline : str, optional
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 2.0.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 2.0.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 2.0.0
Character separating lines.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to preceed result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith(".gz"):
import gzip
fh = gzip.open(fname, "wb")
else:
if sys.version_info[0] >= 3:
fh = open(fname, "wb")
else:
fh = open(fname, "w")
elif hasattr(fname, "seek"):
fh = fname
else:
raise ValueError("fname must be a string or file handle")
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a
# list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError("fmt has wrong shape. %s" % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
if fmt.count("%") == 1:
fmt = [
fmt,
] * ncol
format = delimiter.join(fmt)
elif fmt.count("%") != ncol:
raise AttributeError("fmt has wrong number of %% formats. %s" % fmt)
else:
format = fmt
if len(header) > 0:
header = header.replace("\n", "\n" + comments)
fh.write(asbytes(comments + header + newline))
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
if len(footer) > 0:
footer = footer.replace("\n", "\n" + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
|
https://github.com/numpy/numpy/issues/1573
|
======================================================================
FAIL: Check formatting.
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Python26\Lib\site-packages\numpy\core\tests\test_print.py", line 28, in test_complex_types
assert_equal(str(t(x)), str(complex(x)))
File "C:\Python26\Lib\site-packages\numpy\testing\utils.py", line 183, in assert_equal
raise AssertionError(msg)
AssertionError:
Items are not equal:
ACTUAL: '(1e+020+0j)'
DESIRED: '(1e+20+0j)'
======================================================================
FAIL: Check formatting.
----------------------------------------------------------------------
Traceback (most recent call last):
File "C:\Python26\Lib\site-packages\numpy\core\tests\test_print.py", line 16, in test_float_types
assert_equal(str(t(x)), str(float(x)))
File "C:\Python26\Lib\site-packages\numpy\testing\utils.py", line 183, in assert_equal
raise AssertionError(msg)
AssertionError:
Items are not equal:
ACTUAL: '1e+020'
DESIRED: '1e+20'
|
AssertionError
|
def _raise_error():
"""
Raises errors for inotify failures.
"""
err = ctypes.get_errno()
if err == errno.ENOSPC:
raise OSError(errno.ENOSPC, "inotify watch limit reached")
elif err == errno.EMFILE:
raise OSError(errno.EMFILE, "inotify instance limit reached")
elif err == errno.EACCES:
# Prevent raising an exception when a file with no permissions
# changes
pass
else:
raise OSError(err, os.strerror(err))
|
def _raise_error():
"""
Raises errors for inotify failures.
"""
err = ctypes.get_errno()
if err == errno.ENOSPC:
raise OSError(errno.ENOSPC, "inotify watch limit reached")
elif err == errno.EMFILE:
raise OSError(errno.EMFILE, "inotify instance limit reached")
else:
raise OSError(err, os.strerror(err))
|
https://github.com/gorakhargosh/watchdog/issues/670
|
$ watchmedo log --recursive /path
Traceback (most recent call last):
File ".local/bin/watchmedo", line 11, in <module>
sys.exit(main())
File "/home/user/.local/lib/python3.6/site-packages/watchdog/watchmedo.py", line 576, in main
parser.dispatch()
File "/home/user/.local/lib/python3.6/site-packages/argh/helpers.py", line 55, in dispatch
return dispatch(self, *args, **kwargs)
File "/home/user/.local/lib/python3.6/site-packages/argh/dispatching.py", line 174, in dispatch
for line in lines:
File "/home/user/.local/lib/python3.6/site-packages/argh/dispatching.py", line 277, in _execute_command
for line in result:
File "/home/user/.local/lib/python3.6/site-packages/argh/dispatching.py", line 231, in _call
result = function(namespace_obj)
File "/home/user/.local/lib/python3.6/site-packages/watchdog/watchmedo.py", line 350, in log
observe_with(observer, handler, args.directories, args.recursive)
File "/home/user/.local/lib/python3.6/site-packages/watchdog/watchmedo.py", line 116, in observe_with
observer.start()
File "/home/user/.local/lib/python3.6/site-packages/watchdog/observers/api.py", line 253, in start
emitter.start()
File "/home/user/.local/lib/python3.6/site-packages/watchdog/utils/__init__.py", line 110, in start
self.on_thread_start()
File "/home/user/.local/lib/python3.6/site-packages/watchdog/observers/inotify.py", line 121, in on_thread_start
self._inotify = InotifyBuffer(path, self.watch.is_recursive)
File "/home/user/.local/lib/python3.6/site-packages/watchdog/observers/inotify_buffer.py", line 35, in __init__
self._inotify = Inotify(path, recursive)
File "/home/user/.local/lib/python3.6/site-packages/watchdog/observers/inotify_c.py", line 200, in __init__
self._add_dir_watch(path, recursive, event_mask)
File "/home/user/.local/lib/python3.6/site-packages/watchdog/observers/inotify_c.py", line 395, in _add_dir_watch
self._add_watch(full_path, mask)
File "/home/user/.local/lib/python3.6/site-packages/watchdog/observers/inotify_c.py", line 409, in _add_watch
Inotify._raise_error()
File "/home/user/.local/lib/python3.6/site-packages/watchdog/observers/inotify_c.py", line 425, in _raise_error
raise OSError(err, os.strerror(err))
PermissionError: [Errno 13] Permission denied
|
PermissionError
|
def _queue_renamed(self, src_path, is_directory, ref_snapshot, new_snapshot):
"""
Compares information from two directory snapshots (one taken before
the rename operation and another taken right after) to determine the
destination path of the file system object renamed, and adds
appropriate events to the event queue.
"""
try:
ref_stat_info = ref_snapshot.stat_info(src_path)
except KeyError:
# Probably caught a temporary file/directory that was renamed
# and deleted. Fires a sequence of created and deleted events
# for the path.
if is_directory:
self.queue_event(DirCreatedEvent(src_path))
self.queue_event(DirDeletedEvent(src_path))
else:
self.queue_event(FileCreatedEvent(src_path))
self.queue_event(FileDeletedEvent(src_path))
# We don't process any further and bail out assuming
# the event represents deletion/creation instead of movement.
return
try:
dest_path = absolute_path(new_snapshot.path(ref_stat_info.st_ino))
if is_directory:
event = DirMovedEvent(src_path, dest_path)
# TODO: Do we need to fire moved events for the items
# inside the directory tree? Does kqueue does this
# all by itself? Check this and then enable this code
# only if it doesn't already.
# A: It doesn't. So I've enabled this block.
if self.watch.is_recursive:
for sub_event in event.sub_moved_events():
self.queue_event(sub_event)
self.queue_event(event)
else:
self.queue_event(FileMovedEvent(src_path, dest_path))
except KeyError:
# If the new snapshot does not have an inode for the
# old path, we haven't found the new name. Therefore,
# we mark it as deleted and remove unregister the path.
if is_directory:
self.queue_event(DirDeletedEvent(src_path))
else:
self.queue_event(FileDeletedEvent(src_path))
|
def _queue_renamed(self, src_path, is_directory, ref_snapshot, new_snapshot):
"""
Compares information from two directory snapshots (one taken before
the rename operation and another taken right after) to determine the
destination path of the file system object renamed, and adds
appropriate events to the event queue.
"""
try:
ref_stat_info = ref_snapshot.stat_info(src_path)
except KeyError:
# Probably caught a temporary file/directory that was renamed
# and deleted. Fires a sequence of created and deleted events
# for the path.
if is_directory:
self.queue_event(DirCreatedEvent(src_path))
self.queue_event(DirDeletedEvent(src_path))
else:
self.queue_event(FileCreatedEvent(src_path))
self.queue_event(FileDeletedEvent(src_path))
# We don't process any further and bail out assuming
# the event represents deletion/creation instead of movement.
return
try:
dest_path = absolute_path(new_snapshot.path_for_inode(ref_stat_info.st_ino))
if is_directory:
event = DirMovedEvent(src_path, dest_path)
# TODO: Do we need to fire moved events for the items
# inside the directory tree? Does kqueue does this
# all by itself? Check this and then enable this code
# only if it doesn't already.
# A: It doesn't. So I've enabled this block.
if self.watch.is_recursive:
for sub_event in event.sub_moved_events():
self.queue_event(sub_event)
self.queue_event(event)
else:
self.queue_event(FileMovedEvent(src_path, dest_path))
except KeyError:
# If the new snapshot does not have an inode for the
# old path, we haven't found the new name. Therefore,
# we mark it as deleted and remove unregister the path.
if is_directory:
self.queue_event(DirDeletedEvent(src_path))
else:
self.queue_event(FileDeletedEvent(src_path))
|
https://github.com/gorakhargosh/watchdog/issues/436
|
Exception in thread Thread-4:
Traceback (most recent call last):
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/SNIP/watchdog-0.8.3-py2-none-any.whl/watchdog/observers/api.py", line 146, in run
self.queue_events(self.timeout)
File "/SNIP/watchdog-0.8.3-py2-none-any.whl/watchdog/observers/kqueue.py", line 695, in queue_events
new_snapshot)
File "/SNIP/watchdog-0.8.3-py2-none-any.whl/watchdog/observers/kqueue.py", line 630, in _queue_renamed
new_snapshot.path_for_inode(ref_stat_info.st_ino))
AttributeError: 'DirectorySnapshot' object has no attribute 'path_for_inode'
|
AttributeError
|
def _encode_host(cls, host):
try:
ip, sep, zone = host.partition("%")
ip = ip_address(ip)
except ValueError:
for char in host:
if char > "\x7f":
break
else:
return host
try:
host = idna.encode(host, uts46=True).decode("ascii")
except UnicodeError:
host = host.encode("idna").decode("ascii")
else:
host = ip.compressed
if sep:
host += "%" + zone
if ip.version == 6:
host = "[" + host + "]"
return host
|
def _encode_host(cls, host):
try:
ip, sep, zone = host.partition("%")
ip = ip_address(ip)
except ValueError:
try:
host = idna.encode(host, uts46=True).decode("ascii")
except UnicodeError:
host = host.encode("idna").decode("ascii")
else:
host = ip.compressed
if sep:
host += "%" + zone
if ip.version == 6:
host = "[" + host + "]"
return host
|
https://github.com/aio-libs/yarl/issues/388
|
Python 3.6.5 (default, May 24 2018, 08:58:10)
[GCC 8.1.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
from yarl import URL
url = URL('https://www.python.org/~guido?arg=1#frag')
Traceback (most recent call last):
File "/tmp/foo/yarl/yarl/lib/python3.6/site-packages/yarl/__init__.py", line 667, in _encode_host
ip = ip_address(ip)
File "/home/mruf/local/opt/pyenv/versions/3.6.5/lib/python3.6/ipaddress.py", line 54, in ip_address
address)
ValueError: 'www.python.org' does not appear to be an IPv4 or IPv6 address
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/foo/yarl/yarl/lib/python3.6/site-packages/yarl/__init__.py", line 170, in __new__
val.username, val.password, host, port, encode=True
File "/tmp/foo/yarl/yarl/lib/python3.6/site-packages/yarl/__init__.py", line 688, in _make_netloc
ret = cls._encode_host(host)
File "/tmp/foo/yarl/yarl/lib/python3.6/site-packages/yarl/__init__.py", line 671, in _encode_host
if host.isascii():
AttributeError: 'str' object has no attribute 'isascii'
|
ValueError
|
def __init__(self, slither: "SlitherCore"):
super().__init__()
self._scope: List[str] = []
self._name: Optional[str] = None
self._view: bool = False
self._pure: bool = False
self._payable: bool = False
self._visibility: Optional[str] = None
self._is_implemented: Optional[bool] = None
self._is_empty: Optional[bool] = None
self._entry_point: Optional["Node"] = None
self._nodes: List["Node"] = []
self._variables: Dict[str, "LocalVariable"] = {}
# slithir Temporary and references variables (but not SSA)
self._slithir_variables: Set["SlithIRVariable"] = set()
self._parameters: List["LocalVariable"] = []
self._parameters_ssa: List["LocalIRVariable"] = []
self._parameters_src: SourceMapping = SourceMapping()
self._returns: List["LocalVariable"] = []
self._returns_ssa: List["LocalIRVariable"] = []
self._returns_src: SourceMapping = SourceMapping()
self._return_values: Optional[List["SlithIRVariable"]] = None
self._return_values_ssa: Optional[List["SlithIRVariable"]] = None
self._vars_read: List["Variable"] = []
self._vars_written: List["Variable"] = []
self._state_vars_read: List["StateVariable"] = []
self._vars_read_or_written: List["Variable"] = []
self._solidity_vars_read: List["SolidityVariable"] = []
self._state_vars_written: List["StateVariable"] = []
self._internal_calls: List["InternalCallType"] = []
self._solidity_calls: List["SolidityFunction"] = []
self._low_level_calls: List["LowLevelCallType"] = []
self._high_level_calls: List["HighLevelCallType"] = []
self._library_calls: List["LibraryCallType"] = []
self._external_calls_as_expressions: List["Expression"] = []
self._expression_vars_read: List["Expression"] = []
self._expression_vars_written: List["Expression"] = []
self._expression_calls: List["Expression"] = []
# self._expression_modifiers: List["Expression"] = []
self._modifiers: List[ModifierStatements] = []
self._explicit_base_constructor_calls: List[ModifierStatements] = []
self._contains_assembly: bool = False
self._expressions: Optional[List["Expression"]] = None
self._slithir_operations: Optional[List["Operation"]] = None
self._slithir_ssa_operations: Optional[List["Operation"]] = None
self._all_expressions: Optional[List["Expression"]] = None
self._all_slithir_operations: Optional[List["Operation"]] = None
self._all_internals_calls: Optional[List["InternalCallType"]] = None
self._all_high_level_calls: Optional[List["HighLevelCallType"]] = None
self._all_library_calls: Optional[List["LibraryCallType"]] = None
self._all_low_level_calls: Optional[List["LowLevelCallType"]] = None
self._all_solidity_calls: Optional[List["SolidityFunction"]] = None
self._all_state_variables_read: Optional[List["StateVariable"]] = None
self._all_solidity_variables_read: Optional[List["SolidityVariable"]] = None
self._all_state_variables_written: Optional[List["StateVariable"]] = None
self._all_slithir_variables: Optional[List["SlithIRVariable"]] = None
self._all_nodes: Optional[List["Node"]] = None
self._all_conditional_state_variables_read: Optional[List["StateVariable"]] = None
self._all_conditional_state_variables_read_with_loop: Optional[
List["StateVariable"]
] = None
self._all_conditional_solidity_variables_read: Optional[
List["SolidityVariable"]
] = None
self._all_conditional_solidity_variables_read_with_loop: Optional[
List["SolidityVariable"]
] = None
self._all_solidity_variables_used_as_args: Optional[List["SolidityVariable"]] = None
self._is_shadowed: bool = False
self._shadows: bool = False
# set(ReacheableNode)
self._reachable_from_nodes: Set[ReacheableNode] = set()
self._reachable_from_functions: Set[ReacheableNode] = set()
# Constructor, fallback, State variable constructor
self._function_type: Optional[FunctionType] = None
self._is_constructor: Optional[bool] = None
# Computed on the fly, can be True of False
self._can_reenter: Optional[bool] = None
self._can_send_eth: Optional[bool] = None
self._nodes_ordered_dominators: Optional[List["Node"]] = None
self._counter_nodes = 0
# Memoize parameters:
# TODO: identify all the memoize parameters and add a way to undo the memoization
self._full_name: Optional[str] = None
self._signature: Optional[Tuple[str, List[str], List[str]]] = None
self._solidity_signature: Optional[str] = None
self._signature_str: Optional[str] = None
self._canonical_name: Optional[str] = None
self._is_protected: Optional[bool] = None
self._slither: "SlitherCore" = slither
|
def __init__(self, slither: "SlitherCore"):
super().__init__()
self._scope: List[str] = []
self._name: Optional[str] = None
self._view: bool = False
self._pure: bool = False
self._payable: bool = False
self._visibility: Optional[str] = None
self._is_implemented: Optional[bool] = None
self._is_empty: Optional[bool] = None
self._entry_point: Optional["Node"] = None
self._nodes: List["Node"] = []
self._variables: Dict[str, "LocalVariable"] = {}
# slithir Temporary and references variables (but not SSA)
self._slithir_variables: Set["SlithIRVariable"] = set()
self._parameters: List["LocalVariable"] = []
self._parameters_ssa: List["LocalIRVariable"] = []
self._parameters_src: Optional[SourceMapping] = None
self._returns: List["LocalVariable"] = []
self._returns_ssa: List["LocalIRVariable"] = []
self._returns_src: Optional[SourceMapping] = None
self._return_values: Optional[List["SlithIRVariable"]] = None
self._return_values_ssa: Optional[List["SlithIRVariable"]] = None
self._vars_read: List["Variable"] = []
self._vars_written: List["Variable"] = []
self._state_vars_read: List["StateVariable"] = []
self._vars_read_or_written: List["Variable"] = []
self._solidity_vars_read: List["SolidityVariable"] = []
self._state_vars_written: List["StateVariable"] = []
self._internal_calls: List["InternalCallType"] = []
self._solidity_calls: List["SolidityFunction"] = []
self._low_level_calls: List["LowLevelCallType"] = []
self._high_level_calls: List["HighLevelCallType"] = []
self._library_calls: List["LibraryCallType"] = []
self._external_calls_as_expressions: List["Expression"] = []
self._expression_vars_read: List["Expression"] = []
self._expression_vars_written: List["Expression"] = []
self._expression_calls: List["Expression"] = []
# self._expression_modifiers: List["Expression"] = []
self._modifiers: List[ModifierStatements] = []
self._explicit_base_constructor_calls: List[ModifierStatements] = []
self._contains_assembly: bool = False
self._expressions: Optional[List["Expression"]] = None
self._slithir_operations: Optional[List["Operation"]] = None
self._slithir_ssa_operations: Optional[List["Operation"]] = None
self._all_expressions: Optional[List["Expression"]] = None
self._all_slithir_operations: Optional[List["Operation"]] = None
self._all_internals_calls: Optional[List["InternalCallType"]] = None
self._all_high_level_calls: Optional[List["HighLevelCallType"]] = None
self._all_library_calls: Optional[List["LibraryCallType"]] = None
self._all_low_level_calls: Optional[List["LowLevelCallType"]] = None
self._all_solidity_calls: Optional[List["SolidityFunction"]] = None
self._all_state_variables_read: Optional[List["StateVariable"]] = None
self._all_solidity_variables_read: Optional[List["SolidityVariable"]] = None
self._all_state_variables_written: Optional[List["StateVariable"]] = None
self._all_slithir_variables: Optional[List["SlithIRVariable"]] = None
self._all_nodes: Optional[List["Node"]] = None
self._all_conditional_state_variables_read: Optional[List["StateVariable"]] = None
self._all_conditional_state_variables_read_with_loop: Optional[
List["StateVariable"]
] = None
self._all_conditional_solidity_variables_read: Optional[
List["SolidityVariable"]
] = None
self._all_conditional_solidity_variables_read_with_loop: Optional[
List["SolidityVariable"]
] = None
self._all_solidity_variables_used_as_args: Optional[List["SolidityVariable"]] = None
self._is_shadowed: bool = False
self._shadows: bool = False
# set(ReacheableNode)
self._reachable_from_nodes: Set[ReacheableNode] = set()
self._reachable_from_functions: Set[ReacheableNode] = set()
# Constructor, fallback, State variable constructor
self._function_type: Optional[FunctionType] = None
self._is_constructor: Optional[bool] = None
# Computed on the fly, can be True of False
self._can_reenter: Optional[bool] = None
self._can_send_eth: Optional[bool] = None
self._nodes_ordered_dominators: Optional[List["Node"]] = None
self._counter_nodes = 0
# Memoize parameters:
# TODO: identify all the memoize parameters and add a way to undo the memoization
self._full_name: Optional[str] = None
self._signature: Optional[Tuple[str, List[str], List[str]]] = None
self._solidity_signature: Optional[str] = None
self._signature_str: Optional[str] = None
self._canonical_name: Optional[str] = None
self._is_protected: Optional[bool] = None
self._slither: "SlitherCore" = slither
|
https://github.com/crytic/slither/issues/771
|
Traceback (most recent call last):
File "/Users/nataliechin/.virtualenvs/slither-dev/bin/slither-flat", line 33, in <module>
sys.exit(load_entry_point('slither-analyzer', 'console_scripts', 'slither-flat')())
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/__main__.py", line 100, in main
flat = Flattening(
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 67, in __init__
self._get_source_code(contract)
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 103, in _get_source_code
f.parameters_src.source_mapping["start"]
AttributeError: 'FunctionContract' object has no attribute 'parameters_src'
|
AttributeError
|
def custom_format(slither, result):
elements = result["elements"]
for element in elements:
if element["type"] != "function":
# Skip variable elements
continue
target_contract = slither.get_contract_from_name(
element["type_specific_fields"]["parent"]["name"]
)
if target_contract:
function = target_contract.get_function_from_signature(
element["type_specific_fields"]["signature"]
)
if function:
_patch(
slither,
result,
element["source_mapping"]["filename_absolute"],
int(
function.parameters_src().source_mapping["start"]
+ function.parameters_src().source_mapping["length"]
),
int(function.returns_src().source_mapping["start"]),
)
|
def custom_format(slither, result):
elements = result["elements"]
for element in elements:
if element["type"] != "function":
# Skip variable elements
continue
target_contract = slither.get_contract_from_name(
element["type_specific_fields"]["parent"]["name"]
)
if target_contract:
function = target_contract.get_function_from_signature(
element["type_specific_fields"]["signature"]
)
if function:
_patch(
slither,
result,
element["source_mapping"]["filename_absolute"],
int(
function.parameters_src.source_mapping["start"]
+ function.parameters_src.source_mapping["length"]
),
int(function.returns_src.source_mapping["start"]),
)
|
https://github.com/crytic/slither/issues/771
|
Traceback (most recent call last):
File "/Users/nataliechin/.virtualenvs/slither-dev/bin/slither-flat", line 33, in <module>
sys.exit(load_entry_point('slither-analyzer', 'console_scripts', 'slither-flat')())
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/__main__.py", line 100, in main
flat = Flattening(
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 67, in __init__
self._get_source_code(contract)
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 103, in _get_source_code
f.parameters_src.source_mapping["start"]
AttributeError: 'FunctionContract' object has no attribute 'parameters_src'
|
AttributeError
|
def custom_format(slither, result):
elements = result["elements"]
for element in elements:
target_contract = slither.get_contract_from_name(
element["type_specific_fields"]["parent"]["name"]
)
if target_contract:
function = target_contract.get_function_from_signature(
element["type_specific_fields"]["signature"]
)
if function:
_patch(
slither,
result,
element["source_mapping"]["filename_absolute"],
int(function.parameters_src().source_mapping["start"]),
int(function.returns_src().source_mapping["start"]),
)
|
def custom_format(slither, result):
elements = result["elements"]
for element in elements:
target_contract = slither.get_contract_from_name(
element["type_specific_fields"]["parent"]["name"]
)
if target_contract:
function = target_contract.get_function_from_signature(
element["type_specific_fields"]["signature"]
)
if function:
_patch(
slither,
result,
element["source_mapping"]["filename_absolute"],
int(function.parameters_src.source_mapping["start"]),
int(function.returns_src.source_mapping["start"]),
)
|
https://github.com/crytic/slither/issues/771
|
Traceback (most recent call last):
File "/Users/nataliechin/.virtualenvs/slither-dev/bin/slither-flat", line 33, in <module>
sys.exit(load_entry_point('slither-analyzer', 'console_scripts', 'slither-flat')())
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/__main__.py", line 100, in main
flat = Flattening(
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 67, in __init__
self._get_source_code(contract)
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 103, in _get_source_code
f.parameters_src.source_mapping["start"]
AttributeError: 'FunctionContract' object has no attribute 'parameters_src'
|
AttributeError
|
def __init__(
self,
function: Function,
function_data: Dict,
contract_parser: Optional["ContractSolc"],
slither_parser: "SlitherSolc",
):
self._slither_parser: "SlitherSolc" = slither_parser
self._contract_parser = contract_parser
self._function = function
# Only present if compact AST
self._referenced_declaration: Optional[int] = None
if self.is_compact_ast:
self._function.name = function_data["name"]
if "id" in function_data:
self._referenced_declaration = function_data["id"]
self._function.id = function_data["id"]
else:
self._function.name = function_data["attributes"][self.get_key()]
self._functionNotParsed = function_data
self._params_was_analyzed = False
self._content_was_analyzed = False
self._counter_scope_local_variables = 0
# variable renamed will map the solc id
# to the variable. It only works for compact format
# Later if an expression provides the referencedDeclaration attr
# we can retrieve the variable
# It only matters if two variables have the same name in the function
# which is only possible with solc > 0.5
self._variables_renamed: Dict[
int, Union[LocalVariableSolc, LocalVariableInitFromTupleSolc]
] = {}
self._analyze_type()
self._node_to_nodesolc: Dict[Node, NodeSolc] = dict()
self._node_to_yulobject: Dict[Node, YulBlock] = dict()
self._local_variables_parser: List[
Union[LocalVariableSolc, LocalVariableInitFromTupleSolc]
] = []
|
def __init__(
self,
function: Function,
function_data: Dict,
contract_parser: Optional["ContractSolc"],
slither_parser: "SlitherSolc",
):
self._slither_parser: "SlitherSolc" = slither_parser
self._contract_parser = contract_parser
self._function = function
# Only present if compact AST
self._referenced_declaration: Optional[int] = None
if self.is_compact_ast:
self._function.name = function_data["name"]
if "id" in function_data:
self._referenced_declaration = function_data["id"]
self._function.id = function_data["id"]
else:
self._function.name = function_data["attributes"][self.get_key()]
self._functionNotParsed = function_data
self._params_was_analyzed = False
self._content_was_analyzed = False
self._counter_scope_local_variables = 0
# variable renamed will map the solc id
# to the variable. It only works for compact format
# Later if an expression provides the referencedDeclaration attr
# we can retrieve the variable
# It only matters if two variables have the same name in the function
# which is only possible with solc > 0.5
self._variables_renamed: Dict[
int, Union[LocalVariableSolc, LocalVariableInitFromTupleSolc]
] = {}
self._analyze_type()
self.parameters_src = SourceMapping()
self.returns_src = SourceMapping()
self._node_to_nodesolc: Dict[Node, NodeSolc] = dict()
self._node_to_yulobject: Dict[Node, YulBlock] = dict()
self._local_variables_parser: List[
Union[LocalVariableSolc, LocalVariableInitFromTupleSolc]
] = []
|
https://github.com/crytic/slither/issues/771
|
Traceback (most recent call last):
File "/Users/nataliechin/.virtualenvs/slither-dev/bin/slither-flat", line 33, in <module>
sys.exit(load_entry_point('slither-analyzer', 'console_scripts', 'slither-flat')())
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/__main__.py", line 100, in main
flat = Flattening(
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 67, in __init__
self._get_source_code(contract)
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 103, in _get_source_code
f.parameters_src.source_mapping["start"]
AttributeError: 'FunctionContract' object has no attribute 'parameters_src'
|
AttributeError
|
def _parse_params(self, params: Dict):
assert params[self.get_key()] == "ParameterList"
self._function.parameters_src().set_offset(params["src"], self._function.slither)
if self.is_compact_ast:
params = params["parameters"]
else:
params = params[self.get_children("children")]
for param in params:
assert param[self.get_key()] == "VariableDeclaration"
local_var = self._add_param(param)
self._function.add_parameters(local_var.underlying_variable)
|
def _parse_params(self, params: Dict):
assert params[self.get_key()] == "ParameterList"
self.parameters_src.set_offset(params["src"], self._function.slither)
if self.is_compact_ast:
params = params["parameters"]
else:
params = params[self.get_children("children")]
for param in params:
assert param[self.get_key()] == "VariableDeclaration"
local_var = self._add_param(param)
self._function.add_parameters(local_var.underlying_variable)
|
https://github.com/crytic/slither/issues/771
|
Traceback (most recent call last):
File "/Users/nataliechin/.virtualenvs/slither-dev/bin/slither-flat", line 33, in <module>
sys.exit(load_entry_point('slither-analyzer', 'console_scripts', 'slither-flat')())
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/__main__.py", line 100, in main
flat = Flattening(
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 67, in __init__
self._get_source_code(contract)
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 103, in _get_source_code
f.parameters_src.source_mapping["start"]
AttributeError: 'FunctionContract' object has no attribute 'parameters_src'
|
AttributeError
|
def _parse_returns(self, returns: Dict):
assert returns[self.get_key()] == "ParameterList"
self._function.returns_src().set_offset(returns["src"], self._function.slither)
if self.is_compact_ast:
returns = returns["parameters"]
else:
returns = returns[self.get_children("children")]
for ret in returns:
assert ret[self.get_key()] == "VariableDeclaration"
local_var = self._add_param(ret)
self._function.add_return(local_var.underlying_variable)
|
def _parse_returns(self, returns: Dict):
assert returns[self.get_key()] == "ParameterList"
self.returns_src.set_offset(returns["src"], self._function.slither)
if self.is_compact_ast:
returns = returns["parameters"]
else:
returns = returns[self.get_children("children")]
for ret in returns:
assert ret[self.get_key()] == "VariableDeclaration"
local_var = self._add_param(ret)
self._function.add_return(local_var.underlying_variable)
|
https://github.com/crytic/slither/issues/771
|
Traceback (most recent call last):
File "/Users/nataliechin/.virtualenvs/slither-dev/bin/slither-flat", line 33, in <module>
sys.exit(load_entry_point('slither-analyzer', 'console_scripts', 'slither-flat')())
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/__main__.py", line 100, in main
flat = Flattening(
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 67, in __init__
self._get_source_code(contract)
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 103, in _get_source_code
f.parameters_src.source_mapping["start"]
AttributeError: 'FunctionContract' object has no attribute 'parameters_src'
|
AttributeError
|
def _get_source_code(self, contract: Contract): # pylint: disable=too-many-branches,too-many-statements
"""
Save the source code of the contract in self._source_codes
Patch the source code
:param contract:
:return:
"""
src_mapping = contract.source_mapping
content = self._slither.source_code[src_mapping["filename_absolute"]].encode("utf8")
start = src_mapping["start"]
end = src_mapping["start"] + src_mapping["length"]
to_patch = []
# interface must use external
if self._external_to_public and contract.contract_kind != "interface":
for f in contract.functions_declared:
# fallback must be external
if f.is_fallback or f.is_constructor_variables:
continue
if f.visibility == "external":
attributes_start = (
f.parameters_src().source_mapping["start"]
+ f.parameters_src().source_mapping["length"]
)
attributes_end = f.returns_src().source_mapping["start"]
attributes = content[attributes_start:attributes_end]
regex = re.search(
r"((\sexternal)\s+)|(\sexternal)$|(\)external)$", attributes
)
if regex:
to_patch.append(
Patch(
attributes_start + regex.span()[0] + 1,
"public_to_external",
)
)
else:
raise SlitherException(
f"External keyword not found {f.name} {attributes}"
)
for var in f.parameters:
if var.location == "calldata":
calldata_start = var.source_mapping["start"]
calldata_end = calldata_start + var.source_mapping["length"]
calldata_idx = content[calldata_start:calldata_end].find(
" calldata "
)
to_patch.append(
Patch(
calldata_start + calldata_idx + 1,
"calldata_to_memory",
)
)
if self._private_to_internal:
for variable in contract.state_variables_declared:
if variable.visibility == "private":
print(variable.source_mapping)
attributes_start = variable.source_mapping["start"]
attributes_end = attributes_start + variable.source_mapping["length"]
attributes = content[attributes_start:attributes_end]
print(attributes)
regex = re.search(r" private ", attributes)
if regex:
to_patch.append(
Patch(
attributes_start + regex.span()[0] + 1,
"private_to_internal",
)
)
else:
raise SlitherException(
f"private keyword not found {variable.name} {attributes}"
)
if self._remove_assert:
for function in contract.functions_and_modifiers_declared:
for node in function.nodes:
for ir in node.irs:
if isinstance(ir, SolidityCall) and ir.function == SolidityFunction(
"assert(bool)"
):
to_patch.append(
Patch(node.source_mapping["start"], "line_removal")
)
logger.info(
f"Code commented: {node.expression} ({node.source_mapping_str})"
)
to_patch.sort(key=lambda x: x.index, reverse=True)
content = content[start:end]
for patch in to_patch:
patch_type = patch.patch_type
index = patch.index
index = index - start
if patch_type == "public_to_external":
content = content[:index] + "public" + content[index + len("external") :]
if patch_type == "private_to_internal":
content = content[:index] + "internal" + content[index + len("private") :]
elif patch_type == "calldata_to_memory":
content = content[:index] + "memory" + content[index + len("calldata") :]
else:
assert patch_type == "line_removal"
content = content[:index] + " // " + content[index:]
self._source_codes[contract] = content.decode("utf8")
|
def _get_source_code(self, contract: Contract): # pylint: disable=too-many-branches,too-many-statements
"""
Save the source code of the contract in self._source_codes
Patch the source code
:param contract:
:return:
"""
src_mapping = contract.source_mapping
content = self._slither.source_code[src_mapping["filename_absolute"]].encode("utf8")
start = src_mapping["start"]
end = src_mapping["start"] + src_mapping["length"]
to_patch = []
# interface must use external
if self._external_to_public and contract.contract_kind != "interface":
for f in contract.functions_declared:
# fallback must be external
if f.is_fallback or f.is_constructor_variables:
continue
if f.visibility == "external":
attributes_start = (
f.parameters_src.source_mapping["start"]
+ f.parameters_src.source_mapping["length"]
)
attributes_end = f.returns_src.source_mapping["start"]
attributes = content[attributes_start:attributes_end]
regex = re.search(
r"((\sexternal)\s+)|(\sexternal)$|(\)external)$", attributes
)
if regex:
to_patch.append(
Patch(
attributes_start + regex.span()[0] + 1,
"public_to_external",
)
)
else:
raise SlitherException(
f"External keyword not found {f.name} {attributes}"
)
for var in f.parameters:
if var.location == "calldata":
calldata_start = var.source_mapping["start"]
calldata_end = calldata_start + var.source_mapping["length"]
calldata_idx = content[calldata_start:calldata_end].find(
" calldata "
)
to_patch.append(
Patch(
calldata_start + calldata_idx + 1,
"calldata_to_memory",
)
)
if self._private_to_internal:
for variable in contract.state_variables_declared:
if variable.visibility == "private":
print(variable.source_mapping)
attributes_start = variable.source_mapping["start"]
attributes_end = attributes_start + variable.source_mapping["length"]
attributes = content[attributes_start:attributes_end]
print(attributes)
regex = re.search(r" private ", attributes)
if regex:
to_patch.append(
Patch(
attributes_start + regex.span()[0] + 1,
"private_to_internal",
)
)
else:
raise SlitherException(
f"private keyword not found {variable.name} {attributes}"
)
if self._remove_assert:
for function in contract.functions_and_modifiers_declared:
for node in function.nodes:
for ir in node.irs:
if isinstance(ir, SolidityCall) and ir.function == SolidityFunction(
"assert(bool)"
):
to_patch.append(
Patch(node.source_mapping["start"], "line_removal")
)
logger.info(
f"Code commented: {node.expression} ({node.source_mapping_str})"
)
to_patch.sort(key=lambda x: x.index, reverse=True)
content = content[start:end]
for patch in to_patch:
patch_type = patch.patch_type
index = patch.index
index = index - start
if patch_type == "public_to_external":
content = content[:index] + "public" + content[index + len("external") :]
if patch_type == "private_to_internal":
content = content[:index] + "internal" + content[index + len("private") :]
elif patch_type == "calldata_to_memory":
content = content[:index] + "memory" + content[index + len("calldata") :]
else:
assert patch_type == "line_removal"
content = content[:index] + " // " + content[index:]
self._source_codes[contract] = content.decode("utf8")
|
https://github.com/crytic/slither/issues/771
|
Traceback (most recent call last):
File "/Users/nataliechin/.virtualenvs/slither-dev/bin/slither-flat", line 33, in <module>
sys.exit(load_entry_point('slither-analyzer', 'console_scripts', 'slither-flat')())
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/__main__.py", line 100, in main
flat = Flattening(
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 67, in __init__
self._get_source_code(contract)
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 103, in _get_source_code
f.parameters_src.source_mapping["start"]
AttributeError: 'FunctionContract' object has no attribute 'parameters_src'
|
AttributeError
|
def _export_from_type(self, t, contract, exported, list_contract):
if isinstance(t, UserDefinedType):
if isinstance(t.type, (EnumContract, StructureContract)):
if t.type.contract != contract and t.type.contract not in exported:
self._export_list_used_contracts(
t.type.contract, exported, list_contract
)
else:
assert isinstance(t.type, Contract)
if t.type != contract and t.type not in exported:
self._export_list_used_contracts(t.type, exported, list_contract)
elif isinstance(t, MappingType):
self._export_from_type(t.type_from, contract, exported, list_contract)
self._export_from_type(t.type_to, contract, exported, list_contract)
elif isinstance(t, ArrayType):
self._export_from_type(t.type, contract, exported, list_contract)
|
def _export_from_type(self, t, contract, exported, list_contract):
if isinstance(t, UserDefinedType):
if isinstance(t.type, (Enum, Structure)):
if t.type.contract != contract and t.type.contract not in exported:
self._export_list_used_contracts(
t.type.contract, exported, list_contract
)
else:
assert isinstance(t.type, Contract)
if t.type != contract and t.type not in exported:
self._export_list_used_contracts(t.type, exported, list_contract)
elif isinstance(t, MappingType):
self._export_from_type(t.type_from, contract, exported, list_contract)
self._export_from_type(t.type_to, contract, exported, list_contract)
elif isinstance(t, ArrayType):
self._export_from_type(t.type, contract, exported, list_contract)
|
https://github.com/crytic/slither/issues/771
|
Traceback (most recent call last):
File "/Users/nataliechin/.virtualenvs/slither-dev/bin/slither-flat", line 33, in <module>
sys.exit(load_entry_point('slither-analyzer', 'console_scripts', 'slither-flat')())
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/__main__.py", line 100, in main
flat = Flattening(
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 67, in __init__
self._get_source_code(contract)
File "/Users/nataliechin/GitHub/slither/slither/tools/flattening/flattening.py", line 103, in _get_source_code
f.parameters_src.source_mapping["start"]
AttributeError: 'FunctionContract' object has no attribute 'parameters_src'
|
AttributeError
|
def propagate_type_and_convert_call(result, node):
"""
Propagate the types variables and convert tmp call to real call operation
"""
calls_value = {}
calls_gas = {}
call_data = []
idx = 0
# use of while len() as result can be modified during the iteration
while idx < len(result):
ins = result[idx]
if isinstance(ins, TmpCall):
new_ins = extract_tmp_call(ins, node.function.contract)
if new_ins:
new_ins.set_node(ins.node)
ins = new_ins
result[idx] = ins
if isinstance(ins, Argument):
if ins.get_type() in [ArgumentType.GAS]:
assert not ins.call_id in calls_gas
calls_gas[ins.call_id] = ins.argument
elif ins.get_type() in [ArgumentType.VALUE]:
assert not ins.call_id in calls_value
calls_value[ins.call_id] = ins.argument
else:
assert ins.get_type() == ArgumentType.CALL
call_data.append(ins.argument)
if isinstance(ins, (HighLevelCall, NewContract, InternalDynamicCall)):
if ins.call_id in calls_value:
ins.call_value = calls_value[ins.call_id]
if ins.call_id in calls_gas:
ins.call_gas = calls_gas[ins.call_id]
if isinstance(ins, (Call, NewContract, NewStructure)):
# We might have stored some arguments for libraries
if ins.arguments:
call_data = ins.arguments + call_data
ins.arguments = call_data
call_data = []
if is_temporary(ins):
del result[idx]
continue
new_ins = propagate_types(ins, node)
if new_ins:
if isinstance(new_ins, (list,)):
if len(new_ins) == 2:
new_ins[0].set_node(ins.node)
new_ins[1].set_node(ins.node)
del result[idx]
result.insert(idx, new_ins[0])
result.insert(idx + 1, new_ins[1])
idx = idx + 1
elif len(new_ins) == 3:
new_ins[0].set_node(ins.node)
new_ins[1].set_node(ins.node)
new_ins[2].set_node(ins.node)
del result[idx]
result.insert(idx, new_ins[0])
result.insert(idx + 1, new_ins[1])
result.insert(idx + 2, new_ins[2])
idx = idx + 2
else:
# Pop conversion
assert len(new_ins) == 6
new_ins[0].set_node(ins.node)
new_ins[1].set_node(ins.node)
new_ins[2].set_node(ins.node)
new_ins[3].set_node(ins.node)
new_ins[4].set_node(ins.node)
new_ins[5].set_node(ins.node)
del result[idx]
result.insert(idx, new_ins[0])
result.insert(idx + 1, new_ins[1])
result.insert(idx + 2, new_ins[2])
result.insert(idx + 3, new_ins[3])
result.insert(idx + 4, new_ins[4])
result.insert(idx + 5, new_ins[5])
idx = idx + 5
else:
new_ins.set_node(ins.node)
result[idx] = new_ins
idx = idx + 1
return result
|
def propagate_type_and_convert_call(result, node):
"""
Propagate the types variables and convert tmp call to real call operation
"""
calls_value = {}
calls_gas = {}
call_data = []
idx = 0
# use of while len() as result can be modified during the iteration
while idx < len(result):
ins = result[idx]
if isinstance(ins, TmpCall):
new_ins = extract_tmp_call(ins, node.function.contract)
if new_ins:
new_ins.set_node(ins.node)
ins = new_ins
result[idx] = ins
if isinstance(ins, Argument):
if ins.get_type() in [ArgumentType.GAS]:
assert not ins.call_id in calls_gas
calls_gas[ins.call_id] = ins.argument
elif ins.get_type() in [ArgumentType.VALUE]:
assert not ins.call_id in calls_value
calls_value[ins.call_id] = ins.argument
else:
assert ins.get_type() == ArgumentType.CALL
call_data.append(ins.argument)
if isinstance(ins, (HighLevelCall, NewContract, InternalDynamicCall)):
if ins.call_id in calls_value:
ins.call_value = calls_value[ins.call_id]
if ins.call_id in calls_gas:
ins.call_gas = calls_gas[ins.call_id]
if isinstance(ins, (Call, NewContract, NewStructure)):
ins.arguments = call_data
call_data = []
if is_temporary(ins):
del result[idx]
continue
new_ins = propagate_types(ins, node)
if new_ins:
if isinstance(new_ins, (list,)):
if len(new_ins) == 2:
new_ins[0].set_node(ins.node)
new_ins[1].set_node(ins.node)
del result[idx]
result.insert(idx, new_ins[0])
result.insert(idx + 1, new_ins[1])
idx = idx + 1
elif len(new_ins) == 3:
new_ins[0].set_node(ins.node)
new_ins[1].set_node(ins.node)
new_ins[2].set_node(ins.node)
del result[idx]
result.insert(idx, new_ins[0])
result.insert(idx + 1, new_ins[1])
result.insert(idx + 2, new_ins[2])
idx = idx + 2
else:
# Pop conversion
assert len(new_ins) == 6
new_ins[0].set_node(ins.node)
new_ins[1].set_node(ins.node)
new_ins[2].set_node(ins.node)
new_ins[3].set_node(ins.node)
new_ins[4].set_node(ins.node)
new_ins[5].set_node(ins.node)
del result[idx]
result.insert(idx, new_ins[0])
result.insert(idx + 1, new_ins[1])
result.insert(idx + 2, new_ins[2])
result.insert(idx + 3, new_ins[3])
result.insert(idx + 4, new_ins[4])
result.insert(idx + 5, new_ins[5])
idx = idx + 5
else:
new_ins.set_node(ins.node)
result[idx] = new_ins
idx = idx + 1
return result
|
https://github.com/crytic/slither/issues/589
|
ERROR:root:Error in .\function_members.sol
ERROR:root:Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl
(slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,
File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all
(slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(
File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single
slither = Slither(target,
File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts
self._convert_to_slithir()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir
func.generate_slithir_and_analyze()
File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze
node.slithir_generation()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation
self._irs = convert_expression(expression, self)
File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression
visitor = ExpressionToSlithIR(expression, node)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 103, in __init__
self._visit_expression(self.expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 51, in _visit_expression
self._visit_call_expression(expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 108, in _visit_call_expression
self._visit_expression(expression.called)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 95, in _visit_expression
self._post_visit(expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 289, in _post_visit
self._post_member_access(expression)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 318, in _post_member_access
member = Member(expr, Constant(expression.member_name), val)
File "c:\users\x\documents\github\slither\slither\slithir\operations\member.py", line 14, in __init__
assert is_valid_rvalue(variable_left) or isinstance(variable_left, (Contract, Enum))
AssertionError
|
AssertionError
|
def propagate_types(ir, node): # pylint: disable=too-many-locals
# propagate the type
using_for = node.function.contract.using_for
if isinstance(ir, OperationWithLValue):
# Force assignment in case of missing previous correct type
if not ir.lvalue.type:
if isinstance(ir, Assignment):
ir.lvalue.set_type(ir.rvalue.type)
elif isinstance(ir, Binary):
if BinaryType.return_bool(ir.type):
ir.lvalue.set_type(ElementaryType("bool"))
else:
ir.lvalue.set_type(ir.variable_left.type)
elif isinstance(ir, Delete):
# nothing to propagate
pass
elif isinstance(ir, LibraryCall):
return convert_type_library_call(ir, ir.destination)
elif isinstance(ir, HighLevelCall):
t = ir.destination.type
# Temporary operation (they are removed later)
if t is None:
return None
if isinstance(t, ElementaryType) and t.name == "address":
if can_be_solidity_func(ir):
return convert_to_solidity_func(ir)
# convert library
if t in using_for or "*" in using_for:
new_ir = convert_to_library(ir, node, using_for)
if new_ir:
return new_ir
if isinstance(t, UserDefinedType):
# UserdefinedType
t_type = t.type
if isinstance(t_type, Contract):
contract = node.slither.get_contract_from_name(t_type.name)
return convert_type_of_high_and_internal_level_call(
ir, contract
)
# Convert HighLevelCall to LowLevelCall
if isinstance(t, ElementaryType) and t.name == "address":
if ir.destination.name == "this":
return convert_type_of_high_and_internal_level_call(
ir, node.function.contract
)
if can_be_low_level(ir):
return convert_to_low_level(ir)
# Convert push operations
# May need to insert a new operation
# Which leads to return a list of operation
if isinstance(t, ArrayType) or (
isinstance(t, ElementaryType) and t.type == "bytes"
):
if ir.function_name == "push" and len(ir.arguments) == 1:
return convert_to_push(ir, node)
if ir.function_name == "pop" and len(ir.arguments) == 0:
return convert_to_pop(ir, node)
elif isinstance(ir, Index):
if isinstance(ir.variable_left.type, MappingType):
ir.lvalue.set_type(ir.variable_left.type.type_to)
elif isinstance(ir.variable_left.type, ArrayType):
ir.lvalue.set_type(ir.variable_left.type.type)
elif isinstance(ir, InitArray):
length = len(ir.init_values)
t = ir.init_values[0].type
ir.lvalue.set_type(ArrayType(t, length))
elif isinstance(ir, InternalCall):
# if its not a tuple, return a singleton
if ir.function is None:
convert_type_of_high_and_internal_level_call(
ir, node.function.contract
)
return_type = ir.function.return_type
if return_type:
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
elif len(return_type) > 1:
ir.lvalue.set_type(return_type)
else:
ir.lvalue = None
elif isinstance(ir, InternalDynamicCall):
# if its not a tuple, return a singleton
return_type = ir.function_type.return_type
if return_type:
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
else:
ir.lvalue.set_type(return_type)
else:
ir.lvalue = None
elif isinstance(ir, LowLevelCall):
# Call are not yet converted
# This should not happen
assert False
elif isinstance(ir, Member):
# TODO we should convert the reference to a temporary if the member is a length or a balance
if (
ir.variable_right == "length"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, (ElementaryType, ArrayType))
):
length = Length(ir.variable_left, ir.lvalue)
length.set_expression(ir.expression)
length.lvalue.points_to = ir.variable_left
length.set_node(ir.node)
return length
if (
ir.variable_right == "balance"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, ElementaryType)
):
b = Balance(ir.variable_left, ir.lvalue)
b.set_expression(ir.expression)
b.set_node(ir.node)
return b
if (
ir.variable_right == "codesize"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, ElementaryType)
):
b = CodeSize(ir.variable_left, ir.lvalue)
b.set_expression(ir.expression)
b.set_node(ir.node)
return b
if ir.variable_right == "selector" and isinstance(
ir.variable_left.type, Function
):
assignment = Assignment(
ir.lvalue,
Constant(str(get_function_id(ir.variable_left.type.full_name))),
ElementaryType("bytes4"),
)
assignment.set_expression(ir.expression)
assignment.set_node(ir.node)
assignment.lvalue.set_type(ElementaryType("bytes4"))
return assignment
if isinstance(ir.variable_left, TemporaryVariable) and isinstance(
ir.variable_left.type, TypeInformation
):
return _convert_type_contract(ir, node.function.slither)
left = ir.variable_left
t = None
# Handling of this.function_name usage
if (
left == SolidityVariable("this")
and isinstance(ir.variable_right, Constant)
and str(ir.variable_right)
in [x.name for x in ir.function.contract.functions]
):
# Assumption that this.function_name can only compile if
# And the contract does not have two functions starting with function_name
# Otherwise solc raises:
# Error: Member "f" not unique after argument-dependent lookup in contract
targeted_function = next(
(
x
for x in ir.function.contract.functions
if x.name == str(ir.variable_right)
)
)
t = _make_function_type(targeted_function)
ir.lvalue.set_type(t)
elif isinstance(left, (Variable, SolidityVariable)):
t = ir.variable_left.type
elif isinstance(left, (Contract, Enum, Structure)):
t = UserDefinedType(left)
# can be None due to temporary operation
if t:
if isinstance(t, UserDefinedType):
# UserdefinedType
type_t = t.type
if isinstance(type_t, Enum):
ir.lvalue.set_type(t)
elif isinstance(type_t, Structure):
elems = type_t.elems
for elem in elems:
if elem == ir.variable_right:
ir.lvalue.set_type(elems[elem].type)
else:
assert isinstance(type_t, Contract)
# Allow type propagtion as a Function
# Only for reference variables
# This allows to track the selector keyword
# We dont need to check for function collision, as solc prevents the use of selector
# if there are multiple functions with the same name
f = next(
(
f
for f in type_t.functions
if f.name == ir.variable_right
),
None,
)
if f:
ir.lvalue.set_type(f)
else:
# Allow propgation for variable access through contract's nale
# like Base_contract.my_variable
v = next(
(
v
for v in type_t.state_variables
if v.name == ir.variable_right
),
None,
)
if v:
ir.lvalue.set_type(v.type)
elif isinstance(ir, NewArray):
ir.lvalue.set_type(ir.array_type)
elif isinstance(ir, NewContract):
contract = node.slither.get_contract_from_name(ir.contract_name)
ir.lvalue.set_type(UserDefinedType(contract))
elif isinstance(ir, NewElementaryType):
ir.lvalue.set_type(ir.type)
elif isinstance(ir, NewStructure):
ir.lvalue.set_type(UserDefinedType(ir.structure))
elif isinstance(ir, Push):
# No change required
pass
elif isinstance(ir, Send):
ir.lvalue.set_type(ElementaryType("bool"))
elif isinstance(ir, SolidityCall):
if ir.function.name in ["type(address)", "type()"]:
ir.function.return_type = [TypeInformation(ir.arguments[0])]
return_type = ir.function.return_type
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
elif len(return_type) > 1:
ir.lvalue.set_type(return_type)
elif isinstance(ir, TypeConversion):
ir.lvalue.set_type(ir.type)
elif isinstance(ir, Unary):
ir.lvalue.set_type(ir.rvalue.type)
elif isinstance(ir, Unpack):
types = ir.tuple.type.type
idx = ir.index
t = types[idx]
ir.lvalue.set_type(t)
elif isinstance(
ir,
(
Argument,
TmpCall,
TmpNewArray,
TmpNewContract,
TmpNewStructure,
TmpNewElementaryType,
),
):
# temporary operation; they will be removed
pass
else:
raise SlithIRError(
"Not handling {} during type propagation".format(type(ir))
)
return None
|
def propagate_types(ir, node): # pylint: disable=too-many-locals
# propagate the type
using_for = node.function.contract.using_for
if isinstance(ir, OperationWithLValue):
# Force assignment in case of missing previous correct type
if not ir.lvalue.type:
if isinstance(ir, Assignment):
ir.lvalue.set_type(ir.rvalue.type)
elif isinstance(ir, Binary):
if BinaryType.return_bool(ir.type):
ir.lvalue.set_type(ElementaryType("bool"))
else:
ir.lvalue.set_type(ir.variable_left.type)
elif isinstance(ir, Delete):
# nothing to propagate
pass
elif isinstance(ir, LibraryCall):
return convert_type_library_call(ir, ir.destination)
elif isinstance(ir, HighLevelCall):
t = ir.destination.type
# Temporary operation (they are removed later)
if t is None:
return None
if isinstance(t, ElementaryType) and t.name == "address":
if can_be_solidity_func(ir):
return convert_to_solidity_func(ir)
# convert library
if t in using_for or "*" in using_for:
new_ir = convert_to_library(ir, node, using_for)
if new_ir:
return new_ir
if isinstance(t, UserDefinedType):
# UserdefinedType
t_type = t.type
if isinstance(t_type, Contract):
contract = node.slither.get_contract_from_name(t_type.name)
return convert_type_of_high_and_internal_level_call(
ir, contract
)
# Convert HighLevelCall to LowLevelCall
if isinstance(t, ElementaryType) and t.name == "address":
if ir.destination.name == "this":
return convert_type_of_high_and_internal_level_call(
ir, node.function.contract
)
if can_be_low_level(ir):
return convert_to_low_level(ir)
# Convert push operations
# May need to insert a new operation
# Which leads to return a list of operation
if isinstance(t, ArrayType) or (
isinstance(t, ElementaryType) and t.type == "bytes"
):
if ir.function_name == "push" and len(ir.arguments) == 1:
return convert_to_push(ir, node)
if ir.function_name == "pop" and len(ir.arguments) == 0:
return convert_to_pop(ir, node)
elif isinstance(ir, Index):
if isinstance(ir.variable_left.type, MappingType):
ir.lvalue.set_type(ir.variable_left.type.type_to)
elif isinstance(ir.variable_left.type, ArrayType):
ir.lvalue.set_type(ir.variable_left.type.type)
elif isinstance(ir, InitArray):
length = len(ir.init_values)
t = ir.init_values[0].type
ir.lvalue.set_type(ArrayType(t, length))
elif isinstance(ir, InternalCall):
# if its not a tuple, return a singleton
if ir.function is None:
convert_type_of_high_and_internal_level_call(
ir, node.function.contract
)
return_type = ir.function.return_type
if return_type:
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
elif len(return_type) > 1:
ir.lvalue.set_type(return_type)
else:
ir.lvalue = None
elif isinstance(ir, InternalDynamicCall):
# if its not a tuple, return a singleton
return_type = ir.function_type.return_type
if return_type:
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
else:
ir.lvalue.set_type(return_type)
else:
ir.lvalue = None
elif isinstance(ir, LowLevelCall):
# Call are not yet converted
# This should not happen
assert False
elif isinstance(ir, Member):
# TODO we should convert the reference to a temporary if the member is a length or a balance
if (
ir.variable_right == "length"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, (ElementaryType, ArrayType))
):
length = Length(ir.variable_left, ir.lvalue)
length.set_expression(ir.expression)
length.lvalue.points_to = ir.variable_left
length.set_node(ir.node)
return length
if (
ir.variable_right == "balance"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, ElementaryType)
):
b = Balance(ir.variable_left, ir.lvalue)
b.set_expression(ir.expression)
b.set_node(ir.node)
return b
if (
ir.variable_right == "codesize"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, ElementaryType)
):
b = CodeSize(ir.variable_left, ir.lvalue)
b.set_expression(ir.expression)
b.set_node(ir.node)
return b
if ir.variable_right == "selector" and isinstance(
ir.variable_left.type, Function
):
assignment = Assignment(
ir.lvalue,
Constant(str(get_function_id(ir.variable_left.type.full_name))),
ElementaryType("bytes4"),
)
assignment.set_expression(ir.expression)
assignment.set_node(ir.node)
assignment.lvalue.set_type(ElementaryType("bytes4"))
return assignment
if isinstance(ir.variable_left, TemporaryVariable) and isinstance(
ir.variable_left.type, TypeInformation
):
return _convert_type_contract(ir, node.function.slither)
left = ir.variable_left
t = None
# Handling of this.function_name usage
if (
left == SolidityVariable("this")
and isinstance(ir.variable_right, Constant)
and str(ir.variable_right)
in [x.name for x in ir.function.contract.functions]
):
# Assumption that this.function_name can only compile if
# And the contract does not have two functions starting with function_name
# Otherwise solc raises:
# Error: Member "f" not unique after argument-dependent lookup in contract
targeted_function = next(
(
x
for x in ir.function.contract.functions
if x.name == str(ir.variable_right)
)
)
parameters = []
returns = []
for parameter in targeted_function.parameters:
v = FunctionTypeVariable()
v.name = parameter.name
parameters.append(v)
for return_var in targeted_function.returns:
v = FunctionTypeVariable()
v.name = return_var.name
returns.append(v)
t = FunctionType(parameters, returns)
ir.lvalue.set_type(t)
elif isinstance(left, (Variable, SolidityVariable)):
t = ir.variable_left.type
elif isinstance(left, (Contract, Enum, Structure)):
t = UserDefinedType(left)
# can be None due to temporary operation
if t:
if isinstance(t, UserDefinedType):
# UserdefinedType
type_t = t.type
if isinstance(type_t, Enum):
ir.lvalue.set_type(t)
elif isinstance(type_t, Structure):
elems = type_t.elems
for elem in elems:
if elem == ir.variable_right:
ir.lvalue.set_type(elems[elem].type)
else:
assert isinstance(type_t, Contract)
# Allow type propagtion as a Function
# Only for reference variables
# This allows to track the selector keyword
# We dont need to check for function collision, as solc prevents the use of selector
# if there are multiple functions with the same name
f = next(
(
f
for f in type_t.functions
if f.name == ir.variable_right
),
None,
)
if f:
ir.lvalue.set_type(f)
else:
# Allow propgation for variable access through contract's nale
# like Base_contract.my_variable
v = next(
(
v
for v in type_t.state_variables
if v.name == ir.variable_right
),
None,
)
if v:
ir.lvalue.set_type(v.type)
elif isinstance(ir, NewArray):
ir.lvalue.set_type(ir.array_type)
elif isinstance(ir, NewContract):
contract = node.slither.get_contract_from_name(ir.contract_name)
ir.lvalue.set_type(UserDefinedType(contract))
elif isinstance(ir, NewElementaryType):
ir.lvalue.set_type(ir.type)
elif isinstance(ir, NewStructure):
ir.lvalue.set_type(UserDefinedType(ir.structure))
elif isinstance(ir, Push):
# No change required
pass
elif isinstance(ir, Send):
ir.lvalue.set_type(ElementaryType("bool"))
elif isinstance(ir, SolidityCall):
if ir.function.name in ["type(address)", "type()"]:
ir.function.return_type = [TypeInformation(ir.arguments[0])]
return_type = ir.function.return_type
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
elif len(return_type) > 1:
ir.lvalue.set_type(return_type)
elif isinstance(ir, TypeConversion):
ir.lvalue.set_type(ir.type)
elif isinstance(ir, Unary):
ir.lvalue.set_type(ir.rvalue.type)
elif isinstance(ir, Unpack):
types = ir.tuple.type.type
idx = ir.index
t = types[idx]
ir.lvalue.set_type(t)
elif isinstance(
ir,
(
Argument,
TmpCall,
TmpNewArray,
TmpNewContract,
TmpNewStructure,
TmpNewElementaryType,
),
):
# temporary operation; they will be removed
pass
else:
raise SlithIRError(
"Not handling {} during type propgation".format(type(ir))
)
return None
|
https://github.com/crytic/slither/issues/589
|
ERROR:root:Error in .\function_members.sol
ERROR:root:Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl
(slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,
File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all
(slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(
File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single
slither = Slither(target,
File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts
self._convert_to_slithir()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir
func.generate_slithir_and_analyze()
File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze
node.slithir_generation()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation
self._irs = convert_expression(expression, self)
File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression
visitor = ExpressionToSlithIR(expression, node)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 103, in __init__
self._visit_expression(self.expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 51, in _visit_expression
self._visit_call_expression(expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 108, in _visit_call_expression
self._visit_expression(expression.called)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 95, in _visit_expression
self._post_visit(expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 289, in _post_visit
self._post_member_access(expression)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 318, in _post_member_access
member = Member(expr, Constant(expression.member_name), val)
File "c:\users\x\documents\github\slither\slither\slithir\operations\member.py", line 14, in __init__
assert is_valid_rvalue(variable_left) or isinstance(variable_left, (Contract, Enum))
AssertionError
|
AssertionError
|
def extract_tmp_call(ins, contract): # pylint: disable=too-many-locals
assert isinstance(ins, TmpCall)
if isinstance(ins.called, Variable) and isinstance(ins.called.type, FunctionType):
# If the call is made to a variable member, where the member is this
# We need to convert it to a HighLelelCall and not an internal dynamic call
if isinstance(ins.ori, Member) and ins.ori.variable_left == SolidityVariable(
"this"
):
pass
else:
call = InternalDynamicCall(ins.lvalue, ins.called, ins.called.type)
call.set_expression(ins.expression)
call.call_id = ins.call_id
return call
if isinstance(ins.ori, Member):
# If there is a call on an inherited contract, it is an internal call or an event
if ins.ori.variable_left in contract.inheritance + [contract]:
if str(ins.ori.variable_right) in [f.name for f in contract.functions]:
internalcall = InternalCall(
(ins.ori.variable_right, ins.ori.variable_left.name),
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
internalcall.set_expression(ins.expression)
internalcall.call_id = ins.call_id
return internalcall
if str(ins.ori.variable_right) in [f.name for f in contract.events]:
eventcall = EventCall(ins.ori.variable_right)
eventcall.set_expression(ins.expression)
eventcall.call_id = ins.call_id
return eventcall
if isinstance(ins.ori.variable_left, Contract):
st = ins.ori.variable_left.get_structure_from_name(ins.ori.variable_right)
if st:
op = NewStructure(st, ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
return op
libcall = LibraryCall(
ins.ori.variable_left,
ins.ori.variable_right,
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
libcall.set_expression(ins.expression)
libcall.call_id = ins.call_id
return libcall
if isinstance(ins.ori.variable_left, Function):
# Support for library call where the parameter is a function
# We could merge this with the standard library handling
# Except that we will have some troubles with using_for
# As the type of the funciton will not match function()
# Additionally we do not have a correct view on the parameters of the tmpcall
# At this level
#
# library FunctionExtensions {
# function h(function() internal _t, uint8) internal { }
# }
# contract FunctionMembers {
# using FunctionExtensions for function();
#
# function f() public {
# f.h(1);
# }
# }
using_for = ins.node.function.contract.using_for
targeted_libraries = (
[] + using_for.get("*", []) + using_for.get(FunctionType([], []), [])
)
lib_contract: Contract
candidates = []
for lib_contract_type in targeted_libraries:
if not isinstance(lib_contract_type, UserDefinedType) and isinstance(
lib_contract_type.type, Contract
):
continue
lib_contract = lib_contract_type.type
for lib_func in lib_contract.functions:
if lib_func.name == ins.ori.variable_right:
candidates.append(lib_func)
if len(candidates) == 1:
lib_func = candidates[0]
lib_call = LibraryCall(
lib_func.contract,
Constant(lib_func.name),
len(lib_func.parameters),
ins.lvalue,
"d",
)
lib_call.set_expression(ins.expression)
lib_call.set_node(ins.node)
lib_call.call_gas = ins.call_gas
lib_call.call_id = ins.call_id
lib_call.set_node(ins.node)
lib_call.function = lib_func
lib_call.arguments.append(ins.ori.variable_left)
return lib_call
# We do not support something lik
# library FunctionExtensions {
# function h(function() internal _t, uint8) internal { }
# function h(function() internal _t, bool) internal { }
# }
# contract FunctionMembers {
# using FunctionExtensions for function();
#
# function f() public {
# f.h(1);
# }
# }
to_log = "Slither does not support dynamic functions to libraries if functions have the same name"
to_log += f"{[candidate.full_name for candidate in candidates]}"
raise SlithIRError(to_log)
msgcall = HighLevelCall(
ins.ori.variable_left,
ins.ori.variable_right,
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
msgcall.call_id = ins.call_id
if ins.call_gas:
msgcall.call_gas = ins.call_gas
if ins.call_value:
msgcall.call_value = ins.call_value
msgcall.set_expression(ins.expression)
return msgcall
if isinstance(ins.ori, TmpCall):
r = extract_tmp_call(ins.ori, contract)
r.set_node(ins.node)
return r
if isinstance(ins.called, SolidityVariableComposed):
if str(ins.called) == "block.blockhash":
ins.called = SolidityFunction("blockhash(uint256)")
elif str(ins.called) == "this.balance":
s = SolidityCall(
SolidityFunction("this.balance()"),
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
s.set_expression(ins.expression)
return s
if isinstance(ins.called, SolidityFunction):
s = SolidityCall(ins.called, ins.nbr_arguments, ins.lvalue, ins.type_call)
s.set_expression(ins.expression)
return s
if isinstance(ins.ori, TmpNewElementaryType):
n = NewElementaryType(ins.ori.type, ins.lvalue)
n.set_expression(ins.expression)
return n
if isinstance(ins.ori, TmpNewContract):
op = NewContract(Constant(ins.ori.contract_name), ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
if ins.call_value:
op.call_value = ins.call_value
if ins.call_salt:
op.call_salt = ins.call_salt
return op
if isinstance(ins.ori, TmpNewArray):
n = NewArray(ins.ori.depth, ins.ori.array_type, ins.lvalue)
n.set_expression(ins.expression)
return n
if isinstance(ins.called, Structure):
op = NewStructure(ins.called, ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
op.set_expression(ins.expression)
return op
if isinstance(ins.called, Event):
e = EventCall(ins.called.name)
e.set_expression(ins.expression)
return e
if isinstance(ins.called, Contract):
# Called a base constructor, where there is no constructor
if ins.called.constructor is None:
return Nop()
# Case where:
# contract A{ constructor(uint) }
# contract B is A {}
# contract C is B{ constructor() A(10) B() {}
# C calls B(), which does not exist
# Ideally we should compare here for the parameters types too
if len(ins.called.constructor.parameters) != ins.nbr_arguments:
return Nop()
internalcall = InternalCall(
ins.called.constructor, ins.nbr_arguments, ins.lvalue, ins.type_call
)
internalcall.call_id = ins.call_id
internalcall.set_expression(ins.expression)
return internalcall
raise Exception("Not extracted {} {}".format(type(ins.called), ins))
|
def extract_tmp_call(ins, contract):
assert isinstance(ins, TmpCall)
if isinstance(ins.called, Variable) and isinstance(ins.called.type, FunctionType):
# If the call is made to a variable member, where the member is this
# We need to convert it to a HighLelelCall and not an internal dynamic call
if isinstance(ins.ori, Member) and ins.ori.variable_left == SolidityVariable(
"this"
):
pass
else:
call = InternalDynamicCall(ins.lvalue, ins.called, ins.called.type)
call.set_expression(ins.expression)
call.call_id = ins.call_id
return call
if isinstance(ins.ori, Member):
# If there is a call on an inherited contract, it is an internal call or an event
if ins.ori.variable_left in contract.inheritance + [contract]:
if str(ins.ori.variable_right) in [f.name for f in contract.functions]:
internalcall = InternalCall(
(ins.ori.variable_right, ins.ori.variable_left.name),
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
internalcall.set_expression(ins.expression)
internalcall.call_id = ins.call_id
return internalcall
if str(ins.ori.variable_right) in [f.name for f in contract.events]:
eventcall = EventCall(ins.ori.variable_right)
eventcall.set_expression(ins.expression)
eventcall.call_id = ins.call_id
return eventcall
if isinstance(ins.ori.variable_left, Contract):
st = ins.ori.variable_left.get_structure_from_name(ins.ori.variable_right)
if st:
op = NewStructure(st, ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
return op
libcall = LibraryCall(
ins.ori.variable_left,
ins.ori.variable_right,
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
libcall.set_expression(ins.expression)
libcall.call_id = ins.call_id
return libcall
msgcall = HighLevelCall(
ins.ori.variable_left,
ins.ori.variable_right,
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
msgcall.call_id = ins.call_id
if ins.call_gas:
msgcall.call_gas = ins.call_gas
if ins.call_value:
msgcall.call_value = ins.call_value
msgcall.set_expression(ins.expression)
return msgcall
if isinstance(ins.ori, TmpCall):
r = extract_tmp_call(ins.ori, contract)
r.set_node(ins.node)
return r
if isinstance(ins.called, SolidityVariableComposed):
if str(ins.called) == "block.blockhash":
ins.called = SolidityFunction("blockhash(uint256)")
elif str(ins.called) == "this.balance":
s = SolidityCall(
SolidityFunction("this.balance()"),
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
s.set_expression(ins.expression)
return s
if isinstance(ins.called, SolidityFunction):
s = SolidityCall(ins.called, ins.nbr_arguments, ins.lvalue, ins.type_call)
s.set_expression(ins.expression)
return s
if isinstance(ins.ori, TmpNewElementaryType):
n = NewElementaryType(ins.ori.type, ins.lvalue)
n.set_expression(ins.expression)
return n
if isinstance(ins.ori, TmpNewContract):
op = NewContract(Constant(ins.ori.contract_name), ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
if ins.call_value:
op.call_value = ins.call_value
if ins.call_salt:
op.call_salt = ins.call_salt
return op
if isinstance(ins.ori, TmpNewArray):
n = NewArray(ins.ori.depth, ins.ori.array_type, ins.lvalue)
n.set_expression(ins.expression)
return n
if isinstance(ins.called, Structure):
op = NewStructure(ins.called, ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
op.set_expression(ins.expression)
return op
if isinstance(ins.called, Event):
e = EventCall(ins.called.name)
e.set_expression(ins.expression)
return e
if isinstance(ins.called, Contract):
# Called a base constructor, where there is no constructor
if ins.called.constructor is None:
return Nop()
# Case where:
# contract A{ constructor(uint) }
# contract B is A {}
# contract C is B{ constructor() A(10) B() {}
# C calls B(), which does not exist
# Ideally we should compare here for the parameters types too
if len(ins.called.constructor.parameters) != ins.nbr_arguments:
return Nop()
internalcall = InternalCall(
ins.called.constructor, ins.nbr_arguments, ins.lvalue, ins.type_call
)
internalcall.call_id = ins.call_id
internalcall.set_expression(ins.expression)
return internalcall
raise Exception("Not extracted {} {}".format(type(ins.called), ins))
|
https://github.com/crytic/slither/issues/589
|
ERROR:root:Error in .\function_members.sol
ERROR:root:Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl
(slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,
File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all
(slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(
File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single
slither = Slither(target,
File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts
self._convert_to_slithir()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir
func.generate_slithir_and_analyze()
File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze
node.slithir_generation()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation
self._irs = convert_expression(expression, self)
File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression
visitor = ExpressionToSlithIR(expression, node)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 103, in __init__
self._visit_expression(self.expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 51, in _visit_expression
self._visit_call_expression(expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 108, in _visit_call_expression
self._visit_expression(expression.called)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 95, in _visit_expression
self._post_visit(expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 289, in _post_visit
self._post_member_access(expression)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 318, in _post_member_access
member = Member(expr, Constant(expression.member_name), val)
File "c:\users\x\documents\github\slither\slither\slithir\operations\member.py", line 14, in __init__
assert is_valid_rvalue(variable_left) or isinstance(variable_left, (Contract, Enum))
AssertionError
|
AssertionError
|
def __init__(self, variable_left, variable_right, result):
# Function can happen for something like
# library FunctionExtensions {
# function h(function() internal _t, uint8) internal { }
# }
# contract FunctionMembers {
# using FunctionExtensions for function();
#
# function f() public {
# f.h(1);
# }
# }
assert is_valid_rvalue(variable_left) or isinstance(
variable_left, (Contract, Enum, Function)
)
assert isinstance(variable_right, Constant)
assert isinstance(result, ReferenceVariable)
super().__init__()
self._variable_left = variable_left
self._variable_right = variable_right
self._lvalue = result
self._gas = None
self._value = None
|
def __init__(self, variable_left, variable_right, result):
assert is_valid_rvalue(variable_left) or isinstance(variable_left, (Contract, Enum))
assert isinstance(variable_right, Constant)
assert isinstance(result, ReferenceVariable)
super().__init__()
self._variable_left = variable_left
self._variable_right = variable_right
self._lvalue = result
self._gas = None
self._value = None
|
https://github.com/crytic/slither/issues/589
|
ERROR:root:Error in .\function_members.sol
ERROR:root:Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl
(slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,
File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all
(slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(
File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single
slither = Slither(target,
File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts
self._convert_to_slithir()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir
func.generate_slithir_and_analyze()
File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze
node.slithir_generation()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation
self._irs = convert_expression(expression, self)
File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression
visitor = ExpressionToSlithIR(expression, node)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 103, in __init__
self._visit_expression(self.expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 51, in _visit_expression
self._visit_call_expression(expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 108, in _visit_call_expression
self._visit_expression(expression.called)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 95, in _visit_expression
self._post_visit(expression)
File "c:\users\x\documents\github\slither\slither\visitors\expression\expression.py", line 289, in _post_visit
self._post_member_access(expression)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 318, in _post_member_access
member = Member(expr, Constant(expression.member_name), val)
File "c:\users\x\documents\github\slither\slither\slithir\operations\member.py", line 14, in __init__
assert is_valid_rvalue(variable_left) or isinstance(variable_left, (Contract, Enum))
AssertionError
|
AssertionError
|
def propagate_types(ir, node):
# propagate the type
using_for = node.function.contract.using_for
if isinstance(ir, OperationWithLValue):
# Force assignment in case of missing previous correct type
if not ir.lvalue.type:
if isinstance(ir, Assignment):
ir.lvalue.set_type(ir.rvalue.type)
elif isinstance(ir, Binary):
if BinaryType.return_bool(ir.type):
ir.lvalue.set_type(ElementaryType("bool"))
else:
ir.lvalue.set_type(ir.variable_left.type)
elif isinstance(ir, Delete):
# nothing to propagate
pass
elif isinstance(ir, LibraryCall):
return convert_type_library_call(ir, ir.destination)
elif isinstance(ir, HighLevelCall):
t = ir.destination.type
# Temporary operation (they are removed later)
if t is None:
return
if isinstance(t, ElementaryType) and t.name == "address":
if can_be_solidity_func(ir):
return convert_to_solidity_func(ir)
# convert library
if t in using_for or "*" in using_for:
new_ir = convert_to_library(ir, node, using_for)
if new_ir:
return new_ir
if isinstance(t, UserDefinedType):
# UserdefinedType
t_type = t.type
if isinstance(t_type, Contract):
contract = node.slither.get_contract_from_name(t_type.name)
return convert_type_of_high_and_internal_level_call(
ir, contract
)
# Convert HighLevelCall to LowLevelCall
if isinstance(t, ElementaryType) and t.name == "address":
if ir.destination.name == "this":
return convert_type_of_high_and_internal_level_call(
ir, node.function.contract
)
if can_be_low_level(ir):
return convert_to_low_level(ir)
# Convert push operations
# May need to insert a new operation
# Which leads to return a list of operation
if isinstance(t, ArrayType) or (
isinstance(t, ElementaryType) and t.type == "bytes"
):
if ir.function_name == "push" and len(ir.arguments) == 1:
return convert_to_push(ir, node)
if ir.function_name == "pop" and len(ir.arguments) == 0:
return convert_to_pop(ir, node)
elif isinstance(ir, Index):
if isinstance(ir.variable_left.type, MappingType):
ir.lvalue.set_type(ir.variable_left.type.type_to)
elif isinstance(ir.variable_left.type, ArrayType):
ir.lvalue.set_type(ir.variable_left.type.type)
elif isinstance(ir, InitArray):
length = len(ir.init_values)
t = ir.init_values[0].type
ir.lvalue.set_type(ArrayType(t, length))
elif isinstance(ir, InternalCall):
# if its not a tuple, return a singleton
if ir.function is None:
convert_type_of_high_and_internal_level_call(
ir, node.function.contract
)
return_type = ir.function.return_type
if return_type:
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
elif len(return_type) > 1:
ir.lvalue.set_type(return_type)
else:
ir.lvalue = None
elif isinstance(ir, InternalDynamicCall):
# if its not a tuple, return a singleton
return_type = ir.function_type.return_type
if return_type:
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
else:
ir.lvalue.set_type(return_type)
else:
ir.lvalue = None
elif isinstance(ir, LowLevelCall):
# Call are not yet converted
# This should not happen
assert False
elif isinstance(ir, Member):
# TODO we should convert the reference to a temporary if the member is a length or a balance
if (
ir.variable_right == "length"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, (ElementaryType, ArrayType))
):
length = Length(ir.variable_left, ir.lvalue)
length.set_expression(ir.expression)
length.lvalue.points_to = ir.variable_left
length.set_node(ir.node)
return length
if (
ir.variable_right == "balance"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, ElementaryType)
):
b = Balance(ir.variable_left, ir.lvalue)
b.set_expression(ir.expression)
b.set_node(ir.node)
return b
if (
ir.variable_right == "codesize"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, ElementaryType)
):
b = CodeSize(ir.variable_left, ir.lvalue)
b.set_expression(ir.expression)
b.set_node(ir.node)
return b
if ir.variable_right == "selector" and isinstance(
ir.variable_left.type, Function
):
assignment = Assignment(
ir.lvalue,
Constant(str(get_function_id(ir.variable_left.type.full_name))),
ElementaryType("bytes4"),
)
assignment.set_expression(ir.expression)
assignment.set_node(ir.node)
assignment.lvalue.set_type(ElementaryType("bytes4"))
return assignment
if isinstance(ir.variable_left, TemporaryVariable) and isinstance(
ir.variable_left.type, TypeInformation
):
return _convert_type_contract(ir, node.function.slither)
left = ir.variable_left
t = None
# Handling of this.function_name usage
if (
left == SolidityVariable("this")
and isinstance(ir.variable_right, Constant)
and str(ir.variable_right)
in [x.name for x in ir.function.contract.functions]
):
# Assumption that this.function_name can only compile if
# And the contract does not have two functions starting with function_name
# Otherwise solc raises:
# Error: Member "f" not unique after argument-dependent lookup in contract
targeted_function = next(
(
x
for x in ir.function.contract.functions
if x.name == str(ir.variable_right)
)
)
parameters = []
returns = []
for parameter in targeted_function.parameters:
v = FunctionTypeVariable()
v.name = parameter.name
parameters.append(v)
for return_var in targeted_function.returns:
v = FunctionTypeVariable()
v.name = return_var.name
returns.append(v)
t = FunctionType(parameters, returns)
ir.lvalue.set_type(t)
elif isinstance(left, (Variable, SolidityVariable)):
t = ir.variable_left.type
elif isinstance(left, (Contract, Enum, Structure)):
t = UserDefinedType(left)
# can be None due to temporary operation
if t:
if isinstance(t, UserDefinedType):
# UserdefinedType
type_t = t.type
if isinstance(type_t, Enum):
ir.lvalue.set_type(t)
elif isinstance(type_t, Structure):
elems = type_t.elems
for elem in elems:
if elem == ir.variable_right:
ir.lvalue.set_type(elems[elem].type)
else:
assert isinstance(type_t, Contract)
# Allow type propagtion as a Function
# Only for reference variables
# This allows to track the selector keyword
# We dont need to check for function collision, as solc prevents the use of selector
# if there are multiple functions with the same name
f = next(
(
f
for f in type_t.functions
if f.name == ir.variable_right
),
None,
)
if f:
ir.lvalue.set_type(f)
else:
# Allow propgation for variable access through contract's nale
# like Base_contract.my_variable
v = next(
(
v
for v in type_t.state_variables
if v.name == ir.variable_right
),
None,
)
if v:
ir.lvalue.set_type(v.type)
elif isinstance(ir, NewArray):
ir.lvalue.set_type(ir.array_type)
elif isinstance(ir, NewContract):
contract = node.slither.get_contract_from_name(ir.contract_name)
ir.lvalue.set_type(UserDefinedType(contract))
elif isinstance(ir, NewElementaryType):
ir.lvalue.set_type(ir.type)
elif isinstance(ir, NewStructure):
ir.lvalue.set_type(UserDefinedType(ir.structure))
elif isinstance(ir, Push):
# No change required
pass
elif isinstance(ir, Send):
ir.lvalue.set_type(ElementaryType("bool"))
elif isinstance(ir, SolidityCall):
if ir.function.name in ["type(address)", "type()"]:
ir.function.return_type = [TypeInformation(ir.arguments[0])]
return_type = ir.function.return_type
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
elif len(return_type) > 1:
ir.lvalue.set_type(return_type)
elif isinstance(ir, TypeConversion):
ir.lvalue.set_type(ir.type)
elif isinstance(ir, Unary):
ir.lvalue.set_type(ir.rvalue.type)
elif isinstance(ir, Unpack):
types = ir.tuple.type.type
idx = ir.index
t = types[idx]
ir.lvalue.set_type(t)
elif isinstance(
ir,
(
Argument,
TmpCall,
TmpNewArray,
TmpNewContract,
TmpNewStructure,
TmpNewElementaryType,
),
):
# temporary operation; they will be removed
pass
else:
raise SlithIRError(
"Not handling {} during type propgation".format(type(ir))
)
|
def propagate_types(ir, node):
# propagate the type
using_for = node.function.contract.using_for
if isinstance(ir, OperationWithLValue):
# Force assignment in case of missing previous correct type
if not ir.lvalue.type:
if isinstance(ir, Assignment):
ir.lvalue.set_type(ir.rvalue.type)
elif isinstance(ir, Binary):
if BinaryType.return_bool(ir.type):
ir.lvalue.set_type(ElementaryType("bool"))
else:
ir.lvalue.set_type(ir.variable_left.type)
elif isinstance(ir, Delete):
# nothing to propagate
pass
elif isinstance(ir, LibraryCall):
return convert_type_library_call(ir, ir.destination)
elif isinstance(ir, HighLevelCall):
t = ir.destination.type
# Temporary operation (they are removed later)
if t is None:
return
if isinstance(t, ElementaryType) and t.name == "address":
if can_be_solidity_func(ir):
return convert_to_solidity_func(ir)
# convert library
if t in using_for or "*" in using_for:
new_ir = convert_to_library(ir, node, using_for)
if new_ir:
return new_ir
if isinstance(t, UserDefinedType):
# UserdefinedType
t_type = t.type
if isinstance(t_type, Contract):
contract = node.slither.get_contract_from_name(t_type.name)
return convert_type_of_high_and_internal_level_call(
ir, contract
)
# Convert HighLevelCall to LowLevelCall
if isinstance(t, ElementaryType) and t.name == "address":
if ir.destination.name == "this":
return convert_type_of_high_and_internal_level_call(
ir, node.function.contract
)
if can_be_low_level(ir):
return convert_to_low_level(ir)
# Convert push operations
# May need to insert a new operation
# Which leads to return a list of operation
if isinstance(t, ArrayType) or (
isinstance(t, ElementaryType) and t.type == "bytes"
):
if ir.function_name == "push" and len(ir.arguments) == 1:
return convert_to_push(ir, node)
if ir.function_name == "pop" and len(ir.arguments) == 0:
return convert_to_pop(ir, node)
elif isinstance(ir, Index):
if isinstance(ir.variable_left.type, MappingType):
ir.lvalue.set_type(ir.variable_left.type.type_to)
elif isinstance(ir.variable_left.type, ArrayType):
ir.lvalue.set_type(ir.variable_left.type.type)
elif isinstance(ir, InitArray):
length = len(ir.init_values)
t = ir.init_values[0].type
ir.lvalue.set_type(ArrayType(t, length))
elif isinstance(ir, InternalCall):
# if its not a tuple, return a singleton
if ir.function is None:
convert_type_of_high_and_internal_level_call(
ir, node.function.contract
)
return_type = ir.function.return_type
if return_type:
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
elif len(return_type) > 1:
ir.lvalue.set_type(return_type)
else:
ir.lvalue = None
elif isinstance(ir, InternalDynamicCall):
# if its not a tuple, return a singleton
return_type = ir.function_type.return_type
if return_type:
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
else:
ir.lvalue.set_type(return_type)
else:
ir.lvalue = None
elif isinstance(ir, LowLevelCall):
# Call are not yet converted
# This should not happen
assert False
elif isinstance(ir, Member):
# TODO we should convert the reference to a temporary if the member is a length or a balance
if (
ir.variable_right == "length"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, (ElementaryType, ArrayType))
):
length = Length(ir.variable_left, ir.lvalue)
length.set_expression(ir.expression)
length.lvalue.points_to = ir.variable_left
length.set_node(ir.node)
return length
if (
ir.variable_right == "balance"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, ElementaryType)
):
b = Balance(ir.variable_left, ir.lvalue)
b.set_expression(ir.expression)
b.set_node(ir.node)
return b
if (
ir.variable_right == "codesize"
and not isinstance(ir.variable_left, Contract)
and isinstance(ir.variable_left.type, ElementaryType)
):
b = CodeSize(ir.variable_left, ir.lvalue)
b.set_expression(ir.expression)
b.set_node(ir.node)
return b
if ir.variable_right == "selector" and isinstance(
ir.variable_left.type, Function
):
assignment = Assignment(
ir.lvalue,
Constant(str(get_function_id(ir.variable_left.type.full_name))),
ElementaryType("bytes4"),
)
assignment.set_expression(ir.expression)
assignment.set_node(ir.node)
assignment.lvalue.set_type(ElementaryType("bytes4"))
return assignment
if isinstance(ir.variable_left, TemporaryVariable) and isinstance(
ir.variable_left.type, TypeInformation
):
return _convert_type_contract(ir, node.function.slither)
left = ir.variable_left
t = None
if isinstance(left, (Variable, SolidityVariable)):
t = ir.variable_left.type
elif isinstance(left, (Contract, Enum, Structure)):
t = UserDefinedType(left)
# can be None due to temporary operation
if t:
if isinstance(t, UserDefinedType):
# UserdefinedType
type_t = t.type
if isinstance(type_t, Enum):
ir.lvalue.set_type(t)
elif isinstance(type_t, Structure):
elems = type_t.elems
for elem in elems:
if elem == ir.variable_right:
ir.lvalue.set_type(elems[elem].type)
else:
assert isinstance(type_t, Contract)
# Allow type propagtion as a Function
# Only for reference variables
# This allows to track the selector keyword
# We dont need to check for function collision, as solc prevents the use of selector
# if there are multiple functions with the same name
f = next(
(
f
for f in type_t.functions
if f.name == ir.variable_right
),
None,
)
if f:
ir.lvalue.set_type(f)
else:
# Allow propgation for variable access through contract's nale
# like Base_contract.my_variable
v = next(
(
v
for v in type_t.state_variables
if v.name == ir.variable_right
),
None,
)
if v:
ir.lvalue.set_type(v.type)
elif isinstance(ir, NewArray):
ir.lvalue.set_type(ir.array_type)
elif isinstance(ir, NewContract):
contract = node.slither.get_contract_from_name(ir.contract_name)
ir.lvalue.set_type(UserDefinedType(contract))
elif isinstance(ir, NewElementaryType):
ir.lvalue.set_type(ir.type)
elif isinstance(ir, NewStructure):
ir.lvalue.set_type(UserDefinedType(ir.structure))
elif isinstance(ir, Push):
# No change required
pass
elif isinstance(ir, Send):
ir.lvalue.set_type(ElementaryType("bool"))
elif isinstance(ir, SolidityCall):
if ir.function.name in ["type(address)", "type()"]:
ir.function.return_type = [TypeInformation(ir.arguments[0])]
return_type = ir.function.return_type
if len(return_type) == 1:
ir.lvalue.set_type(return_type[0])
elif len(return_type) > 1:
ir.lvalue.set_type(return_type)
elif isinstance(ir, TypeConversion):
ir.lvalue.set_type(ir.type)
elif isinstance(ir, Unary):
ir.lvalue.set_type(ir.rvalue.type)
elif isinstance(ir, Unpack):
types = ir.tuple.type.type
idx = ir.index
t = types[idx]
ir.lvalue.set_type(t)
elif isinstance(
ir,
(
Argument,
TmpCall,
TmpNewArray,
TmpNewContract,
TmpNewStructure,
TmpNewElementaryType,
),
):
# temporary operation; they will be removed
pass
else:
raise SlithIRError(
"Not handling {} during type propgation".format(type(ir))
)
|
https://github.com/crytic/slither/issues/592
|
Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 887, in _find_read_write_call
self._high_level_calls.append((ir.destination.type.type, ir.function))
AttributeError: 'NoneType' object has no attribute 'type'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl
(slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,
File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all
(slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(
File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single
slither = Slither(target,
File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts
self._convert_to_slithir()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir
func.generate_slithir_and_analyze()
File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze
node.slithir_generation()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 704, in slithir_generation
self._find_read_write_call()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 889, in _find_read_write_call
raise SlitherException(
slither.exceptions.SlitherException: Function not found on TMP_0(None) = HIGH_LEVEL_CALL, dest:REF_0(None), function:g, arguments:[] . Please try compiling with a recent Solidity version.
ERROR:root:Error:
ERROR:root:Function not found on TMP_0(None) = HIGH_LEVEL_CALL, dest:REF_0(None), function:g, arguments:[] . Please try compiling with a recent Solidity version.
ERROR:root:Please report an issue to https://github.com/crytic/slither/issues
|
AttributeError
|
def __init__(self, values):
# Note: Can return None
# ex: return call()
# where call() dont return
if not isinstance(values, list):
assert (
is_valid_rvalue(values)
or isinstance(values, (TupleVariable, Function))
or values is None
)
if values is None:
values = []
else:
values = [values]
else:
# Remove None
# Prior Solidity 0.5
# return (0,)
# was valid for returns(uint)
values = [v for v in values if not v is None]
self._valid_value(values)
super(Return, self).__init__()
self._values = values
|
def __init__(self, values):
# Note: Can return None
# ex: return call()
# where call() dont return
if not isinstance(values, list):
assert (
is_valid_rvalue(values)
or isinstance(values, TupleVariable)
or values is None
)
if values is None:
values = []
else:
values = [values]
else:
# Remove None
# Prior Solidity 0.5
# return (0,)
# was valid for returns(uint)
values = [v for v in values if not v is None]
self._valid_value(values)
super(Return, self).__init__()
self._values = values
|
https://github.com/crytic/slither/issues/590
|
ERROR:root:Error in .\function_returning_function.sol
ERROR:root:Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl
(slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,
File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all
(slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(
File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single
slither = Slither(target,
File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts
self._convert_to_slithir()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir
func.generate_slithir_and_analyze()
File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze
node.slithir_generation()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation
self._irs = convert_expression(expression, self)
File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression
visitor = ExpressionToSlithIR(expression, node)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 105, in __init__
r = Return(get(self.expression))
File "c:\users\x\documents\github\slither\slither\slithir\operations\return_operation.py", line 18, in __init__
assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None
AssertionError
|
AssertionError
|
def _valid_value(self, value):
if isinstance(value, list):
assert all(self._valid_value(v) for v in value)
else:
assert is_valid_rvalue(value) or isinstance(value, (TupleVariable, Function))
return True
|
def _valid_value(self, value):
if isinstance(value, list):
assert all(self._valid_value(v) for v in value)
else:
assert is_valid_rvalue(value) or isinstance(value, TupleVariable)
return True
|
https://github.com/crytic/slither/issues/590
|
ERROR:root:Error in .\function_returning_function.sol
ERROR:root:Traceback (most recent call last):
File "c:\users\x\documents\github\slither\slither\__main__.py", line 610, in main_impl
(slither_instances, results_detectors, results_printers, number_contracts) = process_all(filename, args,
File "c:\users\x\documents\github\slither\slither\__main__.py", line 67, in process_all
(slither, current_results_detectors, current_results_printers, current_analyzed_count) = process_single(
File "c:\users\x\documents\github\slither\slither\__main__.py", line 53, in process_single
slither = Slither(target,
File "c:\users\x\documents\github\slither\slither\slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 345, in analyze_contracts
self._convert_to_slithir()
File "c:\users\x\documents\github\slither\slither\solc_parsing\slitherSolc.py", line 489, in _convert_to_slithir
func.generate_slithir_and_analyze()
File "c:\users\x\documents\github\slither\slither\core\declarations\function.py", line 1652, in generate_slithir_and_analyze
node.slithir_generation()
File "c:\users\x\documents\github\slither\slither\core\cfg\node.py", line 702, in slithir_generation
self._irs = convert_expression(expression, self)
File "c:\users\x\documents\github\slither\slither\slithir\convert.py", line 64, in convert_expression
visitor = ExpressionToSlithIR(expression, node)
File "c:\users\x\documents\github\slither\slither\visitors\slithir\expression_to_slithir.py", line 105, in __init__
r = Return(get(self.expression))
File "c:\users\x\documents\github\slither\slither\slithir\operations\return_operation.py", line 18, in __init__
assert is_valid_rvalue(values) or isinstance(values, TupleVariable) or values is None
AssertionError
|
AssertionError
|
def parse_yul_identifier(
root: YulScope, node: YulNode, ast: Dict
) -> Optional[Expression]:
name = ast["name"]
if name in builtins:
return Identifier(YulBuiltin(name))
# check function-scoped variables
if root.parent_func:
variable = root.parent_func.get_local_variable_from_name(name)
if variable:
return Identifier(variable)
variable = root.parent_func.contract.get_state_variable_from_name(name)
if variable:
return Identifier(variable)
# check yul-scoped variable
variable = root.get_yul_local_variable_from_name(name)
if variable:
return Identifier(variable.underlying)
# check yul-scoped function
func = root.get_yul_local_function_from_name(name)
if func:
return Identifier(func.underlying)
# check for magic suffixes
if name.endswith("_slot"):
potential_name = name[:-5]
var = root.function.contract.get_state_variable_from_name(potential_name)
if var:
return Identifier(SolidityVariable(name))
if name.endswith("_offset"):
potential_name = name[:-7]
var = root.function.contract.get_state_variable_from_name(potential_name)
if var:
return Identifier(SolidityVariable(name))
raise SlitherException(f"unresolved reference to identifier {name}")
|
def parse_yul_identifier(
root: YulScope, node: YulNode, ast: Dict
) -> Optional[Expression]:
name = ast["name"]
if name in builtins:
return Identifier(YulBuiltin(name))
# check function-scoped variables
if root.parent_func:
variable = root.parent_func.get_local_variable_from_name(name)
if variable:
return Identifier(variable)
# check yul-scoped variable
variable = root.get_yul_local_variable_from_name(name)
if variable:
return Identifier(variable.underlying)
# check yul-scoped function
func = root.get_yul_local_function_from_name(name)
if func:
return Identifier(func.underlying)
# check for magic suffixes
if name.endswith("_slot"):
potential_name = name[:-5]
var = root.function.contract.get_state_variable_from_name(potential_name)
if var:
return Identifier(SolidityVariable(name))
if name.endswith("_offset"):
potential_name = name[:-7]
var = root.function.contract.get_state_variable_from_name(potential_name)
if var:
return Identifier(SolidityVariable(name))
raise SlitherException(f"unresolved reference to identifier {name}")
|
https://github.com/crytic/slither/issues/574
|
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/__main__.py", line 612, in main_impl
printer_classes)
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/__main__.py", line 68, in process_all
compilation, args, detector_classes, printer_classes)
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/__main__.py", line 55, in process_single
**vars(args))
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/slither.py", line 86, in __init__
self._parser.analyze_contracts()
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/slitherSolc.py", line 341, in analyze_contracts
self._analyze_third_part(contracts_to_be_analyzed, libraries)
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/slitherSolc.py", line 438, in _analyze_third_part
self._analyze_variables_modifiers_functions(contract)
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/slitherSolc.py", line 478, in _analyze_variables_modifiers_functions
contract.analyze_content_functions()
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/declarations/contract.py", line 356, in analyze_content_functions
function_parser.analyze_content()
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/declarations/function.py", line 303, in analyze_content
node_parser.analyze_expressions()
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 254, in analyze_expressions
node.analyze_expressions()
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 42, in analyze_expressions
expression = parse_yul(self._scope, self, self._unparsed_expression)
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 663, in parse_yul
op = parsers.get(ast['nodeType'], parse_yul_unsupported)(root, node, ast)
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 570, in parse_yul_assignment
return _parse_yul_assignment_common(root, node, ast, 'variableNames')
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 552, in _parse_yul_assignment_common
rhs = parse_yul(root, node, ast['value'])
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 663, in parse_yul
op = parsers.get(ast['nodeType'], parse_yul_unsupported)(root, node, ast)
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 574, in parse_yul_function_call
args = [parse_yul(root, node, arg) for arg in ast['arguments']]
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 574, in <listcomp>
args = [parse_yul(root, node, arg) for arg in ast['arguments']]
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 663, in parse_yul
op = parsers.get(ast['nodeType'], parse_yul_unsupported)(root, node, ast)
File "/usr/local/lib/python3.6/dist-packages/slither_analyzer-0.6.12-py3.6.egg/slither/solc_parsing/yul/parse_yul.py", line 637, in parse_yul_identifier
raise SlitherException(f"unresolved reference to identifier {name}")
slither.exceptions.SlitherException: unresolved reference to identifier PUBLIC_KEY_OFFSET
ERROR:root:Error:
ERROR:root:unresolved reference to identifier PUBLIC_KEY_OFFSET
ERROR:root:Please report an issue to https://github.com/crytic/slither/issues
|
slither.exceptions.SlitherException
|
def __init__(self, called, arguments, type_call):
assert isinstance(called, Expression)
super(CallExpression, self).__init__()
self._called = called
self._arguments = arguments
self._type_call = type_call
# gas and value are only available if the syntax is {gas: , value: }
# For the .gas().value(), the member are considered as function call
# And converted later to the correct info (convert.py)
self._gas = None
self._value = None
self._salt = None
|
def __init__(self, called, arguments, type_call):
assert isinstance(called, Expression)
super(CallExpression, self).__init__()
self._called = called
self._arguments = arguments
self._type_call = type_call
# gas and value are only available if the syntax is {gas: , value: }
# For the .gas().value(), the member are considered as function call
# And converted later to the correct info (convert.py)
self._gas = None
self._value = None
|
https://github.com/crytic/slither/issues/485
|
ERROR:root:Error in .
ERROR:root:Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 606, in main_impl
printer_classes)
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 68, in process_all
compilation, args, detector_classes, printer_classes)
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 55, in process_single
**vars(args))
File "/usr/local/lib/python3.7/site-packages/slither/slither.py", line 86, in __init__
self._analyze_contracts()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 254, in _analyze_contracts
self._analyze_third_part(contracts_to_be_analyzed, libraries)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 332, in _analyze_third_part
self._analyze_variables_modifiers_functions(contract)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 372, in _analyze_variables_modifiers_functions
contract.analyze_content_functions()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/declarations/contract.py", line 291, in analyze_content_functions
function.analyze_content()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/declarations/function.py", line 240, in analyze_content
node.analyze_expressions(self)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/cfg/node.py", line 31, in analyze_expressions
expression = parse_expression(self._unparsed_expression, caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 417, in parse_expression
return parse_call(expression, caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 271, in parse_call
called = parse_expression(expression['expression'], caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 422, in parse_expression
assert isinstance(called, MemberAccess)
AssertionError
|
AssertionError
|
def __str__(self):
txt = str(self._called)
if self.call_gas or self.call_value:
gas = f"gas: {self.call_gas}" if self.call_gas else ""
value = f"value: {self.call_value}" if self.call_value else ""
salt = f"salt: {self.call_salt}" if self.call_salt else ""
if gas or value or salt:
options = [gas, value, salt]
txt += "{" + ",".join([o for o in options if o != ""]) + "}"
return txt + "(" + ",".join([str(a) for a in self._arguments]) + ")"
|
def __str__(self):
txt = str(self._called)
if self.call_gas or self.call_value:
gas = f"gas: {self.call_gas}" if self.call_gas else ""
value = f"value: {self.call_value}" if self.call_value else ""
if gas and value:
txt += "{" + f"{gas}, {value}" + "}"
elif gas:
txt += "{" + f"{gas}" + "}"
else:
txt += "{" + f"{value}" + "}"
return txt + "(" + ",".join([str(a) for a in self._arguments]) + ")"
|
https://github.com/crytic/slither/issues/485
|
ERROR:root:Error in .
ERROR:root:Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 606, in main_impl
printer_classes)
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 68, in process_all
compilation, args, detector_classes, printer_classes)
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 55, in process_single
**vars(args))
File "/usr/local/lib/python3.7/site-packages/slither/slither.py", line 86, in __init__
self._analyze_contracts()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 254, in _analyze_contracts
self._analyze_third_part(contracts_to_be_analyzed, libraries)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 332, in _analyze_third_part
self._analyze_variables_modifiers_functions(contract)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 372, in _analyze_variables_modifiers_functions
contract.analyze_content_functions()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/declarations/contract.py", line 291, in analyze_content_functions
function.analyze_content()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/declarations/function.py", line 240, in analyze_content
node.analyze_expressions(self)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/cfg/node.py", line 31, in analyze_expressions
expression = parse_expression(self._unparsed_expression, caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 417, in parse_expression
return parse_call(expression, caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 271, in parse_call
called = parse_expression(expression['expression'], caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 422, in parse_expression
assert isinstance(called, MemberAccess)
AssertionError
|
AssertionError
|
def __init__(self, contract_name):
super(NewContract, self).__init__()
self._contract_name = contract_name
self._gas = None
self._value = None
self._salt = None
|
def __init__(self, contract_name):
super(NewContract, self).__init__()
self._contract_name = contract_name
|
https://github.com/crytic/slither/issues/485
|
ERROR:root:Error in .
ERROR:root:Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 606, in main_impl
printer_classes)
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 68, in process_all
compilation, args, detector_classes, printer_classes)
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 55, in process_single
**vars(args))
File "/usr/local/lib/python3.7/site-packages/slither/slither.py", line 86, in __init__
self._analyze_contracts()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 254, in _analyze_contracts
self._analyze_third_part(contracts_to_be_analyzed, libraries)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 332, in _analyze_third_part
self._analyze_variables_modifiers_functions(contract)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 372, in _analyze_variables_modifiers_functions
contract.analyze_content_functions()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/declarations/contract.py", line 291, in analyze_content_functions
function.analyze_content()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/declarations/function.py", line 240, in analyze_content
node.analyze_expressions(self)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/cfg/node.py", line 31, in analyze_expressions
expression = parse_expression(self._unparsed_expression, caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 417, in parse_expression
return parse_call(expression, caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 271, in parse_call
called = parse_expression(expression['expression'], caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 422, in parse_expression
assert isinstance(called, MemberAccess)
AssertionError
|
AssertionError
|
def extract_tmp_call(ins, contract):
assert isinstance(ins, TmpCall)
if isinstance(ins.called, Variable) and isinstance(ins.called.type, FunctionType):
call = InternalDynamicCall(ins.lvalue, ins.called, ins.called.type)
call.set_expression(ins.expression)
call.call_id = ins.call_id
return call
if isinstance(ins.ori, Member):
# If there is a call on an inherited contract, it is an internal call or an event
if ins.ori.variable_left in contract.inheritance + [contract]:
if str(ins.ori.variable_right) in [f.name for f in contract.functions]:
internalcall = InternalCall(
(ins.ori.variable_right, ins.ori.variable_left.name),
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
internalcall.set_expression(ins.expression)
internalcall.call_id = ins.call_id
return internalcall
if str(ins.ori.variable_right) in [f.name for f in contract.events]:
eventcall = EventCall(ins.ori.variable_right)
eventcall.set_expression(ins.expression)
eventcall.call_id = ins.call_id
return eventcall
if isinstance(ins.ori.variable_left, Contract):
st = ins.ori.variable_left.get_structure_from_name(ins.ori.variable_right)
if st:
op = NewStructure(st, ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
return op
libcall = LibraryCall(
ins.ori.variable_left,
ins.ori.variable_right,
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
libcall.set_expression(ins.expression)
libcall.call_id = ins.call_id
return libcall
msgcall = HighLevelCall(
ins.ori.variable_left,
ins.ori.variable_right,
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
msgcall.call_id = ins.call_id
if ins.call_gas:
msgcall.call_gas = ins.call_gas
if ins.call_value:
msgcall.call_value = ins.call_value
msgcall.set_expression(ins.expression)
return msgcall
if isinstance(ins.ori, TmpCall):
r = extract_tmp_call(ins.ori, contract)
r.set_node(ins.node)
return r
if isinstance(ins.called, SolidityVariableComposed):
if str(ins.called) == "block.blockhash":
ins.called = SolidityFunction("blockhash(uint256)")
elif str(ins.called) == "this.balance":
s = SolidityCall(
SolidityFunction("this.balance()"),
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
s.set_expression(ins.expression)
return s
if isinstance(ins.called, SolidityFunction):
s = SolidityCall(ins.called, ins.nbr_arguments, ins.lvalue, ins.type_call)
s.set_expression(ins.expression)
return s
if isinstance(ins.ori, TmpNewElementaryType):
n = NewElementaryType(ins.ori.type, ins.lvalue)
n.set_expression(ins.expression)
return n
if isinstance(ins.ori, TmpNewContract):
op = NewContract(Constant(ins.ori.contract_name), ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
if ins.call_value:
op.call_value = ins.call_value
if ins.call_salt:
op.call_salt = ins.call_salt
return op
if isinstance(ins.ori, TmpNewArray):
n = NewArray(ins.ori.depth, ins.ori.array_type, ins.lvalue)
n.set_expression(ins.expression)
return n
if isinstance(ins.called, Structure):
op = NewStructure(ins.called, ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
op.set_expression(ins.expression)
return op
if isinstance(ins.called, Event):
e = EventCall(ins.called.name)
e.set_expression(ins.expression)
return e
if isinstance(ins.called, Contract):
# Called a base constructor, where there is no constructor
if ins.called.constructor is None:
return Nop()
# Case where:
# contract A{ constructor(uint) }
# contract B is A {}
# contract C is B{ constructor() A(10) B() {}
# C calls B(), which does not exist
# Ideally we should compare here for the parameters types too
if len(ins.called.constructor.parameters) != ins.nbr_arguments:
return Nop()
internalcall = InternalCall(
ins.called.constructor, ins.nbr_arguments, ins.lvalue, ins.type_call
)
internalcall.call_id = ins.call_id
internalcall.set_expression(ins.expression)
return internalcall
raise Exception("Not extracted {} {}".format(type(ins.called), ins))
|
def extract_tmp_call(ins, contract):
assert isinstance(ins, TmpCall)
if isinstance(ins.called, Variable) and isinstance(ins.called.type, FunctionType):
call = InternalDynamicCall(ins.lvalue, ins.called, ins.called.type)
call.set_expression(ins.expression)
call.call_id = ins.call_id
return call
if isinstance(ins.ori, Member):
# If there is a call on an inherited contract, it is an internal call or an event
if ins.ori.variable_left in contract.inheritance + [contract]:
if str(ins.ori.variable_right) in [f.name for f in contract.functions]:
internalcall = InternalCall(
(ins.ori.variable_right, ins.ori.variable_left.name),
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
internalcall.set_expression(ins.expression)
internalcall.call_id = ins.call_id
return internalcall
if str(ins.ori.variable_right) in [f.name for f in contract.events]:
eventcall = EventCall(ins.ori.variable_right)
eventcall.set_expression(ins.expression)
eventcall.call_id = ins.call_id
return eventcall
if isinstance(ins.ori.variable_left, Contract):
st = ins.ori.variable_left.get_structure_from_name(ins.ori.variable_right)
if st:
op = NewStructure(st, ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
return op
libcall = LibraryCall(
ins.ori.variable_left,
ins.ori.variable_right,
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
libcall.set_expression(ins.expression)
libcall.call_id = ins.call_id
return libcall
msgcall = HighLevelCall(
ins.ori.variable_left,
ins.ori.variable_right,
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
msgcall.call_id = ins.call_id
if ins.call_gas:
msgcall.call_gas = ins.call_gas
if ins.call_value:
msgcall.call_value = ins.call_value
msgcall.set_expression(ins.expression)
return msgcall
if isinstance(ins.ori, TmpCall):
r = extract_tmp_call(ins.ori, contract)
r.set_node(ins.node)
return r
if isinstance(ins.called, SolidityVariableComposed):
if str(ins.called) == "block.blockhash":
ins.called = SolidityFunction("blockhash(uint256)")
elif str(ins.called) == "this.balance":
s = SolidityCall(
SolidityFunction("this.balance()"),
ins.nbr_arguments,
ins.lvalue,
ins.type_call,
)
s.set_expression(ins.expression)
return s
if isinstance(ins.called, SolidityFunction):
s = SolidityCall(ins.called, ins.nbr_arguments, ins.lvalue, ins.type_call)
s.set_expression(ins.expression)
return s
if isinstance(ins.ori, TmpNewElementaryType):
n = NewElementaryType(ins.ori.type, ins.lvalue)
n.set_expression(ins.expression)
return n
if isinstance(ins.ori, TmpNewContract):
op = NewContract(Constant(ins.ori.contract_name), ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
return op
if isinstance(ins.ori, TmpNewArray):
n = NewArray(ins.ori.depth, ins.ori.array_type, ins.lvalue)
n.set_expression(ins.expression)
return n
if isinstance(ins.called, Structure):
op = NewStructure(ins.called, ins.lvalue)
op.set_expression(ins.expression)
op.call_id = ins.call_id
op.set_expression(ins.expression)
return op
if isinstance(ins.called, Event):
e = EventCall(ins.called.name)
e.set_expression(ins.expression)
return e
if isinstance(ins.called, Contract):
# Called a base constructor, where there is no constructor
if ins.called.constructor is None:
return Nop()
# Case where:
# contract A{ constructor(uint) }
# contract B is A {}
# contract C is B{ constructor() A(10) B() {}
# C calls B(), which does not exist
# Ideally we should compare here for the parameters types too
if len(ins.called.constructor.parameters) != ins.nbr_arguments:
return Nop()
internalcall = InternalCall(
ins.called.constructor, ins.nbr_arguments, ins.lvalue, ins.type_call
)
internalcall.call_id = ins.call_id
internalcall.set_expression(ins.expression)
return internalcall
raise Exception("Not extracted {} {}".format(type(ins.called), ins))
|
https://github.com/crytic/slither/issues/485
|
ERROR:root:Error in .
ERROR:root:Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 606, in main_impl
printer_classes)
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 68, in process_all
compilation, args, detector_classes, printer_classes)
File "/usr/local/lib/python3.7/site-packages/slither/__main__.py", line 55, in process_single
**vars(args))
File "/usr/local/lib/python3.7/site-packages/slither/slither.py", line 86, in __init__
self._analyze_contracts()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 254, in _analyze_contracts
self._analyze_third_part(contracts_to_be_analyzed, libraries)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 332, in _analyze_third_part
self._analyze_variables_modifiers_functions(contract)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/slitherSolc.py", line 372, in _analyze_variables_modifiers_functions
contract.analyze_content_functions()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/declarations/contract.py", line 291, in analyze_content_functions
function.analyze_content()
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/declarations/function.py", line 240, in analyze_content
node.analyze_expressions(self)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/cfg/node.py", line 31, in analyze_expressions
expression = parse_expression(self._unparsed_expression, caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 417, in parse_expression
return parse_call(expression, caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 271, in parse_call
called = parse_expression(expression['expression'], caller_context)
File "/usr/local/lib/python3.7/site-packages/slither/solc_parsing/expressions/expression_parsing.py", line 422, in parse_expression
assert isinstance(called, MemberAccess)
AssertionError
|
AssertionError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.