content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
import pytest\n\nimport numpy as np\nfrom numpy import histogram, histogram_bin_edges, histogramdd\nfrom numpy.testing import (\n assert_,\n assert_allclose,\n assert_almost_equal,\n assert_array_almost_equal,\n assert_array_equal,\n assert_array_max_ulp,\n assert_equal,\n assert_raises,\n assert_raises_regex,\n suppress_warnings,\n)\n\n\nclass TestHistogram:\n\n def setup_method(self):\n pass\n\n def teardown_method(self):\n pass\n\n def test_simple(self):\n n = 100\n v = np.random.rand(n)\n (a, b) = histogram(v)\n # check if the sum of the bins equals the number of samples\n assert_equal(np.sum(a, axis=0), n)\n # check that the bin counts are evenly spaced when the data is from\n # a linear function\n (a, b) = histogram(np.linspace(0, 10, 100))\n assert_array_equal(a, 10)\n\n def test_one_bin(self):\n # Ticket 632\n hist, edges = histogram([1, 2, 3, 4], [1, 2])\n assert_array_equal(hist, [2, ])\n assert_array_equal(edges, [1, 2])\n assert_raises(ValueError, histogram, [1, 2], bins=0)\n h, e = histogram([1, 2], bins=1)\n assert_equal(h, np.array([2]))\n assert_allclose(e, np.array([1., 2.]))\n\n def test_density(self):\n # Check that the integral of the density equals 1.\n n = 100\n v = np.random.rand(n)\n a, b = histogram(v, density=True)\n area = np.sum(a * np.diff(b))\n assert_almost_equal(area, 1)\n\n # Check with non-constant bin widths\n v = np.arange(10)\n bins = [0, 1, 3, 6, 10]\n a, b = histogram(v, bins, density=True)\n assert_array_equal(a, .1)\n assert_equal(np.sum(a * np.diff(b)), 1)\n\n # Test that passing False works too\n a, b = histogram(v, bins, density=False)\n assert_array_equal(a, [1, 2, 3, 4])\n\n # Variable bin widths are especially useful to deal with\n # infinities.\n v = np.arange(10)\n bins = [0, 1, 3, 6, np.inf]\n a, b = histogram(v, bins, density=True)\n assert_array_equal(a, [.1, .1, .1, 0.])\n\n # Taken from a bug report from N. Becker on the numpy-discussion\n # mailing list Aug. 
6, 2010.\n counts, dmy = np.histogram(\n [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)\n assert_equal(counts, [.25, 0])\n\n def test_outliers(self):\n # Check that outliers are not tallied\n a = np.arange(10) + .5\n\n # Lower outliers\n h, b = histogram(a, range=[0, 9])\n assert_equal(h.sum(), 9)\n\n # Upper outliers\n h, b = histogram(a, range=[1, 10])\n assert_equal(h.sum(), 9)\n\n # Normalization\n h, b = histogram(a, range=[1, 9], density=True)\n assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)\n\n # Weights\n w = np.arange(10) + .5\n h, b = histogram(a, range=[1, 9], weights=w, density=True)\n assert_equal((h * np.diff(b)).sum(), 1)\n\n h, b = histogram(a, bins=8, range=[1, 9], weights=w)\n assert_equal(h, w[1:-1])\n\n def test_arr_weights_mismatch(self):\n a = np.arange(10) + .5\n w = np.arange(11) + .5\n with assert_raises_regex(ValueError, "same shape as"):\n h, b = histogram(a, range=[1, 9], weights=w, density=True)\n\n def test_type(self):\n # Check the type of the returned histogram\n a = np.arange(10) + .5\n h, b = histogram(a)\n assert_(np.issubdtype(h.dtype, np.integer))\n\n h, b = histogram(a, density=True)\n assert_(np.issubdtype(h.dtype, np.floating))\n\n h, b = histogram(a, weights=np.ones(10, int))\n assert_(np.issubdtype(h.dtype, np.integer))\n\n h, b = histogram(a, weights=np.ones(10, float))\n assert_(np.issubdtype(h.dtype, np.floating))\n\n def test_f32_rounding(self):\n # gh-4799, check that the rounding of the edges works with float32\n x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)\n y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)\n counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)\n assert_equal(counts_hist.sum(), 3.)\n\n def test_bool_conversion(self):\n # gh-12107\n # Reference integer histogram\n a = np.array([1, 1, 0], dtype=np.uint8)\n int_hist, int_edges = np.histogram(a)\n\n # Should raise an warning on booleans\n # Ensure that the histograms are equivalent, need 
to suppress\n # the warnings to get the actual outputs\n with suppress_warnings() as sup:\n rec = sup.record(RuntimeWarning, 'Converting input from .*')\n hist, edges = np.histogram([True, True, False])\n # A warning should be issued\n assert_equal(len(rec), 1)\n assert_array_equal(hist, int_hist)\n assert_array_equal(edges, int_edges)\n\n def test_weights(self):\n v = np.random.rand(100)\n w = np.ones(100) * 5\n a, b = histogram(v)\n na, nb = histogram(v, density=True)\n wa, wb = histogram(v, weights=w)\n nwa, nwb = histogram(v, weights=w, density=True)\n assert_array_almost_equal(a * 5, wa)\n assert_array_almost_equal(na, nwa)\n\n # Check weights are properly applied.\n v = np.linspace(0, 10, 10)\n w = np.concatenate((np.zeros(5), np.ones(5)))\n wa, wb = histogram(v, bins=np.arange(11), weights=w)\n assert_array_almost_equal(wa, w)\n\n # Check with integer weights\n wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])\n assert_array_equal(wa, [4, 5, 0, 1])\n wa, wb = histogram(\n [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)\n assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. 
* 4)\n\n # Check weights with non-uniform bin widths\n a, b = histogram(\n np.arange(9), [0, 1, 3, 6, 10],\n weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)\n assert_almost_equal(a, [.2, .1, .1, .075])\n\n def test_exotic_weights(self):\n\n # Test the use of weights that are not integer or floats, but e.g.\n # complex numbers or object types.\n\n # Complex weights\n values = np.array([1.3, 2.5, 2.3])\n weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])\n\n # Check with custom bins\n wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)\n assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))\n\n # Check with even bins\n wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)\n assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))\n\n # Decimal weights\n from decimal import Decimal\n values = np.array([1.3, 2.5, 2.3])\n weights = np.array([Decimal(1), Decimal(2), Decimal(3)])\n\n # Check with custom bins\n wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)\n assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])\n\n # Check with even bins\n wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)\n assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])\n\n def test_no_side_effects(self):\n # This is a regression test that ensures that values passed to\n # ``histogram`` are unchanged.\n values = np.array([1.3, 2.5, 2.3])\n np.histogram(values, range=[-10, 10], bins=100)\n assert_array_almost_equal(values, [1.3, 2.5, 2.3])\n\n def test_empty(self):\n a, b = histogram([], bins=([0, 1]))\n assert_array_equal(a, np.array([0]))\n assert_array_equal(b, np.array([0, 1]))\n\n def test_error_binnum_type(self):\n # Tests if right Error is raised if bins argument is float\n vals = np.linspace(0.0, 1.0, num=100)\n histogram(vals, 5)\n assert_raises(TypeError, histogram, vals, 2.4)\n\n def test_finite_range(self):\n # Normal ranges should be fine\n vals = np.linspace(0.0, 1.0, num=100)\n 
histogram(vals, range=[0.25, 0.75])\n assert_raises(ValueError, histogram, vals, range=[np.nan, 0.75])\n assert_raises(ValueError, histogram, vals, range=[0.25, np.inf])\n\n def test_invalid_range(self):\n # start of range must be < end of range\n vals = np.linspace(0.0, 1.0, num=100)\n with assert_raises_regex(ValueError, "max must be larger than"):\n np.histogram(vals, range=[0.1, 0.01])\n\n def test_bin_edge_cases(self):\n # Ensure that floating-point computations correctly place edge cases.\n arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])\n hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))\n mask = hist > 0\n left_edges = edges[:-1][mask]\n right_edges = edges[1:][mask]\n for x, left, right in zip(arr, left_edges, right_edges):\n assert_(x >= left)\n assert_(x < right)\n\n def test_last_bin_inclusive_range(self):\n arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])\n hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))\n assert_equal(hist[-1], 1)\n\n def test_bin_array_dims(self):\n # gracefully handle bins object > 1 dimension\n vals = np.linspace(0.0, 1.0, num=100)\n bins = np.array([[0, 0.5], [0.6, 1.0]])\n with assert_raises_regex(ValueError, "must be 1d"):\n np.histogram(vals, bins=bins)\n\n def test_unsigned_monotonicity_check(self):\n # Ensures ValueError is raised if bins not increasing monotonically\n # when bins contain unsigned values (see #9222)\n arr = np.array([2])\n bins = np.array([1, 3, 1], dtype='uint64')\n with assert_raises(ValueError):\n hist, edges = np.histogram(arr, bins=bins)\n\n def test_object_array_of_0d(self):\n # gh-7864\n assert_raises(ValueError,\n histogram, [np.array(0.4) for i in range(10)] + [-np.inf])\n assert_raises(ValueError,\n histogram, [np.array(0.4) for i in range(10)] + [np.inf])\n\n # these should not crash\n np.histogram([np.array(0.5) for i in range(10)] + [.500000000000002])\n np.histogram([np.array(0.5) for i in range(10)] + [.5])\n\n def test_some_nan_values(self):\n # gh-7503\n one_nan 
= np.array([0, 1, np.nan])\n all_nan = np.array([np.nan, np.nan])\n\n # the internal comparisons with NaN give warnings\n sup = suppress_warnings()\n sup.filter(RuntimeWarning)\n with sup:\n # can't infer range with nan\n assert_raises(ValueError, histogram, one_nan, bins='auto')\n assert_raises(ValueError, histogram, all_nan, bins='auto')\n\n # explicit range solves the problem\n h, b = histogram(one_nan, bins='auto', range=(0, 1))\n assert_equal(h.sum(), 2) # nan is not counted\n h, b = histogram(all_nan, bins='auto', range=(0, 1))\n assert_equal(h.sum(), 0) # nan is not counted\n\n # as does an explicit set of bins\n h, b = histogram(one_nan, bins=[0, 1])\n assert_equal(h.sum(), 2) # nan is not counted\n h, b = histogram(all_nan, bins=[0, 1])\n assert_equal(h.sum(), 0) # nan is not counted\n\n def test_datetime(self):\n begin = np.datetime64('2000-01-01', 'D')\n offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20])\n bins = np.array([0, 2, 7, 20])\n dates = begin + offsets\n date_bins = begin + bins\n\n td = np.dtype('timedelta64[D]')\n\n # Results should be the same for integer offsets or datetime values.\n # For now, only explicit bins are supported, since linspace does not\n # work on datetimes or timedeltas\n d_count, d_edge = histogram(dates, bins=date_bins)\n t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))\n i_count, i_edge = histogram(offsets, bins=bins)\n\n assert_equal(d_count, i_count)\n assert_equal(t_count, i_count)\n\n assert_equal((d_edge - begin).astype(int), i_edge)\n assert_equal(t_edge.astype(int), i_edge)\n\n assert_equal(d_edge.dtype, dates.dtype)\n assert_equal(t_edge.dtype, td)\n\n def do_signed_overflow_bounds(self, dtype):\n exponent = 8 * np.dtype(dtype).itemsize - 1\n arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)\n hist, e = histogram(arr, bins=2)\n assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])\n assert_equal(hist, [1, 1])\n\n def test_signed_overflow_bounds(self):\n 
self.do_signed_overflow_bounds(np.byte)\n self.do_signed_overflow_bounds(np.short)\n self.do_signed_overflow_bounds(np.intc)\n self.do_signed_overflow_bounds(np.int_)\n self.do_signed_overflow_bounds(np.longlong)\n\n def do_precision_lower_bound(self, float_small, float_large):\n eps = np.finfo(float_large).eps\n\n arr = np.array([1.0], float_small)\n range = np.array([1.0 + eps, 2.0], float_large)\n\n # test is looking for behavior when the bounds change between dtypes\n if range.astype(float_small)[0] != 1:\n return\n\n # previously crashed\n count, x_loc = np.histogram(arr, bins=1, range=range)\n assert_equal(count, [0])\n assert_equal(x_loc.dtype, float_large)\n\n def do_precision_upper_bound(self, float_small, float_large):\n eps = np.finfo(float_large).eps\n\n arr = np.array([1.0], float_small)\n range = np.array([0.0, 1.0 - eps], float_large)\n\n # test is looking for behavior when the bounds change between dtypes\n if range.astype(float_small)[-1] != 1:\n return\n\n # previously crashed\n count, x_loc = np.histogram(arr, bins=1, range=range)\n assert_equal(count, [0])\n\n assert_equal(x_loc.dtype, float_large)\n\n def do_precision(self, float_small, float_large):\n self.do_precision_lower_bound(float_small, float_large)\n self.do_precision_upper_bound(float_small, float_large)\n\n def test_precision(self):\n # not looping results in a useful stack trace upon failure\n self.do_precision(np.half, np.single)\n self.do_precision(np.half, np.double)\n self.do_precision(np.half, np.longdouble)\n self.do_precision(np.single, np.double)\n self.do_precision(np.single, np.longdouble)\n self.do_precision(np.double, np.longdouble)\n\n def test_histogram_bin_edges(self):\n hist, e = histogram([1, 2, 3, 4], [1, 2])\n edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])\n assert_array_equal(edges, e)\n\n arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])\n hist, e = histogram(arr, bins=30, range=(-0.5, 5))\n edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))\n 
assert_array_equal(edges, e)\n\n hist, e = histogram(arr, bins='auto', range=(0, 1))\n edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))\n assert_array_equal(edges, e)\n\n def test_small_value_range(self):\n arr = np.array([1, 1 + 2e-16] * 10)\n with pytest.raises(ValueError, match="Too many bins for data range"):\n histogram(arr, bins=10)\n\n # @requires_memory(free_bytes=1e10)\n # @pytest.mark.slow\n @pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing")\n def test_big_arrays(self):\n sample = np.zeros([100000000, 3])\n xbins = 400\n ybins = 400\n zbins = np.arange(16000)\n hist = np.histogramdd(sample=sample, bins=(xbins, ybins, zbins))\n assert_equal(type(hist), type((1, 2)))\n\n def test_gh_23110(self):\n hist, e = np.histogram(np.array([-0.9e-308], dtype='>f8'),\n bins=2,\n range=(-1e-308, -2e-313))\n expected_hist = np.array([1, 0])\n assert_array_equal(hist, expected_hist)\n\n def test_gh_28400(self):\n e = 1 + 1e-12\n Z = [0, 1, 1, 1, 1, 1, e, e, e, e, e, e, 2]\n counts, edges = np.histogram(Z, bins="auto")\n assert len(counts) < 10\n assert edges[0] == Z[0]\n assert edges[-1] == Z[-1]\n\nclass TestHistogramOptimBinNums:\n """\n Provide test coverage when using provided estimators for optimal number of\n bins\n """\n\n def test_empty(self):\n estimator_list = ['fd', 'scott', 'rice', 'sturges',\n 'doane', 'sqrt', 'auto', 'stone']\n # check it can deal with empty data\n for estimator in estimator_list:\n a, b = histogram([], bins=estimator)\n assert_array_equal(a, np.array([0]))\n assert_array_equal(b, np.array([0, 1]))\n\n def test_simple(self):\n """\n Straightforward testing with a mixture of linspace data (for\n consistency). 
All test values have been precomputed and the values\n shouldn't change\n """\n # Some basic sanity checking, with some fixed data.\n # Checking for the correct number of bins\n basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,\n 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},\n 500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,\n 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},\n 5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,\n 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}\n\n for testlen, expectedResults in basic_test.items():\n # Create some sort of non uniform data to test with\n # (2 peak uniform mixture)\n x1 = np.linspace(-10, -1, testlen // 5 * 2)\n x2 = np.linspace(1, 10, testlen // 5 * 3)\n x = np.concatenate((x1, x2))\n for estimator, numbins in expectedResults.items():\n a, b = np.histogram(x, estimator)\n assert_equal(len(a), numbins, err_msg=f"For the {estimator} estimator "\n f"with datasize of {testlen}")\n\n def test_small(self):\n """\n Smaller datasets have the potential to cause issues with the data\n adaptive methods, especially the FD method. 
All bin numbers have been\n precalculated.\n """\n small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,\n 'doane': 1, 'sqrt': 1, 'stone': 1},\n 2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,\n 'doane': 1, 'sqrt': 2, 'stone': 1},\n 3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,\n 'doane': 3, 'sqrt': 2, 'stone': 1}}\n\n for testlen, expectedResults in small_dat.items():\n testdat = np.arange(testlen).astype(float)\n for estimator, expbins in expectedResults.items():\n a, b = np.histogram(testdat, estimator)\n assert_equal(len(a), expbins, err_msg=f"For the {estimator} estimator "\n f"with datasize of {testlen}")\n\n def test_incorrect_methods(self):\n """\n Check a Value Error is thrown when an unknown string is passed in\n """\n check_list = ['mad', 'freeman', 'histograms', 'IQR']\n for estimator in check_list:\n assert_raises(ValueError, histogram, [1, 2, 3], estimator)\n\n def test_novariance(self):\n """\n Check that methods handle no variance in data\n Primarily for Scott and FD as the SD and IQR are both 0 in this case\n """\n novar_dataset = np.ones(100)\n novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,\n 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}\n\n for estimator, numbins in novar_resultdict.items():\n a, b = np.histogram(novar_dataset, estimator)\n assert_equal(len(a), numbins,\n err_msg=f"{estimator} estimator, No Variance test")\n\n def test_limited_variance(self):\n """\n Check when IQR is 0, but variance exists, we return a reasonable value.\n """\n lim_var_data = np.ones(1000)\n lim_var_data[:3] = 0\n lim_var_data[-4:] = 100\n\n edges_auto = histogram_bin_edges(lim_var_data, 'auto')\n assert_equal(edges_auto[0], 0)\n assert_equal(edges_auto[-1], 100.)\n assert len(edges_auto) < 100\n\n edges_fd = histogram_bin_edges(lim_var_data, 'fd')\n assert_equal(edges_fd, np.array([0, 100]))\n\n edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')\n assert_equal(edges_sturges, np.linspace(0, 100, 12))\n\n def 
test_outlier(self):\n """\n Check the FD, Scott and Doane with outliers.\n\n The FD estimates a smaller binwidth since it's less affected by\n outliers. Since the range is so (artificially) large, this means more\n bins, most of which will be empty, but the data of interest usually is\n unaffected. The Scott estimator is more affected and returns fewer bins,\n despite most of the variance being in one area of the data. The Doane\n estimator lies somewhere between the other two.\n """\n xcenter = np.linspace(-10, 10, 50)\n outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))\n\n outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}\n\n for estimator, numbins in outlier_resultdict.items():\n a, b = np.histogram(outlier_dataset, estimator)\n assert_equal(len(a), numbins)\n\n def test_scott_vs_stone(self):\n """Verify that Scott's rule and Stone's rule converges for normally distributed data"""\n\n def nbins_ratio(seed, size):\n rng = np.random.RandomState(seed)\n x = rng.normal(loc=0, scale=2, size=size)\n a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])\n return a / (a + b)\n\n ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]\n for seed in range(10)]\n\n # the average difference between the two methods decreases as the dataset size increases.\n avg = abs(np.mean(ll, axis=0) - 0.5)\n assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)\n\n def test_simple_range(self):\n """\n Straightforward testing with a mixture of linspace data (for\n consistency). Adding in a 3rd mixture that will then be\n completely ignored. 
All test values have been precomputed and\n the shouldn't change.\n """\n # some basic sanity checking, with some fixed data.\n # Checking for the correct number of bins\n basic_test = {\n 50: {'fd': 8, 'scott': 8, 'rice': 15,\n 'sturges': 14, 'auto': 14, 'stone': 8},\n 500: {'fd': 15, 'scott': 16, 'rice': 32,\n 'sturges': 20, 'auto': 20, 'stone': 80},\n 5000: {'fd': 33, 'scott': 33, 'rice': 69,\n 'sturges': 27, 'auto': 33, 'stone': 80}\n }\n\n for testlen, expectedResults in basic_test.items():\n # create some sort of non uniform data to test with\n # (3 peak uniform mixture)\n x1 = np.linspace(-10, -1, testlen // 5 * 2)\n x2 = np.linspace(1, 10, testlen // 5 * 3)\n x3 = np.linspace(-100, -50, testlen)\n x = np.hstack((x1, x2, x3))\n for estimator, numbins in expectedResults.items():\n a, b = np.histogram(x, estimator, range=(-20, 20))\n msg = f"For the {estimator} estimator"\n msg += f" with datasize of {testlen}"\n assert_equal(len(a), numbins, err_msg=msg)\n\n @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',\n 'stone', 'rice', 'sturges'])\n def test_signed_integer_data(self, bins):\n # Regression test for gh-14379.\n a = np.array([-2, 0, 127], dtype=np.int8)\n hist, edges = np.histogram(a, bins=bins)\n hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)\n assert_array_equal(hist, hist32)\n assert_array_equal(edges, edges32)\n\n @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',\n 'stone', 'rice', 'sturges'])\n def test_integer(self, bins):\n """\n Test that bin width for integer data is at least 1.\n """\n with suppress_warnings() as sup:\n if bins == 'stone':\n sup.filter(RuntimeWarning)\n assert_equal(\n np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins),\n np.arange(9))\n\n def test_integer_non_auto(self):\n """\n Test that the bin-width>=1 requirement *only* applies to auto binning.\n """\n assert_equal(\n np.histogram_bin_edges(np.tile(np.arange(9), 1000), 16),\n np.arange(17) / 2)\n assert_equal(\n 
np.histogram_bin_edges(np.tile(np.arange(9), 1000), [.1, .2]),\n [.1, .2])\n\n def test_simple_weighted(self):\n """\n Check that weighted data raises a TypeError\n """\n estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']\n for estimator in estimator_list:\n assert_raises(TypeError, histogram, [1, 2, 3],\n estimator, weights=[1, 2, 3])\n\n\nclass TestHistogramdd:\n\n def test_simple(self):\n x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],\n [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])\n H, edges = histogramdd(x, (2, 3, 3),\n range=[[-1, 1], [0, 3], [0, 3]])\n answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],\n [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])\n assert_array_equal(H, answer)\n\n # Check normalization\n ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]\n H, edges = histogramdd(x, bins=ed, density=True)\n assert_(np.all(H == answer / 12.))\n\n # Check that H has the correct shape.\n H, edges = histogramdd(x, (2, 3, 4),\n range=[[-1, 1], [0, 3], [0, 4]],\n density=True)\n answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],\n [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])\n assert_array_almost_equal(H, answer / 6., 4)\n # Check that a sequence of arrays is accepted and H has the correct\n # shape.\n z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]\n H, edges = histogramdd(\n z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])\n answer = np.array([[[0, 0], [0, 0], [0, 0]],\n [[0, 1], [0, 0], [1, 0]],\n [[0, 1], [0, 0], [0, 0]],\n [[0, 0], [0, 0], [0, 0]]])\n assert_array_equal(H, answer)\n\n Z = np.zeros((5, 5, 5))\n Z[list(range(5)), list(range(5)), list(range(5))] = 1.\n H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)\n assert_array_equal(H, Z)\n\n def test_shape_3d(self):\n # All possible permutations for bins of different lengths in 3D.\n bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),\n (4, 5, 6))\n r = np.random.rand(10, 3)\n for b in bins:\n H, edges = histogramdd(r, b)\n 
assert_(H.shape == b)\n\n def test_shape_4d(self):\n # All possible permutations for bins of different lengths in 4D.\n bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),\n (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),\n (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),\n (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),\n (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),\n (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))\n\n r = np.random.rand(10, 4)\n for b in bins:\n H, edges = histogramdd(r, b)\n assert_(H.shape == b)\n\n def test_weights(self):\n v = np.random.rand(100, 2)\n hist, edges = histogramdd(v)\n n_hist, edges = histogramdd(v, density=True)\n w_hist, edges = histogramdd(v, weights=np.ones(100))\n assert_array_equal(w_hist, hist)\n w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)\n assert_array_equal(w_hist, n_hist)\n w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)\n assert_array_equal(w_hist, 2 * hist)\n\n def test_identical_samples(self):\n x = np.zeros((10, 2), int)\n hist, edges = histogramdd(x, bins=2)\n assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))\n\n def test_empty(self):\n a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))\n assert_array_max_ulp(a, np.array([[0.]]))\n a, b = np.histogramdd([[], [], []], bins=2)\n assert_array_max_ulp(a, np.zeros((2, 2, 2)))\n\n def test_bins_errors(self):\n # There are two ways to specify bins. Check for the right errors\n # when mixing those.\n x = np.arange(8).reshape(2, 4)\n assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])\n assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])\n assert_raises(\n ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])\n assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))\n\n def test_inf_edges(self):\n # Test using +/-inf bin edges works. 
See #1788.\n with np.errstate(invalid='ignore'):\n x = np.arange(6).reshape(3, 2)\n expected = np.array([[1, 0], [0, 1], [0, 1]])\n h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])\n assert_allclose(h, expected)\n h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])\n assert_allclose(h, expected)\n h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])\n assert_allclose(h, expected)\n\n def test_rightmost_binedge(self):\n # Test event very close to rightmost binedge. See Github issue #4266\n x = [0.9999999995]\n bins = [[0., 0.5, 1.0]]\n hist, _ = histogramdd(x, bins=bins)\n assert_(hist[0] == 0.0)\n assert_(hist[1] == 1.)\n x = [1.0]\n bins = [[0., 0.5, 1.0]]\n hist, _ = histogramdd(x, bins=bins)\n assert_(hist[0] == 0.0)\n assert_(hist[1] == 1.)\n x = [1.0000000001]\n bins = [[0., 0.5, 1.0]]\n hist, _ = histogramdd(x, bins=bins)\n assert_(hist[0] == 0.0)\n assert_(hist[1] == 0.0)\n x = [1.0001]\n bins = [[0., 0.5, 1.0]]\n hist, _ = histogramdd(x, bins=bins)\n assert_(hist[0] == 0.0)\n assert_(hist[1] == 0.0)\n\n def test_finite_range(self):\n vals = np.random.random((100, 3))\n histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])\n assert_raises(ValueError, histogramdd, vals,\n range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])\n assert_raises(ValueError, histogramdd, vals,\n range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])\n\n def test_equal_edges(self):\n """ Test that adjacent entries in an edge array can be equal """\n x = np.array([0, 1, 2])\n y = np.array([0, 1, 2])\n x_edges = np.array([0, 2, 2])\n y_edges = 1\n hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))\n\n hist_expected = np.array([\n [2.],\n [1.], # x == 2 falls in the final bin\n ])\n assert_equal(hist, hist_expected)\n\n def test_edge_dtype(self):\n """ Test that if an edge array is input, its type is preserved """\n x = np.array([0, 10, 20])\n y = x / 10\n x_edges = np.array([0, 5, 15, 20])\n y_edges = x_edges / 10\n hist, edges = histogramdd((x, y), 
bins=(x_edges, y_edges))\n\n assert_equal(edges[0].dtype, x_edges.dtype)\n assert_equal(edges[1].dtype, y_edges.dtype)\n\n def test_large_integers(self):\n big = 2**60 # Too large to represent with a full precision float\n\n x = np.array([0], np.int64)\n x_edges = np.array([-1, +1], np.int64)\n y = big + x\n y_edges = big + x_edges\n\n hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))\n\n assert_equal(hist[0, 0], 1)\n\n def test_density_non_uniform_2d(self):\n # Defines the following grid:\n #\n # 0 2 8\n # 0+-+-----+\n # + | +\n # + | +\n # 6+-+-----+\n # 8+-+-----+\n x_edges = np.array([0, 2, 8])\n y_edges = np.array([0, 6, 8])\n relative_areas = np.array([\n [3, 9],\n [1, 3]])\n\n # ensure the number of points in each region is proportional to its area\n x = np.array([1] + [1] * 3 + [7] * 3 + [7] * 9)\n y = np.array([7] + [1] * 3 + [7] * 3 + [1] * 9)\n\n # sanity check that the above worked as intended\n hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))\n assert_equal(hist, relative_areas)\n\n # resulting histogram should be uniform, since counts and areas are proportional\n hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)\n assert_equal(hist, 1 / (8 * 8))\n\n def test_density_non_uniform_1d(self):\n # compare to histogram to show the results are the same\n v = np.arange(10)\n bins = np.array([0, 1, 3, 6, 10])\n hist, edges = histogram(v, bins, density=True)\n hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)\n assert_equal(hist, hist_dd)\n assert_equal(edges, edges_dd[0])\n | .venv\Lib\site-packages\numpy\lib\tests\test_histograms.py | test_histograms.py | Python | 34,821 | 0.95 | 0.132164 | 0.130556 | node-utils | 176 | 2025-04-22T08:14:44.238917 | GPL-3.0 | true | bd45a09df4e69308d804a1f1fdba73e3 |
import pytest\n\nimport numpy as np\nfrom numpy.lib._index_tricks_impl import (\n c_,\n diag_indices,\n diag_indices_from,\n fill_diagonal,\n index_exp,\n ix_,\n mgrid,\n ndenumerate,\n ndindex,\n ogrid,\n r_,\n s_,\n)\nfrom numpy.testing import (\n assert_,\n assert_almost_equal,\n assert_array_almost_equal,\n assert_array_equal,\n assert_equal,\n assert_raises,\n assert_raises_regex,\n)\n\n\nclass TestRavelUnravelIndex:\n def test_basic(self):\n assert_equal(np.unravel_index(2, (2, 2)), (1, 0))\n\n # test that new shape argument works properly\n assert_equal(np.unravel_index(indices=2,\n shape=(2, 2)),\n (1, 0))\n\n # test that an invalid second keyword argument\n # is properly handled, including the old name `dims`.\n with assert_raises(TypeError):\n np.unravel_index(indices=2, hape=(2, 2))\n\n with assert_raises(TypeError):\n np.unravel_index(2, hape=(2, 2))\n\n with assert_raises(TypeError):\n np.unravel_index(254, ims=(17, 94))\n\n with assert_raises(TypeError):\n np.unravel_index(254, dims=(17, 94))\n\n assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)\n assert_equal(np.unravel_index(254, (17, 94)), (2, 66))\n assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)\n assert_raises(ValueError, np.unravel_index, -1, (2, 2))\n assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))\n assert_raises(ValueError, np.unravel_index, 4, (2, 2))\n assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))\n assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))\n assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))\n assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))\n assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))\n\n assert_equal(np.unravel_index((2 * 3 + 1) * 6 + 4, (4, 3, 6)), [2, 1, 4])\n assert_equal(\n np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2 * 3 + 1) * 6 + 4)\n\n arr = np.array([[3, 6, 6], [4, 5, 1]])\n assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])\n assert_equal(\n 
np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])\n assert_equal(\n np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])\n assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),\n [12, 13, 13])\n assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)\n\n assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),\n [[3, 6, 6], [4, 5, 1]])\n assert_equal(\n np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),\n [[3, 6, 6], [4, 5, 1]])\n assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])\n\n def test_empty_indices(self):\n msg1 = 'indices must be integral: the provided empty sequence was'\n msg2 = 'only int indices permitted'\n assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5))\n assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5))\n assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]),\n (10, 3, 5))\n assert_equal(np.unravel_index(np.array([], dtype=int), (10, 3, 5)),\n [[], [], []])\n assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []),\n (10, 3))\n assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']),\n (10, 3))\n assert_raises_regex(TypeError, msg2, np.ravel_multi_index,\n (np.array([]), np.array([])), (5, 3))\n assert_equal(np.ravel_multi_index(\n (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), [])\n assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int),\n (5, 3)), [])\n\n def test_big_indices(self):\n # ravel_multi_index for big indices (issue #7546)\n if np.intp == np.int64:\n arr = ([1, 29], [3, 5], [3, 117], [19, 2],\n [2379, 1284], [2, 2], [0, 1])\n assert_equal(\n np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),\n [5627771580, 117259570957])\n\n # test unravel_index for big indices (issue #9538)\n assert_raises(ValueError, np.unravel_index, 1, (2**32 - 1, 2**31 + 1))\n\n # test overflow checking for too big array (issue #7546)\n dummy_arr = ([0], [0])\n half_max = 
np.iinfo(np.intp).max // 2\n assert_equal(\n np.ravel_multi_index(dummy_arr, (half_max, 2)), [0])\n assert_raises(ValueError,\n np.ravel_multi_index, dummy_arr, (half_max + 1, 2))\n assert_equal(\n np.ravel_multi_index(dummy_arr, (half_max, 2), order='F'), [0])\n assert_raises(ValueError,\n np.ravel_multi_index, dummy_arr, (half_max + 1, 2), order='F')\n\n def test_dtypes(self):\n # Test with different data types\n for dtype in [np.int16, np.uint16, np.int32,\n np.uint32, np.int64, np.uint64]:\n coords = np.array(\n [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)\n shape = (5, 8)\n uncoords = 8 * coords[0] + coords[1]\n assert_equal(np.ravel_multi_index(coords, shape), uncoords)\n assert_equal(coords, np.unravel_index(uncoords, shape))\n uncoords = coords[0] + 5 * coords[1]\n assert_equal(\n np.ravel_multi_index(coords, shape, order='F'), uncoords)\n assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))\n\n coords = np.array(\n [[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],\n dtype=dtype)\n shape = (5, 8, 10)\n uncoords = 10 * (8 * coords[0] + coords[1]) + coords[2]\n assert_equal(np.ravel_multi_index(coords, shape), uncoords)\n assert_equal(coords, np.unravel_index(uncoords, shape))\n uncoords = coords[0] + 5 * (coords[1] + 8 * coords[2])\n assert_equal(\n np.ravel_multi_index(coords, shape, order='F'), uncoords)\n assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))\n\n def test_clipmodes(self):\n # Test clipmodes\n assert_equal(\n np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'),\n np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12)))\n assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12),\n mode=(\n 'wrap', 'raise', 'clip', 'raise')),\n np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12)))\n assert_raises(\n ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12))\n\n def test_writeability(self):\n # gh-7269\n x, y = np.unravel_index([1, 2, 3], (4, 5))\n assert_(x.flags.writeable)\n 
assert_(y.flags.writeable)\n\n def test_0d(self):\n # gh-580\n x = np.unravel_index(0, ())\n assert_equal(x, ())\n\n assert_raises_regex(ValueError, "0d array", np.unravel_index, [0], ())\n assert_raises_regex(\n ValueError, "out of bounds", np.unravel_index, [1], ())\n\n @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])\n def test_empty_array_ravel(self, mode):\n res = np.ravel_multi_index(\n np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)\n assert res.shape == (0,)\n\n with assert_raises(ValueError):\n np.ravel_multi_index(\n np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)\n\n def test_empty_array_unravel(self):\n res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))\n # res is a tuple of three empty arrays\n assert len(res) == 3\n assert all(a.shape == (0,) for a in res)\n\n with assert_raises(ValueError):\n np.unravel_index([1], (2, 1, 0))\n\nclass TestGrid:\n def test_basic(self):\n a = mgrid[-1:1:10j]\n b = mgrid[-1:1:0.1]\n assert_(a.shape == (10,))\n assert_(b.shape == (20,))\n assert_(a[0] == -1)\n assert_almost_equal(a[-1], 1)\n assert_(b[0] == -1)\n assert_almost_equal(b[1] - b[0], 0.1, 11)\n assert_almost_equal(b[-1], b[0] + 19 * 0.1, 11)\n assert_almost_equal(a[1] - a[0], 2.0 / 9.0, 11)\n\n def test_linspace_equivalence(self):\n y, st = np.linspace(2, 10, retstep=True)\n assert_almost_equal(st, 8 / 49.0)\n assert_array_almost_equal(y, mgrid[2:10:50j], 13)\n\n def test_nd(self):\n c = mgrid[-1:1:10j, -2:2:10j]\n d = mgrid[-1:1:0.1, -2:2:0.2]\n assert_(c.shape == (2, 10, 10))\n assert_(d.shape == (2, 20, 20))\n assert_array_equal(c[0][0, :], -np.ones(10, 'd'))\n assert_array_equal(c[1][:, 0], -2 * np.ones(10, 'd'))\n assert_array_almost_equal(c[0][-1, :], np.ones(10, 'd'), 11)\n assert_array_almost_equal(c[1][:, -1], 2 * np.ones(10, 'd'), 11)\n assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],\n 0.1 * np.ones(20, 'd'), 11)\n assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],\n 0.2 * np.ones(20, 'd'), 11)\n\n def 
test_sparse(self):\n grid_full = mgrid[-1:1:10j, -2:2:10j]\n grid_sparse = ogrid[-1:1:10j, -2:2:10j]\n\n # sparse grids can be made dense by broadcasting\n grid_broadcast = np.broadcast_arrays(*grid_sparse)\n for f, b in zip(grid_full, grid_broadcast):\n assert_equal(f, b)\n\n @pytest.mark.parametrize("start, stop, step, expected", [\n (None, 10, 10j, (200, 10)),\n (-10, 20, None, (1800, 30)),\n ])\n def test_mgrid_size_none_handling(self, start, stop, step, expected):\n # regression test None value handling for\n # start and step values used by mgrid;\n # internally, this aims to cover previously\n # unexplored code paths in nd_grid()\n grid = mgrid[start:stop:step, start:stop:step]\n # need a smaller grid to explore one of the\n # untested code paths\n grid_small = mgrid[start:stop:step]\n assert_equal(grid.size, expected[0])\n assert_equal(grid_small.size, expected[1])\n\n def test_accepts_npfloating(self):\n # regression test for #16466\n grid64 = mgrid[0.1:0.33:0.1, ]\n grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1), ]\n assert_array_almost_equal(grid64, grid32)\n # At some point this was float64, but NEP 50 changed it:\n assert grid32.dtype == np.float32\n assert grid64.dtype == np.float64\n\n # different code path for single slice\n grid64 = mgrid[0.1:0.33:0.1]\n grid32 = mgrid[np.float32(0.1):np.float32(0.33):np.float32(0.1)]\n assert_(grid32.dtype == np.float64)\n assert_array_almost_equal(grid64, grid32)\n\n def test_accepts_longdouble(self):\n # regression tests for #16945\n grid64 = mgrid[0.1:0.33:0.1, ]\n grid128 = mgrid[\n np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1),\n ]\n assert_(grid128.dtype == np.longdouble)\n assert_array_almost_equal(grid64, grid128)\n\n grid128c_a = mgrid[0:np.longdouble(1):3.4j]\n grid128c_b = mgrid[0:np.longdouble(1):3.4j, ]\n assert_(grid128c_a.dtype == grid128c_b.dtype == np.longdouble)\n assert_array_equal(grid128c_a, grid128c_b[0])\n\n # different code path for single slice\n grid64 = 
mgrid[0.1:0.33:0.1]\n grid128 = mgrid[\n np.longdouble(0.1):np.longdouble(0.33):np.longdouble(0.1)\n ]\n assert_(grid128.dtype == np.longdouble)\n assert_array_almost_equal(grid64, grid128)\n\n def test_accepts_npcomplexfloating(self):\n # Related to #16466\n assert_array_almost_equal(\n mgrid[0.1:0.3:3j, ], mgrid[0.1:0.3:np.complex64(3j), ]\n )\n\n # different code path for single slice\n assert_array_almost_equal(\n mgrid[0.1:0.3:3j], mgrid[0.1:0.3:np.complex64(3j)]\n )\n\n # Related to #16945\n grid64_a = mgrid[0.1:0.3:3.3j]\n grid64_b = mgrid[0.1:0.3:3.3j, ][0]\n assert_(grid64_a.dtype == grid64_b.dtype == np.float64)\n assert_array_equal(grid64_a, grid64_b)\n\n grid128_a = mgrid[0.1:0.3:np.clongdouble(3.3j)]\n grid128_b = mgrid[0.1:0.3:np.clongdouble(3.3j), ][0]\n assert_(grid128_a.dtype == grid128_b.dtype == np.longdouble)\n assert_array_equal(grid64_a, grid64_b)\n\n\nclass TestConcatenator:\n def test_1d(self):\n assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))\n b = np.ones(5)\n c = r_[b, 0, 0, b]\n assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])\n\n def test_mixed_type(self):\n g = r_[10.1, 1:10]\n assert_(g.dtype == 'f8')\n\n def test_more_mixed_type(self):\n g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]\n assert_(g.dtype == 'f8')\n\n def test_complex_step(self):\n # Regression test for #12262\n g = r_[0:36:100j]\n assert_(g.shape == (100,))\n\n # Related to #16466\n g = r_[0:36:np.complex64(100j)]\n assert_(g.shape == (100,))\n\n def test_2d(self):\n b = np.random.rand(5, 5)\n c = np.random.rand(5, 5)\n d = r_['1', b, c] # append columns\n assert_(d.shape == (5, 10))\n assert_array_equal(d[:, :5], b)\n assert_array_equal(d[:, 5:], c)\n d = r_[b, c]\n assert_(d.shape == (10, 5))\n assert_array_equal(d[:5, :], b)\n assert_array_equal(d[5:, :], c)\n\n def test_0d(self):\n assert_equal(r_[0, np.array(1), 2], [0, 1, 2])\n assert_equal(r_[[0, 1, 2], np.array(3)], [0, 1, 2, 3])\n assert_equal(r_[np.array(0), [1, 2, 
3]], [0, 1, 2, 3])\n\n\nclass TestNdenumerate:\n def test_basic(self):\n a = np.array([[1, 2], [3, 4]])\n assert_equal(list(ndenumerate(a)),\n [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])\n\n\nclass TestIndexExpression:\n def test_regression_1(self):\n # ticket #1196\n a = np.arange(2)\n assert_equal(a[:-1], a[s_[:-1]])\n assert_equal(a[:-1], a[index_exp[:-1]])\n\n def test_simple_1(self):\n a = np.random.rand(4, 5, 6)\n\n assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]])\n assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])\n\n\nclass TestIx_:\n def test_regression_1(self):\n # Test empty untyped inputs create outputs of indexing type, gh-5804\n a, = np.ix_(range(0))\n assert_equal(a.dtype, np.intp)\n\n a, = np.ix_([])\n assert_equal(a.dtype, np.intp)\n\n # but if the type is specified, don't change it\n a, = np.ix_(np.array([], dtype=np.float32))\n assert_equal(a.dtype, np.float32)\n\n def test_shape_and_dtype(self):\n sizes = (4, 5, 3, 2)\n # Test both lists and arrays\n for func in (range, np.arange):\n arrays = np.ix_(*[func(sz) for sz in sizes])\n for k, (a, sz) in enumerate(zip(arrays, sizes)):\n assert_equal(a.shape[k], sz)\n assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))\n assert_(np.issubdtype(a.dtype, np.integer))\n\n def test_bool(self):\n bool_a = [True, False, True, True]\n int_a, = np.nonzero(bool_a)\n assert_equal(np.ix_(bool_a)[0], int_a)\n\n def test_1d_only(self):\n idx2d = [[1, 2, 3], [4, 5, 6]]\n assert_raises(ValueError, np.ix_, idx2d)\n\n def test_repeated_input(self):\n length_of_vector = 5\n x = np.arange(length_of_vector)\n out = ix_(x, x)\n assert_equal(out[0].shape, (length_of_vector, 1))\n assert_equal(out[1].shape, (1, length_of_vector))\n # check that input shape is not modified\n assert_equal(x.shape, (length_of_vector,))\n\n\ndef test_c_():\n a = c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]\n assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])\n\n\nclass TestFillDiagonal:\n def 
test_basic(self):\n a = np.zeros((3, 3), int)\n fill_diagonal(a, 5)\n assert_array_equal(\n a, np.array([[5, 0, 0],\n [0, 5, 0],\n [0, 0, 5]])\n )\n\n def test_tall_matrix(self):\n a = np.zeros((10, 3), int)\n fill_diagonal(a, 5)\n assert_array_equal(\n a, np.array([[5, 0, 0],\n [0, 5, 0],\n [0, 0, 5],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]])\n )\n\n def test_tall_matrix_wrap(self):\n a = np.zeros((10, 3), int)\n fill_diagonal(a, 5, True)\n assert_array_equal(\n a, np.array([[5, 0, 0],\n [0, 5, 0],\n [0, 0, 5],\n [0, 0, 0],\n [5, 0, 0],\n [0, 5, 0],\n [0, 0, 5],\n [0, 0, 0],\n [5, 0, 0],\n [0, 5, 0]])\n )\n\n def test_wide_matrix(self):\n a = np.zeros((3, 10), int)\n fill_diagonal(a, 5)\n assert_array_equal(\n a, np.array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 5, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 5, 0, 0, 0, 0, 0, 0, 0]])\n )\n\n def test_operate_4d_array(self):\n a = np.zeros((3, 3, 3, 3), int)\n fill_diagonal(a, 4)\n i = np.array([0, 1, 2])\n assert_equal(np.where(a != 0), (i, i, i, i))\n\n def test_low_dim_handling(self):\n # raise error with low dimensionality\n a = np.zeros(3, int)\n with assert_raises_regex(ValueError, "at least 2-d"):\n fill_diagonal(a, 5)\n\n def test_hetero_shape_handling(self):\n # raise error with high dimensionality and\n # shape mismatch\n a = np.zeros((3, 3, 7, 3), int)\n with assert_raises_regex(ValueError, "equal length"):\n fill_diagonal(a, 2)\n\n\ndef test_diag_indices():\n di = diag_indices(4)\n a = np.array([[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16]])\n a[di] = 100\n assert_array_equal(\n a, np.array([[100, 2, 3, 4],\n [5, 100, 7, 8],\n [9, 10, 100, 12],\n [13, 14, 15, 100]])\n )\n\n # Now, we create indices to manipulate a 3-d array:\n d3 = diag_indices(2, 3)\n\n # And use it to set the diagonal of a zeros array to 1:\n a = np.zeros((2, 2, 2), int)\n a[d3] = 1\n assert_array_equal(\n a, np.array([[[1, 0],\n [0, 0]],\n [[0, 0],\n [0, 1]]])\n 
)\n\n\nclass TestDiagIndicesFrom:\n\n def test_diag_indices_from(self):\n x = np.random.random((4, 4))\n r, c = diag_indices_from(x)\n assert_array_equal(r, np.arange(4))\n assert_array_equal(c, np.arange(4))\n\n def test_error_small_input(self):\n x = np.ones(7)\n with assert_raises_regex(ValueError, "at least 2-d"):\n diag_indices_from(x)\n\n def test_error_shape_mismatch(self):\n x = np.zeros((3, 3, 2, 3), int)\n with assert_raises_regex(ValueError, "equal length"):\n diag_indices_from(x)\n\n\ndef test_ndindex():\n x = list(ndindex(1, 2, 3))\n expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))]\n assert_array_equal(x, expected)\n\n x = list(ndindex((1, 2, 3)))\n assert_array_equal(x, expected)\n\n # Test use of scalars and tuples\n x = list(ndindex((3,)))\n assert_array_equal(x, list(ndindex(3)))\n\n # Make sure size argument is optional\n x = list(ndindex())\n assert_equal(x, [()])\n\n x = list(ndindex(()))\n assert_equal(x, [()])\n\n # Make sure 0-sized ndindex works correctly\n x = list(ndindex(*[0]))\n assert_equal(x, [])\n | .venv\Lib\site-packages\numpy\lib\tests\test_index_tricks.py | test_index_tricks.py | Python | 21,045 | 0.95 | 0.128521 | 0.085595 | vue-tools | 704 | 2024-12-11T12:24:45.124210 | GPL-3.0 | true | 0118f5a8e4a8efdafb154323c237ecb7 |
"""\nTests specific to `np.loadtxt` added during the move of loadtxt to be backed\nby C code.\nThese tests complement those found in `test_io.py`.\n"""\n\nimport os\nimport sys\nfrom io import StringIO\nfrom tempfile import NamedTemporaryFile, mkstemp\n\nimport pytest\n\nimport numpy as np\nfrom numpy.ma.testutils import assert_equal\nfrom numpy.testing import HAS_REFCOUNT, IS_PYPY, assert_array_equal\n\n\ndef test_scientific_notation():\n """Test that both 'e' and 'E' are parsed correctly."""\n data = StringIO(\n\n "1.0e-1,2.0E1,3.0\n"\n "4.0e-2,5.0E-1,6.0\n"\n "7.0e-3,8.0E1,9.0\n"\n "0.0e-4,1.0E-1,2.0"\n\n )\n expected = np.array(\n [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]]\n )\n assert_array_equal(np.loadtxt(data, delimiter=","), expected)\n\n\n@pytest.mark.parametrize("comment", ["..", "//", "@-", "this is a comment:"])\ndef test_comment_multiple_chars(comment):\n content = "# IGNORE\n1.5, 2.5# ABC\n3.0,4.0# XXX\n5.5,6.0\n"\n txt = StringIO(content.replace("#", comment))\n a = np.loadtxt(txt, delimiter=",", comments=comment)\n assert_equal(a, [[1.5, 2.5], [3.0, 4.0], [5.5, 6.0]])\n\n\n@pytest.fixture\ndef mixed_types_structured():\n """\n Fixture providing heterogeneous input data with a structured dtype, along\n with the associated structured array.\n """\n data = StringIO(\n\n "1000;2.4;alpha;-34\n"\n "2000;3.1;beta;29\n"\n "3500;9.9;gamma;120\n"\n "4090;8.1;delta;0\n"\n "5001;4.4;epsilon;-99\n"\n "6543;7.8;omega;-1\n"\n\n )\n dtype = np.dtype(\n [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)]\n )\n expected = np.array(\n [\n (1000, 2.4, "alpha", -34),\n (2000, 3.1, "beta", 29),\n (3500, 9.9, "gamma", 120),\n (4090, 8.1, "delta", 0),\n (5001, 4.4, "epsilon", -99),\n (6543, 7.8, "omega", -1)\n ],\n dtype=dtype\n )\n return data, dtype, expected\n\n\n@pytest.mark.parametrize('skiprows', [0, 1, 2, 3])\ndef test_structured_dtype_and_skiprows_no_empty_lines(\n skiprows, mixed_types_structured):\n data, dtype, expected = 
mixed_types_structured\n a = np.loadtxt(data, dtype=dtype, delimiter=";", skiprows=skiprows)\n assert_array_equal(a, expected[skiprows:])\n\n\ndef test_unpack_structured(mixed_types_structured):\n data, dtype, expected = mixed_types_structured\n\n a, b, c, d = np.loadtxt(data, dtype=dtype, delimiter=";", unpack=True)\n assert_array_equal(a, expected["f0"])\n assert_array_equal(b, expected["f1"])\n assert_array_equal(c, expected["f2"])\n assert_array_equal(d, expected["f3"])\n\n\ndef test_structured_dtype_with_shape():\n dtype = np.dtype([("a", "u1", 2), ("b", "u1", 2)])\n data = StringIO("0,1,2,3\n6,7,8,9\n")\n expected = np.array([((0, 1), (2, 3)), ((6, 7), (8, 9))], dtype=dtype)\n assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dtype), expected)\n\n\ndef test_structured_dtype_with_multi_shape():\n dtype = np.dtype([("a", "u1", (2, 2))])\n data = StringIO("0 1 2 3\n")\n expected = np.array([(((0, 1), (2, 3)),)], dtype=dtype)\n assert_array_equal(np.loadtxt(data, dtype=dtype), expected)\n\n\ndef test_nested_structured_subarray():\n # Test from gh-16678\n point = np.dtype([('x', float), ('y', float)])\n dt = np.dtype([('code', int), ('points', point, (2,))])\n data = StringIO("100,1,2,3,4\n200,5,6,7,8\n")\n expected = np.array(\n [\n (100, [(1., 2.), (3., 4.)]),\n (200, [(5., 6.), (7., 8.)]),\n ],\n dtype=dt\n )\n assert_array_equal(np.loadtxt(data, dtype=dt, delimiter=","), expected)\n\n\ndef test_structured_dtype_offsets():\n # An aligned structured dtype will have additional padding\n dt = np.dtype("i1, i4, i1, i4, i1, i4", align=True)\n data = StringIO("1,2,3,4,5,6\n7,8,9,10,11,12\n")\n expected = np.array([(1, 2, 3, 4, 5, 6), (7, 8, 9, 10, 11, 12)], dtype=dt)\n assert_array_equal(np.loadtxt(data, delimiter=",", dtype=dt), expected)\n\n\n@pytest.mark.parametrize("param", ("skiprows", "max_rows"))\ndef test_exception_negative_row_limits(param):\n """skiprows and max_rows should raise for negative parameters."""\n with pytest.raises(ValueError, 
match="argument must be nonnegative"):\n np.loadtxt("foo.bar", **{param: -3})\n\n\n@pytest.mark.parametrize("param", ("skiprows", "max_rows"))\ndef test_exception_noninteger_row_limits(param):\n with pytest.raises(TypeError, match="argument must be an integer"):\n np.loadtxt("foo.bar", **{param: 1.0})\n\n\n@pytest.mark.parametrize(\n "data, shape",\n [\n ("1 2 3 4 5\n", (1, 5)), # Single row\n ("1\n2\n3\n4\n5\n", (5, 1)), # Single column\n ]\n)\ndef test_ndmin_single_row_or_col(data, shape):\n arr = np.array([1, 2, 3, 4, 5])\n arr2d = arr.reshape(shape)\n\n assert_array_equal(np.loadtxt(StringIO(data), dtype=int), arr)\n assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=0), arr)\n assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=1), arr)\n assert_array_equal(np.loadtxt(StringIO(data), dtype=int, ndmin=2), arr2d)\n\n\n@pytest.mark.parametrize("badval", [-1, 3, None, "plate of shrimp"])\ndef test_bad_ndmin(badval):\n with pytest.raises(ValueError, match="Illegal value of ndmin keyword"):\n np.loadtxt("foo.bar", ndmin=badval)\n\n\n@pytest.mark.parametrize(\n "ws",\n (\n " ", # space\n "\t", # tab\n "\u2003", # em\n "\u00A0", # non-break\n "\u3000", # ideographic space\n )\n)\ndef test_blank_lines_spaces_delimit(ws):\n txt = StringIO(\n f"1 2{ws}30\n\n{ws}\n"\n f"4 5 60{ws}\n {ws} \n"\n f"7 8 {ws} 90\n # comment\n"\n f"3 2 1"\n )\n # NOTE: It is unclear that the ` # comment` should succeed. 
Except\n # for delimiter=None, which should use any whitespace (and maybe\n # should just be implemented closer to Python\n expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]])\n assert_equal(\n np.loadtxt(txt, dtype=int, delimiter=None, comments="#"), expected\n )\n\n\ndef test_blank_lines_normal_delimiter():\n txt = StringIO('1,2,30\n\n4,5,60\n\n7,8,90\n# comment\n3,2,1')\n expected = np.array([[1, 2, 30], [4, 5, 60], [7, 8, 90], [3, 2, 1]])\n assert_equal(\n np.loadtxt(txt, dtype=int, delimiter=',', comments="#"), expected\n )\n\n\n@pytest.mark.parametrize("dtype", (float, object))\ndef test_maxrows_no_blank_lines(dtype):\n txt = StringIO("1.5,2.5\n3.0,4.0\n5.5,6.0")\n res = np.loadtxt(txt, dtype=dtype, delimiter=",", max_rows=2)\n assert_equal(res.dtype, dtype)\n assert_equal(res, np.array([["1.5", "2.5"], ["3.0", "4.0"]], dtype=dtype))\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\n@pytest.mark.parametrize("dtype", (np.dtype("f8"), np.dtype("i2")))\ndef test_exception_message_bad_values(dtype):\n txt = StringIO("1,2\n3,XXX\n5,6")\n msg = f"could not convert string 'XXX' to {dtype} at row 1, column 2"\n with pytest.raises(ValueError, match=msg):\n np.loadtxt(txt, dtype=dtype, delimiter=",")\n\n\ndef test_converters_negative_indices():\n txt = StringIO('1.5,2.5\n3.0,XXX\n5.5,6.0')\n conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}\n expected = np.array([[1.5, 2.5], [3.0, np.nan], [5.5, 6.0]])\n res = np.loadtxt(txt, dtype=np.float64, delimiter=",", converters=conv)\n assert_equal(res, expected)\n\n\ndef test_converters_negative_indices_with_usecols():\n txt = StringIO('1.5,2.5,3.5\n3.0,4.0,XXX\n5.5,6.0,7.5\n')\n conv = {-1: lambda s: np.nan if s == 'XXX' else float(s)}\n expected = np.array([[1.5, 3.5], [3.0, np.nan], [5.5, 7.5]])\n res = np.loadtxt(\n txt,\n dtype=np.float64,\n delimiter=",",\n converters=conv,\n usecols=[0, -1],\n )\n assert_equal(res, 
expected)\n\n # Second test with variable number of rows:\n res = np.loadtxt(StringIO('''0,1,2\n0,1,2,3,4'''), delimiter=",",\n usecols=[0, -1], converters={-1: (lambda x: -1)})\n assert_array_equal(res, [[0, -1], [0, -1]])\n\n\ndef test_ragged_error():\n rows = ["1,2,3", "1,2,3", "4,3,2,1"]\n with pytest.raises(ValueError,\n match="the number of columns changed from 3 to 4 at row 3"):\n np.loadtxt(rows, delimiter=",")\n\n\ndef test_ragged_usecols():\n # usecols, and negative ones, work even with varying number of columns.\n txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")\n expected = np.array([[0, 0], [0, 0], [0, 0]])\n res = np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2])\n assert_equal(res, expected)\n\n txt = StringIO("0,0,XXX\n0\n0,XXX,XXX,0,XXX\n")\n with pytest.raises(ValueError,\n match="invalid column index -2 at row 2 with 1 columns"):\n # There is no -2 column in the second row:\n np.loadtxt(txt, dtype=float, delimiter=",", usecols=[0, -2])\n\n\ndef test_empty_usecols():\n txt = StringIO("0,0,XXX\n0,XXX,0,XXX\n0,XXX,XXX,0,XXX\n")\n res = np.loadtxt(txt, dtype=np.dtype([]), delimiter=",", usecols=[])\n assert res.shape == (3,)\n assert res.dtype == np.dtype([])\n\n\n@pytest.mark.parametrize("c1", ["a", "の", "🫕"])\n@pytest.mark.parametrize("c2", ["a", "の", "🫕"])\ndef test_large_unicode_characters(c1, c2):\n # c1 and c2 span ascii, 16bit and 32bit range.\n txt = StringIO(f"a,{c1},c,1.0\ne,{c2},2.0,g")\n res = np.loadtxt(txt, dtype=np.dtype('U12'), delimiter=",")\n expected = np.array(\n [f"a,{c1},c,1.0".split(","), f"e,{c2},2.0,g".split(",")],\n dtype=np.dtype('U12')\n )\n assert_equal(res, expected)\n\n\ndef test_unicode_with_converter():\n txt = StringIO("cat,dog\nαβγ,δεζ\nabc,def\n")\n conv = {0: lambda s: s.upper()}\n res = np.loadtxt(\n txt,\n dtype=np.dtype("U12"),\n converters=conv,\n delimiter=",",\n encoding=None\n )\n expected = np.array([['CAT', 'dog'], ['ΑΒΓ', 'δεζ'], ['ABC', 'def']])\n assert_equal(res, expected)\n\n\ndef 
test_converter_with_structured_dtype():\n txt = StringIO('1.5,2.5,Abc\n3.0,4.0,dEf\n5.5,6.0,ghI\n')\n dt = np.dtype([('m', np.int32), ('r', np.float32), ('code', 'U8')])\n conv = {0: lambda s: int(10 * float(s)), -1: lambda s: s.upper()}\n res = np.loadtxt(txt, dtype=dt, delimiter=",", converters=conv)\n expected = np.array(\n [(15, 2.5, 'ABC'), (30, 4.0, 'DEF'), (55, 6.0, 'GHI')], dtype=dt\n )\n assert_equal(res, expected)\n\n\ndef test_converter_with_unicode_dtype():\n """\n With the 'bytes' encoding, tokens are encoded prior to being\n passed to the converter. This means that the output of the converter may\n be bytes instead of unicode as expected by `read_rows`.\n\n This test checks that outputs from the above scenario are properly decoded\n prior to parsing by `read_rows`.\n """\n txt = StringIO('abc,def\nrst,xyz')\n conv = bytes.upper\n res = np.loadtxt(\n txt, dtype=np.dtype("U3"), converters=conv, delimiter=",",\n encoding="bytes")\n expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])\n assert_equal(res, expected)\n\n\ndef test_read_huge_row():\n row = "1.5, 2.5," * 50000\n row = row[:-1] + "\n"\n txt = StringIO(row * 2)\n res = np.loadtxt(txt, delimiter=",", dtype=float)\n assert_equal(res, np.tile([1.5, 2.5], (2, 50000)))\n\n\n@pytest.mark.parametrize("dtype", "edfgFDG")\ndef test_huge_float(dtype):\n # Covers a non-optimized path that is rarely taken:\n field = "0" * 1000 + ".123456789"\n dtype = np.dtype(dtype)\n value = np.loadtxt([field], dtype=dtype)[()]\n assert value == dtype.type("0.123456789")\n\n\n@pytest.mark.parametrize(\n ("given_dtype", "expected_dtype"),\n [\n ("S", np.dtype("S5")),\n ("U", np.dtype("U5")),\n ],\n)\ndef test_string_no_length_given(given_dtype, expected_dtype):\n """\n The given dtype is just 'S' or 'U' with no length. 
In these cases, the\n length of the resulting dtype is determined by the longest string found\n in the file.\n """\n txt = StringIO("AAA,5-1\nBBBBB,0-3\nC,4-9\n")\n res = np.loadtxt(txt, dtype=given_dtype, delimiter=",")\n expected = np.array(\n [['AAA', '5-1'], ['BBBBB', '0-3'], ['C', '4-9']], dtype=expected_dtype\n )\n assert_equal(res, expected)\n assert_equal(res.dtype, expected_dtype)\n\n\ndef test_float_conversion():\n """\n Some tests that the conversion to float64 works as accurately as the\n Python built-in `float` function. In a naive version of the float parser,\n these strings resulted in values that were off by an ULP or two.\n """\n strings = [\n '0.9999999999999999',\n '9876543210.123456',\n '5.43215432154321e+300',\n '0.901',\n '0.333',\n ]\n txt = StringIO('\n'.join(strings))\n res = np.loadtxt(txt)\n expected = np.array([float(s) for s in strings])\n assert_equal(res, expected)\n\n\ndef test_bool():\n # Simple test for bool via integer\n txt = StringIO("1, 0\n10, -1")\n res = np.loadtxt(txt, dtype=bool, delimiter=",")\n assert res.dtype == bool\n assert_array_equal(res, [[True, False], [True, True]])\n # Make sure we use only 1 and 0 on the byte level:\n assert_array_equal(res.view(np.uint8), [[1, 0], [1, 1]])\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\n@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])\n@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning")\ndef test_integer_signs(dtype):\n dtype = np.dtype(dtype)\n assert np.loadtxt(["+2"], dtype=dtype) == 2\n if dtype.kind == "u":\n with pytest.raises(ValueError):\n np.loadtxt(["-1\n"], dtype=dtype)\n else:\n assert np.loadtxt(["-2\n"], dtype=dtype) == -2\n\n for sign in ["++", "+-", "--", "-+"]:\n with pytest.raises(ValueError):\n np.loadtxt([f"{sign}2\n"], dtype=dtype)\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error 
formatting")\n@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])\n@pytest.mark.filterwarnings("error:.*integer via a float.*:DeprecationWarning")\ndef test_implicit_cast_float_to_int_fails(dtype):\n txt = StringIO("1.0, 2.1, 3.7\n4, 5, 6")\n with pytest.raises(ValueError):\n np.loadtxt(txt, dtype=dtype, delimiter=",")\n\n@pytest.mark.parametrize("dtype", (np.complex64, np.complex128))\n@pytest.mark.parametrize("with_parens", (False, True))\ndef test_complex_parsing(dtype, with_parens):\n s = "(1.0-2.5j),3.75,(7+-5.0j)\n(4),(-19e2j),(0)"\n if not with_parens:\n s = s.replace("(", "").replace(")", "")\n\n res = np.loadtxt(StringIO(s), dtype=dtype, delimiter=",")\n expected = np.array(\n [[1.0 - 2.5j, 3.75, 7 - 5j], [4.0, -1900j, 0]], dtype=dtype\n )\n assert_equal(res, expected)\n\n\ndef test_read_from_generator():\n def gen():\n for i in range(4):\n yield f"{i},{2 * i},{i**2}"\n\n res = np.loadtxt(gen(), dtype=int, delimiter=",")\n expected = np.array([[0, 0, 0], [1, 2, 1], [2, 4, 4], [3, 6, 9]])\n assert_equal(res, expected)\n\n\ndef test_read_from_generator_multitype():\n def gen():\n for i in range(3):\n yield f"{i} {i / 4}"\n\n res = np.loadtxt(gen(), dtype="i, d", delimiter=" ")\n expected = np.array([(0, 0.0), (1, 0.25), (2, 0.5)], dtype="i, d")\n assert_equal(res, expected)\n\n\ndef test_read_from_bad_generator():\n def gen():\n yield from ["1,2", b"3, 5", 12738]\n\n with pytest.raises(\n TypeError, match=r"non-string returned while reading data"):\n np.loadtxt(gen(), dtype="i, i", delimiter=",")\n\n\n@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")\ndef test_object_cleanup_on_read_error():\n sentinel = object()\n already_read = 0\n\n def conv(x):\n nonlocal already_read\n if already_read > 4999:\n raise ValueError("failed half-way through!")\n already_read += 1\n return sentinel\n\n txt = StringIO("x\n" * 10000)\n\n with pytest.raises(ValueError, match="at row 5000, column 1"):\n np.loadtxt(txt, dtype=object, converters={0: 
conv})\n\n assert sys.getrefcount(sentinel) == 2\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\ndef test_character_not_bytes_compatible():\n """Test exception when a character cannot be encoded as 'S'."""\n data = StringIO("–") # == \u2013\n with pytest.raises(ValueError):\n np.loadtxt(data, dtype="S5")\n\n\n@pytest.mark.parametrize("conv", (0, [float], ""))\ndef test_invalid_converter(conv):\n msg = (\n "converters must be a dictionary mapping columns to converter "\n "functions or a single callable."\n )\n with pytest.raises(TypeError, match=msg):\n np.loadtxt(StringIO("1 2\n3 4"), converters=conv)\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\ndef test_converters_dict_raises_non_integer_key():\n with pytest.raises(TypeError, match="keys of the converters dict"):\n np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int})\n with pytest.raises(TypeError, match="keys of the converters dict"):\n np.loadtxt(StringIO("1 2\n3 4"), converters={"a": int}, usecols=0)\n\n\n@pytest.mark.parametrize("bad_col_ind", (3, -3))\ndef test_converters_dict_raises_non_col_key(bad_col_ind):\n data = StringIO("1 2\n3 4")\n with pytest.raises(ValueError, match="converter specified for column"):\n np.loadtxt(data, converters={bad_col_ind: int})\n\n\ndef test_converters_dict_raises_val_not_callable():\n with pytest.raises(TypeError,\n match="values of the converters dictionary must be callable"):\n np.loadtxt(StringIO("1 2\n3 4"), converters={0: 1})\n\n\n@pytest.mark.parametrize("q", ('"', "'", "`"))\ndef test_quoted_field(q):\n txt = StringIO(\n f"{q}alpha, x{q}, 2.5\n{q}beta, y{q}, 4.5\n{q}gamma, z{q}, 5.0\n"\n )\n dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])\n expected = np.array(\n [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype\n )\n\n res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar=q)\n 
assert_array_equal(res, expected)\n\n\n@pytest.mark.parametrize("q", ('"', "'", "`"))\ndef test_quoted_field_with_whitepace_delimiter(q):\n txt = StringIO(\n f"{q}alpha, x{q} 2.5\n{q}beta, y{q} 4.5\n{q}gamma, z{q} 5.0\n"\n )\n dtype = np.dtype([('f0', 'U8'), ('f1', np.float64)])\n expected = np.array(\n [("alpha, x", 2.5), ("beta, y", 4.5), ("gamma, z", 5.0)], dtype=dtype\n )\n\n res = np.loadtxt(txt, dtype=dtype, delimiter=None, quotechar=q)\n assert_array_equal(res, expected)\n\n\ndef test_quote_support_default():\n """Support for quoted fields is disabled by default."""\n txt = StringIO('"lat,long", 45, 30\n')\n dtype = np.dtype([('f0', 'U24'), ('f1', np.float64), ('f2', np.float64)])\n\n with pytest.raises(ValueError,\n match="the dtype passed requires 3 columns but 4 were"):\n np.loadtxt(txt, dtype=dtype, delimiter=",")\n\n # Enable quoting support with non-None value for quotechar param\n txt.seek(0)\n expected = np.array([("lat,long", 45., 30.)], dtype=dtype)\n\n res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"')\n assert_array_equal(res, expected)\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\ndef test_quotechar_multichar_error():\n txt = StringIO("1,2\n3,4")\n msg = r".*must be a single unicode character or None"\n with pytest.raises(TypeError, match=msg):\n np.loadtxt(txt, delimiter=",", quotechar="''")\n\n\ndef test_comment_multichar_error_with_quote():\n txt = StringIO("1,2\n3,4")\n msg = (\n "when multiple comments or a multi-character comment is given, "\n "quotes are not supported."\n )\n with pytest.raises(ValueError, match=msg):\n np.loadtxt(txt, delimiter=",", comments="123", quotechar='"')\n with pytest.raises(ValueError, match=msg):\n np.loadtxt(txt, delimiter=",", comments=["#", "%"], quotechar='"')\n\n # A single character string in a tuple is unpacked though:\n res = np.loadtxt(txt, delimiter=",", comments=("#",), quotechar="'")\n assert_equal(res, [[1, 2], 
[3, 4]])\n\n\ndef test_structured_dtype_with_quotes():\n data = StringIO(\n\n "1000;2.4;'alpha';-34\n"\n "2000;3.1;'beta';29\n"\n "3500;9.9;'gamma';120\n"\n "4090;8.1;'delta';0\n"\n "5001;4.4;'epsilon';-99\n"\n "6543;7.8;'omega';-1\n"\n\n )\n dtype = np.dtype(\n [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)]\n )\n expected = np.array(\n [\n (1000, 2.4, "alpha", -34),\n (2000, 3.1, "beta", 29),\n (3500, 9.9, "gamma", 120),\n (4090, 8.1, "delta", 0),\n (5001, 4.4, "epsilon", -99),\n (6543, 7.8, "omega", -1)\n ],\n dtype=dtype\n )\n res = np.loadtxt(data, dtype=dtype, delimiter=";", quotechar="'")\n assert_array_equal(res, expected)\n\n\ndef test_quoted_field_is_not_empty():\n txt = StringIO('1\n\n"4"\n""')\n expected = np.array(["1", "4", ""], dtype="U1")\n res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"')\n assert_equal(res, expected)\n\ndef test_quoted_field_is_not_empty_nonstrict():\n # Same as test_quoted_field_is_not_empty but check that we are not strict\n # about missing closing quote (this is the `csv.reader` default also)\n txt = StringIO('1\n\n"4"\n"')\n expected = np.array(["1", "4", ""], dtype="U1")\n res = np.loadtxt(txt, delimiter=",", dtype="U1", quotechar='"')\n assert_equal(res, expected)\n\ndef test_consecutive_quotechar_escaped():\n txt = StringIO('"Hello, my name is ""Monty""!"')\n expected = np.array('Hello, my name is "Monty"!', dtype="U40")\n res = np.loadtxt(txt, dtype="U40", delimiter=",", quotechar='"')\n assert_equal(res, expected)\n\n\n@pytest.mark.parametrize("data", ("", "\n\n\n", "# 1 2 3\n# 4 5 6\n"))\n@pytest.mark.parametrize("ndmin", (0, 1, 2))\n@pytest.mark.parametrize("usecols", [None, (1, 2, 3)])\ndef test_warn_on_no_data(data, ndmin, usecols):\n """Check that a UserWarning is emitted when no data is read from input."""\n if usecols is not None:\n expected_shape = (0, 3)\n elif ndmin == 2:\n expected_shape = (0, 1) # guess a single column?!\n else:\n expected_shape = (0,)\n\n txt = 
StringIO(data)\n with pytest.warns(UserWarning, match="input contained no data"):\n res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols)\n assert res.shape == expected_shape\n\n with NamedTemporaryFile(mode="w") as fh:\n fh.write(data)\n fh.seek(0)\n with pytest.warns(UserWarning, match="input contained no data"):\n res = np.loadtxt(txt, ndmin=ndmin, usecols=usecols)\n assert res.shape == expected_shape\n\n@pytest.mark.parametrize("skiprows", (2, 3))\ndef test_warn_on_skipped_data(skiprows):\n data = "1 2 3\n4 5 6"\n txt = StringIO(data)\n with pytest.warns(UserWarning, match="input contained no data"):\n np.loadtxt(txt, skiprows=skiprows)\n\n\n@pytest.mark.parametrize(["dtype", "value"], [\n ("i2", 0x0001), ("u2", 0x0001),\n ("i4", 0x00010203), ("u4", 0x00010203),\n ("i8", 0x0001020304050607), ("u8", 0x0001020304050607),\n # The following values are constructed to lead to unique bytes:\n ("float16", 3.07e-05),\n ("float32", 9.2557e-41), ("complex64", 9.2557e-41 + 2.8622554e-29j),\n ("float64", -1.758571353180402e-24),\n # Here and below, the repr side-steps a small loss of precision in\n # complex `str` in PyPy (which is probably fine, as repr works):\n ("complex128", repr(5.406409232372729e-29 - 1.758571353180402e-24j)),\n # Use integer values that fit into double. 
Everything else leads to\n # problems due to longdoubles going via double and decimal strings\n # causing rounding errors.\n ("longdouble", 0x01020304050607),\n ("clongdouble", repr(0x01020304050607 + (0x00121314151617 * 1j))),\n ("U2", "\U00010203\U000a0b0c")])\n@pytest.mark.parametrize("swap", [True, False])\ndef test_byteswapping_and_unaligned(dtype, value, swap):\n # Try to create "interesting" values within the valid unicode range:\n dtype = np.dtype(dtype)\n data = [f"x,{value}\n"] # repr as PyPy `str` truncates some\n if swap:\n dtype = dtype.newbyteorder()\n full_dt = np.dtype([("a", "S1"), ("b", dtype)], align=False)\n # The above ensures that the interesting "b" field is unaligned:\n assert full_dt.fields["b"][1] == 1\n res = np.loadtxt(data, dtype=full_dt, delimiter=",",\n max_rows=1) # max-rows prevents over-allocation\n assert res["b"] == dtype.type(value)\n\n\n@pytest.mark.parametrize("dtype",\n np.typecodes["AllInteger"] + "efdFD" + "?")\ndef test_unicode_whitespace_stripping(dtype):\n # Test that all numeric types (and bool) strip whitespace correctly\n # \u202F is a narrow no-break space, `\n` is just a whitespace if quoted.\n # Currently, skip float128 as it did not always support this and has no\n # "custom" parsing:\n txt = StringIO(' 3 ,"\u202F2\n"')\n res = np.loadtxt(txt, dtype=dtype, delimiter=",", quotechar='"')\n assert_array_equal(res, np.array([3, 2]).astype(dtype))\n\n\n@pytest.mark.parametrize("dtype", "FD")\ndef test_unicode_whitespace_stripping_complex(dtype):\n # Complex has a few extra cases since it has two components and\n # parentheses\n line = " 1 , 2+3j , ( 4+5j ), ( 6+-7j ) , 8j , ( 9j ) \n"\n data = [line, line.replace(" ", "\u202F")]\n res = np.loadtxt(data, dtype=dtype, delimiter=',')\n assert_array_equal(res, np.array([[1, 2 + 3j, 4 + 5j, 6 - 7j, 8j, 9j]] * 2))\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\n@pytest.mark.parametrize("dtype", 
"FD")\n@pytest.mark.parametrize("field",\n ["1 +2j", "1+ 2j", "1+2 j", "1+-+3", "(1j", "(1", "(1+2j", "1+2j)"])\ndef test_bad_complex(dtype, field):\n with pytest.raises(ValueError):\n np.loadtxt([field + "\n"], dtype=dtype, delimiter=",")\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\n@pytest.mark.parametrize("dtype",\n np.typecodes["AllInteger"] + "efgdFDG" + "?")\ndef test_nul_character_error(dtype):\n # Test that a \0 character is correctly recognized as an error even if\n # what comes before is valid (not everything gets parsed internally).\n if dtype.lower() == "g":\n pytest.xfail("longdouble/clongdouble assignment may misbehave.")\n with pytest.raises(ValueError):\n np.loadtxt(["1\000"], dtype=dtype, delimiter=",", quotechar='"')\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\n@pytest.mark.parametrize("dtype",\n np.typecodes["AllInteger"] + "efgdFDG" + "?")\ndef test_no_thousands_support(dtype):\n # Mainly to document behaviour, Python supports thousands like 1_1.\n # (e and G may end up using different conversion and support it, this is\n # a bug but happens...)\n if dtype == "e":\n pytest.skip("half assignment currently uses Python float converter")\n if dtype in "eG":\n pytest.xfail("clongdouble assignment is buggy (uses `complex`?).")\n\n assert int("1_1") == float("1_1") == complex("1_1") == 11\n with pytest.raises(ValueError):\n np.loadtxt(["1_1\n"], dtype=dtype)\n\n\n@pytest.mark.parametrize("data", [\n ["1,2\n", "2\n,3\n"],\n ["1,2\n", "2\r,3\n"]])\ndef test_bad_newline_in_iterator(data):\n # In NumPy <=1.22 this was accepted, because newlines were completely\n # ignored when the input was an iterable. 
This could be changed, but right\n # now, we raise an error.\n msg = "Found an unquoted embedded newline within a single line"\n with pytest.raises(ValueError, match=msg):\n np.loadtxt(data, delimiter=",")\n\n\n@pytest.mark.parametrize("data", [\n ["1,2\n", "2,3\r\n"], # a universal newline\n ["1,2\n", "'2\n',3\n"], # a quoted newline\n ["1,2\n", "'2\r',3\n"],\n ["1,2\n", "'2\r\n',3\n"],\n])\ndef test_good_newline_in_iterator(data):\n # The quoted newlines will be untransformed here, but are just whitespace.\n res = np.loadtxt(data, delimiter=",", quotechar="'")\n assert_array_equal(res, [[1., 2.], [2., 3.]])\n\n\n@pytest.mark.parametrize("newline", ["\n", "\r", "\r\n"])\ndef test_universal_newlines_quoted(newline):\n # Check that universal newline support within the tokenizer is not applied\n # to quoted fields. (note that lines must end in newline or quoted\n # fields will not include a newline at all)\n data = ['1,"2\n"\n', '3,"4\n', '1"\n']\n data = [row.replace("\n", newline) for row in data]\n res = np.loadtxt(data, dtype=object, delimiter=",", quotechar='"')\n assert_array_equal(res, [['1', f'2{newline}'], ['3', f'4{newline}1']])\n\n\ndef test_null_character():\n # Basic tests to check that the NUL character is not special:\n res = np.loadtxt(["1\0002\0003\n", "4\0005\0006"], delimiter="\000")\n assert_array_equal(res, [[1, 2, 3], [4, 5, 6]])\n\n # Also not as part of a field (avoid unicode/arrays as unicode strips \0)\n res = np.loadtxt(["1\000,2\000,3\n", "4\000,5\000,6"],\n delimiter=",", dtype=object)\n assert res.tolist() == [["1\000", "2\000", "3"], ["4\000", "5\000", "6"]]\n\n\ndef test_iterator_fails_getting_next_line():\n class BadSequence:\n def __len__(self):\n return 100\n\n def __getitem__(self, item):\n if item == 50:\n raise RuntimeError("Bad things happened!")\n return f"{item}, {item + 1}"\n\n with pytest.raises(RuntimeError, match="Bad things happened!"):\n np.loadtxt(BadSequence(), dtype=int, delimiter=",")\n\n\nclass 
TestCReaderUnitTests:\n # These are internal tests for path that should not be possible to hit\n # unless things go very very wrong somewhere.\n def test_not_an_filelike(self):\n with pytest.raises(AttributeError, match=".*read"):\n np._core._multiarray_umath._load_from_filelike(\n object(), dtype=np.dtype("i"), filelike=True)\n\n def test_filelike_read_fails(self):\n # Can only be reached if loadtxt opens the file, so it is hard to do\n # via the public interface (although maybe not impossible considering\n # the current "DataClass" backing).\n class BadFileLike:\n counter = 0\n\n def read(self, size):\n self.counter += 1\n if self.counter > 20:\n raise RuntimeError("Bad bad bad!")\n return "1,2,3\n"\n\n with pytest.raises(RuntimeError, match="Bad bad bad!"):\n np._core._multiarray_umath._load_from_filelike(\n BadFileLike(), dtype=np.dtype("i"), filelike=True)\n\n def test_filelike_bad_read(self):\n # Can only be reached if loadtxt opens the file, so it is hard to do\n # via the public interface (although maybe not impossible considering\n # the current "DataClass" backing).\n\n class BadFileLike:\n counter = 0\n\n def read(self, size):\n return 1234 # not a string!\n\n with pytest.raises(TypeError,\n match="non-string returned while reading data"):\n np._core._multiarray_umath._load_from_filelike(\n BadFileLike(), dtype=np.dtype("i"), filelike=True)\n\n def test_not_an_iter(self):\n with pytest.raises(TypeError,\n match="error reading from object, expected an iterable"):\n np._core._multiarray_umath._load_from_filelike(\n object(), dtype=np.dtype("i"), filelike=False)\n\n def test_bad_type(self):\n with pytest.raises(TypeError, match="internal error: dtype must"):\n np._core._multiarray_umath._load_from_filelike(\n object(), dtype="i", filelike=False)\n\n def test_bad_encoding(self):\n with pytest.raises(TypeError, match="encoding must be a unicode"):\n np._core._multiarray_umath._load_from_filelike(\n object(), dtype=np.dtype("i"), filelike=False, 
encoding=123)\n\n @pytest.mark.parametrize("newline", ["\r", "\n", "\r\n"])\n def test_manual_universal_newlines(self, newline):\n # This is currently not available to users, because we should always\n # open files with universal newlines enabled `newlines=None`.\n # (And reading from an iterator uses slightly different code paths.)\n # We have no real support for `newline="\r"` or `newline="\n" as the\n # user cannot specify those options.\n data = StringIO('0\n1\n"2\n"\n3\n4 #\n'.replace("\n", newline),\n newline="")\n\n res = np._core._multiarray_umath._load_from_filelike(\n data, dtype=np.dtype("U10"), filelike=True,\n quote='"', comment="#", skiplines=1)\n assert_array_equal(res[:, 0], ["1", f"2{newline}", "3", "4 "])\n\n\ndef test_delimiter_comment_collision_raises():\n with pytest.raises(TypeError, match=".*control characters.*incompatible"):\n np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=",")\n\n\ndef test_delimiter_quotechar_collision_raises():\n with pytest.raises(TypeError, match=".*control characters.*incompatible"):\n np.loadtxt(StringIO("1, 2, 3"), delimiter=",", quotechar=",")\n\n\ndef test_comment_quotechar_collision_raises():\n with pytest.raises(TypeError, match=".*control characters.*incompatible"):\n np.loadtxt(StringIO("1 2 3"), comments="#", quotechar="#")\n\n\ndef test_delimiter_and_multiple_comments_collision_raises():\n with pytest.raises(\n TypeError, match="Comment characters.*cannot include the delimiter"\n ):\n np.loadtxt(StringIO("1, 2, 3"), delimiter=",", comments=["#", ","])\n\n\n@pytest.mark.parametrize(\n "ws",\n (\n " ", # space\n "\t", # tab\n "\u2003", # em\n "\u00A0", # non-break\n "\u3000", # ideographic space\n )\n)\ndef test_collision_with_default_delimiter_raises(ws):\n with pytest.raises(TypeError, match=".*control characters.*incompatible"):\n np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), comments=ws)\n with pytest.raises(TypeError, match=".*control characters.*incompatible"):\n 
np.loadtxt(StringIO(f"1{ws}2{ws}3\n4{ws}5{ws}6\n"), quotechar=ws)\n\n\n@pytest.mark.parametrize("nl", ("\n", "\r"))\ndef test_control_character_newline_raises(nl):\n txt = StringIO(f"1{nl}2{nl}3{nl}{nl}4{nl}5{nl}6{nl}{nl}")\n msg = "control character.*cannot be a newline"\n with pytest.raises(TypeError, match=msg):\n np.loadtxt(txt, delimiter=nl)\n with pytest.raises(TypeError, match=msg):\n np.loadtxt(txt, comments=nl)\n with pytest.raises(TypeError, match=msg):\n np.loadtxt(txt, quotechar=nl)\n\n\n@pytest.mark.parametrize(\n ("generic_data", "long_datum", "unitless_dtype", "expected_dtype"),\n [\n ("2012-03", "2013-01-15", "M8", "M8[D]"), # Datetimes\n ("spam-a-lot", "tis_but_a_scratch", "U", "U17"), # str\n ],\n)\n@pytest.mark.parametrize("nrows", (10, 50000, 60000)) # lt, eq, gt chunksize\ndef test_parametric_unit_discovery(\n generic_data, long_datum, unitless_dtype, expected_dtype, nrows\n):\n """Check that the correct unit (e.g. month, day, second) is discovered from\n the data when a user specifies a unitless datetime."""\n # Unit should be "D" (days) due to last entry\n data = [generic_data] * nrows + [long_datum]\n expected = np.array(data, dtype=expected_dtype)\n assert len(data) == nrows + 1\n assert len(data) == len(expected)\n\n # file-like path\n txt = StringIO("\n".join(data))\n a = np.loadtxt(txt, dtype=unitless_dtype)\n assert len(a) == len(expected)\n assert a.dtype == expected.dtype\n assert_equal(a, expected)\n\n # file-obj path\n fd, fname = mkstemp()\n os.close(fd)\n with open(fname, "w") as fh:\n fh.write("\n".join(data) + "\n")\n # loading the full file...\n a = np.loadtxt(fname, dtype=unitless_dtype)\n assert len(a) == len(expected)\n assert a.dtype == expected.dtype\n assert_equal(a, expected)\n # loading half of the file...\n a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows / 2))\n os.remove(fname)\n assert len(a) == int(nrows / 2)\n assert_equal(a, expected[:int(nrows / 2)])\n\n\ndef 
test_str_dtype_unit_discovery_with_converter():\n data = ["spam-a-lot"] * 60000 + ["XXXtis_but_a_scratch"]\n expected = np.array(\n ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17"\n )\n conv = lambda s: s.removeprefix("XXX")\n\n # file-like path\n txt = StringIO("\n".join(data))\n a = np.loadtxt(txt, dtype="U", converters=conv)\n assert a.dtype == expected.dtype\n assert_equal(a, expected)\n\n # file-obj path\n fd, fname = mkstemp()\n os.close(fd)\n with open(fname, "w") as fh:\n fh.write("\n".join(data))\n a = np.loadtxt(fname, dtype="U", converters=conv)\n os.remove(fname)\n assert a.dtype == expected.dtype\n assert_equal(a, expected)\n\n\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\ndef test_control_character_empty():\n with pytest.raises(TypeError, match="Text reading control character must"):\n np.loadtxt(StringIO("1 2 3"), delimiter="")\n with pytest.raises(TypeError, match="Text reading control character must"):\n np.loadtxt(StringIO("1 2 3"), quotechar="")\n with pytest.raises(ValueError, match="comments cannot be an empty string"):\n np.loadtxt(StringIO("1 2 3"), comments="")\n with pytest.raises(ValueError, match="comments cannot be an empty string"):\n np.loadtxt(StringIO("1 2 3"), comments=["#", ""])\n\n\ndef test_control_characters_as_bytes():\n """Byte control characters (comments, delimiter) are supported."""\n a = np.loadtxt(StringIO("#header\n1,2,3"), comments=b"#", delimiter=b",")\n assert_equal(a, [1, 2, 3])\n\n\n@pytest.mark.filterwarnings('ignore::UserWarning')\ndef test_field_growing_cases():\n # Test empty field appending/growing (each field still takes 1 character)\n # to see if the final field appending does not create issues.\n res = np.loadtxt([""], delimiter=",", dtype=bytes)\n assert len(res) == 0\n\n for i in range(1, 1024):\n res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10)\n assert len(res) == i + 
1\n\n@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000))\ndef test_maxrows_exceeding_chunksize(nmax):\n # tries to read all of the file,\n # or less, equal, greater than _loadtxt_chunksize\n file_length = 60000\n\n # file-like path\n data = ["a 0.5 1"] * file_length\n txt = StringIO("\n".join(data))\n res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax)\n assert len(res) == nmax\n\n # file-obj path\n fd, fname = mkstemp()\n os.close(fd)\n with open(fname, "w") as fh:\n fh.write("\n".join(data))\n res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax)\n os.remove(fname)\n assert len(res) == nmax\n\n@pytest.mark.parametrize("nskip", (0, 10000, 12345, 50000, 67891, 100000))\ndef test_skiprow_exceeding_maxrows_exceeding_chunksize(tmpdir, nskip):\n # tries to read a file in chunks by skipping a variable amount of lines,\n # less, equal, greater than max_rows\n file_length = 110000\n data = "\n".join(f"{i} a 0.5 1" for i in range(1, file_length + 1))\n expected_length = min(60000, file_length - nskip)\n expected = np.arange(nskip + 1, nskip + 1 + expected_length).astype(str)\n\n # file-like path\n txt = StringIO(data)\n res = np.loadtxt(txt, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000)\n assert len(res) == expected_length\n # are the right lines read in res?\n assert_array_equal(expected, res[:, 0])\n\n # file-obj path\n tmp_file = tmpdir / "test_data.txt"\n tmp_file.write(data)\n fname = str(tmp_file)\n res = np.loadtxt(fname, dtype='str', delimiter=" ", skiprows=nskip, max_rows=60000)\n assert len(res) == expected_length\n # are the right lines read in res?\n assert_array_equal(expected, res[:, 0])\n | .venv\Lib\site-packages\numpy\lib\tests\test_loadtxt.py | test_loadtxt.py | Python | 41,658 | 0.95 | 0.122616 | 0.085106 | node-utils | 922 | 2024-06-15T19:39:44.733022 | Apache-2.0 | true | abeb0671724294b1b547f6130c9d6230 |
import numbers\nimport operator\n\nimport numpy as np\nfrom numpy.testing import assert_, assert_equal, assert_raises\n\n# NOTE: This class should be kept as an exact copy of the example from the\n# docstring for NDArrayOperatorsMixin.\n\nclass ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):\n def __init__(self, value):\n self.value = np.asarray(value)\n\n # One might also consider adding the built-in list type to this\n # list, to support operations like np.add(array_like, list)\n _HANDLED_TYPES = (np.ndarray, numbers.Number)\n\n def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n out = kwargs.get('out', ())\n for x in inputs + out:\n # Only support operations with instances of _HANDLED_TYPES.\n # Use ArrayLike instead of type(self) for isinstance to\n # allow subclasses that don't override __array_ufunc__ to\n # handle ArrayLike objects.\n if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):\n return NotImplemented\n\n # Defer to the implementation of the ufunc on unwrapped values.\n inputs = tuple(x.value if isinstance(x, ArrayLike) else x\n for x in inputs)\n if out:\n kwargs['out'] = tuple(\n x.value if isinstance(x, ArrayLike) else x\n for x in out)\n result = getattr(ufunc, method)(*inputs, **kwargs)\n\n if type(result) is tuple:\n # multiple return values\n return tuple(type(self)(x) for x in result)\n elif method == 'at':\n # no return value\n return None\n else:\n # one return value\n return type(self)(result)\n\n def __repr__(self):\n return f'{type(self).__name__}({self.value!r})'\n\n\ndef wrap_array_like(result):\n if type(result) is tuple:\n return tuple(ArrayLike(r) for r in result)\n else:\n return ArrayLike(result)\n\n\ndef _assert_equal_type_and_value(result, expected, err_msg=None):\n assert_equal(type(result), type(expected), err_msg=err_msg)\n if isinstance(result, tuple):\n assert_equal(len(result), len(expected), err_msg=err_msg)\n for result_item, expected_item in zip(result, expected):\n 
_assert_equal_type_and_value(result_item, expected_item, err_msg)\n else:\n assert_equal(result.value, expected.value, err_msg=err_msg)\n assert_equal(getattr(result.value, 'dtype', None),\n getattr(expected.value, 'dtype', None), err_msg=err_msg)\n\n\n_ALL_BINARY_OPERATORS = [\n operator.lt,\n operator.le,\n operator.eq,\n operator.ne,\n operator.gt,\n operator.ge,\n operator.add,\n operator.sub,\n operator.mul,\n operator.truediv,\n operator.floordiv,\n operator.mod,\n divmod,\n pow,\n operator.lshift,\n operator.rshift,\n operator.and_,\n operator.xor,\n operator.or_,\n]\n\n\nclass TestNDArrayOperatorsMixin:\n\n def test_array_like_add(self):\n\n def check(result):\n _assert_equal_type_and_value(result, ArrayLike(0))\n\n check(ArrayLike(0) + 0)\n check(0 + ArrayLike(0))\n\n check(ArrayLike(0) + np.array(0))\n check(np.array(0) + ArrayLike(0))\n\n check(ArrayLike(np.array(0)) + 0)\n check(0 + ArrayLike(np.array(0)))\n\n check(ArrayLike(np.array(0)) + np.array(0))\n check(np.array(0) + ArrayLike(np.array(0)))\n\n def test_inplace(self):\n array_like = ArrayLike(np.array([0]))\n array_like += 1\n _assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))\n\n array = np.array([0])\n array += ArrayLike(1)\n _assert_equal_type_and_value(array, ArrayLike(np.array([1])))\n\n def test_opt_out(self):\n\n class OptOut:\n """Object that opts out of __array_ufunc__."""\n __array_ufunc__ = None\n\n def __add__(self, other):\n return self\n\n def __radd__(self, other):\n return self\n\n array_like = ArrayLike(1)\n opt_out = OptOut()\n\n # supported operations\n assert_(array_like + opt_out is opt_out)\n assert_(opt_out + array_like is opt_out)\n\n # not supported\n with assert_raises(TypeError):\n # don't use the Python default, array_like = array_like + opt_out\n array_like += opt_out\n with assert_raises(TypeError):\n array_like - opt_out\n with assert_raises(TypeError):\n opt_out - array_like\n\n def test_subclass(self):\n\n class SubArrayLike(ArrayLike):\n 
"""Should take precedence over ArrayLike."""\n\n x = ArrayLike(0)\n y = SubArrayLike(1)\n _assert_equal_type_and_value(x + y, y)\n _assert_equal_type_and_value(y + x, y)\n\n def test_object(self):\n x = ArrayLike(0)\n obj = object()\n with assert_raises(TypeError):\n x + obj\n with assert_raises(TypeError):\n obj + x\n with assert_raises(TypeError):\n x += obj\n\n def test_unary_methods(self):\n array = np.array([-1, 0, 1, 2])\n array_like = ArrayLike(array)\n for op in [operator.neg,\n operator.pos,\n abs,\n operator.invert]:\n _assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))\n\n def test_forward_binary_methods(self):\n array = np.array([-1, 0, 1, 2])\n array_like = ArrayLike(array)\n for op in _ALL_BINARY_OPERATORS:\n expected = wrap_array_like(op(array, 1))\n actual = op(array_like, 1)\n err_msg = f'failed for operator {op}'\n _assert_equal_type_and_value(expected, actual, err_msg=err_msg)\n\n def test_reflected_binary_methods(self):\n for op in _ALL_BINARY_OPERATORS:\n expected = wrap_array_like(op(2, 1))\n actual = op(2, ArrayLike(1))\n err_msg = f'failed for operator {op}'\n _assert_equal_type_and_value(expected, actual, err_msg=err_msg)\n\n def test_matmul(self):\n array = np.array([1, 2], dtype=np.float64)\n array_like = ArrayLike(array)\n expected = ArrayLike(np.float64(5))\n _assert_equal_type_and_value(expected, np.matmul(array_like, array))\n _assert_equal_type_and_value(\n expected, operator.matmul(array_like, array))\n _assert_equal_type_and_value(\n expected, operator.matmul(array, array_like))\n\n def test_ufunc_at(self):\n array = ArrayLike(np.array([1, 2, 3, 4]))\n assert_(np.negative.at(array, np.array([0, 1])) is None)\n _assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))\n\n def test_ufunc_two_outputs(self):\n mantissa, exponent = np.frexp(2 ** -3)\n expected = (ArrayLike(mantissa), ArrayLike(exponent))\n _assert_equal_type_and_value(\n np.frexp(ArrayLike(2 ** -3)), expected)\n _assert_equal_type_and_value(\n 
np.frexp(ArrayLike(np.array(2 ** -3))), expected)\n | .venv\Lib\site-packages\numpy\lib\tests\test_mixins.py | test_mixins.py | Python | 7,224 | 0.95 | 0.204651 | 0.086207 | awesome-app | 140 | 2024-09-28T21:24:15.370786 | MIT | true | 31e3a643a9cf11aa70c9505693c6ac39 |
import inspect\nimport warnings\nfrom functools import partial\n\nimport pytest\n\nimport numpy as np\nfrom numpy._core.numeric import normalize_axis_tuple\nfrom numpy.exceptions import AxisError, ComplexWarning\nfrom numpy.lib._nanfunctions_impl import _nan_mask, _replace_nan\nfrom numpy.testing import (\n assert_,\n assert_almost_equal,\n assert_array_equal,\n assert_equal,\n assert_raises,\n assert_raises_regex,\n suppress_warnings,\n)\n\n# Test data\n_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],\n [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],\n [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],\n [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])\n\n\n# Rows of _ndat with nans removed\n_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),\n np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),\n np.array([0.1042, -0.5954]),\n np.array([0.1610, 0.1859, 0.3146])]\n\n# Rows of _ndat with nans converted to ones\n_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],\n [0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],\n [1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],\n [0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])\n\n# Rows of _ndat with nans converted to zeros\n_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],\n [0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],\n [0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],\n [0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])\n\n\nclass TestSignatureMatch:\n NANFUNCS = {\n np.nanmin: np.amin,\n np.nanmax: np.amax,\n np.nanargmin: np.argmin,\n np.nanargmax: np.argmax,\n np.nansum: np.sum,\n np.nanprod: np.prod,\n np.nancumsum: np.cumsum,\n np.nancumprod: np.cumprod,\n np.nanmean: np.mean,\n np.nanmedian: np.median,\n np.nanpercentile: np.percentile,\n np.nanquantile: np.quantile,\n np.nanvar: np.var,\n np.nanstd: np.std,\n }\n IDS = [k.__name__ for k in NANFUNCS]\n\n @staticmethod\n def get_signature(func, default="..."):\n """Construct a signature and replace all default parameter-values."""\n prm_list = []\n 
signature = inspect.signature(func)\n for prm in signature.parameters.values():\n if prm.default is inspect.Parameter.empty:\n prm_list.append(prm)\n else:\n prm_list.append(prm.replace(default=default))\n return inspect.Signature(prm_list)\n\n @pytest.mark.parametrize("nan_func,func", NANFUNCS.items(), ids=IDS)\n def test_signature_match(self, nan_func, func):\n # Ignore the default parameter-values as they can sometimes differ\n # between the two functions (*e.g.* one has `False` while the other\n # has `np._NoValue`)\n signature = self.get_signature(func)\n nan_signature = self.get_signature(nan_func)\n np.testing.assert_equal(signature, nan_signature)\n\n def test_exhaustiveness(self):\n """Validate that all nan functions are actually tested."""\n np.testing.assert_equal(\n set(self.IDS), set(np.lib._nanfunctions_impl.__all__)\n )\n\n\nclass TestNanFunctions_MinMax:\n\n nanfuncs = [np.nanmin, np.nanmax]\n stdfuncs = [np.min, np.max]\n\n def test_mutation(self):\n # Check that passed array is not modified.\n ndat = _ndat.copy()\n for f in self.nanfuncs:\n f(ndat)\n assert_equal(ndat, _ndat)\n\n def test_keepdims(self):\n mat = np.eye(3)\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n for axis in [None, 0, 1]:\n tgt = rf(mat, axis=axis, keepdims=True)\n res = nf(mat, axis=axis, keepdims=True)\n assert_(res.ndim == tgt.ndim)\n\n def test_out(self):\n mat = np.eye(3)\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n resout = np.zeros(3)\n tgt = rf(mat, axis=1)\n res = nf(mat, axis=1, out=resout)\n assert_almost_equal(res, resout)\n assert_almost_equal(res, tgt)\n\n def test_dtype_from_input(self):\n codes = 'efdgFDG'\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n for c in codes:\n mat = np.eye(3, dtype=c)\n tgt = rf(mat, axis=1).dtype.type\n res = nf(mat, axis=1).dtype.type\n assert_(res is tgt)\n # scalar case\n tgt = rf(mat, axis=None).dtype.type\n res = nf(mat, axis=None).dtype.type\n assert_(res is tgt)\n\n def test_result_values(self):\n for nf, 
rf in zip(self.nanfuncs, self.stdfuncs):\n tgt = [rf(d) for d in _rdat]\n res = nf(_ndat, axis=1)\n assert_almost_equal(res, tgt)\n\n @pytest.mark.parametrize("axis", [None, 0, 1])\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n @pytest.mark.parametrize("array", [\n np.array(np.nan),\n np.full((3, 3), np.nan),\n ], ids=["0d", "2d"])\n def test_allnans(self, axis, dtype, array):\n if axis is not None and array.ndim == 0:\n pytest.skip("`axis != None` not supported for 0d arrays")\n\n array = array.astype(dtype)\n match = "All-NaN slice encountered"\n for func in self.nanfuncs:\n with pytest.warns(RuntimeWarning, match=match):\n out = func(array, axis=axis)\n assert np.isnan(out).all()\n assert out.dtype == array.dtype\n\n def test_masked(self):\n mat = np.ma.fix_invalid(_ndat)\n msk = mat._mask.copy()\n for f in [np.nanmin]:\n res = f(mat, axis=1)\n tgt = f(_ndat, axis=1)\n assert_equal(res, tgt)\n assert_equal(mat._mask, msk)\n assert_(not np.isinf(mat).any())\n\n def test_scalar(self):\n for f in self.nanfuncs:\n assert_(f(0.) 
== 0.)\n\n def test_subclass(self):\n class MyNDArray(np.ndarray):\n pass\n\n # Check that it works and that type and\n # shape are preserved\n mine = np.eye(3).view(MyNDArray)\n for f in self.nanfuncs:\n res = f(mine, axis=0)\n assert_(isinstance(res, MyNDArray))\n assert_(res.shape == (3,))\n res = f(mine, axis=1)\n assert_(isinstance(res, MyNDArray))\n assert_(res.shape == (3,))\n res = f(mine)\n assert_(res.shape == ())\n\n # check that rows of nan are dealt with for subclasses (#4628)\n mine[1] = np.nan\n for f in self.nanfuncs:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n res = f(mine, axis=0)\n assert_(isinstance(res, MyNDArray))\n assert_(not np.any(np.isnan(res)))\n assert_(len(w) == 0)\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n res = f(mine, axis=1)\n assert_(isinstance(res, MyNDArray))\n assert_(np.isnan(res[1]) and not np.isnan(res[0])\n and not np.isnan(res[2]))\n assert_(len(w) == 1, 'no warning raised')\n assert_(issubclass(w[0].category, RuntimeWarning))\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n res = f(mine)\n assert_(res.shape == ())\n assert_(res != np.nan)\n assert_(len(w) == 0)\n\n def test_object_array(self):\n arr = np.array([[1.0, 2.0], [np.nan, 4.0], [np.nan, np.nan]], dtype=object)\n assert_equal(np.nanmin(arr), 1.0)\n assert_equal(np.nanmin(arr, axis=0), [1.0, 2.0])\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n # assert_equal does not work on object arrays of nan\n assert_equal(list(np.nanmin(arr, axis=1)), [1.0, 4.0, np.nan])\n assert_(len(w) == 1, 'no warning raised')\n assert_(issubclass(w[0].category, RuntimeWarning))\n\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n def test_initial(self, dtype):\n class MyNDArray(np.ndarray):\n pass\n\n ar = np.arange(9).astype(dtype)\n ar[:5] = np.nan\n\n for f in self.nanfuncs:\n initial = 100 if f is np.nanmax 
else 0\n\n ret1 = f(ar, initial=initial)\n assert ret1.dtype == dtype\n assert ret1 == initial\n\n ret2 = f(ar.view(MyNDArray), initial=initial)\n assert ret2.dtype == dtype\n assert ret2 == initial\n\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n def test_where(self, dtype):\n class MyNDArray(np.ndarray):\n pass\n\n ar = np.arange(9).reshape(3, 3).astype(dtype)\n ar[0, :] = np.nan\n where = np.ones_like(ar, dtype=np.bool)\n where[:, 0] = False\n\n for f in self.nanfuncs:\n reference = 4 if f is np.nanmin else 8\n\n ret1 = f(ar, where=where, initial=5)\n assert ret1.dtype == dtype\n assert ret1 == reference\n\n ret2 = f(ar.view(MyNDArray), where=where, initial=5)\n assert ret2.dtype == dtype\n assert ret2 == reference\n\n\nclass TestNanFunctions_ArgminArgmax:\n\n nanfuncs = [np.nanargmin, np.nanargmax]\n\n def test_mutation(self):\n # Check that passed array is not modified.\n ndat = _ndat.copy()\n for f in self.nanfuncs:\n f(ndat)\n assert_equal(ndat, _ndat)\n\n def test_result_values(self):\n for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):\n for row in _ndat:\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, "invalid value encountered in")\n ind = f(row)\n val = row[ind]\n # comparing with NaN is tricky as the result\n # is always false except for NaN != NaN\n assert_(not np.isnan(val))\n assert_(not fcmp(val, row).any())\n assert_(not np.equal(val, row[:ind]).any())\n\n @pytest.mark.parametrize("axis", [None, 0, 1])\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n @pytest.mark.parametrize("array", [\n np.array(np.nan),\n np.full((3, 3), np.nan),\n ], ids=["0d", "2d"])\n def test_allnans(self, axis, dtype, array):\n if axis is not None and array.ndim == 0:\n pytest.skip("`axis != None` not supported for 0d arrays")\n\n array = array.astype(dtype)\n for func in self.nanfuncs:\n with pytest.raises(ValueError, match="All-NaN slice encountered"):\n func(array, axis=axis)\n\n def test_empty(self):\n mat = 
np.zeros((0, 3))\n for f in self.nanfuncs:\n for axis in [0, None]:\n assert_raises_regex(\n ValueError,\n "attempt to get argm.. of an empty sequence",\n f, mat, axis=axis)\n for axis in [1]:\n res = f(mat, axis=axis)\n assert_equal(res, np.zeros(0))\n\n def test_scalar(self):\n for f in self.nanfuncs:\n assert_(f(0.) == 0.)\n\n def test_subclass(self):\n class MyNDArray(np.ndarray):\n pass\n\n # Check that it works and that type and\n # shape are preserved\n mine = np.eye(3).view(MyNDArray)\n for f in self.nanfuncs:\n res = f(mine, axis=0)\n assert_(isinstance(res, MyNDArray))\n assert_(res.shape == (3,))\n res = f(mine, axis=1)\n assert_(isinstance(res, MyNDArray))\n assert_(res.shape == (3,))\n res = f(mine)\n assert_(res.shape == ())\n\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n def test_keepdims(self, dtype):\n ar = np.arange(9).astype(dtype)\n ar[:5] = np.nan\n\n for f in self.nanfuncs:\n reference = 5 if f is np.nanargmin else 8\n ret = f(ar, keepdims=True)\n assert ret.ndim == ar.ndim\n assert ret == reference\n\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n def test_out(self, dtype):\n ar = np.arange(9).astype(dtype)\n ar[:5] = np.nan\n\n for f in self.nanfuncs:\n out = np.zeros((), dtype=np.intp)\n reference = 5 if f is np.nanargmin else 8\n ret = f(ar, out=out)\n assert ret is out\n assert ret == reference\n\n\n_TEST_ARRAYS = {\n "0d": np.array(5),\n "1d": np.array([127, 39, 93, 87, 46])\n}\nfor _v in _TEST_ARRAYS.values():\n _v.setflags(write=False)\n\n\n@pytest.mark.parametrize(\n "dtype",\n np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O",\n)\n@pytest.mark.parametrize("mat", _TEST_ARRAYS.values(), ids=_TEST_ARRAYS.keys())\nclass TestNanFunctions_NumberTypes:\n nanfuncs = {\n np.nanmin: np.min,\n np.nanmax: np.max,\n np.nanargmin: np.argmin,\n np.nanargmax: np.argmax,\n np.nansum: np.sum,\n np.nanprod: np.prod,\n np.nancumsum: np.cumsum,\n np.nancumprod: np.cumprod,\n np.nanmean: np.mean,\n np.nanmedian: 
np.median,\n np.nanvar: np.var,\n np.nanstd: np.std,\n }\n nanfunc_ids = [i.__name__ for i in nanfuncs]\n\n @pytest.mark.parametrize("nanfunc,func", nanfuncs.items(), ids=nanfunc_ids)\n @np.errstate(over="ignore")\n def test_nanfunc(self, mat, dtype, nanfunc, func):\n mat = mat.astype(dtype)\n tgt = func(mat)\n out = nanfunc(mat)\n\n assert_almost_equal(out, tgt)\n if dtype == "O":\n assert type(out) is type(tgt)\n else:\n assert out.dtype == tgt.dtype\n\n @pytest.mark.parametrize(\n "nanfunc,func",\n [(np.nanquantile, np.quantile), (np.nanpercentile, np.percentile)],\n ids=["nanquantile", "nanpercentile"],\n )\n def test_nanfunc_q(self, mat, dtype, nanfunc, func):\n mat = mat.astype(dtype)\n if mat.dtype.kind == "c":\n assert_raises(TypeError, func, mat, q=1)\n assert_raises(TypeError, nanfunc, mat, q=1)\n\n else:\n tgt = func(mat, q=1)\n out = nanfunc(mat, q=1)\n\n assert_almost_equal(out, tgt)\n\n if dtype == "O":\n assert type(out) is type(tgt)\n else:\n assert out.dtype == tgt.dtype\n\n @pytest.mark.parametrize(\n "nanfunc,func",\n [(np.nanvar, np.var), (np.nanstd, np.std)],\n ids=["nanvar", "nanstd"],\n )\n def test_nanfunc_ddof(self, mat, dtype, nanfunc, func):\n mat = mat.astype(dtype)\n tgt = func(mat, ddof=0.5)\n out = nanfunc(mat, ddof=0.5)\n\n assert_almost_equal(out, tgt)\n if dtype == "O":\n assert type(out) is type(tgt)\n else:\n assert out.dtype == tgt.dtype\n\n @pytest.mark.parametrize(\n "nanfunc", [np.nanvar, np.nanstd]\n )\n def test_nanfunc_correction(self, mat, dtype, nanfunc):\n mat = mat.astype(dtype)\n assert_almost_equal(\n nanfunc(mat, correction=0.5), nanfunc(mat, ddof=0.5)\n )\n\n err_msg = "ddof and correction can't be provided simultaneously."\n with assert_raises_regex(ValueError, err_msg):\n nanfunc(mat, ddof=0.5, correction=0.5)\n\n with assert_raises_regex(ValueError, err_msg):\n nanfunc(mat, ddof=1, correction=0)\n\n\nclass SharedNanFunctionsTestsMixin:\n def test_mutation(self):\n # Check that passed array is not modified.\n 
ndat = _ndat.copy()\n for f in self.nanfuncs:\n f(ndat)\n assert_equal(ndat, _ndat)\n\n def test_keepdims(self):\n mat = np.eye(3)\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n for axis in [None, 0, 1]:\n tgt = rf(mat, axis=axis, keepdims=True)\n res = nf(mat, axis=axis, keepdims=True)\n assert_(res.ndim == tgt.ndim)\n\n def test_out(self):\n mat = np.eye(3)\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n resout = np.zeros(3)\n tgt = rf(mat, axis=1)\n res = nf(mat, axis=1, out=resout)\n assert_almost_equal(res, resout)\n assert_almost_equal(res, tgt)\n\n def test_dtype_from_dtype(self):\n mat = np.eye(3)\n codes = 'efdgFDG'\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n for c in codes:\n with suppress_warnings() as sup:\n if nf in {np.nanstd, np.nanvar} and c in 'FDG':\n # Giving the warning is a small bug, see gh-8000\n sup.filter(ComplexWarning)\n tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type\n res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type\n assert_(res is tgt)\n # scalar case\n tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type\n res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type\n assert_(res is tgt)\n\n def test_dtype_from_char(self):\n mat = np.eye(3)\n codes = 'efdgFDG'\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n for c in codes:\n with suppress_warnings() as sup:\n if nf in {np.nanstd, np.nanvar} and c in 'FDG':\n # Giving the warning is a small bug, see gh-8000\n sup.filter(ComplexWarning)\n tgt = rf(mat, dtype=c, axis=1).dtype.type\n res = nf(mat, dtype=c, axis=1).dtype.type\n assert_(res is tgt)\n # scalar case\n tgt = rf(mat, dtype=c, axis=None).dtype.type\n res = nf(mat, dtype=c, axis=None).dtype.type\n assert_(res is tgt)\n\n def test_dtype_from_input(self):\n codes = 'efdgFDG'\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n for c in codes:\n mat = np.eye(3, dtype=c)\n tgt = rf(mat, axis=1).dtype.type\n res = nf(mat, axis=1).dtype.type\n assert_(res is tgt, f"res {res}, tgt {tgt}")\n # scalar case\n 
tgt = rf(mat, axis=None).dtype.type\n res = nf(mat, axis=None).dtype.type\n assert_(res is tgt)\n\n def test_result_values(self):\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n tgt = [rf(d) for d in _rdat]\n res = nf(_ndat, axis=1)\n assert_almost_equal(res, tgt)\n\n def test_scalar(self):\n for f in self.nanfuncs:\n assert_(f(0.) == 0.)\n\n def test_subclass(self):\n class MyNDArray(np.ndarray):\n pass\n\n # Check that it works and that type and\n # shape are preserved\n array = np.eye(3)\n mine = array.view(MyNDArray)\n for f in self.nanfuncs:\n expected_shape = f(array, axis=0).shape\n res = f(mine, axis=0)\n assert_(isinstance(res, MyNDArray))\n assert_(res.shape == expected_shape)\n expected_shape = f(array, axis=1).shape\n res = f(mine, axis=1)\n assert_(isinstance(res, MyNDArray))\n assert_(res.shape == expected_shape)\n expected_shape = f(array).shape\n res = f(mine)\n assert_(isinstance(res, MyNDArray))\n assert_(res.shape == expected_shape)\n\n\nclass TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):\n\n nanfuncs = [np.nansum, np.nanprod]\n stdfuncs = [np.sum, np.prod]\n\n @pytest.mark.parametrize("axis", [None, 0, 1])\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n @pytest.mark.parametrize("array", [\n np.array(np.nan),\n np.full((3, 3), np.nan),\n ], ids=["0d", "2d"])\n def test_allnans(self, axis, dtype, array):\n if axis is not None and array.ndim == 0:\n pytest.skip("`axis != None` not supported for 0d arrays")\n\n array = array.astype(dtype)\n for func, identity in zip(self.nanfuncs, [0, 1]):\n out = func(array, axis=axis)\n assert np.all(out == identity)\n assert out.dtype == array.dtype\n\n def test_empty(self):\n for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):\n mat = np.zeros((0, 3))\n tgt = [tgt_value] * 3\n res = f(mat, axis=0)\n assert_equal(res, tgt)\n tgt = []\n res = f(mat, axis=1)\n assert_equal(res, tgt)\n tgt = tgt_value\n res = f(mat, axis=None)\n assert_equal(res, tgt)\n\n 
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n def test_initial(self, dtype):\n ar = np.arange(9).astype(dtype)\n ar[:5] = np.nan\n\n for f in self.nanfuncs:\n reference = 28 if f is np.nansum else 3360\n ret = f(ar, initial=2)\n assert ret.dtype == dtype\n assert ret == reference\n\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n def test_where(self, dtype):\n ar = np.arange(9).reshape(3, 3).astype(dtype)\n ar[0, :] = np.nan\n where = np.ones_like(ar, dtype=np.bool)\n where[:, 0] = False\n\n for f in self.nanfuncs:\n reference = 26 if f is np.nansum else 2240\n ret = f(ar, where=where, initial=2)\n assert ret.dtype == dtype\n assert ret == reference\n\n\nclass TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):\n\n nanfuncs = [np.nancumsum, np.nancumprod]\n stdfuncs = [np.cumsum, np.cumprod]\n\n @pytest.mark.parametrize("axis", [None, 0, 1])\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n @pytest.mark.parametrize("array", [\n np.array(np.nan),\n np.full((3, 3), np.nan)\n ], ids=["0d", "2d"])\n def test_allnans(self, axis, dtype, array):\n if axis is not None and array.ndim == 0:\n pytest.skip("`axis != None` not supported for 0d arrays")\n\n array = array.astype(dtype)\n for func, identity in zip(self.nanfuncs, [0, 1]):\n out = func(array)\n assert np.all(out == identity)\n assert out.dtype == array.dtype\n\n def test_empty(self):\n for f, tgt_value in zip(self.nanfuncs, [0, 1]):\n mat = np.zeros((0, 3))\n tgt = tgt_value * np.ones((0, 3))\n res = f(mat, axis=0)\n assert_equal(res, tgt)\n tgt = mat\n res = f(mat, axis=1)\n assert_equal(res, tgt)\n tgt = np.zeros(0)\n res = f(mat, axis=None)\n assert_equal(res, tgt)\n\n def test_keepdims(self):\n for f, g in zip(self.nanfuncs, self.stdfuncs):\n mat = np.eye(3)\n for axis in [None, 0, 1]:\n tgt = f(mat, axis=axis, out=None)\n res = g(mat, axis=axis, out=None)\n assert_(res.ndim == tgt.ndim)\n\n for f in self.nanfuncs:\n d = np.ones((3, 5, 7, 11))\n # Randomly set 
some elements to NaN:\n rs = np.random.RandomState(0)\n d[rs.rand(*d.shape) < 0.5] = np.nan\n res = f(d, axis=None)\n assert_equal(res.shape, (1155,))\n for axis in np.arange(4):\n res = f(d, axis=axis)\n assert_equal(res.shape, (3, 5, 7, 11))\n\n def test_result_values(self):\n for axis in (-2, -1, 0, 1, None):\n tgt = np.cumprod(_ndat_ones, axis=axis)\n res = np.nancumprod(_ndat, axis=axis)\n assert_almost_equal(res, tgt)\n tgt = np.cumsum(_ndat_zeros, axis=axis)\n res = np.nancumsum(_ndat, axis=axis)\n assert_almost_equal(res, tgt)\n\n def test_out(self):\n mat = np.eye(3)\n for nf, rf in zip(self.nanfuncs, self.stdfuncs):\n resout = np.eye(3)\n for axis in (-2, -1, 0, 1):\n tgt = rf(mat, axis=axis)\n res = nf(mat, axis=axis, out=resout)\n assert_almost_equal(res, resout)\n assert_almost_equal(res, tgt)\n\n\nclass TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):\n\n nanfuncs = [np.nanmean, np.nanvar, np.nanstd]\n stdfuncs = [np.mean, np.var, np.std]\n\n def test_dtype_error(self):\n for f in self.nanfuncs:\n for dtype in [np.bool, np.int_, np.object_]:\n assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)\n\n def test_out_dtype_error(self):\n for f in self.nanfuncs:\n for dtype in [np.bool, np.int_, np.object_]:\n out = np.empty(_ndat.shape[0], dtype=dtype)\n assert_raises(TypeError, f, _ndat, axis=1, out=out)\n\n def test_ddof(self):\n nanfuncs = [np.nanvar, np.nanstd]\n stdfuncs = [np.var, np.std]\n for nf, rf in zip(nanfuncs, stdfuncs):\n for ddof in [0, 1]:\n tgt = [rf(d, ddof=ddof) for d in _rdat]\n res = nf(_ndat, axis=1, ddof=ddof)\n assert_almost_equal(res, tgt)\n\n def test_ddof_too_big(self):\n nanfuncs = [np.nanvar, np.nanstd]\n stdfuncs = [np.var, np.std]\n dsize = [len(d) for d in _rdat]\n for nf, rf in zip(nanfuncs, stdfuncs):\n for ddof in range(5):\n with suppress_warnings() as sup:\n sup.record(RuntimeWarning)\n sup.filter(ComplexWarning)\n tgt = [ddof >= d for d in dsize]\n res = nf(_ndat, axis=1, ddof=ddof)\n 
assert_equal(np.isnan(res), tgt)\n if any(tgt):\n assert_(len(sup.log) == 1)\n else:\n assert_(len(sup.log) == 0)\n\n @pytest.mark.parametrize("axis", [None, 0, 1])\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n @pytest.mark.parametrize("array", [\n np.array(np.nan),\n np.full((3, 3), np.nan),\n ], ids=["0d", "2d"])\n def test_allnans(self, axis, dtype, array):\n if axis is not None and array.ndim == 0:\n pytest.skip("`axis != None` not supported for 0d arrays")\n\n array = array.astype(dtype)\n match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)"\n for func in self.nanfuncs:\n with pytest.warns(RuntimeWarning, match=match):\n out = func(array, axis=axis)\n assert np.isnan(out).all()\n\n # `nanvar` and `nanstd` convert complex inputs to their\n # corresponding floating dtype\n if func is np.nanmean:\n assert out.dtype == array.dtype\n else:\n assert out.dtype == np.abs(array).dtype\n\n def test_empty(self):\n mat = np.zeros((0, 3))\n for f in self.nanfuncs:\n for axis in [0, None]:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n assert_(np.isnan(f(mat, axis=axis)).all())\n assert_(len(w) == 1)\n assert_(issubclass(w[0].category, RuntimeWarning))\n for axis in [1]:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n assert_equal(f(mat, axis=axis), np.zeros([]))\n assert_(len(w) == 0)\n\n @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])\n def test_where(self, dtype):\n ar = np.arange(9).reshape(3, 3).astype(dtype)\n ar[0, :] = np.nan\n where = np.ones_like(ar, dtype=np.bool)\n where[:, 0] = False\n\n for f, f_std in zip(self.nanfuncs, self.stdfuncs):\n reference = f_std(ar[where][2:])\n dtype_reference = dtype if f is np.nanmean else ar.real.dtype\n\n ret = f(ar, where=where)\n assert ret.dtype == dtype_reference\n np.testing.assert_allclose(ret, reference)\n\n def test_nanstd_with_mean_keyword(self):\n # Setting the seed to make the test reproducible\n 
rng = np.random.RandomState(1234)\n A = rng.randn(10, 20, 5) + 0.5\n A[:, 5, :] = np.nan\n\n mean_out = np.zeros((10, 1, 5))\n std_out = np.zeros((10, 1, 5))\n\n mean = np.nanmean(A,\n out=mean_out,\n axis=1,\n keepdims=True)\n\n # The returned object should be the object specified during calling\n assert mean_out is mean\n\n std = np.nanstd(A,\n out=std_out,\n axis=1,\n keepdims=True,\n mean=mean)\n\n # The returned object should be the object specified during calling\n assert std_out is std\n\n # Shape of returned mean and std should be same\n assert std.shape == mean.shape\n assert std.shape == (10, 1, 5)\n\n # Output should be the same as from the individual algorithms\n std_old = np.nanstd(A, axis=1, keepdims=True)\n\n assert std_old.shape == mean.shape\n assert_almost_equal(std, std_old)\n\n\n_TIME_UNITS = (\n "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"\n)\n\n# All `inexact` + `timdelta64` type codes\n_TYPE_CODES = list(np.typecodes["AllFloat"])\n_TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS]\n\n\nclass TestNanFunctions_Median:\n\n def test_mutation(self):\n # Check that passed array is not modified.\n ndat = _ndat.copy()\n np.nanmedian(ndat)\n assert_equal(ndat, _ndat)\n\n def test_keepdims(self):\n mat = np.eye(3)\n for axis in [None, 0, 1]:\n tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)\n res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)\n assert_(res.ndim == tgt.ndim)\n\n d = np.ones((3, 5, 7, 11))\n # Randomly set some elements to NaN:\n w = np.random.random((4, 200)) * np.array(d.shape)[:, None]\n w = w.astype(np.intp)\n d[tuple(w)] = np.nan\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning)\n res = np.nanmedian(d, axis=None, keepdims=True)\n assert_equal(res.shape, (1, 1, 1, 1))\n res = np.nanmedian(d, axis=(0, 1), keepdims=True)\n assert_equal(res.shape, (1, 1, 7, 11))\n res = np.nanmedian(d, axis=(0, 3), keepdims=True)\n assert_equal(res.shape, (1, 5, 7, 1))\n res = 
np.nanmedian(d, axis=(1,), keepdims=True)\n assert_equal(res.shape, (3, 1, 7, 11))\n res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)\n assert_equal(res.shape, (1, 1, 1, 1))\n res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)\n assert_equal(res.shape, (1, 1, 7, 1))\n\n @pytest.mark.parametrize(\n argnames='axis',\n argvalues=[\n None,\n 1,\n (1, ),\n (0, 1),\n (-3, -1),\n ]\n )\n @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")\n def test_keepdims_out(self, axis):\n d = np.ones((3, 5, 7, 11))\n # Randomly set some elements to NaN:\n w = np.random.random((4, 200)) * np.array(d.shape)[:, None]\n w = w.astype(np.intp)\n d[tuple(w)] = np.nan\n if axis is None:\n shape_out = (1,) * d.ndim\n else:\n axis_norm = normalize_axis_tuple(axis, d.ndim)\n shape_out = tuple(\n 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))\n out = np.empty(shape_out)\n result = np.nanmedian(d, axis=axis, keepdims=True, out=out)\n assert result is out\n assert_equal(result.shape, shape_out)\n\n def test_out(self):\n mat = np.random.rand(3, 3)\n nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)\n resout = np.zeros(3)\n tgt = np.median(mat, axis=1)\n res = np.nanmedian(nan_mat, axis=1, out=resout)\n assert_almost_equal(res, resout)\n assert_almost_equal(res, tgt)\n # 0-d output:\n resout = np.zeros(())\n tgt = np.median(mat, axis=None)\n res = np.nanmedian(nan_mat, axis=None, out=resout)\n assert_almost_equal(res, resout)\n assert_almost_equal(res, tgt)\n res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)\n assert_almost_equal(res, resout)\n assert_almost_equal(res, tgt)\n\n def test_small_large(self):\n # test the small and large code paths, current cutoff 400 elements\n for s in [5, 20, 51, 200, 1000]:\n d = np.random.randn(4, s)\n # Randomly set some elements to NaN:\n w = np.random.randint(0, d.size, size=d.size // 5)\n d.ravel()[w] = np.nan\n d[:, 0] = 1. 
# ensure at least one good value\n # use normal median without nans to compare\n tgt = []\n for x in d:\n nonan = np.compress(~np.isnan(x), x)\n tgt.append(np.median(nonan, overwrite_input=True))\n\n assert_array_equal(np.nanmedian(d, axis=-1), tgt)\n\n def test_result_values(self):\n tgt = [np.median(d) for d in _rdat]\n res = np.nanmedian(_ndat, axis=1)\n assert_almost_equal(res, tgt)\n\n @pytest.mark.parametrize("axis", [None, 0, 1])\n @pytest.mark.parametrize("dtype", _TYPE_CODES)\n def test_allnans(self, dtype, axis):\n mat = np.full((3, 3), np.nan).astype(dtype)\n with suppress_warnings() as sup:\n sup.record(RuntimeWarning)\n\n output = np.nanmedian(mat, axis=axis)\n assert output.dtype == mat.dtype\n assert np.isnan(output).all()\n\n if axis is None:\n assert_(len(sup.log) == 1)\n else:\n assert_(len(sup.log) == 3)\n\n # Check scalar\n scalar = np.array(np.nan).astype(dtype)[()]\n output_scalar = np.nanmedian(scalar)\n assert output_scalar.dtype == scalar.dtype\n assert np.isnan(output_scalar)\n\n if axis is None:\n assert_(len(sup.log) == 2)\n else:\n assert_(len(sup.log) == 4)\n\n def test_empty(self):\n mat = np.zeros((0, 3))\n for axis in [0, None]:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())\n assert_(len(w) == 1)\n assert_(issubclass(w[0].category, RuntimeWarning))\n for axis in [1]:\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter('always')\n assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))\n assert_(len(w) == 0)\n\n def test_scalar(self):\n assert_(np.nanmedian(0.) 
== 0.)\n\n def test_extended_axis_invalid(self):\n d = np.ones((3, 5, 7, 11))\n assert_raises(AxisError, np.nanmedian, d, axis=-5)\n assert_raises(AxisError, np.nanmedian, d, axis=(0, -5))\n assert_raises(AxisError, np.nanmedian, d, axis=4)\n assert_raises(AxisError, np.nanmedian, d, axis=(0, 4))\n assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))\n\n def test_float_special(self):\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning)\n for inf in [np.inf, -np.inf]:\n a = np.array([[inf, np.nan], [np.nan, np.nan]])\n assert_equal(np.nanmedian(a, axis=0), [inf, np.nan])\n assert_equal(np.nanmedian(a, axis=1), [inf, np.nan])\n assert_equal(np.nanmedian(a), inf)\n\n # minimum fill value check\n a = np.array([[np.nan, np.nan, inf],\n [np.nan, np.nan, inf]])\n assert_equal(np.nanmedian(a), inf)\n assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf])\n assert_equal(np.nanmedian(a, axis=1), inf)\n\n # no mask path\n a = np.array([[inf, inf], [inf, inf]])\n assert_equal(np.nanmedian(a, axis=1), inf)\n\n a = np.array([[inf, 7, -inf, -9],\n [-10, np.nan, np.nan, 5],\n [4, np.nan, np.nan, inf]],\n dtype=np.float32)\n if inf > 0:\n assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.])\n assert_equal(np.nanmedian(a), 4.5)\n else:\n assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.])\n assert_equal(np.nanmedian(a), -2.5)\n assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf])\n\n for i in range(10):\n for j in range(1, 10):\n a = np.array([([np.nan] * i) + ([inf] * j)] * 2)\n assert_equal(np.nanmedian(a), inf)\n assert_equal(np.nanmedian(a, axis=1), inf)\n assert_equal(np.nanmedian(a, axis=0),\n ([np.nan] * i) + [inf] * j)\n\n a = np.array([([np.nan] * i) + ([-inf] * j)] * 2)\n assert_equal(np.nanmedian(a), -inf)\n assert_equal(np.nanmedian(a, axis=1), -inf)\n assert_equal(np.nanmedian(a, axis=0),\n ([np.nan] * i) + [-inf] * j)\n\n\nclass TestNanFunctions_Percentile:\n\n def test_mutation(self):\n # Check that passed array is not modified.\n 
ndat = _ndat.copy()\n np.nanpercentile(ndat, 30)\n assert_equal(ndat, _ndat)\n\n def test_keepdims(self):\n mat = np.eye(3)\n for axis in [None, 0, 1]:\n tgt = np.percentile(mat, 70, axis=axis, out=None,\n overwrite_input=False)\n res = np.nanpercentile(mat, 70, axis=axis, out=None,\n overwrite_input=False)\n assert_(res.ndim == tgt.ndim)\n\n d = np.ones((3, 5, 7, 11))\n # Randomly set some elements to NaN:\n w = np.random.random((4, 200)) * np.array(d.shape)[:, None]\n w = w.astype(np.intp)\n d[tuple(w)] = np.nan\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning)\n res = np.nanpercentile(d, 90, axis=None, keepdims=True)\n assert_equal(res.shape, (1, 1, 1, 1))\n res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)\n assert_equal(res.shape, (1, 1, 7, 11))\n res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)\n assert_equal(res.shape, (1, 5, 7, 1))\n res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)\n assert_equal(res.shape, (3, 1, 7, 11))\n res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)\n assert_equal(res.shape, (1, 1, 1, 1))\n res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)\n assert_equal(res.shape, (1, 1, 7, 1))\n\n @pytest.mark.parametrize('q', [7, [1, 7]])\n @pytest.mark.parametrize(\n argnames='axis',\n argvalues=[\n None,\n 1,\n (1,),\n (0, 1),\n (-3, -1),\n ]\n )\n @pytest.mark.filterwarnings("ignore:All-NaN slice:RuntimeWarning")\n def test_keepdims_out(self, q, axis):\n d = np.ones((3, 5, 7, 11))\n # Randomly set some elements to NaN:\n w = np.random.random((4, 200)) * np.array(d.shape)[:, None]\n w = w.astype(np.intp)\n d[tuple(w)] = np.nan\n if axis is None:\n shape_out = (1,) * d.ndim\n else:\n axis_norm = normalize_axis_tuple(axis, d.ndim)\n shape_out = tuple(\n 1 if i in axis_norm else d.shape[i] for i in range(d.ndim))\n shape_out = np.shape(q) + shape_out\n\n out = np.empty(shape_out)\n result = np.nanpercentile(d, q, axis=axis, keepdims=True, out=out)\n assert result is out\n 
        assert_equal(result.shape, shape_out)

    @pytest.mark.parametrize("weighted", [False, True])
    def test_out(self, weighted):
        # The array passed via `out=` must receive the result, and agree
        # with np.percentile on the NaN-free data.
        mat = np.random.rand(3, 3)
        nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
        resout = np.zeros(3)
        if weighted:
            # weights currently require method="inverted_cdf"
            w_args = {"weights": np.ones_like(mat), "method": "inverted_cdf"}
            nan_w_args = {
                "weights": np.ones_like(nan_mat), "method": "inverted_cdf"
            }
        else:
            w_args = {}
            nan_w_args = {}
        tgt = np.percentile(mat, 42, axis=1, **w_args)
        res = np.nanpercentile(nan_mat, 42, axis=1, out=resout, **nan_w_args)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        # 0-d output:
        resout = np.zeros(())
        tgt = np.percentile(mat, 42, axis=None, **w_args)
        res = np.nanpercentile(
            nan_mat, 42, axis=None, out=resout, **nan_w_args
        )
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        res = np.nanpercentile(
            nan_mat, 42, axis=(0, 1), out=resout, **nan_w_args
        )
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)

    def test_complex(self):
        # Complex input is rejected for every complex dtype.
        arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G')
        assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
        arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D')
        assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)
        arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F')
        assert_raises(TypeError, np.nanpercentile, arr_c, 0.5)

    @pytest.mark.parametrize("weighted", [False, True])
    @pytest.mark.parametrize("use_out", [False, True])
    def test_result_values(self, weighted, use_out):
        # nanpercentile on rows with NaNs must match percentile on the same
        # rows with the NaNs removed (_rdat is the NaN-stripped _ndat).
        if weighted:
            percentile = partial(np.percentile, method="inverted_cdf")
            nanpercentile = partial(np.nanpercentile, method="inverted_cdf")

            def gen_weights(d):
                return np.ones_like(d)

        else:
            percentile = np.percentile
            nanpercentile = np.nanpercentile

            def gen_weights(d):
                return None

        tgt = [percentile(d, 28, weights=gen_weights(d)) for d in _rdat]
        out = np.empty_like(tgt) if use_out else None
        res = nanpercentile(_ndat, 28, axis=1,
                            weights=gen_weights(_ndat), out=out)
        assert_almost_equal(res, tgt)
        # Transpose the array to fit the output convention of numpy.percentile
        tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d))
                            for d in _rdat])
        out = np.empty_like(tgt) if use_out else None
        res = nanpercentile(_ndat, (28, 98), axis=1,
                            weights=gen_weights(_ndat), out=out)
        assert_almost_equal(res, tgt)

    @pytest.mark.parametrize("axis", [None, 0, 1])
    @pytest.mark.parametrize("dtype", np.typecodes["Float"])
    @pytest.mark.parametrize("array", [
        np.array(np.nan),
        np.full((3, 3), np.nan),
    ], ids=["0d", "2d"])
    def test_allnans(self, axis, dtype, array):
        # All-NaN input: a RuntimeWarning is emitted and the result is NaN
        # with the input's dtype preserved.
        if axis is not None and array.ndim == 0:
            pytest.skip("`axis != None` not supported for 0d arrays")

        array = array.astype(dtype)
        with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"):
            out = np.nanpercentile(array, 60, axis=axis)
        assert np.isnan(out).all()
        assert out.dtype == array.dtype

    def test_empty(self):
        # Reducing over an empty extent warns once; reducing over the
        # non-empty axis of a (0, 3) array yields an empty result silently.
        mat = np.zeros((0, 3))
        for axis in [0, None]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
                assert_(len(w) == 1)
                assert_(issubclass(w[0].category, RuntimeWarning))
        for axis in [1]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
                assert_(len(w) == 0)

    def test_scalar(self):
        # Scalar input gives a scalar result, not a 0-d array.
        assert_equal(np.nanpercentile(0., 100), 0.)
        a = np.arange(6)
        r = np.nanpercentile(a, 50, axis=0)
        assert_equal(r, 2.5)
        assert_(np.isscalar(r))

    def test_extended_axis_invalid(self):
        # Out-of-bounds and duplicated axes must raise.
        d = np.ones((3, 5, 7, 11))
        assert_raises(AxisError, np.nanpercentile, d, q=5, axis=-5)
        assert_raises(AxisError, np.nanpercentile, d, q=5, axis=(0, -5))
        assert_raises(AxisError, np.nanpercentile, d, q=5, axis=4)
        assert_raises(AxisError, np.nanpercentile, d,
                      q=5, axis=(0, 4))
        assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))

    def test_multiple_percentiles(self):
        # A list of q values must produce the same output shape/values as
        # np.percentile, for every axis/keepdims combination.
        perc = [50, 100]
        mat = np.ones((4, 3))
        nan_mat = np.nan * mat
        # For checking consistency in higher dimensional case
        large_mat = np.ones((3, 4, 5))
        large_mat[:, 0:2:4, :] = 0
        large_mat[:, :, 3:] *= 2
        for axis in [None, 0, 1]:
            for keepdim in [False, True]:
                with suppress_warnings() as sup:
                    sup.filter(RuntimeWarning, "All-NaN slice encountered")
                    val = np.percentile(mat, perc, axis=axis, keepdims=keepdim)
                    nan_val = np.nanpercentile(nan_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val.shape, val.shape)

                    val = np.percentile(large_mat, perc, axis=axis,
                                        keepdims=keepdim)
                    nan_val = np.nanpercentile(large_mat, perc, axis=axis,
                                               keepdims=keepdim)
                    assert_equal(nan_val, val)

        megamat = np.ones((3, 4, 5, 6))
        assert_equal(
            np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6)
        )

    @pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200])
    def test_nan_value_with_weight(self, nan_weight):
        x = [1, np.nan, 2, 3]
        result = np.float64(2.0)
        q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf")
        assert_equal(q_unweighted, result)

        # The weight value at the nan position should not matter.
        w = [1.0, nan_weight, 1.0, 1.0]
        q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf")
        assert_equal(q_weighted, result)

    @pytest.mark.parametrize("axis", [0, 1, 2])
    def test_nan_value_with_weight_ndim(self, axis):
        # Create a multi-dimensional array to test
        np.random.seed(1)
        x_no_nan = np.random.random(size=(100, 99, 2))
        # Set some places to NaN (not particularly smart) so there is always
        # some non-Nan.
        x = x_no_nan.copy()
        x[np.arange(99), np.arange(99), 0] = np.nan

        p = np.array([[20., 50., 30], [70, 33, 80]])

        # We just use ones as weights, but replace it with 0 or 1e200 at the
        # NaN positions below.
        weights = np.ones_like(x)

        # For
        # comparison use weighted normal percentile with nan weights at
        # 0 (and no NaNs); not sure this is strictly identical but should be
        # sufficiently so (if a percentile lies exactly on a 0 value).
        weights[np.isnan(x)] = 0
        p_expected = np.percentile(
            x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf")

        p_unweighted = np.nanpercentile(
            x, p, axis=axis, method="inverted_cdf")
        # The normal and unweighted versions should be identical:
        assert_equal(p_unweighted, p_expected)

        weights[np.isnan(x)] = 1e200  # huge value, shouldn't matter
        p_weighted = np.nanpercentile(
            x, p, axis=axis, weights=weights, method="inverted_cdf")
        assert_equal(p_weighted, p_expected)
        # Also check with out passed:
        out = np.empty_like(p_weighted)
        res = np.nanpercentile(
            x, p, axis=axis, weights=weights, out=out, method="inverted_cdf")

        assert res is out
        assert_equal(out, p_expected)


class TestNanFunctions_Quantile:
    # most of this is already tested by TestPercentile

    @pytest.mark.parametrize("weighted", [False, True])
    def test_regression(self, weighted):
        # nanquantile(q) must agree exactly with nanpercentile(100 * q).
        ar = np.arange(24).reshape(2, 3, 4).astype(float)
        ar[0][1] = np.nan
        if weighted:
            # weights currently require method="inverted_cdf"
            w_args = {"weights": np.ones_like(ar), "method": "inverted_cdf"}
        else:
            w_args = {}

        assert_equal(np.nanquantile(ar, q=0.5, **w_args),
                     np.nanpercentile(ar, q=50, **w_args))
        assert_equal(np.nanquantile(ar, q=0.5, axis=0, **w_args),
                     np.nanpercentile(ar, q=50, axis=0, **w_args))
        assert_equal(np.nanquantile(ar, q=0.5, axis=1, **w_args),
                     np.nanpercentile(ar, q=50, axis=1, **w_args))
        assert_equal(np.nanquantile(ar, q=[0.5], axis=1, **w_args),
                     np.nanpercentile(ar, q=[50], axis=1, **w_args))
        assert_equal(np.nanquantile(ar, q=[0.25, 0.5, 0.75], axis=1, **w_args),
                     np.nanpercentile(ar, q=[25, 50, 75], axis=1, **w_args))

    def test_basic(self):
        x = np.arange(8) * 0.5
        assert_equal(np.nanquantile(x, 0), 0.)
        assert_equal(np.nanquantile(x, 1), 3.5)
        assert_equal(np.nanquantile(x, 0.5),
                     1.75)

    def test_complex(self):
        # Complex input is rejected for every complex dtype.
        arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G')
        assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
        arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D')
        assert_raises(TypeError, np.nanquantile, arr_c, 0.5)
        arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='F')
        assert_raises(TypeError, np.nanquantile, arr_c, 0.5)

    def test_no_p_overwrite(self):
        # this is worth retesting, because quantile does not make a copy
        p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
        p = p0.copy()
        np.nanquantile(np.arange(100.), p, method="midpoint")
        assert_array_equal(p, p0)

        # same check with a plain Python list of probabilities
        p0 = p0.tolist()
        p = p.tolist()
        np.nanquantile(np.arange(100.), p, method="midpoint")
        assert_array_equal(p, p0)

    @pytest.mark.parametrize("axis", [None, 0, 1])
    @pytest.mark.parametrize("dtype", np.typecodes["Float"])
    @pytest.mark.parametrize("array", [
        np.array(np.nan),
        np.full((3, 3), np.nan),
    ], ids=["0d", "2d"])
    def test_allnans(self, axis, dtype, array):
        # All-NaN input: a RuntimeWarning is emitted and the result is NaN
        # with the input's dtype preserved.
        if axis is not None and array.ndim == 0:
            pytest.skip("`axis != None` not supported for 0d arrays")

        array = array.astype(dtype)
        with pytest.warns(RuntimeWarning, match="All-NaN slice encountered"):
            out = np.nanquantile(array, 1, axis=axis)
        assert np.isnan(out).all()
        assert out.dtype == array.dtype


@pytest.mark.parametrize("arr, expected", [
    # array of floats with some nans
    (np.array([np.nan, 5.0, np.nan, np.inf]),
     np.array([False, True, False, True])),
    # int64 array that can't possibly have nans
    (np.array([1, 5, 7, 9], dtype=np.int64),
     True),
    # bool array that can't possibly have nans
    (np.array([False, True, False, True]),
     True),
    # 2-D complex array with nans
    (np.array([[np.nan, 5.0],
               [np.nan, np.inf]], dtype=np.complex64),
     np.array([[False, True],
               [False, True]])),
    ])
def test__nan_mask(arr, expected):
    # `expected` is an elementwise mask, or the literal True for dtypes
    # that cannot hold NaN at all.
    for out in [None, np.empty(arr.shape, dtype=np.bool)]:
        actual = _nan_mask(arr, out=out)
        assert_equal(actual, expected)
        # the above won't distinguish between True proper
        # and an array of True values; we want True proper
        # for types that can't possibly contain NaN
        if type(expected) is not np.ndarray:
            assert actual is True


def test__replace_nan():
    """ Test that _replace_nan returns the original array if there are no
    NaNs, not a copy.
    """
    # dtypes that cannot hold NaN: no mask, no copy
    for dtype in [np.bool, np.int32, np.int64]:
        arr = np.array([0, 1], dtype=dtype)
        result, mask = _replace_nan(arr, 0)
        assert mask is None
        # do not make a copy if there are no nans
        assert result is arr

    # float dtypes always get a mask (possibly all-False) and a copy
    for dtype in [np.float32, np.float64]:
        arr = np.array([0, 1], dtype=dtype)
        result, mask = _replace_nan(arr, 2)
        assert (mask == False).all()
        # mask is not None, so we make a copy
        assert result is not arr
        assert_equal(result, arr)

        arr_nan = np.array([0, 1, np.nan], dtype=dtype)
        result_nan, mask_nan = _replace_nan(arr_nan, 2)
        assert_equal(mask_nan, np.array([False, False, True]))
        assert result_nan is not arr_nan
        assert_equal(result_nan, np.array([0, 1, 2]))
        # the original input must be left untouched
        assert np.isnan(arr_nan[-1])


def test_memmap_takes_fast_route(tmpdir):
    # We want memory mapped arrays to take the fast route through nanmax,
    # which avoids creating a mask by using fmax.reduce (see gh-28721).
    # So we
    # check that on bad input, the error is from fmax (rather than maximum).
    a = np.arange(10., dtype=float)
    with open(tmpdir.join("data.bin"), "w+b") as fh:
        fh.write(a.tobytes())
        mm = np.memmap(fh, dtype=a.dtype, shape=a.shape)
        # A mismatched `out` shape makes the reduction fail; the error
        # message reveals which ufunc was used.
        with pytest.raises(ValueError, match="reduction operation fmax"):
            np.nanmax(mm, out=np.zeros(2))
        # For completeness, same for nanmin.
        with pytest.raises(ValueError, match="reduction operation fmin"):
            np.nanmin(mm, out=np.zeros(2))
from itertools import chain

import pytest

import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises


def test_packbits():
    # Copied from the docstring.
    a = [[[1, 0, 1], [0, 1, 0]],
         [[1, 1, 0], [0, 0, 1]]]
    # packbits accepts any boolean/integer dtype but rejects floats
    for dt in '?bBhHiIlLqQ':
        arr = np.array(a, dtype=dt)
        b = np.packbits(arr, axis=-1)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, np.array([[[160], [64]], [[192], [32]]]))

    assert_raises(TypeError, np.packbits, np.array(a, dtype=float))


def test_packbits_empty():
    # Packing any empty array flattens to an empty uint8 array.
    shapes = [
        (0,), (10, 20, 0), (10, 0, 20), (0, 10, 20), (20, 0, 0), (0, 20, 0),
        (0, 0, 20), (0, 0, 0),
    ]
    for dt in '?bBhHiIlLqQ':
        for shape in shapes:
            a = np.empty(shape, dtype=dt)
            b = np.packbits(a)
            assert_equal(b.dtype, np.uint8)
            assert_equal(b.shape, (0,))


def test_packbits_empty_with_axis():
    # Original shapes and lists of packed shapes for different axes.
    shapes = [
        ((0,), [(0,)]),
        ((10, 20, 0), [(2, 20, 0), (10, 3, 0), (10, 20, 0)]),
        ((10, 0, 20), [(2, 0, 20), (10, 0, 20), (10, 0, 3)]),
        ((0, 10, 20), [(0, 10, 20), (0, 2, 20), (0, 10, 3)]),
        ((20, 0, 0), [(3, 0, 0), (20, 0, 0), (20, 0, 0)]),
        ((0, 20, 0), [(0, 20, 0), (0, 3, 0), (0, 20, 0)]),
        ((0, 0, 20), [(0, 0, 20), (0, 0, 20), (0, 0, 3)]),
        ((0, 0, 0), [(0, 0, 0), (0, 0, 0), (0, 0, 0)]),
    ]
    for dt in '?bBhHiIlLqQ':
        for in_shape, out_shapes in shapes:
            for ax, out_shape in enumerate(out_shapes):
                a = np.empty(in_shape, dtype=dt)
                b = np.packbits(a, axis=ax)
                assert_equal(b.dtype, np.uint8)
                assert_equal(b.shape, out_shape)


@pytest.mark.parametrize('bitorder', ('little', 'big'))
def test_packbits_large(bitorder):
    # test data large enough for 16 byte vectorization
    a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
                  0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
                  1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0,
                  1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1,
                  1,
                  1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1,
                  1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1,
                  1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1,
                  0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1,
                  1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0,
                  1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1,
                  1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0,
                  0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1,
                  1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0,
                  1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
                  1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0])
    a = a.repeat(3)
    for dtype in '?bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        b = np.packbits(arr, axis=None, bitorder=bitorder)
        assert_equal(b.dtype, np.uint8)
        # expected bytes for big-endian bit order
        r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
             113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
             227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227, 128, 63,
             224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56, 224, 112,
             63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255, 199, 0, 1,
             255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
             199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255,
             142, 56, 227,
             129, 248, 227, 129, 199, 31, 128]
        if bitorder == 'big':
            assert_array_equal(b, r)
        # equal for size being multiple of 8
        assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a)

        # check last byte of different remainders (16 byte vectorization)
        b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
        assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0,
                               199, 198, 196, 192])

        arr = arr.reshape(36, 25)
        b = np.packbits(arr, axis=0)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195,
                                199, 206, 204, 204, 140, 140, 136, 136, 8, 40,
                                105, 107, 75, 74, 88],
                               [72, 216, 248, 241, 227, 195, 202, 90, 90, 83,
                                83, 119, 127, 109, 73, 64, 208, 244, 189, 45,
                                41, 104, 122, 90, 18],
                               [113, 120, 248, 216, 152, 24, 60, 52, 182, 150,
                                150, 150, 146, 210, 210, 246, 255, 255, 223,
                                151, 21, 17, 17, 131, 163],
                               [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92,
                                92, 78, 110, 39, 181, 149, 220, 222, 218, 218,
                                202, 234, 170, 168],
                               [0, 128, 128, 192, 80, 112, 48, 160, 160, 224,
                                240, 208, 144, 128, 160, 224, 240, 208, 144,
                                144, 176, 240, 224, 192, 128]])

        b = np.packbits(arr, axis=1)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, [[252, 127, 192, 0],
                               [7, 252, 15, 128],
                               [240, 0, 28, 0],
                               [255, 128, 0, 128],
                               [192, 31, 255, 128],
                               [142, 63, 0, 0],
                               [255, 240, 7, 0],
                               [7, 224, 14, 0],
                               [126, 0, 224, 0],
                               [255, 255, 199, 0],
                               [56, 28, 126, 0],
                               [113, 248, 227, 128],
                               [227, 142, 63, 0],
                               [0, 28, 112, 0],
                               [15, 248, 3, 128],
                               [28, 126, 56, 0],
                               [56, 255, 241, 128],
                               [240, 7, 224, 0],
                               [227, 129, 192, 128],
                               [255, 255, 254, 0],
                               [126, 0, 224, 0],
                               [3, 241, 248, 0],
                               [0, 255, 241, 128],
                               [128, 0, 255, 128],
                               [224, 1, 255, 128],
                               [248, 252, 126, 0],
                               [0, 7, 3, 128],
                               [224, 113, 248, 0],
                               [0, 252, 127, 128],
                               [142, 63, 224, 0],
                               [224, 14, 63, 0],
                               [7, 3, 128, 0],
                               [113, 255, 255, 128],
                               [28, 113, 199, 0],
                               [7, 227, 142, 0],
                               [14, 56, 252, 0]])

        arr = arr.T.copy()
        b = np.packbits(arr, axis=0)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255,
                                56, 113, 227, 0, 15, 28, 56, 240, 227, 255,
                                126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224,
                                7, 113, 28, 7, 14],
                               [127, 252, 0, 128, 31, 63, 240, 224, 0, 255,
                                28, 248, 142, 28, 248, 126, 255, 7, 129, 255,
                                0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14,
                                3, 255, 113, 227, 56],
                               [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126,
                                227, 63, 112, 3, 56, 241, 224, 192, 254, 224,
                                248, 241, 255, 255, 126, 3, 248, 127, 224, 63,
                                128, 255, 199, 142, 252],
                               [0, 128, 0, 128, 128,
                                0, 0, 0, 0, 0, 0, 128, 0,
                                0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128,
                                128, 0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]])

        b = np.packbits(arr, axis=1)
        assert_equal(b.dtype, np.uint8)
        assert_array_equal(b, [[190, 72, 113, 214, 0],
                               [186, 216, 120, 210, 128],
                               [178, 248, 248, 210, 128],
                               [178, 241, 216, 64, 192],
                               [150, 227, 152, 68, 80],
                               [215, 195, 24, 5, 112],
                               [87, 202, 60, 5, 48],
                               [83, 90, 52, 1, 160],
                               [83, 90, 182, 72, 160],
                               [195, 83, 150, 88, 224],
                               [199, 83, 150, 92, 240],
                               [206, 119, 150, 92, 208],
                               [204, 127, 146, 78, 144],
                               [204, 109, 210, 110, 128],
                               [140, 73, 210, 39, 160],
                               [140, 64, 246, 181, 224],
                               [136, 208, 255, 149, 240],
                               [136, 244, 255, 220, 208],
                               [8, 189, 223, 222, 144],
                               [40, 45, 151, 218, 144],
                               [105, 41, 21, 218, 176],
                               [107, 104, 17, 202, 240],
                               [75, 122, 17, 234, 224],
                               [74, 90, 131, 170, 192],
                               [88, 18, 163, 168, 128]])

    # result is the same if input is multiplied with a nonzero value
    for dtype in 'bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        rnd = np.random.randint(low=np.iinfo(dtype).min,
                                high=np.iinfo(dtype).max, size=arr.size,
                                dtype=dtype)
        rnd[rnd == 0] = 1
        arr *= rnd.astype(dtype)
        b = np.packbits(arr, axis=-1)
        assert_array_equal(np.unpackbits(b)[:-4], a)

    assert_raises(TypeError, np.packbits, np.array(a, dtype=float))


def test_packbits_very_large():
    # test some with a larger arrays gh-8637
    # code is covered earlier but larger array makes crash on bug more likely
    for s in range(950, 1050):
        for dt in '?bBhHiIlLqQ':
            x = np.ones((200, s), dtype=bool)
            np.packbits(x, axis=1)


def test_unpackbits():
    # Copied from the docstring.
    a = np.array([[2], [7], [23]], dtype=np.uint8)
    b = np.unpackbits(a, axis=1)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, np.array([[0, 0, 0, 0, 0, 0, 1, 0],
                                    [0, 0, 0, 0, 0, 1, 1, 1],
                                    [0, 0, 0, 1, 0, 1, 1, 1]]))


def test_pack_unpack_order():
    # 'big' is the default bit order; 'little' reverses bits within bytes.
    a = np.array([[2], [7], [23]], dtype=np.uint8)
    b = np.unpackbits(a, axis=1)
    assert_equal(b.dtype, np.uint8)
    b_little = np.unpackbits(a, axis=1, bitorder='little')
    b_big = np.unpackbits(a, axis=1, bitorder='big')
    assert_array_equal(b, b_big)
    assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
    # little-endian output is the per-byte bit reversal of big-endian
    assert_array_equal(b[:, ::-1], b_little)
    assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
    # invalid bitorder values must raise
    assert_raises(ValueError, np.unpackbits, a, bitorder='r')
    assert_raises(TypeError, np.unpackbits, a, bitorder=10)


def test_unpackbits_empty():
    a = np.empty((0,), dtype=np.uint8)
    b = np.unpackbits(a)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, np.empty((0,)))


def test_unpackbits_empty_with_axis():
    # Lists of packed shapes for different axes and unpacked shapes.
    shapes = [
        ([(0,)], (0,)),
        ([(2, 24, 0), (16, 3, 0), (16, 24, 0)], (16, 24, 0)),
        ([(2, 0, 24), (16, 0, 24), (16, 0, 3)], (16, 0, 24)),
        ([(0, 16, 24), (0, 2, 24), (0, 16, 3)], (0, 16, 24)),
        ([(3, 0, 0), (24, 0, 0), (24, 0, 0)], (24, 0, 0)),
        ([(0, 24, 0), (0, 3, 0), (0, 24, 0)], (0, 24, 0)),
        ([(0, 0, 24), (0, 0, 24), (0, 0, 3)], (0, 0, 24)),
        ([(0, 0, 0), (0, 0, 0), (0, 0, 0)], (0, 0, 0)),
    ]
    for in_shapes, out_shape in shapes:
        for ax, in_shape in enumerate(in_shapes):
            a = np.empty(in_shape, dtype=np.uint8)
            b = np.unpackbits(a, axis=ax)
            assert_equal(b.dtype, np.uint8)
            assert_equal(b.shape, out_shape)


def test_unpackbits_large():
    # test all possible numbers via comparison to already tested packbits
    d = np.arange(277, dtype=np.uint8)
    assert_array_equal(np.packbits(np.unpackbits(d)), d)
    assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2])
    d = np.tile(d, (3, 1))
    assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
    d = d.T.copy()
    assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)


class TestCount:
    # 7x7 bit pattern shared by all `count=` tests below
    x = np.array([
        [1, 0, 1, 0, 0, 1, 0],
        [0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 1],
        [1, 1, 0, 0, 0, 1,
         1],
        [1, 0, 1, 0, 1, 0, 1],
        [0, 0, 1, 1, 1, 0, 0],
        [0, 1, 0, 1, 0, 1, 0],
    ], dtype=np.uint8)
    # x flattened and zero-padded to 57 entries (packbits pads to whole bytes)
    padded1 = np.zeros(57, dtype=np.uint8)
    padded1[:49] = x.ravel()
    padded1b = np.zeros(57, dtype=np.uint8)
    padded1b[:49] = x[::-1].copy().ravel()
    # x embedded in a zero 9x9 frame for the axis-wise tests
    padded2 = np.zeros((9, 9), dtype=np.uint8)
    padded2[:7, :7] = x

    @pytest.mark.parametrize('bitorder', ('little', 'big'))
    @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1)))
    def test_roundtrip(self, bitorder, count):
        if count < 0:
            # one extra zero of padding
            cutoff = count - 1
        else:
            cutoff = count
        # test complete invertibility of packbits and unpackbits with count
        packed = np.packbits(self.x, bitorder=bitorder)
        unpacked = np.unpackbits(packed, count=count, bitorder=bitorder)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, self.padded1[:cutoff])

    @pytest.mark.parametrize('kwargs', [
        {}, {'count': None},
    ])
    def test_count(self, kwargs):
        # Omitted or None count unpacks everything including padding bits.
        packed = np.packbits(self.x)
        unpacked = np.unpackbits(packed, **kwargs)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, self.padded1[:-1])

    @pytest.mark.parametrize('bitorder', ('little', 'big'))
    # delta==-1 when count<0 because one extra zero of padding
    @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1)))
    def test_roundtrip_axis(self, bitorder, count):
        if count < 0:
            # one extra zero of padding
            cutoff = count - 1
        else:
            cutoff = count
        packed0 = np.packbits(self.x, axis=0, bitorder=bitorder)
        unpacked0 = np.unpackbits(packed0, axis=0, count=count,
                                  bitorder=bitorder)
        assert_equal(unpacked0.dtype, np.uint8)
        assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]])

        packed1 = np.packbits(self.x, axis=1, bitorder=bitorder)
        unpacked1 = np.unpackbits(packed1, axis=1, count=count,
                                  bitorder=bitorder)
        assert_equal(unpacked1.dtype, np.uint8)
        assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff])
@pytest.mark.parametrize('kwargs', [\n {}, {'count': None},\n {'bitorder': 'little'},\n {'bitorder': 'little', 'count': None},\n {'bitorder': 'big'},\n {'bitorder': 'big', 'count': None},\n ])\n def test_axis_count(self, kwargs):\n packed0 = np.packbits(self.x, axis=0)\n unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)\n assert_equal(unpacked0.dtype, np.uint8)\n if kwargs.get('bitorder', 'big') == 'big':\n assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]])\n else:\n assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]])\n\n packed1 = np.packbits(self.x, axis=1)\n unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)\n assert_equal(unpacked1.dtype, np.uint8)\n if kwargs.get('bitorder', 'big') == 'big':\n assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1])\n else:\n assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1])\n\n def test_bad_count(self):\n packed0 = np.packbits(self.x, axis=0)\n assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)\n packed1 = np.packbits(self.x, axis=1)\n assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)\n packed = np.packbits(self.x)\n assert_raises(ValueError, np.unpackbits, packed, count=-57)\n | .venv\Lib\site-packages\numpy\lib\tests\test_packbits.py | test_packbits.py | Python | 17,919 | 0.95 | 0.103723 | 0.044248 | python-kit | 246 | 2024-05-01T05:45:24.369659 | BSD-3-Clause | true | f524dcfd0676b9c290fb758701eee985 |
import pytest\n\nimport numpy as np\nimport numpy.polynomial.polynomial as poly\nfrom numpy.testing import (\n assert_,\n assert_allclose,\n assert_almost_equal,\n assert_array_almost_equal,\n assert_array_equal,\n assert_equal,\n assert_raises,\n)\n\n# `poly1d` has some support for `np.bool` and `np.timedelta64`,\n# but it is limited and they are therefore excluded here\nTYPE_CODES = np.typecodes["AllInteger"] + np.typecodes["AllFloat"] + "O"\n\n\nclass TestPolynomial:\n def test_poly1d_str_and_repr(self):\n p = np.poly1d([1., 2, 3])\n assert_equal(repr(p), 'poly1d([1., 2., 3.])')\n assert_equal(str(p),\n ' 2\n'\n '1 x + 2 x + 3')\n\n q = np.poly1d([3., 2, 1])\n assert_equal(repr(q), 'poly1d([3., 2., 1.])')\n assert_equal(str(q),\n ' 2\n'\n '3 x + 2 x + 1')\n\n r = np.poly1d([1.89999 + 2j, -3j, -5.12345678, 2 + 1j])\n assert_equal(str(r),\n ' 3 2\n'\n '(1.9 + 2j) x - 3j x - 5.123 x + (2 + 1j)')\n\n assert_equal(str(np.poly1d([-3, -2, -1])),\n ' 2\n'\n '-3 x - 2 x - 1')\n\n def test_poly1d_resolution(self):\n p = np.poly1d([1., 2, 3])\n q = np.poly1d([3., 2, 1])\n assert_equal(p(0), 3.0)\n assert_equal(p(5), 38.0)\n assert_equal(q(0), 1.0)\n assert_equal(q(5), 86.0)\n\n def test_poly1d_math(self):\n # here we use some simple coeffs to make calculations easier\n p = np.poly1d([1., 2, 4])\n q = np.poly1d([4., 2, 1])\n assert_equal(p / q, (np.poly1d([0.25]), np.poly1d([1.5, 3.75])))\n assert_equal(p.integ(), np.poly1d([1 / 3, 1., 4., 0.]))\n assert_equal(p.integ(1), np.poly1d([1 / 3, 1., 4., 0.]))\n\n p = np.poly1d([1., 2, 3])\n q = np.poly1d([3., 2, 1])\n assert_equal(p * q, np.poly1d([3., 8., 14., 8., 3.]))\n assert_equal(p + q, np.poly1d([4., 4., 4.]))\n assert_equal(p - q, np.poly1d([-2., 0., 2.]))\n assert_equal(p ** 4, np.poly1d([1., 8., 36., 104., 214., 312., 324., 216., 81.]))\n assert_equal(p(q), np.poly1d([9., 12., 16., 8., 6.]))\n assert_equal(q(p), np.poly1d([3., 12., 32., 40., 34.]))\n assert_equal(p.deriv(), np.poly1d([2., 2.]))\n 
assert_equal(p.deriv(2), np.poly1d([2.]))\n assert_equal(np.polydiv(np.poly1d([1, 0, -1]), np.poly1d([1, 1])),\n (np.poly1d([1., -1.]), np.poly1d([0.])))\n\n @pytest.mark.parametrize("type_code", TYPE_CODES)\n def test_poly1d_misc(self, type_code: str) -> None:\n dtype = np.dtype(type_code)\n ar = np.array([1, 2, 3], dtype=dtype)\n p = np.poly1d(ar)\n\n # `__eq__`\n assert_equal(np.asarray(p), ar)\n assert_equal(np.asarray(p).dtype, dtype)\n assert_equal(len(p), 2)\n\n # `__getitem__`\n comparison_dct = {-1: 0, 0: 3, 1: 2, 2: 1, 3: 0}\n for index, ref in comparison_dct.items():\n scalar = p[index]\n assert_equal(scalar, ref)\n if dtype == np.object_:\n assert isinstance(scalar, int)\n else:\n assert_equal(scalar.dtype, dtype)\n\n def test_poly1d_variable_arg(self):\n q = np.poly1d([1., 2, 3], variable='y')\n assert_equal(str(q),\n ' 2\n'\n '1 y + 2 y + 3')\n q = np.poly1d([1., 2, 3], variable='lambda')\n assert_equal(str(q),\n ' 2\n'\n '1 lambda + 2 lambda + 3')\n\n def test_poly(self):\n assert_array_almost_equal(np.poly([3, -np.sqrt(2), np.sqrt(2)]),\n [1, -3, -2, 6])\n\n # From matlab docs\n A = [[1, 2, 3], [4, 5, 6], [7, 8, 0]]\n assert_array_almost_equal(np.poly(A), [1, -6, -72, -27])\n\n # Should produce real output for perfect conjugates\n assert_(np.isrealobj(np.poly([+1.082j, +2.613j, -2.613j, -1.082j])))\n assert_(np.isrealobj(np.poly([0 + 1j, -0 + -1j, 1 + 2j,\n 1 - 2j, 1. 
+ 3.5j, 1 - 3.5j])))\n assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j, 1 + 3j, 1 - 3.j])))\n assert_(np.isrealobj(np.poly([1j, -1j, 1 + 2j, 1 - 2j])))\n assert_(np.isrealobj(np.poly([1j, -1j, 2j, -2j])))\n assert_(np.isrealobj(np.poly([1j, -1j])))\n assert_(np.isrealobj(np.poly([1, -1])))\n\n assert_(np.iscomplexobj(np.poly([1j, -1.0000001j])))\n\n np.random.seed(42)\n a = np.random.randn(100) + 1j * np.random.randn(100)\n assert_(np.isrealobj(np.poly(np.concatenate((a, np.conjugate(a))))))\n\n def test_roots(self):\n assert_array_equal(np.roots([1, 0, 0]), [0, 0])\n\n # Testing for larger root values\n for i in np.logspace(10, 25, num=1000, base=10):\n tgt = np.array([-1, 1, i])\n res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))\n assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error\n\n for i in np.logspace(10, 25, num=1000, base=10):\n tgt = np.array([-1, 1.01, i])\n res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1]))\n assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error\n\n def test_str_leading_zeros(self):\n p = np.poly1d([4, 3, 2, 1])\n p[3] = 0\n assert_equal(str(p),\n " 2\n"\n "3 x + 2 x + 1")\n\n p = np.poly1d([1, 2])\n p[0] = 0\n p[1] = 0\n assert_equal(str(p), " \n0")\n\n def test_polyfit(self):\n c = np.array([3., 2., 1.])\n x = np.linspace(0, 2, 7)\n y = np.polyval(c, x)\n err = [1, -1, 1, -1, 1, -1, 1]\n weights = np.arange(8, 1, -1)**2 / 7.0\n\n # Check exception when too few points for variance estimate. 
Note that\n # the estimate requires the number of data points to exceed\n # degree + 1\n assert_raises(ValueError, np.polyfit,\n [1], [1], deg=0, cov=True)\n\n # check 1D case\n m, cov = np.polyfit(x, y + err, 2, cov=True)\n est = [3.8571, 0.2857, 1.619]\n assert_almost_equal(est, m, decimal=4)\n val0 = [[ 1.4694, -2.9388, 0.8163],\n [-2.9388, 6.3673, -2.1224],\n [ 0.8163, -2.1224, 1.161 ]] # noqa: E202\n assert_almost_equal(val0, cov, decimal=4)\n\n m2, cov2 = np.polyfit(x, y + err, 2, w=weights, cov=True)\n assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)\n val = [[ 4.3964, -5.0052, 0.4878],\n [-5.0052, 6.8067, -0.9089],\n [ 0.4878, -0.9089, 0.3337]]\n assert_almost_equal(val, cov2, decimal=4)\n\n m3, cov3 = np.polyfit(x, y + err, 2, w=weights, cov="unscaled")\n assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)\n val = [[ 0.1473, -0.1677, 0.0163],\n [-0.1677, 0.228 , -0.0304], # noqa: E203\n [ 0.0163, -0.0304, 0.0112]]\n assert_almost_equal(val, cov3, decimal=4)\n\n # check 2D (n,1) case\n y = y[:, np.newaxis]\n c = c[:, np.newaxis]\n assert_almost_equal(c, np.polyfit(x, y, 2))\n # check 2D (n,2) case\n yy = np.concatenate((y, y), axis=1)\n cc = np.concatenate((c, c), axis=1)\n assert_almost_equal(cc, np.polyfit(x, yy, 2))\n\n m, cov = np.polyfit(x, yy + np.array(err)[:, np.newaxis], 2, cov=True)\n assert_almost_equal(est, m[:, 0], decimal=4)\n assert_almost_equal(est, m[:, 1], decimal=4)\n assert_almost_equal(val0, cov[:, :, 0], decimal=4)\n assert_almost_equal(val0, cov[:, :, 1], decimal=4)\n\n # check order 1 (deg=0) case, were the analytic results are simple\n np.random.seed(123)\n y = np.random.normal(size=(4, 10000))\n mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)\n # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.\n assert_allclose(mean.std(), 0.5, atol=0.01)\n assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)\n # Without scaling, since reduced chi2 is 1, the result should be the same.\n mean, cov = 
np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]),\n deg=0, cov="unscaled")\n assert_allclose(mean.std(), 0.5, atol=0.01)\n assert_almost_equal(np.sqrt(cov.mean()), 0.5)\n # If we estimate our errors wrong, no change with scaling:\n w = np.full(y.shape[0], 1. / 0.5)\n mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True)\n assert_allclose(mean.std(), 0.5, atol=0.01)\n assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)\n # But if we do not scale, our estimate for the error in the mean will\n # differ.\n mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled")\n assert_allclose(mean.std(), 0.5, atol=0.01)\n assert_almost_equal(np.sqrt(cov.mean()), 0.25)\n\n def test_objects(self):\n from decimal import Decimal\n p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])\n p2 = p * Decimal('1.333333333333333')\n assert_(p2[1] == Decimal("3.9999999999999990"))\n p2 = p.deriv()\n assert_(p2[1] == Decimal('8.0'))\n p2 = p.integ()\n assert_(p2[3] == Decimal("1.333333333333333333333333333"))\n assert_(p2[2] == Decimal('1.5'))\n assert_(np.issubdtype(p2.coeffs.dtype, np.object_))\n p = np.poly([Decimal(1), Decimal(2)])\n assert_equal(np.poly([Decimal(1), Decimal(2)]),\n [1, Decimal(-3), Decimal(2)])\n\n def test_complex(self):\n p = np.poly1d([3j, 2j, 1j])\n p2 = p.integ()\n assert_((p2.coeffs == [1j, 1j, 1j, 0]).all())\n p2 = p.deriv()\n assert_((p2.coeffs == [6j, 2j]).all())\n\n def test_integ_coeffs(self):\n p = np.poly1d([3, 2, 1])\n p2 = p.integ(3, k=[9, 7, 6])\n assert_(\n (p2.coeffs == [1 / 4. / 5., 1 / 3. / 4., 1 / 2. / 3., 9 / 1. 
/ 2., 7, 6]).all())\n\n def test_zero_dims(self):\n try:\n np.poly(np.zeros((0, 0)))\n except ValueError:\n pass\n\n def test_poly_int_overflow(self):\n """\n Regression test for gh-5096.\n """\n v = np.arange(1, 21)\n assert_almost_equal(np.poly(v), np.poly(np.diag(v)))\n\n def test_zero_poly_dtype(self):\n """\n Regression test for gh-16354.\n """\n z = np.array([0, 0, 0])\n p = np.poly1d(z.astype(np.int64))\n assert_equal(p.coeffs.dtype, np.int64)\n\n p = np.poly1d(z.astype(np.float32))\n assert_equal(p.coeffs.dtype, np.float32)\n\n p = np.poly1d(z.astype(np.complex64))\n assert_equal(p.coeffs.dtype, np.complex64)\n\n def test_poly_eq(self):\n p = np.poly1d([1, 2, 3])\n p2 = np.poly1d([1, 2, 4])\n assert_equal(p == None, False) # noqa: E711\n assert_equal(p != None, True) # noqa: E711\n assert_equal(p == p, True)\n assert_equal(p == p2, False)\n assert_equal(p != p2, True)\n\n def test_polydiv(self):\n b = np.poly1d([2, 6, 6, 1])\n a = np.poly1d([-1j, (1 + 2j), -(2 + 1j), 1])\n q, r = np.polydiv(b, a)\n assert_equal(q.coeffs.dtype, np.complex128)\n assert_equal(r.coeffs.dtype, np.complex128)\n assert_equal(q * a + r, b)\n\n c = [1, 2, 3]\n d = np.poly1d([1, 2, 3])\n s, t = np.polydiv(c, d)\n assert isinstance(s, np.poly1d)\n assert isinstance(t, np.poly1d)\n u, v = np.polydiv(d, c)\n assert isinstance(u, np.poly1d)\n assert isinstance(v, np.poly1d)\n\n def test_poly_coeffs_mutable(self):\n """ Coefficients should be modifiable """\n p = np.poly1d([1, 2, 3])\n\n p.coeffs += 1\n assert_equal(p.coeffs, [2, 3, 4])\n\n p.coeffs[2] += 10\n assert_equal(p.coeffs, [2, 3, 14])\n\n # this never used to be allowed - let's not add features to deprecated\n # APIs\n assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1))\n | .venv\Lib\site-packages\numpy\lib\tests\test_polynomial.py | test_polynomial.py | Python | 12,632 | 0.95 | 0.1 | 0.080586 | react-lib | 334 | 2024-02-06T07:29:45.497459 | MIT | true | 0bed5a22954f07d6c17268e8ef20a87c |
\nimport numpy as np\nimport numpy.ma as ma\nfrom numpy.lib.recfunctions import (\n append_fields,\n apply_along_fields,\n assign_fields_by_name,\n drop_fields,\n find_duplicates,\n get_fieldstructure,\n join_by,\n merge_arrays,\n recursive_fill_fields,\n rename_fields,\n repack_fields,\n require_fields,\n stack_arrays,\n structured_to_unstructured,\n unstructured_to_structured,\n)\nfrom numpy.ma.mrecords import MaskedRecords\nfrom numpy.ma.testutils import assert_equal\nfrom numpy.testing import assert_, assert_raises\n\nget_fieldspec = np.lib.recfunctions._get_fieldspec\nget_names = np.lib.recfunctions.get_names\nget_names_flat = np.lib.recfunctions.get_names_flat\nzip_descr = np.lib.recfunctions._zip_descr\nzip_dtype = np.lib.recfunctions._zip_dtype\n\n\nclass TestRecFunctions:\n # Misc tests\n\n def setup_method(self):\n x = np.array([1, 2, ])\n y = np.array([10, 20, 30])\n z = np.array([('A', 1.), ('B', 2.)],\n dtype=[('A', '|S3'), ('B', float)])\n w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])\n self.data = (w, x, y, z)\n\n def test_zip_descr(self):\n # Test zip_descr\n (w, x, y, z) = self.data\n\n # Std array\n test = zip_descr((x, x), flatten=True)\n assert_equal(test,\n np.dtype([('', int), ('', int)]))\n test = zip_descr((x, x), flatten=False)\n assert_equal(test,\n np.dtype([('', int), ('', int)]))\n\n # Std & flexible-dtype\n test = zip_descr((x, z), flatten=True)\n assert_equal(test,\n np.dtype([('', int), ('A', '|S3'), ('B', float)]))\n test = zip_descr((x, z), flatten=False)\n assert_equal(test,\n np.dtype([('', int),\n ('', [('A', '|S3'), ('B', float)])]))\n\n # Standard & nested dtype\n test = zip_descr((x, w), flatten=True)\n assert_equal(test,\n np.dtype([('', int),\n ('a', int),\n ('ba', float), ('bb', int)]))\n test = zip_descr((x, w), flatten=False)\n assert_equal(test,\n np.dtype([('', int),\n ('', [('a', int),\n ('b', [('ba', float), ('bb', int)])])]))\n\n def test_drop_fields(self):\n 
# Test drop_fields\n a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])\n\n # A basic field\n test = drop_fields(a, 'a')\n control = np.array([((2, 3.0),), ((5, 6.0),)],\n dtype=[('b', [('ba', float), ('bb', int)])])\n assert_equal(test, control)\n\n # Another basic field (but nesting two fields)\n test = drop_fields(a, 'b')\n control = np.array([(1,), (4,)], dtype=[('a', int)])\n assert_equal(test, control)\n\n # A nested sub-field\n test = drop_fields(a, ['ba', ])\n control = np.array([(1, (3.0,)), (4, (6.0,))],\n dtype=[('a', int), ('b', [('bb', int)])])\n assert_equal(test, control)\n\n # All the nested sub-field from a field: zap that field\n test = drop_fields(a, ['ba', 'bb'])\n control = np.array([(1,), (4,)], dtype=[('a', int)])\n assert_equal(test, control)\n\n # dropping all fields results in an array with no fields\n test = drop_fields(a, ['a', 'b'])\n control = np.array([(), ()], dtype=[])\n assert_equal(test, control)\n\n def test_rename_fields(self):\n # Test rename fields\n a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],\n dtype=[('a', int),\n ('b', [('ba', float), ('bb', (float, 2))])])\n test = rename_fields(a, {'a': 'A', 'bb': 'BB'})\n newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]\n control = a.view(newdtype)\n assert_equal(test.dtype, newdtype)\n assert_equal(test, control)\n\n def test_get_names(self):\n # Test get_names\n ndtype = np.dtype([('A', '|S3'), ('B', float)])\n test = get_names(ndtype)\n assert_equal(test, ('A', 'B'))\n\n ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])\n test = get_names(ndtype)\n assert_equal(test, ('a', ('b', ('ba', 'bb'))))\n\n ndtype = np.dtype([('a', int), ('b', [])])\n test = get_names(ndtype)\n assert_equal(test, ('a', ('b', ())))\n\n ndtype = np.dtype([])\n test = get_names(ndtype)\n assert_equal(test, ())\n\n def test_get_names_flat(self):\n # Test get_names_flat\n ndtype = np.dtype([('A', '|S3'), ('B', 
float)])\n test = get_names_flat(ndtype)\n assert_equal(test, ('A', 'B'))\n\n ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])\n test = get_names_flat(ndtype)\n assert_equal(test, ('a', 'b', 'ba', 'bb'))\n\n ndtype = np.dtype([('a', int), ('b', [])])\n test = get_names_flat(ndtype)\n assert_equal(test, ('a', 'b'))\n\n ndtype = np.dtype([])\n test = get_names_flat(ndtype)\n assert_equal(test, ())\n\n def test_get_fieldstructure(self):\n # Test get_fieldstructure\n\n # No nested fields\n ndtype = np.dtype([('A', '|S3'), ('B', float)])\n test = get_fieldstructure(ndtype)\n assert_equal(test, {'A': [], 'B': []})\n\n # One 1-nested field\n ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])\n test = get_fieldstructure(ndtype)\n assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})\n\n # One 2-nested fields\n ndtype = np.dtype([('A', int),\n ('B', [('BA', int),\n ('BB', [('BBA', int), ('BBB', int)])])])\n test = get_fieldstructure(ndtype)\n control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],\n 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}\n assert_equal(test, control)\n\n # 0 fields\n ndtype = np.dtype([])\n test = get_fieldstructure(ndtype)\n assert_equal(test, {})\n\n def test_find_duplicates(self):\n # Test find_duplicates\n a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),\n (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],\n mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),\n (0, (0, 0)), (1, (0, 0)), (0, (1, 0))],\n dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])\n\n test = find_duplicates(a, ignoremask=False, return_index=True)\n control = [0, 2]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, key='A', return_index=True)\n control = [0, 1, 2, 3, 5]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, key='B', return_index=True)\n control = [0, 1, 2, 4]\n assert_equal(sorted(test[-1]), 
control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, key='BA', return_index=True)\n control = [0, 1, 2, 4]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, key='BB', return_index=True)\n control = [0, 1, 2, 3, 4]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n def test_find_duplicates_ignoremask(self):\n # Test the ignoremask option of find_duplicates\n ndtype = [('a', int)]\n a = ma.array([1, 1, 1, 2, 2, 3, 3],\n mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)\n test = find_duplicates(a, ignoremask=True, return_index=True)\n control = [0, 1, 3, 4]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n test = find_duplicates(a, ignoremask=False, return_index=True)\n control = [0, 1, 2, 3, 4, 6]\n assert_equal(sorted(test[-1]), control)\n assert_equal(test[0], a[test[-1]])\n\n def test_repack_fields(self):\n dt = np.dtype('u1,f4,i8', align=True)\n a = np.zeros(2, dtype=dt)\n\n assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))\n assert_equal(repack_fields(a).itemsize, 13)\n assert_equal(repack_fields(repack_fields(dt), align=True), dt)\n\n # make sure type is preserved\n dt = np.dtype((np.record, dt))\n assert_(repack_fields(dt).type is np.record)\n\n def test_structured_to_unstructured(self, tmp_path):\n a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])\n out = structured_to_unstructured(a)\n assert_equal(out, np.zeros((4, 5), dtype='f8'))\n\n b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],\n dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])\n out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)\n assert_equal(out, np.array([3., 5.5, 9., 11.]))\n out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)\n assert_equal(out, np.array([1., 4. 
, 7., 10.])) # noqa: E203\n\n c = np.arange(20).reshape((4, 5))\n out = unstructured_to_structured(c, a.dtype)\n want = np.array([( 0, ( 1., 2), [ 3., 4.]),\n ( 5, ( 6., 7), [ 8., 9.]),\n (10, (11., 12), [13., 14.]),\n (15, (16., 17), [18., 19.])],\n dtype=[('a', 'i4'),\n ('b', [('f0', 'f4'), ('f1', 'u2')]),\n ('c', 'f4', (2,))])\n assert_equal(out, want)\n\n d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],\n dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])\n assert_equal(apply_along_fields(np.mean, d),\n np.array([ 8.0 / 3, 16.0 / 3, 26.0 / 3, 11.]))\n assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),\n np.array([ 3., 5.5, 9., 11.]))\n\n # check that for uniform field dtypes we get a view, not a copy:\n d = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],\n dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])\n dd = structured_to_unstructured(d)\n ddd = unstructured_to_structured(dd, d.dtype)\n assert_(np.shares_memory(dd, d))\n assert_(np.shares_memory(ddd, d))\n\n # check that reversing the order of attributes works\n dd_attrib_rev = structured_to_unstructured(d[['z', 'x']])\n assert_equal(dd_attrib_rev, [[5, 1], [7, 4], [11, 7], [12, 10]])\n assert_(np.shares_memory(dd_attrib_rev, d))\n\n # including uniform fields with subarrays unpacked\n d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),\n (8, [9, 10], [[11, 12], [13, 14]])],\n dtype=[('x0', 'i4'), ('x1', ('i4', 2)),\n ('x2', ('i4', (2, 2)))])\n dd = structured_to_unstructured(d)\n ddd = unstructured_to_structured(dd, d.dtype)\n assert_(np.shares_memory(dd, d))\n assert_(np.shares_memory(ddd, d))\n\n # check that reversing with sub-arrays works as expected\n d_rev = d[::-1]\n dd_rev = structured_to_unstructured(d_rev)\n assert_equal(dd_rev, [[8, 9, 10, 11, 12, 13, 14],\n [1, 2, 3, 4, 5, 6, 7]])\n\n # check that sub-arrays keep the order of their values\n d_attrib_rev = d[['x2', 'x1', 'x0']]\n dd_attrib_rev = structured_to_unstructured(d_attrib_rev)\n assert_equal(dd_attrib_rev, [[4, 5, 6, 
7, 2, 3, 1],\n [11, 12, 13, 14, 9, 10, 8]])\n\n # with ignored field at the end\n d = np.array([(1, [2, 3], [[4, 5], [6, 7]], 32),\n (8, [9, 10], [[11, 12], [13, 14]], 64)],\n dtype=[('x0', 'i4'), ('x1', ('i4', 2)),\n ('x2', ('i4', (2, 2))), ('ignored', 'u1')])\n dd = structured_to_unstructured(d[['x0', 'x1', 'x2']])\n assert_(np.shares_memory(dd, d))\n assert_equal(dd, [[1, 2, 3, 4, 5, 6, 7],\n [8, 9, 10, 11, 12, 13, 14]])\n\n # test that nested fields with identical names don't break anything\n point = np.dtype([('x', int), ('y', int)])\n triangle = np.dtype([('a', point), ('b', point), ('c', point)])\n arr = np.zeros(10, triangle)\n res = structured_to_unstructured(arr, dtype=int)\n assert_equal(res, np.zeros((10, 6), dtype=int))\n\n # test nested combinations of subarrays and structured arrays, gh-13333\n def subarray(dt, shape):\n return np.dtype((dt, shape))\n\n def structured(*dts):\n return np.dtype([(f'x{i}', dt) for i, dt in enumerate(dts)])\n\n def inspect(dt, dtype=None):\n arr = np.zeros((), dt)\n ret = structured_to_unstructured(arr, dtype=dtype)\n backarr = unstructured_to_structured(ret, dt)\n return ret.shape, ret.dtype, backarr.dtype\n\n dt = structured(subarray(structured(np.int32, np.int32), 3))\n assert_equal(inspect(dt), ((6,), np.int32, dt))\n\n dt = structured(subarray(subarray(np.int32, 2), 2))\n assert_equal(inspect(dt), ((4,), np.int32, dt))\n\n dt = structured(np.int32)\n assert_equal(inspect(dt), ((1,), np.int32, dt))\n\n dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))\n assert_equal(inspect(dt), ((5,), np.int32, dt))\n\n dt = structured()\n assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))\n\n # these currently don't work, but we may make it work in the future\n assert_raises(NotImplementedError, structured_to_unstructured,\n np.zeros(3, dt), dtype=np.int32)\n assert_raises(NotImplementedError, unstructured_to_structured,\n np.zeros((3, 0), dtype=np.int32))\n\n # test supported ndarray subclasses\n 
d_plain = np.array([(1, 2), (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')])\n dd_expected = structured_to_unstructured(d_plain, copy=True)\n\n # recarray\n d = d_plain.view(np.recarray)\n\n dd = structured_to_unstructured(d, copy=False)\n ddd = structured_to_unstructured(d, copy=True)\n assert_(np.shares_memory(d, dd))\n assert_(type(dd) is np.recarray)\n assert_(type(ddd) is np.recarray)\n assert_equal(dd, dd_expected)\n assert_equal(ddd, dd_expected)\n\n # memmap\n d = np.memmap(tmp_path / 'memmap',\n mode='w+',\n dtype=d_plain.dtype,\n shape=d_plain.shape)\n d[:] = d_plain\n dd = structured_to_unstructured(d, copy=False)\n ddd = structured_to_unstructured(d, copy=True)\n assert_(np.shares_memory(d, dd))\n assert_(type(dd) is np.memmap)\n assert_(type(ddd) is np.memmap)\n assert_equal(dd, dd_expected)\n assert_equal(ddd, dd_expected)\n\n def test_unstructured_to_structured(self):\n # test if dtype is the args of np.dtype\n a = np.zeros((20, 2))\n test_dtype_args = [('x', float), ('y', float)]\n test_dtype = np.dtype(test_dtype_args)\n field1 = unstructured_to_structured(a, dtype=test_dtype_args) # now\n field2 = unstructured_to_structured(a, dtype=test_dtype) # before\n assert_equal(field1, field2)\n\n def test_field_assignment_by_name(self):\n a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])\n newdt = [('b', 'f4'), ('c', 'u1')]\n\n assert_equal(require_fields(a, newdt), np.ones(2, newdt))\n\n b = np.array([(1, 2), (3, 4)], dtype=newdt)\n assign_fields_by_name(a, b, zero_unassigned=False)\n assert_equal(a, np.array([(1, 1, 2), (1, 3, 4)], dtype=a.dtype))\n assign_fields_by_name(a, b)\n assert_equal(a, np.array([(0, 1, 2), (0, 3, 4)], dtype=a.dtype))\n\n # test nested fields\n a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])\n newdt = [('a', [('c', 'u1')])]\n assert_equal(require_fields(a, newdt), np.ones(2, newdt))\n b = np.array([((2,),), ((3,),)], dtype=newdt)\n assign_fields_by_name(a, b, zero_unassigned=False)\n assert_equal(a, np.array([((1, 
2),), ((1, 3),)], dtype=a.dtype))\n assign_fields_by_name(a, b)\n assert_equal(a, np.array([((0, 2),), ((0, 3),)], dtype=a.dtype))\n\n # test unstructured code path for 0d arrays\n a, b = np.array(3), np.array(0)\n assign_fields_by_name(b, a)\n assert_equal(b[()], 3)\n\n\nclass TestRecursiveFillFields:\n # Test recursive_fill_fields.\n def test_simple_flexible(self):\n # Test recursive_fill_fields on flexible-array\n a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])\n b = np.zeros((3,), dtype=a.dtype)\n test = recursive_fill_fields(a, b)\n control = np.array([(1, 10.), (2, 20.), (0, 0.)],\n dtype=[('A', int), ('B', float)])\n assert_equal(test, control)\n\n def test_masked_flexible(self):\n # Test recursive_fill_fields on masked flexible-array\n a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],\n dtype=[('A', int), ('B', float)])\n b = ma.zeros((3,), dtype=a.dtype)\n test = recursive_fill_fields(a, b)\n control = ma.array([(1, 10.), (2, 20.), (0, 0.)],\n mask=[(0, 1), (1, 0), (0, 0)],\n dtype=[('A', int), ('B', float)])\n assert_equal(test, control)\n\n\nclass TestMergeArrays:\n # Test merge_arrays\n\n def setup_method(self):\n x = np.array([1, 2, ])\n y = np.array([10, 20, 30])\n z = np.array(\n [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])\n w = np.array(\n [(1, (2, 3.0, ())), (4, (5, 6.0, ()))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])])\n self.data = (w, x, y, z)\n\n def test_solo(self):\n # Test merge_arrays on a single array.\n (_, x, _, z) = self.data\n\n test = merge_arrays(x)\n control = np.array([(1,), (2,)], dtype=[('f0', int)])\n assert_equal(test, control)\n test = merge_arrays((x,))\n assert_equal(test, control)\n\n test = merge_arrays(z, flatten=False)\n assert_equal(test, z)\n test = merge_arrays(z, flatten=True)\n assert_equal(test, z)\n\n def test_solo_w_flatten(self):\n # Test merge_arrays on a single array w & w/o flattening\n w = self.data[0]\n test = merge_arrays(w, 
flatten=False)\n assert_equal(test, w)\n\n test = merge_arrays(w, flatten=True)\n control = np.array([(1, 2, 3.0), (4, 5, 6.0)],\n dtype=[('a', int), ('ba', float), ('bb', int)])\n assert_equal(test, control)\n\n def test_standard(self):\n # Test standard & standard\n # Test merge arrays\n (_, x, y, _) = self.data\n test = merge_arrays((x, y), usemask=False)\n control = np.array([(1, 10), (2, 20), (-1, 30)],\n dtype=[('f0', int), ('f1', int)])\n assert_equal(test, control)\n\n test = merge_arrays((x, y), usemask=True)\n control = ma.array([(1, 10), (2, 20), (-1, 30)],\n mask=[(0, 0), (0, 0), (1, 0)],\n dtype=[('f0', int), ('f1', int)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n def test_flatten(self):\n # Test standard & flexible\n (_, x, _, z) = self.data\n test = merge_arrays((x, z), flatten=True)\n control = np.array([(1, 'A', 1.), (2, 'B', 2.)],\n dtype=[('f0', int), ('A', '|S3'), ('B', float)])\n assert_equal(test, control)\n\n test = merge_arrays((x, z), flatten=False)\n control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],\n dtype=[('f0', int),\n ('f1', [('A', '|S3'), ('B', float)])])\n assert_equal(test, control)\n\n def test_flatten_wflexible(self):\n # Test flatten standard & nested\n (w, x, _, _) = self.data\n test = merge_arrays((x, w), flatten=True)\n control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],\n dtype=[('f0', int),\n ('a', int), ('ba', float), ('bb', int)])\n assert_equal(test, control)\n\n test = merge_arrays((x, w), flatten=False)\n controldtype = [('f0', int),\n ('f1', [('a', int),\n ('b', [('ba', float), ('bb', int), ('bc', [])])])]\n control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))],\n dtype=controldtype)\n assert_equal(test, control)\n\n def test_wmasked_arrays(self):\n # Test merge_arrays masked arrays\n (_, x, _, _) = self.data\n mx = ma.array([1, 2, 3], mask=[1, 0, 0])\n test = merge_arrays((x, mx), usemask=True)\n control = ma.array([(1, 1), (2, 2), (-1, 3)],\n mask=[(0, 1), (0, 0), 
(1, 0)],\n dtype=[('f0', int), ('f1', int)])\n assert_equal(test, control)\n test = merge_arrays((x, mx), usemask=True, asrecarray=True)\n assert_equal(test, control)\n assert_(isinstance(test, MaskedRecords))\n\n def test_w_singlefield(self):\n # Test single field\n test = merge_arrays((np.array([1, 2]).view([('a', int)]),\n np.array([10., 20., 30.])),)\n control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],\n mask=[(0, 0), (0, 0), (1, 0)],\n dtype=[('a', int), ('f1', float)])\n assert_equal(test, control)\n\n def test_w_shorter_flex(self):\n # Test merge_arrays w/ a shorter flexndarray.\n z = self.data[-1]\n\n # Fixme, this test looks incomplete and broken\n #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))\n #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],\n # dtype=[('A', '|S3'), ('B', float), ('C', int)])\n #assert_equal(test, control)\n\n merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))\n np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],\n dtype=[('A', '|S3'), ('B', float), ('C', int)])\n\n def test_singlerecord(self):\n (_, x, y, z) = self.data\n test = merge_arrays((x[0], y[0], z[0]), usemask=False)\n control = np.array([(1, 10, ('A', 1))],\n dtype=[('f0', int),\n ('f1', int),\n ('f2', [('A', '|S3'), ('B', float)])])\n assert_equal(test, control)\n\n\nclass TestAppendFields:\n # Test append_fields\n\n def setup_method(self):\n x = np.array([1, 2, ])\n y = np.array([10, 20, 30])\n z = np.array(\n [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])\n w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])\n self.data = (w, x, y, z)\n\n def test_append_single(self):\n # Test simple case\n (_, x, _, _) = self.data\n test = append_fields(x, 'A', data=[10, 20, 30])\n control = ma.array([(1, 10), (2, 20), (-1, 30)],\n mask=[(0, 0), (0, 0), (1, 0)],\n dtype=[('f0', int), ('A', int)],)\n assert_equal(test, control)\n\n def test_append_double(self):\n # Test 
simple case\n (_, x, _, _) = self.data\n test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])\n control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],\n mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],\n dtype=[('f0', int), ('A', int), ('B', int)],)\n assert_equal(test, control)\n\n def test_append_on_flex(self):\n # Test append_fields on flexible type arrays\n z = self.data[-1]\n test = append_fields(z, 'C', data=[10, 20, 30])\n control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],\n mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],\n dtype=[('A', '|S3'), ('B', float), ('C', int)],)\n assert_equal(test, control)\n\n def test_append_on_nested(self):\n # Test append_fields on nested fields\n w = self.data[0]\n test = append_fields(w, 'C', data=[10, 20, 30])\n control = ma.array([(1, (2, 3.0), 10),\n (4, (5, 6.0), 20),\n (-1, (-1, -1.), 30)],\n mask=[(\n 0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],\n dtype=[('a', int),\n ('b', [('ba', float), ('bb', int)]),\n ('C', int)],)\n assert_equal(test, control)\n\n\nclass TestStackArrays:\n # Test stack_arrays\n def setup_method(self):\n x = np.array([1, 2, ])\n y = np.array([10, 20, 30])\n z = np.array(\n [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])\n w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],\n dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])\n self.data = (w, x, y, z)\n\n def test_solo(self):\n # Test stack_arrays on single arrays\n (_, x, _, _) = self.data\n test = stack_arrays((x,))\n assert_equal(test, x)\n assert_(test is x)\n\n test = stack_arrays(x)\n assert_equal(test, x)\n assert_(test is x)\n\n def test_unnamed_fields(self):\n # Tests combinations of arrays w/o named fields\n (_, x, y, _) = self.data\n\n test = stack_arrays((x, x), usemask=False)\n control = np.array([1, 2, 1, 2])\n assert_equal(test, control)\n\n test = stack_arrays((x, y), usemask=False)\n control = np.array([1, 2, 10, 20, 30])\n assert_equal(test, control)\n\n test = stack_arrays((y, x), 
usemask=False)\n control = np.array([10, 20, 30, 1, 2])\n assert_equal(test, control)\n\n def test_unnamed_and_named_fields(self):\n # Test combination of arrays w/ & w/o named fields\n (_, x, _, z) = self.data\n\n test = stack_arrays((x, z))\n control = ma.array([(1, -1, -1), (2, -1, -1),\n (-1, 'A', 1), (-1, 'B', 2)],\n mask=[(0, 1, 1), (0, 1, 1),\n (1, 0, 0), (1, 0, 0)],\n dtype=[('f0', int), ('A', '|S3'), ('B', float)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n test = stack_arrays((z, x))\n control = ma.array([('A', 1, -1), ('B', 2, -1),\n (-1, -1, 1), (-1, -1, 2), ],\n mask=[(0, 0, 1), (0, 0, 1),\n (1, 1, 0), (1, 1, 0)],\n dtype=[('A', '|S3'), ('B', float), ('f2', int)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n test = stack_arrays((z, z, x))\n control = ma.array([('A', 1, -1), ('B', 2, -1),\n ('A', 1, -1), ('B', 2, -1),\n (-1, -1, 1), (-1, -1, 2), ],\n mask=[(0, 0, 1), (0, 0, 1),\n (0, 0, 1), (0, 0, 1),\n (1, 1, 0), (1, 1, 0)],\n dtype=[('A', '|S3'), ('B', float), ('f2', int)])\n assert_equal(test, control)\n\n def test_matching_named_fields(self):\n # Test combination of arrays w/ matching field names\n (_, x, _, z) = self.data\n zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],\n dtype=[('A', '|S3'), ('B', float), ('C', float)])\n test = stack_arrays((z, zz))\n control = ma.array([('A', 1, -1), ('B', 2, -1),\n (\n 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],\n dtype=[('A', '|S3'), ('B', float), ('C', float)],\n mask=[(0, 0, 1), (0, 0, 1),\n (0, 0, 0), (0, 0, 0), (0, 0, 0)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n test = stack_arrays((z, zz, x))\n ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]\n control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),\n ('a', 10., 100., -1), ('b', 20., 200., -1),\n ('c', 30., 300., -1),\n (-1, -1, -1, 1), (-1, -1, -1, 2)],\n dtype=ndtype,\n mask=[(0, 0, 1, 1), (0, 0, 1, 1),\n (0, 0, 
0, 1), (0, 0, 0, 1), (0, 0, 0, 1),\n (1, 1, 1, 0), (1, 1, 1, 0)])\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n def test_defaults(self):\n # Test defaults: no exception raised if keys of defaults are not fields.\n (_, _, _, z) = self.data\n zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],\n dtype=[('A', '|S3'), ('B', float), ('C', float)])\n defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}\n test = stack_arrays((z, zz), defaults=defaults)\n control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),\n (\n 'a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],\n dtype=[('A', '|S3'), ('B', float), ('C', float)],\n mask=[(0, 0, 1), (0, 0, 1),\n (0, 0, 0), (0, 0, 0), (0, 0, 0)])\n assert_equal(test, control)\n assert_equal(test.data, control.data)\n assert_equal(test.mask, control.mask)\n\n def test_autoconversion(self):\n # Tests autoconversion\n adtype = [('A', int), ('B', bool), ('C', float)]\n a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)\n bdtype = [('A', int), ('B', float), ('C', float)]\n b = ma.array([(4, 5, 6)], dtype=bdtype)\n control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],\n dtype=bdtype)\n test = stack_arrays((a, b), autoconvert=True)\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n with assert_raises(TypeError):\n stack_arrays((a, b), autoconvert=False)\n\n def test_checktitles(self):\n # Test using titles in the field names\n adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]\n a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)\n bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]\n b = ma.array([(4, 5, 6)], dtype=bdtype)\n test = stack_arrays((a, b))\n control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],\n dtype=bdtype)\n assert_equal(test, control)\n assert_equal(test.mask, control.mask)\n\n def test_subdtype(self):\n z = np.array([\n ('A', 1), ('B', 2)\n ], dtype=[('A', 
'|S3'), ('B', float, (1,))])\n zz = np.array([\n ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)\n ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])\n\n res = stack_arrays((z, zz))\n expected = ma.array(\n data=[\n (b'A', [1.0], 0),\n (b'B', [2.0], 0),\n (b'a', [10.0], 100.0),\n (b'b', [20.0], 200.0),\n (b'c', [30.0], 300.0)],\n mask=[\n (False, [False], True),\n (False, [False], True),\n (False, [False], False),\n (False, [False], False),\n (False, [False], False)\n ],\n dtype=zz.dtype\n )\n assert_equal(res.dtype, expected.dtype)\n assert_equal(res, expected)\n assert_equal(res.mask, expected.mask)\n\n\nclass TestJoinBy:\n def setup_method(self):\n self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),\n np.arange(100, 110))),\n dtype=[('a', int), ('b', int), ('c', int)])\n self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),\n np.arange(100, 110))),\n dtype=[('a', int), ('b', int), ('d', int)])\n\n def test_inner_join(self):\n # Basic test of join_by\n a, b = self.a, self.b\n\n test = join_by('a', a, b, jointype='inner')\n control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),\n (7, 57, 67, 107, 102), (8, 58, 68, 108, 103),\n (9, 59, 69, 109, 104)],\n dtype=[('a', int), ('b1', int), ('b2', int),\n ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_join(self):\n a, b = self.a, self.b\n\n # Fixme, this test is broken\n #test = join_by(('a', 'b'), a, b)\n #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),\n # (7, 57, 107, 102), (8, 58, 108, 103),\n # (9, 59, 109, 104)],\n # dtype=[('a', int), ('b', int),\n # ('c', int), ('d', int)])\n #assert_equal(test, control)\n\n join_by(('a', 'b'), a, b)\n np.array([(5, 55, 105, 100), (6, 56, 106, 101),\n (7, 57, 107, 102), (8, 58, 108, 103),\n (9, 59, 109, 104)],\n dtype=[('a', int), ('b', int),\n ('c', int), ('d', int)])\n\n def test_join_subdtype(self):\n # tests the bug in https://stackoverflow.com/q/44769632/102441\n foo = np.array([(1,)],\n 
dtype=[('key', int)])\n bar = np.array([(1, np.array([1, 2, 3]))],\n dtype=[('key', int), ('value', 'uint16', 3)])\n res = join_by('key', foo, bar)\n assert_equal(res, bar.view(ma.MaskedArray))\n\n def test_outer_join(self):\n a, b = self.a, self.b\n\n test = join_by(('a', 'b'), a, b, 'outer')\n control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),\n (2, 52, 102, -1), (3, 53, 103, -1),\n (4, 54, 104, -1), (5, 55, 105, -1),\n (5, 65, -1, 100), (6, 56, 106, -1),\n (6, 66, -1, 101), (7, 57, 107, -1),\n (7, 67, -1, 102), (8, 58, 108, -1),\n (8, 68, -1, 103), (9, 59, 109, -1),\n (9, 69, -1, 104), (10, 70, -1, 105),\n (11, 71, -1, 106), (12, 72, -1, 107),\n (13, 73, -1, 108), (14, 74, -1, 109)],\n mask=[(0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 0, 1),\n (0, 0, 1, 0), (0, 0, 1, 0),\n (0, 0, 1, 0), (0, 0, 1, 0),\n (0, 0, 1, 0), (0, 0, 1, 0)],\n dtype=[('a', int), ('b', int),\n ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_leftouter_join(self):\n a, b = self.a, self.b\n\n test = join_by(('a', 'b'), a, b, 'leftouter')\n control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),\n (2, 52, 102, -1), (3, 53, 103, -1),\n (4, 54, 104, -1), (5, 55, 105, -1),\n (6, 56, 106, -1), (7, 57, 107, -1),\n (8, 58, 108, -1), (9, 59, 109, -1)],\n mask=[(0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1),\n (0, 0, 0, 1), (0, 0, 0, 1)],\n dtype=[('a', int), ('b', int), ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_different_field_order(self):\n # gh-8940\n a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])\n b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])\n # this should not give a FutureWarning:\n j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)\n assert_equal(j.dtype.names, ['b', 'c', 'a1', 
'a2'])\n\n def test_duplicate_keys(self):\n a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])\n b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])\n assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)\n\n def test_same_name_different_dtypes_key(self):\n a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])\n b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])\n expected_dtype = np.dtype([\n ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])\n\n a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)\n b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)\n res = join_by('key', a, b)\n\n assert_equal(res.dtype, expected_dtype)\n\n def test_same_name_different_dtypes(self):\n # gh-9338\n a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])\n b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])\n expected_dtype = np.dtype([\n ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])\n\n a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)\n b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)\n res = join_by('key', a, b)\n\n assert_equal(res.dtype, expected_dtype)\n\n def test_subarray_key(self):\n a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])\n a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)\n\n b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])\n b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)\n\n expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])\n expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)\n\n res = join_by('pos', a, b)\n assert_equal(res.dtype, expected_dtype)\n assert_equal(res, expected)\n\n def test_padded_dtype(self):\n dt = np.dtype('i1,f4', align=True)\n dt.names = ('k', 'v')\n assert_(len(dt.descr), 3) # padding field is inserted\n\n a = np.array([(1, 3), (3, 2)], dt)\n b = np.array([(1, 1), (2, 2)], dt)\n res = join_by('k', a, b)\n\n # no padding fields remain\n expected_dtype = np.dtype([\n ('k', 
'i1'), ('v1', 'f4'), ('v2', 'f4')\n ])\n\n assert_equal(res.dtype, expected_dtype)\n\n\nclass TestJoinBy2:\n @classmethod\n def setup_method(cls):\n cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),\n np.arange(100, 110))),\n dtype=[('a', int), ('b', int), ('c', int)])\n cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),\n np.arange(100, 110))),\n dtype=[('a', int), ('b', int), ('d', int)])\n\n def test_no_r1postfix(self):\n # Basic test of join_by no_r1postfix\n a, b = self.a, self.b\n\n test = join_by(\n 'a', a, b, r1postfix='', r2postfix='2', jointype='inner')\n control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),\n (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),\n (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),\n (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),\n (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],\n dtype=[('a', int), ('b', int), ('b2', int),\n ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_no_postfix(self):\n assert_raises(ValueError, join_by, 'a', self.a, self.b,\n r1postfix='', r2postfix='')\n\n def test_no_r2postfix(self):\n # Basic test of join_by no_r2postfix\n a, b = self.a, self.b\n\n test = join_by(\n 'a', a, b, r1postfix='1', r2postfix='', jointype='inner')\n control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),\n (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),\n (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),\n (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),\n (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],\n dtype=[('a', int), ('b1', int), ('b', int),\n ('c', int), ('d', int)])\n assert_equal(test, control)\n\n def test_two_keys_two_vars(self):\n a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),\n np.arange(50, 60), np.arange(10, 20))),\n dtype=[('k', int), ('a', int), ('b', int), ('c', int)])\n\n b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),\n np.arange(65, 75), np.arange(0, 10))),\n dtype=[('k', int), ('a', int), ('b', int), ('c', 
int)])\n\n control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),\n (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),\n (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),\n (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),\n (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],\n dtype=[('k', int), ('a', int), ('b1', int),\n ('b2', int), ('c1', int), ('c2', int)])\n test = join_by(\n ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')\n assert_equal(test.dtype, control.dtype)\n assert_equal(test, control)\n\nclass TestAppendFieldsObj:\n """\n Test append_fields with arrays containing objects\n """\n # https://github.com/numpy/numpy/issues/2346\n\n def setup_method(self):\n from datetime import date\n self.data = {'obj': date(2000, 1, 1)}\n\n def test_append_to_objects(self):\n "Test append_fields when the base array contains objects"\n obj = self.data['obj']\n x = np.array([(obj, 1.), (obj, 2.)],\n dtype=[('A', object), ('B', float)])\n y = np.array([10, 20], dtype=int)\n test = append_fields(x, 'C', data=y, usemask=False)\n control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],\n dtype=[('A', object), ('B', float), ('C', int)])\n assert_equal(test, control)\n | .venv\Lib\site-packages\numpy\lib\tests\test_recfunctions.py | test_recfunctions.py | Python | 44,980 | 0.95 | 0.070342 | 0.095291 | python-kit | 795 | 2024-02-03T14:49:16.935999 | GPL-3.0 | true | 5357191b26eee8cf1fd872c1eb5dde08 |
import os\n\nimport numpy as np\nfrom numpy.testing import (\n _assert_valid_refcount,\n assert_,\n assert_array_almost_equal,\n assert_array_equal,\n assert_equal,\n assert_raises,\n)\n\n\nclass TestRegression:\n def test_poly1d(self):\n # Ticket #28\n assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),\n np.poly1d([-1, 1]))\n\n def test_cov_parameters(self):\n # Ticket #91\n x = np.random.random((3, 3))\n y = x.copy()\n np.cov(x, rowvar=True)\n np.cov(y, rowvar=False)\n assert_array_equal(x, y)\n\n def test_mem_digitize(self):\n # Ticket #95\n for i in range(100):\n np.digitize([1, 2, 3, 4], [1, 3])\n np.digitize([0, 1, 2, 3, 4], [1, 3])\n\n def test_unique_zero_sized(self):\n # Ticket #205\n assert_array_equal([], np.unique(np.array([])))\n\n def test_mem_vectorise(self):\n # Ticket #325\n vt = np.vectorize(lambda *args: args)\n vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))\n vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1,\n 1, 2)), np.zeros((2, 2)))\n\n def test_mgrid_single_element(self):\n # Ticket #339\n assert_array_equal(np.mgrid[0:0:1j], [0])\n assert_array_equal(np.mgrid[0:0], [])\n\n def test_refcount_vectorize(self):\n # Ticket #378\n def p(x, y):\n return 123\n v = np.vectorize(p)\n _assert_valid_refcount(v)\n\n def test_poly1d_nan_roots(self):\n # Ticket #396\n p = np.poly1d([np.nan, np.nan, 1], r=False)\n assert_raises(np.linalg.LinAlgError, getattr, p, "r")\n\n def test_mem_polymul(self):\n # Ticket #448\n np.polymul([], [1.])\n\n def test_mem_string_concat(self):\n # Ticket #469\n x = np.array([])\n np.append(x, 'asdasd\tasdasd')\n\n def test_poly_div(self):\n # Ticket #553\n u = np.poly1d([1, 2, 3])\n v = np.poly1d([1, 2, 3, 4, 5])\n q, r = np.polydiv(u, v)\n assert_equal(q * v + r, u)\n\n def test_poly_eq(self):\n # Ticket #554\n x = np.poly1d([1, 2, 3])\n y = np.poly1d([3, 4])\n assert_(x != y)\n assert_(x == x)\n\n def test_polyfit_build(self):\n # Ticket #628\n ref = [-1.06123820e-06, 5.70886914e-04, 
-1.13822012e-01,\n 9.95368241e+00, -3.14526520e+02]\n x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,\n 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,\n 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,\n 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,\n 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,\n 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,\n 170, 171, 172, 173, 174, 175, 176]\n y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,\n 6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,\n 13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,\n 7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,\n 6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,\n 6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,\n 8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]\n tested = np.polyfit(x, y, 4)\n assert_array_almost_equal(ref, tested)\n\n def test_polydiv_type(self):\n # Make polydiv work for complex types\n msg = "Wrong type, should be complex"\n x = np.ones(3, dtype=complex)\n q, r = np.polydiv(x, x)\n assert_(q.dtype == complex, msg)\n msg = "Wrong type, should be float"\n x = np.ones(3, dtype=int)\n q, r = np.polydiv(x, x)\n assert_(q.dtype == float, msg)\n\n def test_histogramdd_too_many_bins(self):\n # Ticket 928.\n assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)\n\n def test_polyint_type(self):\n # Ticket #944\n msg = "Wrong type, should be complex"\n x = np.ones(3, dtype=complex)\n assert_(np.polyint(x).dtype == complex, msg)\n msg = "Wrong type, should be float"\n x = np.ones(3, dtype=int)\n assert_(np.polyint(x).dtype == float, msg)\n\n def test_ndenumerate_crash(self):\n # Ticket 1140\n # Shouldn't crash:\n list(np.ndenumerate(np.array([[]])))\n\n def test_large_fancy_indexing(self):\n # Large enough to fail on 64-bit.\n nbits = np.dtype(np.intp).itemsize * 8\n thesize = int((2**nbits)**(1.0 / 5.0) + 1)\n\n 
def dp():\n n = 3\n a = np.ones((n,) * 5)\n i = np.random.randint(0, n, size=thesize)\n a[np.ix_(i, i, i, i, i)] = 0\n\n def dp2():\n n = 3\n a = np.ones((n,) * 5)\n i = np.random.randint(0, n, size=thesize)\n a[np.ix_(i, i, i, i, i)]\n\n assert_raises(ValueError, dp)\n assert_raises(ValueError, dp2)\n\n def test_void_coercion(self):\n dt = np.dtype([('a', 'f4'), ('b', 'i4')])\n x = np.zeros((1,), dt)\n assert_(np.r_[x, x].dtype == dt)\n\n def test_include_dirs(self):\n # As a sanity check, just test that get_include\n # includes something reasonable. Somewhat\n # related to ticket #1405.\n include_dirs = [np.get_include()]\n for path in include_dirs:\n assert_(isinstance(path, str))\n assert_(path != '')\n\n def test_polyder_return_type(self):\n # Ticket #1249\n assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))\n assert_(isinstance(np.polyder([1], 0), np.ndarray))\n assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))\n assert_(isinstance(np.polyder([1], 1), np.ndarray))\n\n def test_append_fields_dtype_list(self):\n # Ticket #1676\n from numpy.lib.recfunctions import append_fields\n\n base = np.array([1, 2, 3], dtype=np.int32)\n names = ['a', 'b', 'c']\n data = np.eye(3).astype(np.int32)\n dlist = [np.float64, np.int32, np.int32]\n try:\n append_fields(base, names, data, dlist)\n except Exception:\n raise AssertionError\n\n def test_loadtxt_fields_subarrays(self):\n # For ticket #1936\n from io import StringIO\n\n dt = [("a", 'u1', 2), ("b", 'u1', 2)]\n x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)\n assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))\n\n dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])]\n x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)\n assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt))\n\n dt = [("a", 'u1', (2, 2))]\n x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)\n assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))\n\n dt = [("a", 'u1', (2, 3, 2))]\n x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), 
dtype=dt)\n data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)]\n assert_equal(x, np.array(data, dtype=dt))\n\n def test_nansum_with_boolean(self):\n # gh-2978\n a = np.zeros(2, dtype=bool)\n try:\n np.nansum(a)\n except Exception:\n raise AssertionError\n\n def test_py3_compat(self):\n # gh-2561\n # Test if the oldstyle class test is bypassed in python3\n class C:\n """Old-style class in python2, normal class in python3"""\n pass\n\n out = open(os.devnull, 'w')\n try:\n np.info(C(), output=out)\n except AttributeError:\n raise AssertionError\n finally:\n out.close()\n | .venv\Lib\site-packages\numpy\lib\tests\test_regression.py | test_regression.py | Python | 7,947 | 0.95 | 0.17316 | 0.14359 | python-kit | 580 | 2024-02-07T09:45:02.605128 | BSD-3-Clause | true | b120ccd8f9d38b2aea5cf7e042bbe080 |
import functools\nimport sys\n\nimport pytest\n\nimport numpy as np\nfrom numpy import (\n apply_along_axis,\n apply_over_axes,\n array_split,\n column_stack,\n dsplit,\n dstack,\n expand_dims,\n hsplit,\n kron,\n put_along_axis,\n split,\n take_along_axis,\n tile,\n vsplit,\n)\nfrom numpy.exceptions import AxisError\nfrom numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises\n\nIS_64BIT = sys.maxsize > 2**32\n\n\ndef _add_keepdims(func):\n """ hack in keepdims behavior into a function taking an axis """\n @functools.wraps(func)\n def wrapped(a, axis, **kwargs):\n res = func(a, axis=axis, **kwargs)\n if axis is None:\n axis = 0 # res is now a scalar, so we can insert this anywhere\n return np.expand_dims(res, axis=axis)\n return wrapped\n\n\nclass TestTakeAlongAxis:\n def test_argequivalent(self):\n """ Test it translates from arg<func> to <func> """\n from numpy.random import rand\n a = rand(3, 4, 5)\n\n funcs = [\n (np.sort, np.argsort, {}),\n (_add_keepdims(np.min), _add_keepdims(np.argmin), {}),\n (_add_keepdims(np.max), _add_keepdims(np.argmax), {}),\n #(np.partition, np.argpartition, dict(kth=2)),\n ]\n\n for func, argfunc, kwargs in funcs:\n for axis in list(range(a.ndim)) + [None]:\n a_func = func(a, axis=axis, **kwargs)\n ai_func = argfunc(a, axis=axis, **kwargs)\n assert_equal(a_func, take_along_axis(a, ai_func, axis=axis))\n\n def test_invalid(self):\n """ Test it errors when indices has too few dimensions """\n a = np.ones((10, 10))\n ai = np.ones((10, 2), dtype=np.intp)\n\n # sanity check\n take_along_axis(a, ai, axis=1)\n\n # not enough indices\n assert_raises(ValueError, take_along_axis, a, np.array(1), axis=1)\n # bool arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(bool), axis=1)\n # float arrays not allowed\n assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1)\n # invalid axis\n assert_raises(AxisError, take_along_axis, a, ai, axis=10)\n # invalid indices\n 
assert_raises(ValueError, take_along_axis, a, ai, axis=None)\n\n def test_empty(self):\n """ Test everything is ok with empty results, even with inserted dims """\n a = np.ones((3, 4, 5))\n ai = np.ones((3, 0, 5), dtype=np.intp)\n\n actual = take_along_axis(a, ai, axis=1)\n assert_equal(actual.shape, ai.shape)\n\n def test_broadcast(self):\n """ Test that non-indexing dimensions are broadcast in both directions """\n a = np.ones((3, 4, 1))\n ai = np.ones((1, 2, 5), dtype=np.intp)\n actual = take_along_axis(a, ai, axis=1)\n assert_equal(actual.shape, (3, 2, 5))\n\n\nclass TestPutAlongAxis:\n def test_replace_max(self):\n a_base = np.array([[10, 30, 20], [60, 40, 50]])\n\n for axis in list(range(a_base.ndim)) + [None]:\n # we mutate this in the loop\n a = a_base.copy()\n\n # replace the max with a small value\n i_max = _add_keepdims(np.argmax)(a, axis=axis)\n put_along_axis(a, i_max, -99, axis=axis)\n\n # find the new minimum, which should max\n i_min = _add_keepdims(np.argmin)(a, axis=axis)\n\n assert_equal(i_min, i_max)\n\n def test_broadcast(self):\n """ Test that non-indexing dimensions are broadcast in both directions """\n a = np.ones((3, 4, 1))\n ai = np.arange(10, dtype=np.intp).reshape((1, 2, 5)) % 4\n put_along_axis(a, ai, 20, axis=1)\n assert_equal(take_along_axis(a, ai, axis=1), 20)\n\n def test_invalid(self):\n """ Test invalid inputs """\n a_base = np.array([[10, 30, 20], [60, 40, 50]])\n indices = np.array([[0], [1]])\n values = np.array([[2], [1]])\n\n # sanity check\n a = a_base.copy()\n put_along_axis(a, indices, values, axis=0)\n assert np.all(a == [[2, 2, 2], [1, 1, 1]])\n\n # invalid indices\n a = a_base.copy()\n with assert_raises(ValueError) as exc:\n put_along_axis(a, indices, values, axis=None)\n assert "single dimension" in str(exc.exception)\n\n\nclass TestApplyAlongAxis:\n def test_simple(self):\n a = np.ones((20, 10), 'd')\n assert_array_equal(\n apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))\n\n def test_simple101(self):\n a 
= np.ones((10, 101), 'd')\n assert_array_equal(\n apply_along_axis(len, 0, a), len(a) * np.ones(a.shape[1]))\n\n def test_3d(self):\n a = np.arange(27).reshape((3, 3, 3))\n assert_array_equal(apply_along_axis(np.sum, 0, a),\n [[27, 30, 33], [36, 39, 42], [45, 48, 51]])\n\n def test_preserve_subclass(self):\n def double(row):\n return row * 2\n\n class MyNDArray(np.ndarray):\n pass\n\n m = np.array([[0, 1], [2, 3]]).view(MyNDArray)\n expected = np.array([[0, 2], [4, 6]]).view(MyNDArray)\n\n result = apply_along_axis(double, 0, m)\n assert_(isinstance(result, MyNDArray))\n assert_array_equal(result, expected)\n\n result = apply_along_axis(double, 1, m)\n assert_(isinstance(result, MyNDArray))\n assert_array_equal(result, expected)\n\n def test_subclass(self):\n class MinimalSubclass(np.ndarray):\n data = 1\n\n def minimal_function(array):\n return array.data\n\n a = np.zeros((6, 3)).view(MinimalSubclass)\n\n assert_array_equal(\n apply_along_axis(minimal_function, 0, a), np.array([1, 1, 1])\n )\n\n def test_scalar_array(self, cls=np.ndarray):\n a = np.ones((6, 3)).view(cls)\n res = apply_along_axis(np.sum, 0, a)\n assert_(isinstance(res, cls))\n assert_array_equal(res, np.array([6, 6, 6]).view(cls))\n\n def test_0d_array(self, cls=np.ndarray):\n def sum_to_0d(x):\n """ Sum x, returning a 0d array of the same class """\n assert_equal(x.ndim, 1)\n return np.squeeze(np.sum(x, keepdims=True))\n a = np.ones((6, 3)).view(cls)\n res = apply_along_axis(sum_to_0d, 0, a)\n assert_(isinstance(res, cls))\n assert_array_equal(res, np.array([6, 6, 6]).view(cls))\n\n res = apply_along_axis(sum_to_0d, 1, a)\n assert_(isinstance(res, cls))\n assert_array_equal(res, np.array([3, 3, 3, 3, 3, 3]).view(cls))\n\n def test_axis_insertion(self, cls=np.ndarray):\n def f1to2(x):\n """produces an asymmetric non-square matrix from x"""\n assert_equal(x.ndim, 1)\n return (x[::-1] * x[1:, None]).view(cls)\n\n a2d = np.arange(6 * 3).reshape((6, 3))\n\n # 2d insertion along first axis\n actual = 
apply_along_axis(f1to2, 0, a2d)\n expected = np.stack([\n f1to2(a2d[:, i]) for i in range(a2d.shape[1])\n ], axis=-1).view(cls)\n assert_equal(type(actual), type(expected))\n assert_equal(actual, expected)\n\n # 2d insertion along last axis\n actual = apply_along_axis(f1to2, 1, a2d)\n expected = np.stack([\n f1to2(a2d[i, :]) for i in range(a2d.shape[0])\n ], axis=0).view(cls)\n assert_equal(type(actual), type(expected))\n assert_equal(actual, expected)\n\n # 3d insertion along middle axis\n a3d = np.arange(6 * 5 * 3).reshape((6, 5, 3))\n\n actual = apply_along_axis(f1to2, 1, a3d)\n expected = np.stack([\n np.stack([\n f1to2(a3d[i, :, j]) for i in range(a3d.shape[0])\n ], axis=0)\n for j in range(a3d.shape[2])\n ], axis=-1).view(cls)\n assert_equal(type(actual), type(expected))\n assert_equal(actual, expected)\n\n def test_subclass_preservation(self):\n class MinimalSubclass(np.ndarray):\n pass\n self.test_scalar_array(MinimalSubclass)\n self.test_0d_array(MinimalSubclass)\n self.test_axis_insertion(MinimalSubclass)\n\n def test_axis_insertion_ma(self):\n def f1to2(x):\n """produces an asymmetric non-square matrix from x"""\n assert_equal(x.ndim, 1)\n res = x[::-1] * x[1:, None]\n return np.ma.masked_where(res % 5 == 0, res)\n a = np.arange(6 * 3).reshape((6, 3))\n res = apply_along_axis(f1to2, 0, a)\n assert_(isinstance(res, np.ma.masked_array))\n assert_equal(res.ndim, 3)\n assert_array_equal(res[:, :, 0].mask, f1to2(a[:, 0]).mask)\n assert_array_equal(res[:, :, 1].mask, f1to2(a[:, 1]).mask)\n assert_array_equal(res[:, :, 2].mask, f1to2(a[:, 2]).mask)\n\n def test_tuple_func1d(self):\n def sample_1d(x):\n return x[1], x[0]\n res = np.apply_along_axis(sample_1d, 1, np.array([[1, 2], [3, 4]]))\n assert_array_equal(res, np.array([[2, 1], [4, 3]]))\n\n def test_empty(self):\n # can't apply_along_axis when there's no chance to call the function\n def never_call(x):\n assert_(False) # should never be reached\n\n a = np.empty((0, 0))\n assert_raises(ValueError, 
np.apply_along_axis, never_call, 0, a)\n assert_raises(ValueError, np.apply_along_axis, never_call, 1, a)\n\n # but it's sometimes ok with some non-zero dimensions\n def empty_to_1(x):\n assert_(len(x) == 0)\n return 1\n\n a = np.empty((10, 0))\n actual = np.apply_along_axis(empty_to_1, 1, a)\n assert_equal(actual, np.ones(10))\n assert_raises(ValueError, np.apply_along_axis, empty_to_1, 0, a)\n\n def test_with_iterable_object(self):\n # from issue 5248\n d = np.array([\n [{1, 11}, {2, 22}, {3, 33}],\n [{4, 44}, {5, 55}, {6, 66}]\n ])\n actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)\n expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])\n\n assert_equal(actual, expected)\n\n # issue 8642 - assert_equal doesn't detect this!\n for i in np.ndindex(actual.shape):\n assert_equal(type(actual[i]), type(expected[i]))\n\n\nclass TestApplyOverAxes:\n def test_simple(self):\n a = np.arange(24).reshape(2, 3, 4)\n aoa_a = apply_over_axes(np.sum, a, [0, 2])\n assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))\n\n\nclass TestExpandDims:\n def test_functionality(self):\n s = (2, 3, 4, 5)\n a = np.empty(s)\n for axis in range(-5, 4):\n b = expand_dims(a, axis)\n assert_(b.shape[axis] == 1)\n assert_(np.squeeze(b).shape == s)\n\n def test_axis_tuple(self):\n a = np.empty((3, 3, 3))\n assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)\n assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)\n assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)\n assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)\n\n def test_axis_out_of_range(self):\n s = (2, 3, 4, 5)\n a = np.empty(s)\n assert_raises(AxisError, expand_dims, a, -6)\n assert_raises(AxisError, expand_dims, a, 5)\n\n a = np.empty((3, 3, 3))\n assert_raises(AxisError, expand_dims, a, (0, -6))\n assert_raises(AxisError, expand_dims, a, (0, 5))\n\n def test_repeated_axis(self):\n a = np.empty((3, 3, 3))\n assert_raises(ValueError, 
expand_dims, a, axis=(1, 1))\n\n def test_subclasses(self):\n a = np.arange(10).reshape((2, 5))\n a = np.ma.array(a, mask=a % 3 == 0)\n\n expanded = np.expand_dims(a, axis=1)\n assert_(isinstance(expanded, np.ma.MaskedArray))\n assert_equal(expanded.shape, (2, 1, 5))\n assert_equal(expanded.mask.shape, (2, 1, 5))\n\n\nclass TestArraySplit:\n def test_integer_0_split(self):\n a = np.arange(10)\n assert_raises(ValueError, array_split, a, 0)\n\n def test_integer_split(self):\n a = np.arange(10)\n res = array_split(a, 1)\n desired = [np.arange(10)]\n compare_results(res, desired)\n\n res = array_split(a, 2)\n desired = [np.arange(5), np.arange(5, 10)]\n compare_results(res, desired)\n\n res = array_split(a, 3)\n desired = [np.arange(4), np.arange(4, 7), np.arange(7, 10)]\n compare_results(res, desired)\n\n res = array_split(a, 4)\n desired = [np.arange(3), np.arange(3, 6), np.arange(6, 8),\n np.arange(8, 10)]\n compare_results(res, desired)\n\n res = array_split(a, 5)\n desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),\n np.arange(6, 8), np.arange(8, 10)]\n compare_results(res, desired)\n\n res = array_split(a, 6)\n desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),\n np.arange(6, 8), np.arange(8, 9), np.arange(9, 10)]\n compare_results(res, desired)\n\n res = array_split(a, 7)\n desired = [np.arange(2), np.arange(2, 4), np.arange(4, 6),\n np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),\n np.arange(9, 10)]\n compare_results(res, desired)\n\n res = array_split(a, 8)\n desired = [np.arange(2), np.arange(2, 4), np.arange(4, 5),\n np.arange(5, 6), np.arange(6, 7), np.arange(7, 8),\n np.arange(8, 9), np.arange(9, 10)]\n compare_results(res, desired)\n\n res = array_split(a, 9)\n desired = [np.arange(2), np.arange(2, 3), np.arange(3, 4),\n np.arange(4, 5), np.arange(5, 6), np.arange(6, 7),\n np.arange(7, 8), np.arange(8, 9), np.arange(9, 10)]\n compare_results(res, desired)\n\n res = array_split(a, 10)\n desired = [np.arange(1), np.arange(1, 2), 
np.arange(2, 3),\n np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),\n np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),\n np.arange(9, 10)]\n compare_results(res, desired)\n\n res = array_split(a, 11)\n desired = [np.arange(1), np.arange(1, 2), np.arange(2, 3),\n np.arange(3, 4), np.arange(4, 5), np.arange(5, 6),\n np.arange(6, 7), np.arange(7, 8), np.arange(8, 9),\n np.arange(9, 10), np.array([])]\n compare_results(res, desired)\n\n def test_integer_split_2D_rows(self):\n a = np.array([np.arange(10), np.arange(10)])\n res = array_split(a, 3, axis=0)\n tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),\n np.zeros((0, 10))]\n compare_results(res, tgt)\n assert_(a.dtype.type is res[-1].dtype.type)\n\n # Same thing for manual splits:\n res = array_split(a, [0, 1], axis=0)\n tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),\n np.array([np.arange(10)])]\n compare_results(res, tgt)\n assert_(a.dtype.type is res[-1].dtype.type)\n\n def test_integer_split_2D_cols(self):\n a = np.array([np.arange(10), np.arange(10)])\n res = array_split(a, 3, axis=-1)\n desired = [np.array([np.arange(4), np.arange(4)]),\n np.array([np.arange(4, 7), np.arange(4, 7)]),\n np.array([np.arange(7, 10), np.arange(7, 10)])]\n compare_results(res, desired)\n\n def test_integer_split_2D_default(self):\n """ This will fail if we change default axis\n """\n a = np.array([np.arange(10), np.arange(10)])\n res = array_split(a, 3)\n tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),\n np.zeros((0, 10))]\n compare_results(res, tgt)\n assert_(a.dtype.type is res[-1].dtype.type)\n # perhaps should check higher dimensions\n\n @pytest.mark.skipif(not IS_64BIT, reason="Needs 64bit platform")\n def test_integer_split_2D_rows_greater_max_int32(self):\n a = np.broadcast_to([0], (1 << 32, 2))\n res = array_split(a, 4)\n chunk = np.broadcast_to([0], (1 << 30, 2))\n tgt = [chunk] * 4\n for i in range(len(tgt)):\n assert_equal(res[i].shape, tgt[i].shape)\n\n def 
test_index_split_simple(self):\n a = np.arange(10)\n indices = [1, 5, 7]\n res = array_split(a, indices, axis=-1)\n desired = [np.arange(0, 1), np.arange(1, 5), np.arange(5, 7),\n np.arange(7, 10)]\n compare_results(res, desired)\n\n def test_index_split_low_bound(self):\n a = np.arange(10)\n indices = [0, 5, 7]\n res = array_split(a, indices, axis=-1)\n desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),\n np.arange(7, 10)]\n compare_results(res, desired)\n\n def test_index_split_high_bound(self):\n a = np.arange(10)\n indices = [0, 5, 7, 10, 12]\n res = array_split(a, indices, axis=-1)\n desired = [np.array([]), np.arange(0, 5), np.arange(5, 7),\n np.arange(7, 10), np.array([]), np.array([])]\n compare_results(res, desired)\n\n\nclass TestSplit:\n # The split function is essentially the same as array_split,\n # except that it test if splitting will result in an\n # equal split. Only test for this case.\n\n def test_equal_split(self):\n a = np.arange(10)\n res = split(a, 2)\n desired = [np.arange(5), np.arange(5, 10)]\n compare_results(res, desired)\n\n def test_unequal_split(self):\n a = np.arange(10)\n assert_raises(ValueError, split, a, 3)\n\n\nclass TestColumnStack:\n def test_non_iterable(self):\n assert_raises(TypeError, column_stack, 1)\n\n def test_1D_arrays(self):\n # example from docstring\n a = np.array((1, 2, 3))\n b = np.array((2, 3, 4))\n expected = np.array([[1, 2],\n [2, 3],\n [3, 4]])\n actual = np.column_stack((a, b))\n assert_equal(actual, expected)\n\n def test_2D_arrays(self):\n # same as hstack 2D docstring example\n a = np.array([[1], [2], [3]])\n b = np.array([[2], [3], [4]])\n expected = np.array([[1, 2],\n [2, 3],\n [3, 4]])\n actual = np.column_stack((a, b))\n assert_equal(actual, expected)\n\n def test_generator(self):\n with pytest.raises(TypeError, match="arrays to stack must be"):\n column_stack(np.arange(3) for _ in range(2))\n\n\nclass TestDstack:\n def test_non_iterable(self):\n assert_raises(TypeError, dstack, 1)\n\n def 
test_0D_array(self):\n a = np.array(1)\n b = np.array(2)\n res = dstack([a, b])\n desired = np.array([[[1, 2]]])\n assert_array_equal(res, desired)\n\n def test_1D_array(self):\n a = np.array([1])\n b = np.array([2])\n res = dstack([a, b])\n desired = np.array([[[1, 2]]])\n assert_array_equal(res, desired)\n\n def test_2D_array(self):\n a = np.array([[1], [2]])\n b = np.array([[1], [2]])\n res = dstack([a, b])\n desired = np.array([[[1, 1]], [[2, 2, ]]])\n assert_array_equal(res, desired)\n\n def test_2D_array2(self):\n a = np.array([1, 2])\n b = np.array([1, 2])\n res = dstack([a, b])\n desired = np.array([[[1, 1], [2, 2]]])\n assert_array_equal(res, desired)\n\n def test_generator(self):\n with pytest.raises(TypeError, match="arrays to stack must be"):\n dstack(np.arange(3) for _ in range(2))\n\n\n# array_split has more comprehensive test of splitting.\n# only do simple test on hsplit, vsplit, and dsplit\nclass TestHsplit:\n """Only testing for integer splits.\n\n """\n def test_non_iterable(self):\n assert_raises(ValueError, hsplit, 1, 1)\n\n def test_0D_array(self):\n a = np.array(1)\n try:\n hsplit(a, 2)\n assert_(0)\n except ValueError:\n pass\n\n def test_1D_array(self):\n a = np.array([1, 2, 3, 4])\n res = hsplit(a, 2)\n desired = [np.array([1, 2]), np.array([3, 4])]\n compare_results(res, desired)\n\n def test_2D_array(self):\n a = np.array([[1, 2, 3, 4],\n [1, 2, 3, 4]])\n res = hsplit(a, 2)\n desired = [np.array([[1, 2], [1, 2]]), np.array([[3, 4], [3, 4]])]\n compare_results(res, desired)\n\n\nclass TestVsplit:\n """Only testing for integer splits.\n\n """\n def test_non_iterable(self):\n assert_raises(ValueError, vsplit, 1, 1)\n\n def test_0D_array(self):\n a = np.array(1)\n assert_raises(ValueError, vsplit, a, 2)\n\n def test_1D_array(self):\n a = np.array([1, 2, 3, 4])\n try:\n vsplit(a, 2)\n assert_(0)\n except ValueError:\n pass\n\n def test_2D_array(self):\n a = np.array([[1, 2, 3, 4],\n [1, 2, 3, 4]])\n res = vsplit(a, 2)\n desired = 
[np.array([[1, 2, 3, 4]]), np.array([[1, 2, 3, 4]])]\n compare_results(res, desired)\n\n\nclass TestDsplit:\n # Only testing for integer splits.\n def test_non_iterable(self):\n assert_raises(ValueError, dsplit, 1, 1)\n\n def test_0D_array(self):\n a = np.array(1)\n assert_raises(ValueError, dsplit, a, 2)\n\n def test_1D_array(self):\n a = np.array([1, 2, 3, 4])\n assert_raises(ValueError, dsplit, a, 2)\n\n def test_2D_array(self):\n a = np.array([[1, 2, 3, 4],\n [1, 2, 3, 4]])\n try:\n dsplit(a, 2)\n assert_(0)\n except ValueError:\n pass\n\n def test_3D_array(self):\n a = np.array([[[1, 2, 3, 4],\n [1, 2, 3, 4]],\n [[1, 2, 3, 4],\n [1, 2, 3, 4]]])\n res = dsplit(a, 2)\n desired = [np.array([[[1, 2], [1, 2]], [[1, 2], [1, 2]]]),\n np.array([[[3, 4], [3, 4]], [[3, 4], [3, 4]]])]\n compare_results(res, desired)\n\n\nclass TestSqueeze:\n def test_basic(self):\n from numpy.random import rand\n\n a = rand(20, 10, 10, 1, 1)\n b = rand(20, 1, 10, 1, 20)\n c = rand(1, 1, 20, 10)\n assert_array_equal(np.squeeze(a), np.reshape(a, (20, 10, 10)))\n assert_array_equal(np.squeeze(b), np.reshape(b, (20, 10, 20)))\n assert_array_equal(np.squeeze(c), np.reshape(c, (20, 10)))\n\n # Squeezing to 0-dim should still give an ndarray\n a = [[[1.5]]]\n res = np.squeeze(a)\n assert_equal(res, 1.5)\n assert_equal(res.ndim, 0)\n assert_equal(type(res), np.ndarray)\n\n\nclass TestKron:\n def test_basic(self):\n # Using 0-dimensional ndarray\n a = np.array(1)\n b = np.array([[1, 2], [3, 4]])\n k = np.array([[1, 2], [3, 4]])\n assert_array_equal(np.kron(a, b), k)\n a = np.array([[1, 2], [3, 4]])\n b = np.array(1)\n assert_array_equal(np.kron(a, b), k)\n\n # Using 1-dimensional ndarray\n a = np.array([3])\n b = np.array([[1, 2], [3, 4]])\n k = np.array([[3, 6], [9, 12]])\n assert_array_equal(np.kron(a, b), k)\n a = np.array([[1, 2], [3, 4]])\n b = np.array([3])\n assert_array_equal(np.kron(a, b), k)\n\n # Using 3-dimensional ndarray\n a = np.array([[[1]], [[2]]])\n b = np.array([[1, 2], [3, 
4]])\n k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])\n assert_array_equal(np.kron(a, b), k)\n a = np.array([[1, 2], [3, 4]])\n b = np.array([[[1]], [[2]]])\n k = np.array([[[1, 2], [3, 4]], [[2, 4], [6, 8]]])\n assert_array_equal(np.kron(a, b), k)\n\n def test_return_type(self):\n class myarray(np.ndarray):\n __array_priority__ = 1.0\n\n a = np.ones([2, 2])\n ma = myarray(a.shape, a.dtype, a.data)\n assert_equal(type(kron(a, a)), np.ndarray)\n assert_equal(type(kron(ma, ma)), myarray)\n assert_equal(type(kron(a, ma)), myarray)\n assert_equal(type(kron(ma, a)), myarray)\n\n @pytest.mark.parametrize(\n "array_class", [np.asarray, np.asmatrix]\n )\n def test_kron_smoke(self, array_class):\n a = array_class(np.ones([3, 3]))\n b = array_class(np.ones([3, 3]))\n k = array_class(np.ones([9, 9]))\n\n assert_array_equal(np.kron(a, b), k)\n\n def test_kron_ma(self):\n x = np.ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]])\n k = np.ma.array(np.diag([1, 4, 4, 16]),\n mask=~np.array(np.identity(4), dtype=bool))\n\n assert_array_equal(k, np.kron(x, x))\n\n @pytest.mark.parametrize(\n "shape_a,shape_b", [\n ((1, 1), (1, 1)),\n ((1, 2, 3), (4, 5, 6)),\n ((2, 2), (2, 2, 2)),\n ((1, 0), (1, 1)),\n ((2, 0, 2), (2, 2)),\n ((2, 0, 0, 2), (2, 0, 2)),\n ])\n def test_kron_shape(self, shape_a, shape_b):\n a = np.ones(shape_a)\n b = np.ones(shape_b)\n normalised_shape_a = (1,) * max(0, len(shape_b) - len(shape_a)) + shape_a\n normalised_shape_b = (1,) * max(0, len(shape_a) - len(shape_b)) + shape_b\n expected_shape = np.multiply(normalised_shape_a, normalised_shape_b)\n\n k = np.kron(a, b)\n assert np.array_equal(\n k.shape, expected_shape), "Unexpected shape from kron"\n\n\nclass TestTile:\n def test_basic(self):\n a = np.array([0, 1, 2])\n b = [[1, 2], [3, 4]]\n assert_equal(tile(a, 2), [0, 1, 2, 0, 1, 2])\n assert_equal(tile(a, (2, 2)), [[0, 1, 2, 0, 1, 2], [0, 1, 2, 0, 1, 2]])\n assert_equal(tile(a, (1, 2)), [[0, 1, 2, 0, 1, 2]])\n assert_equal(tile(b, 2), [[1, 2, 1, 2], [3, 4, 
3, 4]])\n assert_equal(tile(b, (2, 1)), [[1, 2], [3, 4], [1, 2], [3, 4]])\n assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4],\n [1, 2, 1, 2], [3, 4, 3, 4]])\n\n def test_tile_one_repetition_on_array_gh4679(self):\n a = np.arange(5)\n b = tile(a, 1)\n b += 2\n assert_equal(a, np.arange(5))\n\n def test_empty(self):\n a = np.array([[[]]])\n b = np.array([[], []])\n c = tile(b, 2).shape\n d = tile(a, (3, 2, 5)).shape\n assert_equal(c, (2, 0))\n assert_equal(d, (3, 2, 0))\n\n def test_kroncompare(self):\n from numpy.random import randint\n\n reps = [(2,), (1, 2), (2, 1), (2, 2), (2, 3, 2), (3, 2)]\n shape = [(3,), (2, 3), (3, 4, 3), (3, 2, 3), (4, 3, 2, 4), (2, 2)]\n for s in shape:\n b = randint(0, 10, size=s)\n for r in reps:\n a = np.ones(r, b.dtype)\n large = tile(b, r)\n klarge = kron(a, b)\n assert_equal(large, klarge)\n\n\nclass TestMayShareMemory:\n def test_basic(self):\n d = np.ones((50, 60))\n d2 = np.ones((30, 60, 6))\n assert_(np.may_share_memory(d, d))\n assert_(np.may_share_memory(d, d[::-1]))\n assert_(np.may_share_memory(d, d[::2]))\n assert_(np.may_share_memory(d, d[1:, ::-1]))\n\n assert_(not np.may_share_memory(d[::-1], d2))\n assert_(not np.may_share_memory(d[::2], d2))\n assert_(not np.may_share_memory(d[1:, ::-1], d2))\n assert_(np.may_share_memory(d2[1:, ::-1], d2))\n\n\n# Utility\ndef compare_results(res, desired):\n """Compare lists of arrays."""\n for x, y in zip(res, desired, strict=False):\n assert_array_equal(x, y)\n | .venv\Lib\site-packages\numpy\lib\tests\test_shape_base.py | test_shape_base.py | Python | 28,219 | 0.95 | 0.162362 | 0.051205 | node-utils | 996 | 2025-02-01T09:21:10.918778 | Apache-2.0 | true | 545c02b31adf4d79baf990cff6a83337 |
import pytest\n\nimport numpy as np\nfrom numpy._core._rational_tests import rational\nfrom numpy.lib._stride_tricks_impl import (\n _broadcast_shape,\n as_strided,\n broadcast_arrays,\n broadcast_shapes,\n broadcast_to,\n sliding_window_view,\n)\nfrom numpy.testing import (\n assert_,\n assert_array_equal,\n assert_equal,\n assert_raises,\n assert_raises_regex,\n assert_warns,\n)\n\n\ndef assert_shapes_correct(input_shapes, expected_shape):\n # Broadcast a list of arrays with the given input shapes and check the\n # common output shape.\n\n inarrays = [np.zeros(s) for s in input_shapes]\n outarrays = broadcast_arrays(*inarrays)\n outshapes = [a.shape for a in outarrays]\n expected = [expected_shape] * len(inarrays)\n assert_equal(outshapes, expected)\n\n\ndef assert_incompatible_shapes_raise(input_shapes):\n # Broadcast a list of arrays with the given (incompatible) input shapes\n # and check that they raise a ValueError.\n\n inarrays = [np.zeros(s) for s in input_shapes]\n assert_raises(ValueError, broadcast_arrays, *inarrays)\n\n\ndef assert_same_as_ufunc(shape0, shape1, transposed=False, flipped=False):\n # Broadcast two shapes against each other and check that the data layout\n # is the same as if a ufunc did the broadcasting.\n\n x0 = np.zeros(shape0, dtype=int)\n # Note that multiply.reduce's identity element is 1.0, so when shape1==(),\n # this gives the desired n==1.\n n = int(np.multiply.reduce(shape1))\n x1 = np.arange(n).reshape(shape1)\n if transposed:\n x0 = x0.T\n x1 = x1.T\n if flipped:\n x0 = x0[::-1]\n x1 = x1[::-1]\n # Use the add ufunc to do the broadcasting. 
Since we're adding 0s to x1, the\n # result should be exactly the same as the broadcasted view of x1.\n y = x0 + x1\n b0, b1 = broadcast_arrays(x0, x1)\n assert_array_equal(y, b1)\n\n\ndef test_same():\n x = np.arange(10)\n y = np.arange(10)\n bx, by = broadcast_arrays(x, y)\n assert_array_equal(x, bx)\n assert_array_equal(y, by)\n\ndef test_broadcast_kwargs():\n # ensure that a TypeError is appropriately raised when\n # np.broadcast_arrays() is called with any keyword\n # argument other than 'subok'\n x = np.arange(10)\n y = np.arange(10)\n\n with assert_raises_regex(TypeError, 'got an unexpected keyword'):\n broadcast_arrays(x, y, dtype='float64')\n\n\ndef test_one_off():\n x = np.array([[1, 2, 3]])\n y = np.array([[1], [2], [3]])\n bx, by = broadcast_arrays(x, y)\n bx0 = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])\n by0 = bx0.T\n assert_array_equal(bx0, bx)\n assert_array_equal(by0, by)\n\n\ndef test_same_input_shapes():\n # Check that the final shape is just the input shape.\n\n data = [\n (),\n (1,),\n (3,),\n (0, 1),\n (0, 3),\n (1, 0),\n (3, 0),\n (1, 3),\n (3, 1),\n (3, 3),\n ]\n for shape in data:\n input_shapes = [shape]\n # Single input.\n assert_shapes_correct(input_shapes, shape)\n # Double input.\n input_shapes2 = [shape, shape]\n assert_shapes_correct(input_shapes2, shape)\n # Triple input.\n input_shapes3 = [shape, shape, shape]\n assert_shapes_correct(input_shapes3, shape)\n\n\ndef test_two_compatible_by_ones_input_shapes():\n # Check that two different input shapes of the same length, but some have\n # ones, broadcast to the correct shape.\n\n data = [\n [[(1,), (3,)], (3,)],\n [[(1, 3), (3, 3)], (3, 3)],\n [[(3, 1), (3, 3)], (3, 3)],\n [[(1, 3), (3, 1)], (3, 3)],\n [[(1, 1), (3, 3)], (3, 3)],\n [[(1, 1), (1, 3)], (1, 3)],\n [[(1, 1), (3, 1)], (3, 1)],\n [[(1, 0), (0, 0)], (0, 0)],\n [[(0, 1), (0, 0)], (0, 0)],\n [[(1, 0), (0, 1)], (0, 0)],\n [[(1, 1), (0, 0)], (0, 0)],\n [[(1, 1), (1, 0)], (1, 0)],\n [[(1, 1), (0, 1)], (0, 1)],\n ]\n for 
input_shapes, expected_shape in data:\n assert_shapes_correct(input_shapes, expected_shape)\n # Reverse the input shapes since broadcasting should be symmetric.\n assert_shapes_correct(input_shapes[::-1], expected_shape)\n\n\ndef test_two_compatible_by_prepending_ones_input_shapes():\n # Check that two different input shapes (of different lengths) broadcast\n # to the correct shape.\n\n data = [\n [[(), (3,)], (3,)],\n [[(3,), (3, 3)], (3, 3)],\n [[(3,), (3, 1)], (3, 3)],\n [[(1,), (3, 3)], (3, 3)],\n [[(), (3, 3)], (3, 3)],\n [[(1, 1), (3,)], (1, 3)],\n [[(1,), (3, 1)], (3, 1)],\n [[(1,), (1, 3)], (1, 3)],\n [[(), (1, 3)], (1, 3)],\n [[(), (3, 1)], (3, 1)],\n [[(), (0,)], (0,)],\n [[(0,), (0, 0)], (0, 0)],\n [[(0,), (0, 1)], (0, 0)],\n [[(1,), (0, 0)], (0, 0)],\n [[(), (0, 0)], (0, 0)],\n [[(1, 1), (0,)], (1, 0)],\n [[(1,), (0, 1)], (0, 1)],\n [[(1,), (1, 0)], (1, 0)],\n [[(), (1, 0)], (1, 0)],\n [[(), (0, 1)], (0, 1)],\n ]\n for input_shapes, expected_shape in data:\n assert_shapes_correct(input_shapes, expected_shape)\n # Reverse the input shapes since broadcasting should be symmetric.\n assert_shapes_correct(input_shapes[::-1], expected_shape)\n\n\ndef test_incompatible_shapes_raise_valueerror():\n # Check that a ValueError is raised for incompatible shapes.\n\n data = [\n [(3,), (4,)],\n [(2, 3), (2,)],\n [(3,), (3,), (4,)],\n [(1, 3, 4), (2, 3, 3)],\n ]\n for input_shapes in data:\n assert_incompatible_shapes_raise(input_shapes)\n # Reverse the input shapes since broadcasting should be symmetric.\n assert_incompatible_shapes_raise(input_shapes[::-1])\n\n\ndef test_same_as_ufunc():\n # Check that the data layout is the same as if a ufunc did the operation.\n\n data = [\n [[(1,), (3,)], (3,)],\n [[(1, 3), (3, 3)], (3, 3)],\n [[(3, 1), (3, 3)], (3, 3)],\n [[(1, 3), (3, 1)], (3, 3)],\n [[(1, 1), (3, 3)], (3, 3)],\n [[(1, 1), (1, 3)], (1, 3)],\n [[(1, 1), (3, 1)], (3, 1)],\n [[(1, 0), (0, 0)], (0, 0)],\n [[(0, 1), (0, 0)], (0, 0)],\n [[(1, 0), (0, 1)], (0, 0)],\n 
[[(1, 1), (0, 0)], (0, 0)],\n [[(1, 1), (1, 0)], (1, 0)],\n [[(1, 1), (0, 1)], (0, 1)],\n [[(), (3,)], (3,)],\n [[(3,), (3, 3)], (3, 3)],\n [[(3,), (3, 1)], (3, 3)],\n [[(1,), (3, 3)], (3, 3)],\n [[(), (3, 3)], (3, 3)],\n [[(1, 1), (3,)], (1, 3)],\n [[(1,), (3, 1)], (3, 1)],\n [[(1,), (1, 3)], (1, 3)],\n [[(), (1, 3)], (1, 3)],\n [[(), (3, 1)], (3, 1)],\n [[(), (0,)], (0,)],\n [[(0,), (0, 0)], (0, 0)],\n [[(0,), (0, 1)], (0, 0)],\n [[(1,), (0, 0)], (0, 0)],\n [[(), (0, 0)], (0, 0)],\n [[(1, 1), (0,)], (1, 0)],\n [[(1,), (0, 1)], (0, 1)],\n [[(1,), (1, 0)], (1, 0)],\n [[(), (1, 0)], (1, 0)],\n [[(), (0, 1)], (0, 1)],\n ]\n for input_shapes, expected_shape in data:\n assert_same_as_ufunc(input_shapes[0], input_shapes[1],\n f"Shapes: {input_shapes[0]} {input_shapes[1]}")\n # Reverse the input shapes since broadcasting should be symmetric.\n assert_same_as_ufunc(input_shapes[1], input_shapes[0])\n # Try them transposed, too.\n assert_same_as_ufunc(input_shapes[0], input_shapes[1], True)\n # ... 
and flipped for non-rank-0 inputs in order to test negative\n # strides.\n if () not in input_shapes:\n assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)\n assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)\n\n\ndef test_broadcast_to_succeeds():\n data = [\n [np.array(0), (0,), np.array(0)],\n [np.array(0), (1,), np.zeros(1)],\n [np.array(0), (3,), np.zeros(3)],\n [np.ones(1), (1,), np.ones(1)],\n [np.ones(1), (2,), np.ones(2)],\n [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],\n [np.arange(3), (3,), np.arange(3)],\n [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],\n [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],\n # test if shape is not a tuple\n [np.ones(0), 0, np.ones(0)],\n [np.ones(1), 1, np.ones(1)],\n [np.ones(1), 2, np.ones(2)],\n # these cases with size 0 are strange, but they reproduce the behavior\n # of broadcasting with ufuncs (see test_same_as_ufunc above)\n [np.ones(1), (0,), np.ones(0)],\n [np.ones((1, 2)), (0, 2), np.ones((0, 2))],\n [np.ones((2, 1)), (2, 0), np.ones((2, 0))],\n ]\n for input_array, shape, expected in data:\n actual = broadcast_to(input_array, shape)\n assert_array_equal(expected, actual)\n\n\ndef test_broadcast_to_raises():\n data = [\n [(0,), ()],\n [(1,), ()],\n [(3,), ()],\n [(3,), (1,)],\n [(3,), (2,)],\n [(3,), (4,)],\n [(1, 2), (2, 1)],\n [(1, 1), (1,)],\n [(1,), -1],\n [(1,), (-1,)],\n [(1, 2), (-1, 2)],\n ]\n for orig_shape, target_shape in data:\n arr = np.zeros(orig_shape)\n assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))\n\n\ndef test_broadcast_shape():\n # tests internal _broadcast_shape\n # _broadcast_shape is already exercised indirectly by broadcast_arrays\n # _broadcast_shape is also exercised by the public broadcast_shapes function\n assert_equal(_broadcast_shape(), ())\n assert_equal(_broadcast_shape([1, 2]), (2,))\n assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))\n assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))\n 
assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))\n assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))\n\n # regression tests for gh-5862\n assert_equal(_broadcast_shape(*([np.ones(2)] * 32 + [1])), (2,))\n bad_args = [np.ones(2)] * 32 + [np.ones(3)] * 32\n assert_raises(ValueError, lambda: _broadcast_shape(*bad_args))\n\n\ndef test_broadcast_shapes_succeeds():\n # tests public broadcast_shapes\n data = [\n [[], ()],\n [[()], ()],\n [[(7,)], (7,)],\n [[(1, 2), (2,)], (1, 2)],\n [[(1, 1)], (1, 1)],\n [[(1, 1), (3, 4)], (3, 4)],\n [[(6, 7), (5, 6, 1), (7,), (5, 1, 7)], (5, 6, 7)],\n [[(5, 6, 1)], (5, 6, 1)],\n [[(1, 3), (3, 1)], (3, 3)],\n [[(1, 0), (0, 0)], (0, 0)],\n [[(0, 1), (0, 0)], (0, 0)],\n [[(1, 0), (0, 1)], (0, 0)],\n [[(1, 1), (0, 0)], (0, 0)],\n [[(1, 1), (1, 0)], (1, 0)],\n [[(1, 1), (0, 1)], (0, 1)],\n [[(), (0,)], (0,)],\n [[(0,), (0, 0)], (0, 0)],\n [[(0,), (0, 1)], (0, 0)],\n [[(1,), (0, 0)], (0, 0)],\n [[(), (0, 0)], (0, 0)],\n [[(1, 1), (0,)], (1, 0)],\n [[(1,), (0, 1)], (0, 1)],\n [[(1,), (1, 0)], (1, 0)],\n [[(), (1, 0)], (1, 0)],\n [[(), (0, 1)], (0, 1)],\n [[(1,), (3,)], (3,)],\n [[2, (3, 2)], (3, 2)],\n ]\n for input_shapes, target_shape in data:\n assert_equal(broadcast_shapes(*input_shapes), target_shape)\n\n assert_equal(broadcast_shapes(*([(1, 2)] * 32)), (1, 2))\n assert_equal(broadcast_shapes(*([(1, 2)] * 100)), (1, 2))\n\n # regression tests for gh-5862\n assert_equal(broadcast_shapes(*([(2,)] * 32)), (2,))\n\n\ndef test_broadcast_shapes_raises():\n # tests public broadcast_shapes\n data = [\n [(3,), (4,)],\n [(2, 3), (2,)],\n [(3,), (3,), (4,)],\n [(1, 3, 4), (2, 3, 3)],\n [(1, 2), (3, 1), (3, 2), (10, 5)],\n [2, (2, 3)],\n ]\n for input_shapes in data:\n assert_raises(ValueError, lambda: broadcast_shapes(*input_shapes))\n\n bad_args = [(2,)] * 32 + [(3,)] * 32\n assert_raises(ValueError, lambda: broadcast_shapes(*bad_args))\n\n\ndef test_as_strided():\n a = np.array([None])\n a_view = as_strided(a)\n 
expected = np.array([None])\n assert_array_equal(a_view, np.array([None]))\n\n a = np.array([1, 2, 3, 4])\n a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))\n expected = np.array([1, 3])\n assert_array_equal(a_view, expected)\n\n a = np.array([1, 2, 3, 4])\n a_view = as_strided(a, shape=(3, 4), strides=(0, 1 * a.itemsize))\n expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])\n assert_array_equal(a_view, expected)\n\n # Regression test for gh-5081\n dt = np.dtype([('num', 'i4'), ('obj', 'O')])\n a = np.empty((4,), dtype=dt)\n a['num'] = np.arange(1, 5)\n a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))\n expected_num = [[1, 2, 3, 4]] * 3\n expected_obj = [[None] * 4] * 3\n assert_equal(a_view.dtype, dt)\n assert_array_equal(expected_num, a_view['num'])\n assert_array_equal(expected_obj, a_view['obj'])\n\n # Make sure that void types without fields are kept unchanged\n a = np.empty((4,), dtype='V4')\n a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))\n assert_equal(a.dtype, a_view.dtype)\n\n # Make sure that the only type that could fail is properly handled\n dt = np.dtype({'names': [''], 'formats': ['V4']})\n a = np.empty((4,), dtype=dt)\n a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))\n assert_equal(a.dtype, a_view.dtype)\n\n # Custom dtypes should not be lost (gh-9161)\n r = [rational(i) for i in range(4)]\n a = np.array(r, dtype=rational)\n a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))\n assert_equal(a.dtype, a_view.dtype)\n assert_array_equal([r] * 3, a_view)\n\n\nclass TestSlidingWindowView:\n def test_1d(self):\n arr = np.arange(5)\n arr_view = sliding_window_view(arr, 2)\n expected = np.array([[0, 1],\n [1, 2],\n [2, 3],\n [3, 4]])\n assert_array_equal(arr_view, expected)\n\n def test_2d(self):\n i, j = np.ogrid[:3, :4]\n arr = 10 * i + j\n shape = (2, 2)\n arr_view = sliding_window_view(arr, shape)\n expected = np.array([[[[0, 1], [10, 11]],\n [[1, 2], [11, 12]],\n [[2, 3], 
[12, 13]]],\n [[[10, 11], [20, 21]],\n [[11, 12], [21, 22]],\n [[12, 13], [22, 23]]]])\n assert_array_equal(arr_view, expected)\n\n def test_2d_with_axis(self):\n i, j = np.ogrid[:3, :4]\n arr = 10 * i + j\n arr_view = sliding_window_view(arr, 3, 0)\n expected = np.array([[[0, 10, 20],\n [1, 11, 21],\n [2, 12, 22],\n [3, 13, 23]]])\n assert_array_equal(arr_view, expected)\n\n def test_2d_repeated_axis(self):\n i, j = np.ogrid[:3, :4]\n arr = 10 * i + j\n arr_view = sliding_window_view(arr, (2, 3), (1, 1))\n expected = np.array([[[[0, 1, 2],\n [1, 2, 3]]],\n [[[10, 11, 12],\n [11, 12, 13]]],\n [[[20, 21, 22],\n [21, 22, 23]]]])\n assert_array_equal(arr_view, expected)\n\n def test_2d_without_axis(self):\n i, j = np.ogrid[:4, :4]\n arr = 10 * i + j\n shape = (2, 3)\n arr_view = sliding_window_view(arr, shape)\n expected = np.array([[[[0, 1, 2], [10, 11, 12]],\n [[1, 2, 3], [11, 12, 13]]],\n [[[10, 11, 12], [20, 21, 22]],\n [[11, 12, 13], [21, 22, 23]]],\n [[[20, 21, 22], [30, 31, 32]],\n [[21, 22, 23], [31, 32, 33]]]])\n assert_array_equal(arr_view, expected)\n\n def test_errors(self):\n i, j = np.ogrid[:4, :4]\n arr = 10 * i + j\n with pytest.raises(ValueError, match='cannot contain negative values'):\n sliding_window_view(arr, (-1, 3))\n with pytest.raises(\n ValueError,\n match='must provide window_shape for all dimensions of `x`'):\n sliding_window_view(arr, (1,))\n with pytest.raises(\n ValueError,\n match='Must provide matching length window_shape and axis'):\n sliding_window_view(arr, (1, 3, 4), axis=(0, 1))\n with pytest.raises(\n ValueError,\n match='window shape cannot be larger than input array'):\n sliding_window_view(arr, (5, 5))\n\n def test_writeable(self):\n arr = np.arange(5)\n view = sliding_window_view(arr, 2, writeable=False)\n assert_(not view.flags.writeable)\n with pytest.raises(\n ValueError,\n match='assignment destination is read-only'):\n view[0, 0] = 3\n view = sliding_window_view(arr, 2, writeable=True)\n assert_(view.flags.writeable)\n 
view[0, 1] = 3\n assert_array_equal(arr, np.array([0, 3, 2, 3, 4]))\n\n def test_subok(self):\n class MyArray(np.ndarray):\n pass\n\n arr = np.arange(5).view(MyArray)\n assert_(not isinstance(sliding_window_view(arr, 2,\n subok=False),\n MyArray))\n assert_(isinstance(sliding_window_view(arr, 2, subok=True), MyArray))\n # Default behavior\n assert_(not isinstance(sliding_window_view(arr, 2), MyArray))\n\n\ndef as_strided_writeable():\n arr = np.ones(10)\n view = as_strided(arr, writeable=False)\n assert_(not view.flags.writeable)\n\n # Check that writeable also is fine:\n view = as_strided(arr, writeable=True)\n assert_(view.flags.writeable)\n view[...] = 3\n assert_array_equal(arr, np.full_like(arr, 3))\n\n # Test that things do not break down for readonly:\n arr.flags.writeable = False\n view = as_strided(arr, writeable=False)\n view = as_strided(arr, writeable=True)\n assert_(not view.flags.writeable)\n\n\nclass VerySimpleSubClass(np.ndarray):\n def __new__(cls, *args, **kwargs):\n return np.array(*args, subok=True, **kwargs).view(cls)\n\n\nclass SimpleSubClass(VerySimpleSubClass):\n def __new__(cls, *args, **kwargs):\n self = np.array(*args, subok=True, **kwargs).view(cls)\n self.info = 'simple'\n return self\n\n def __array_finalize__(self, obj):\n self.info = getattr(obj, 'info', '') + ' finalized'\n\n\ndef test_subclasses():\n # test that subclass is preserved only if subok=True\n a = VerySimpleSubClass([1, 2, 3, 4])\n assert_(type(a) is VerySimpleSubClass)\n a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,))\n assert_(type(a_view) is np.ndarray)\n a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)\n assert_(type(a_view) is VerySimpleSubClass)\n # test that if a subclass has __array_finalize__, it is used\n a = SimpleSubClass([1, 2, 3, 4])\n a_view = as_strided(a, shape=(2,), strides=(2 * a.itemsize,), subok=True)\n assert_(type(a_view) is SimpleSubClass)\n assert_(a_view.info == 'simple finalized')\n\n # similar tests for 
broadcast_arrays\n b = np.arange(len(a)).reshape(-1, 1)\n a_view, b_view = broadcast_arrays(a, b)\n assert_(type(a_view) is np.ndarray)\n assert_(type(b_view) is np.ndarray)\n assert_(a_view.shape == b_view.shape)\n a_view, b_view = broadcast_arrays(a, b, subok=True)\n assert_(type(a_view) is SimpleSubClass)\n assert_(a_view.info == 'simple finalized')\n assert_(type(b_view) is np.ndarray)\n assert_(a_view.shape == b_view.shape)\n\n # and for broadcast_to\n shape = (2, 4)\n a_view = broadcast_to(a, shape)\n assert_(type(a_view) is np.ndarray)\n assert_(a_view.shape == shape)\n a_view = broadcast_to(a, shape, subok=True)\n assert_(type(a_view) is SimpleSubClass)\n assert_(a_view.info == 'simple finalized')\n assert_(a_view.shape == shape)\n\n\ndef test_writeable():\n # broadcast_to should return a readonly array\n original = np.array([1, 2, 3])\n result = broadcast_to(original, (2, 3))\n assert_equal(result.flags.writeable, False)\n assert_raises(ValueError, result.__setitem__, slice(None), 0)\n\n # but the result of broadcast_arrays needs to be writeable, to\n # preserve backwards compatibility\n test_cases = [((False,), broadcast_arrays(original,)),\n ((True, False), broadcast_arrays(0, original))]\n for is_broadcast, results in test_cases:\n for array_is_broadcast, result in zip(is_broadcast, results):\n # This will change to False in a future version\n if array_is_broadcast:\n with assert_warns(FutureWarning):\n assert_equal(result.flags.writeable, True)\n with assert_warns(DeprecationWarning):\n result[:] = 0\n # Warning not emitted, writing to the array resets it\n assert_equal(result.flags.writeable, True)\n else:\n # No warning:\n assert_equal(result.flags.writeable, True)\n\n for results in [broadcast_arrays(original),\n broadcast_arrays(0, original)]:\n for result in results:\n # resets the warn_on_write DeprecationWarning\n result.flags.writeable = True\n # check: no warning emitted\n assert_equal(result.flags.writeable, True)\n result[:] = 0\n\n # keep 
readonly input readonly\n original.flags.writeable = False\n _, result = broadcast_arrays(0, original)\n assert_equal(result.flags.writeable, False)\n\n # regression test for GH6491\n shape = (2,)\n strides = [0]\n tricky_array = as_strided(np.array(0), shape, strides)\n other = np.zeros((1,))\n first, second = broadcast_arrays(tricky_array, other)\n assert_(first.shape == second.shape)\n\n\ndef test_writeable_memoryview():\n # The result of broadcast_arrays exports as a non-writeable memoryview\n # because otherwise there is no good way to opt in to the new behaviour\n # (i.e. you would need to set writeable to False explicitly).\n # See gh-13929.\n original = np.array([1, 2, 3])\n\n test_cases = [((False, ), broadcast_arrays(original,)),\n ((True, False), broadcast_arrays(0, original))]\n for is_broadcast, results in test_cases:\n for array_is_broadcast, result in zip(is_broadcast, results):\n # This will change to False in a future version\n if array_is_broadcast:\n # memoryview(result, writable=True) will give warning but cannot\n # be tested using the python API.\n assert memoryview(result).readonly\n else:\n assert not memoryview(result).readonly\n\n\ndef test_reference_types():\n input_array = np.array('a', dtype=object)\n expected = np.array(['a'] * 3, dtype=object)\n actual = broadcast_to(input_array, (3,))\n assert_array_equal(expected, actual)\n\n actual, _ = broadcast_arrays(input_array, np.ones(3))\n assert_array_equal(expected, actual)\n | .venv\Lib\site-packages\numpy\lib\tests\test_stride_tricks.py | test_stride_tricks.py | Python | 23,686 | 0.95 | 0.117378 | 0.119718 | awesome-app | 350 | 2023-10-25T08:41:23.205047 | MIT | true | a6507b70447c82e2d9910fdfd1b399c0 |
"""Test functions for matrix module\n\n"""\nimport pytest\n\nimport numpy as np\nfrom numpy import (\n add,\n arange,\n array,\n diag,\n eye,\n fliplr,\n flipud,\n histogram2d,\n mask_indices,\n ones,\n tri,\n tril_indices,\n tril_indices_from,\n triu_indices,\n triu_indices_from,\n vander,\n zeros,\n)\nfrom numpy.testing import (\n assert_,\n assert_array_almost_equal,\n assert_array_equal,\n assert_array_max_ulp,\n assert_equal,\n assert_raises,\n)\n\n\ndef get_mat(n):\n data = arange(n)\n data = add.outer(data, data)\n return data\n\n\nclass TestEye:\n def test_basic(self):\n assert_equal(eye(4),\n array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]))\n\n assert_equal(eye(4, dtype='f'),\n array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]], 'f'))\n\n assert_equal(eye(3) == 1,\n eye(3, dtype=bool))\n\n def test_uint64(self):\n # Regression test for gh-9982\n assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]]))\n assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)),\n array([[0, 1, 0, 0], [0, 0, 1, 0]]))\n\n def test_diag(self):\n assert_equal(eye(4, k=1),\n array([[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 0]]))\n\n assert_equal(eye(4, k=-1),\n array([[0, 0, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0]]))\n\n def test_2d(self):\n assert_equal(eye(4, 3),\n array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [0, 0, 0]]))\n\n assert_equal(eye(3, 4),\n array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0]]))\n\n def test_diag2d(self):\n assert_equal(eye(3, 4, k=2),\n array([[0, 0, 1, 0],\n [0, 0, 0, 1],\n [0, 0, 0, 0]]))\n\n assert_equal(eye(4, 3, k=-2),\n array([[0, 0, 0],\n [0, 0, 0],\n [1, 0, 0],\n [0, 1, 0]]))\n\n def test_eye_bounds(self):\n assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])\n assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])\n assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])\n assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])\n assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])\n 
assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])\n assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])\n assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])\n assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])\n\n def test_strings(self):\n assert_equal(eye(2, 2, dtype='S3'),\n [[b'1', b''], [b'', b'1']])\n\n def test_bool(self):\n assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])\n\n def test_order(self):\n mat_c = eye(4, 3, k=-1)\n mat_f = eye(4, 3, k=-1, order='F')\n assert_equal(mat_c, mat_f)\n assert mat_c.flags.c_contiguous\n assert not mat_c.flags.f_contiguous\n assert not mat_f.flags.c_contiguous\n assert mat_f.flags.f_contiguous\n\n\nclass TestDiag:\n def test_vector(self):\n vals = (100 * arange(5)).astype('l')\n b = zeros((5, 5))\n for k in range(5):\n b[k, k] = vals[k]\n assert_equal(diag(vals), b)\n b = zeros((7, 7))\n c = b.copy()\n for k in range(5):\n b[k, k + 2] = vals[k]\n c[k + 2, k] = vals[k]\n assert_equal(diag(vals, k=2), b)\n assert_equal(diag(vals, k=-2), c)\n\n def test_matrix(self, vals=None):\n if vals is None:\n vals = (100 * get_mat(5) + 1).astype('l')\n b = zeros((5,))\n for k in range(5):\n b[k] = vals[k, k]\n assert_equal(diag(vals), b)\n b = b * 0\n for k in range(3):\n b[k] = vals[k, k + 2]\n assert_equal(diag(vals, 2), b[:3])\n for k in range(3):\n b[k] = vals[k + 2, k]\n assert_equal(diag(vals, -2), b[:3])\n\n def test_fortran_order(self):\n vals = array((100 * get_mat(5) + 1), order='F', dtype='l')\n self.test_matrix(vals)\n\n def test_diag_bounds(self):\n A = [[1, 2], [3, 4], [5, 6]]\n assert_equal(diag(A, k=2), [])\n assert_equal(diag(A, k=1), [2])\n assert_equal(diag(A, k=0), [1, 4])\n assert_equal(diag(A, k=-1), [3, 6])\n assert_equal(diag(A, k=-2), [5])\n assert_equal(diag(A, k=-3), [])\n\n def test_failure(self):\n assert_raises(ValueError, diag, [[[1]]])\n\n\nclass TestFliplr:\n def test_basic(self):\n assert_raises(ValueError, fliplr, ones(4))\n a = get_mat(4)\n b = a[:, ::-1]\n 
assert_equal(fliplr(a), b)\n a = [[0, 1, 2],\n [3, 4, 5]]\n b = [[2, 1, 0],\n [5, 4, 3]]\n assert_equal(fliplr(a), b)\n\n\nclass TestFlipud:\n def test_basic(self):\n a = get_mat(4)\n b = a[::-1, :]\n assert_equal(flipud(a), b)\n a = [[0, 1, 2],\n [3, 4, 5]]\n b = [[3, 4, 5],\n [0, 1, 2]]\n assert_equal(flipud(a), b)\n\n\nclass TestHistogram2d:\n def test_simple(self):\n x = array(\n [0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])\n y = array(\n [0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])\n xedges = np.linspace(0, 1, 10)\n yedges = np.linspace(0, 1, 10)\n H = histogram2d(x, y, (xedges, yedges))[0]\n answer = array(\n [[0, 0, 0, 1, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [1, 0, 1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0]])\n assert_array_equal(H.T, answer)\n H = histogram2d(x, y, xedges)[0]\n assert_array_equal(H.T, answer)\n H, xedges, yedges = histogram2d(list(range(10)), list(range(10)))\n assert_array_equal(H, eye(10, 10))\n assert_array_equal(xedges, np.linspace(0, 9, 11))\n assert_array_equal(yedges, np.linspace(0, 9, 11))\n\n def test_asym(self):\n x = array([1, 1, 2, 3, 4, 4, 4, 5])\n y = array([1, 3, 2, 0, 1, 2, 3, 4])\n H, xed, yed = histogram2d(\n x, y, (6, 5), range=[[0, 6], [0, 5]], density=True)\n answer = array(\n [[0., 0, 0, 0, 0],\n [0, 1, 0, 1, 0],\n [0, 0, 1, 0, 0],\n [1, 0, 0, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 1]])\n assert_array_almost_equal(H, answer / 8., 3)\n assert_array_equal(xed, np.linspace(0, 6, 7))\n assert_array_equal(yed, np.linspace(0, 5, 6))\n\n def test_density(self):\n x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])\n y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])\n H, xed, yed = histogram2d(\n x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True)\n answer = array([[1, 1, .5],\n [1, 1, .5],\n [.5, .5, .25]]) / 9.\n 
assert_array_almost_equal(H, answer, 3)\n\n def test_all_outliers(self):\n r = np.random.rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6\n H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))\n assert_array_equal(H, 0)\n\n def test_empty(self):\n a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))\n assert_array_max_ulp(a, array([[0.]]))\n\n a, edge1, edge2 = histogram2d([], [], bins=4)\n assert_array_max_ulp(a, np.zeros((4, 4)))\n\n def test_binparameter_combination(self):\n x = array(\n [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,\n 0.59944483, 1])\n y = array(\n [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,\n 0.15886423, 1])\n edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)\n H, xe, ye = histogram2d(x, y, (edges, 4))\n answer = array(\n [[2., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 1., 0., 0.],\n [1., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 1.]])\n assert_array_equal(H, answer)\n assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))\n H, xe, ye = histogram2d(x, y, (4, edges))\n answer = array(\n [[1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],\n [0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])\n assert_array_equal(H, answer)\n assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))\n\n def test_dispatch(self):\n class ShouldDispatch:\n def __array_function__(self, function, types, args, kwargs):\n return types, args, kwargs\n\n xy = [1, 2]\n s_d = ShouldDispatch()\n r = histogram2d(s_d, xy)\n # Cannot use assert_equal since that dispatches...\n assert_(r == ((ShouldDispatch,), (s_d, xy), {}))\n r = histogram2d(xy, s_d)\n assert_(r == ((ShouldDispatch,), (xy, s_d), {}))\n r = histogram2d(xy, xy, bins=s_d)\n assert_(r, ((ShouldDispatch,), (xy, xy), {'bins': s_d}))\n r = histogram2d(xy, xy, bins=[s_d, 5])\n assert_(r, ((ShouldDispatch,), (xy, xy), 
{'bins': [s_d, 5]}))\n assert_raises(Exception, histogram2d, xy, xy, bins=[s_d])\n r = histogram2d(xy, xy, weights=s_d)\n assert_(r, ((ShouldDispatch,), (xy, xy), {'weights': s_d}))\n\n @pytest.mark.parametrize(("x_len", "y_len"), [(10, 11), (20, 19)])\n def test_bad_length(self, x_len, y_len):\n x, y = np.ones(x_len), np.ones(y_len)\n with pytest.raises(ValueError,\n match='x and y must have the same length.'):\n histogram2d(x, y)\n\n\nclass TestTri:\n def test_dtype(self):\n out = array([[1, 0, 0],\n [1, 1, 0],\n [1, 1, 1]])\n assert_array_equal(tri(3), out)\n assert_array_equal(tri(3, dtype=bool), out.astype(bool))\n\n\ndef test_tril_triu_ndim2():\n for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:\n a = np.ones((2, 2), dtype=dtype)\n b = np.tril(a)\n c = np.triu(a)\n assert_array_equal(b, [[1, 0], [1, 1]])\n assert_array_equal(c, b.T)\n # should return the same dtype as the original array\n assert_equal(b.dtype, a.dtype)\n assert_equal(c.dtype, a.dtype)\n\n\ndef test_tril_triu_ndim3():\n for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:\n a = np.array([\n [[1, 1], [1, 1]],\n [[1, 1], [1, 0]],\n [[1, 1], [0, 0]],\n ], dtype=dtype)\n a_tril_desired = np.array([\n [[1, 0], [1, 1]],\n [[1, 0], [1, 0]],\n [[1, 0], [0, 0]],\n ], dtype=dtype)\n a_triu_desired = np.array([\n [[1, 1], [0, 1]],\n [[1, 1], [0, 0]],\n [[1, 1], [0, 0]],\n ], dtype=dtype)\n a_triu_observed = np.triu(a)\n a_tril_observed = np.tril(a)\n assert_array_equal(a_triu_observed, a_triu_desired)\n assert_array_equal(a_tril_observed, a_tril_desired)\n assert_equal(a_triu_observed.dtype, a.dtype)\n assert_equal(a_tril_observed.dtype, a.dtype)\n\n\ndef test_tril_triu_with_inf():\n # Issue 4859\n arr = np.array([[1, 1, np.inf],\n [1, 1, 1],\n [np.inf, 1, 1]])\n out_tril = np.array([[1, 0, 0],\n [1, 1, 0],\n [np.inf, 1, 1]])\n out_triu = out_tril.T\n assert_array_equal(np.triu(arr), out_triu)\n assert_array_equal(np.tril(arr), out_tril)\n\n\ndef test_tril_triu_dtype():\n 
# Issue 4916\n # tril and triu should return the same dtype as input\n for c in np.typecodes['All']:\n if c == 'V':\n continue\n arr = np.zeros((3, 3), dtype=c)\n assert_equal(np.triu(arr).dtype, arr.dtype)\n assert_equal(np.tril(arr).dtype, arr.dtype)\n\n # check special cases\n arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'],\n ['2004-01-01T12:00', '2003-01-03T13:45']],\n dtype='datetime64')\n assert_equal(np.triu(arr).dtype, arr.dtype)\n assert_equal(np.tril(arr).dtype, arr.dtype)\n\n arr = np.zeros((3, 3), dtype='f4,f4')\n assert_equal(np.triu(arr).dtype, arr.dtype)\n assert_equal(np.tril(arr).dtype, arr.dtype)\n\n\ndef test_mask_indices():\n # simple test without offset\n iu = mask_indices(3, np.triu)\n a = np.arange(9).reshape(3, 3)\n assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8]))\n # Now with an offset\n iu1 = mask_indices(3, np.triu, 1)\n assert_array_equal(a[iu1], array([1, 2, 5]))\n\n\ndef test_tril_indices():\n # indices without and with offset\n il1 = tril_indices(4)\n il2 = tril_indices(4, k=2)\n il3 = tril_indices(4, m=5)\n il4 = tril_indices(4, k=2, m=5)\n\n a = np.array([[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16]])\n b = np.arange(1, 21).reshape(4, 5)\n\n # indexing:\n assert_array_equal(a[il1],\n array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))\n assert_array_equal(b[il3],\n array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))\n\n # And for assigning values:\n a[il1] = -1\n assert_array_equal(a,\n array([[-1, 2, 3, 4],\n [-1, -1, 7, 8],\n [-1, -1, -1, 12],\n [-1, -1, -1, -1]]))\n b[il3] = -1\n assert_array_equal(b,\n array([[-1, 2, 3, 4, 5],\n [-1, -1, 8, 9, 10],\n [-1, -1, -1, 14, 15],\n [-1, -1, -1, -1, 20]]))\n # These cover almost the whole array (two diagonals right of the main one):\n a[il2] = -10\n assert_array_equal(a,\n array([[-10, -10, -10, 4],\n [-10, -10, -10, -10],\n [-10, -10, -10, -10],\n [-10, -10, -10, -10]]))\n b[il4] = -10\n assert_array_equal(b,\n array([[-10, -10, -10, 4, 5],\n [-10, -10, -10, -10, 
10],\n [-10, -10, -10, -10, -10],\n [-10, -10, -10, -10, -10]]))\n\n\nclass TestTriuIndices:\n def test_triu_indices(self):\n iu1 = triu_indices(4)\n iu2 = triu_indices(4, k=2)\n iu3 = triu_indices(4, m=5)\n iu4 = triu_indices(4, k=2, m=5)\n\n a = np.array([[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 16]])\n b = np.arange(1, 21).reshape(4, 5)\n\n # Both for indexing:\n assert_array_equal(a[iu1],\n array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))\n assert_array_equal(b[iu3],\n array([1, 2, 3, 4, 5, 7, 8, 9,\n 10, 13, 14, 15, 19, 20]))\n\n # And for assigning values:\n a[iu1] = -1\n assert_array_equal(a,\n array([[-1, -1, -1, -1],\n [5, -1, -1, -1],\n [9, 10, -1, -1],\n [13, 14, 15, -1]]))\n b[iu3] = -1\n assert_array_equal(b,\n array([[-1, -1, -1, -1, -1],\n [6, -1, -1, -1, -1],\n [11, 12, -1, -1, -1],\n [16, 17, 18, -1, -1]]))\n\n # These cover almost the whole array (two diagonals right of the\n # main one):\n a[iu2] = -10\n assert_array_equal(a,\n array([[-1, -1, -10, -10],\n [5, -1, -1, -10],\n [9, 10, -1, -1],\n [13, 14, 15, -1]]))\n b[iu4] = -10\n assert_array_equal(b,\n array([[-1, -1, -10, -10, -10],\n [6, -1, -1, -10, -10],\n [11, 12, -1, -1, -10],\n [16, 17, 18, -1, -1]]))\n\n\nclass TestTrilIndicesFrom:\n def test_exceptions(self):\n assert_raises(ValueError, tril_indices_from, np.ones((2,)))\n assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2)))\n # assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))\n\n\nclass TestTriuIndicesFrom:\n def test_exceptions(self):\n assert_raises(ValueError, triu_indices_from, np.ones((2,)))\n assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))\n # assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))\n\n\nclass TestVander:\n def test_basic(self):\n c = np.array([0, 1, -2, 3])\n v = vander(c)\n powers = np.array([[0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1],\n [16, -8, 4, -2, 1],\n [81, 27, 9, 3, 1]])\n # Check default value of N:\n assert_array_equal(v, powers[:, 1:])\n # Check 
a range of N values, including 0 and 5 (greater than default)\n m = powers.shape[1]\n for n in range(6):\n v = vander(c, N=n)\n assert_array_equal(v, powers[:, m - n:m])\n\n def test_dtypes(self):\n c = array([11, -12, 13], dtype=np.int8)\n v = vander(c)\n expected = np.array([[121, 11, 1],\n [144, -12, 1],\n [169, 13, 1]])\n assert_array_equal(v, expected)\n\n c = array([1.0 + 1j, 1.0 - 1j])\n v = vander(c, N=3)\n expected = np.array([[2j, 1 + 1j, 1],\n [-2j, 1 - 1j, 1]])\n # The data is floating point, but the values are small integers,\n # so assert_array_equal *should* be safe here (rather than, say,\n # assert_array_almost_equal).\n assert_array_equal(v, expected)\n | .venv\Lib\site-packages\numpy\lib\tests\test_twodim_base.py | test_twodim_base.py | Python | 19,484 | 0.95 | 0.118068 | 0.049383 | react-lib | 505 | 2023-11-14T21:48:59.605507 | Apache-2.0 | true | 67f9f1674e5da760318bdfc6da0d886a |
import numpy as np\nfrom numpy import (\n common_type,\n iscomplex,\n iscomplexobj,\n isneginf,\n isposinf,\n isreal,\n isrealobj,\n mintypecode,\n nan_to_num,\n real_if_close,\n)\nfrom numpy.testing import assert_, assert_array_equal, assert_equal\n\n\ndef assert_all(x):\n assert_(np.all(x), x)\n\n\nclass TestCommonType:\n def test_basic(self):\n ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)\n af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)\n af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)\n af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)\n acs = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex64)\n acd = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.complex128)\n assert_(common_type(ai32) == np.float64)\n assert_(common_type(af16) == np.float16)\n assert_(common_type(af32) == np.float32)\n assert_(common_type(af64) == np.float64)\n assert_(common_type(acs) == np.complex64)\n assert_(common_type(acd) == np.complex128)\n\n\nclass TestMintypecode:\n\n def test_default_1(self):\n for itype in '1bcsuwil':\n assert_equal(mintypecode(itype), 'd')\n assert_equal(mintypecode('f'), 'f')\n assert_equal(mintypecode('d'), 'd')\n assert_equal(mintypecode('F'), 'F')\n assert_equal(mintypecode('D'), 'D')\n\n def test_default_2(self):\n for itype in '1bcsuwil':\n assert_equal(mintypecode(itype + 'f'), 'f')\n assert_equal(mintypecode(itype + 'd'), 'd')\n assert_equal(mintypecode(itype + 'F'), 'F')\n assert_equal(mintypecode(itype + 'D'), 'D')\n assert_equal(mintypecode('ff'), 'f')\n assert_equal(mintypecode('fd'), 'd')\n assert_equal(mintypecode('fF'), 'F')\n assert_equal(mintypecode('fD'), 'D')\n assert_equal(mintypecode('df'), 'd')\n assert_equal(mintypecode('dd'), 'd')\n #assert_equal(mintypecode('dF',savespace=1),'F')\n assert_equal(mintypecode('dF'), 'D')\n assert_equal(mintypecode('dD'), 'D')\n assert_equal(mintypecode('Ff'), 'F')\n #assert_equal(mintypecode('Fd',savespace=1),'F')\n assert_equal(mintypecode('Fd'), 'D')\n 
assert_equal(mintypecode('FF'), 'F')\n assert_equal(mintypecode('FD'), 'D')\n assert_equal(mintypecode('Df'), 'D')\n assert_equal(mintypecode('Dd'), 'D')\n assert_equal(mintypecode('DF'), 'D')\n assert_equal(mintypecode('DD'), 'D')\n\n def test_default_3(self):\n assert_equal(mintypecode('fdF'), 'D')\n #assert_equal(mintypecode('fdF',savespace=1),'F')\n assert_equal(mintypecode('fdD'), 'D')\n assert_equal(mintypecode('fFD'), 'D')\n assert_equal(mintypecode('dFD'), 'D')\n\n assert_equal(mintypecode('ifd'), 'd')\n assert_equal(mintypecode('ifF'), 'F')\n assert_equal(mintypecode('ifD'), 'D')\n assert_equal(mintypecode('idF'), 'D')\n #assert_equal(mintypecode('idF',savespace=1),'F')\n assert_equal(mintypecode('idD'), 'D')\n\n\nclass TestIsscalar:\n\n def test_basic(self):\n assert_(np.isscalar(3))\n assert_(not np.isscalar([3]))\n assert_(not np.isscalar((3,)))\n assert_(np.isscalar(3j))\n assert_(np.isscalar(4.0))\n\n\nclass TestReal:\n\n def test_real(self):\n y = np.random.rand(10,)\n assert_array_equal(y, np.real(y))\n\n y = np.array(1)\n out = np.real(y)\n assert_array_equal(y, out)\n assert_(isinstance(out, np.ndarray))\n\n y = 1\n out = np.real(y)\n assert_equal(y, out)\n assert_(not isinstance(out, np.ndarray))\n\n def test_cmplx(self):\n y = np.random.rand(10,) + 1j * np.random.rand(10,)\n assert_array_equal(y.real, np.real(y))\n\n y = np.array(1 + 1j)\n out = np.real(y)\n assert_array_equal(y.real, out)\n assert_(isinstance(out, np.ndarray))\n\n y = 1 + 1j\n out = np.real(y)\n assert_equal(1.0, out)\n assert_(not isinstance(out, np.ndarray))\n\n\nclass TestImag:\n\n def test_real(self):\n y = np.random.rand(10,)\n assert_array_equal(0, np.imag(y))\n\n y = np.array(1)\n out = np.imag(y)\n assert_array_equal(0, out)\n assert_(isinstance(out, np.ndarray))\n\n y = 1\n out = np.imag(y)\n assert_equal(0, out)\n assert_(not isinstance(out, np.ndarray))\n\n def test_cmplx(self):\n y = np.random.rand(10,) + 1j * np.random.rand(10,)\n assert_array_equal(y.imag, 
np.imag(y))\n\n y = np.array(1 + 1j)\n out = np.imag(y)\n assert_array_equal(y.imag, out)\n assert_(isinstance(out, np.ndarray))\n\n y = 1 + 1j\n out = np.imag(y)\n assert_equal(1.0, out)\n assert_(not isinstance(out, np.ndarray))\n\n\nclass TestIscomplex:\n\n def test_fail(self):\n z = np.array([-1, 0, 1])\n res = iscomplex(z)\n assert_(not np.any(res, axis=0))\n\n def test_pass(self):\n z = np.array([-1j, 1, 0])\n res = iscomplex(z)\n assert_array_equal(res, [1, 0, 0])\n\n\nclass TestIsreal:\n\n def test_pass(self):\n z = np.array([-1, 0, 1j])\n res = isreal(z)\n assert_array_equal(res, [1, 1, 0])\n\n def test_fail(self):\n z = np.array([-1j, 1, 0])\n res = isreal(z)\n assert_array_equal(res, [0, 1, 1])\n\n\nclass TestIscomplexobj:\n\n def test_basic(self):\n z = np.array([-1, 0, 1])\n assert_(not iscomplexobj(z))\n z = np.array([-1j, 0, -1])\n assert_(iscomplexobj(z))\n\n def test_scalar(self):\n assert_(not iscomplexobj(1.0))\n assert_(iscomplexobj(1 + 0j))\n\n def test_list(self):\n assert_(iscomplexobj([3, 1 + 0j, True]))\n assert_(not iscomplexobj([3, 1, True]))\n\n def test_duck(self):\n class DummyComplexArray:\n @property\n def dtype(self):\n return np.dtype(complex)\n dummy = DummyComplexArray()\n assert_(iscomplexobj(dummy))\n\n def test_pandas_duck(self):\n # This tests a custom np.dtype duck-typed class, such as used by pandas\n # (pandas.core.dtypes)\n class PdComplex(np.complex128):\n pass\n\n class PdDtype:\n name = 'category'\n names = None\n type = PdComplex\n kind = 'c'\n str = '<c16'\n base = np.dtype('complex128')\n\n class DummyPd:\n @property\n def dtype(self):\n return PdDtype\n dummy = DummyPd()\n assert_(iscomplexobj(dummy))\n\n def test_custom_dtype_duck(self):\n class MyArray(list):\n @property\n def dtype(self):\n return complex\n\n a = MyArray([1 + 0j, 2 + 0j, 3 + 0j])\n assert_(iscomplexobj(a))\n\n\nclass TestIsrealobj:\n def test_basic(self):\n z = np.array([-1, 0, 1])\n assert_(isrealobj(z))\n z = np.array([-1j, 0, -1])\n 
assert_(not isrealobj(z))\n\n\nclass TestIsnan:\n\n def test_goodvalues(self):\n z = np.array((-1., 0., 1.))\n res = np.isnan(z) == 0\n assert_all(np.all(res, axis=0))\n\n def test_posinf(self):\n with np.errstate(divide='ignore'):\n assert_all(np.isnan(np.array((1.,)) / 0.) == 0)\n\n def test_neginf(self):\n with np.errstate(divide='ignore'):\n assert_all(np.isnan(np.array((-1.,)) / 0.) == 0)\n\n def test_ind(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isnan(np.array((0.,)) / 0.) == 1)\n\n def test_integer(self):\n assert_all(np.isnan(1) == 0)\n\n def test_complex(self):\n assert_all(np.isnan(1 + 1j) == 0)\n\n def test_complex1(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isnan(np.array(0 + 0j) / 0.) == 1)\n\n\nclass TestIsfinite:\n # Fixme, wrong place, isfinite now ufunc\n\n def test_goodvalues(self):\n z = np.array((-1., 0., 1.))\n res = np.isfinite(z) == 1\n assert_all(np.all(res, axis=0))\n\n def test_posinf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array((1.,)) / 0.) == 0)\n\n def test_neginf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array((-1.,)) / 0.) == 0)\n\n def test_ind(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array((0.,)) / 0.) == 0)\n\n def test_integer(self):\n assert_all(np.isfinite(1) == 1)\n\n def test_complex(self):\n assert_all(np.isfinite(1 + 1j) == 1)\n\n def test_complex1(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isfinite(np.array(1 + 1j) / 0.) == 0)\n\n\nclass TestIsinf:\n # Fixme, wrong place, isinf now ufunc\n\n def test_goodvalues(self):\n z = np.array((-1., 0., 1.))\n res = np.isinf(z) == 0\n assert_all(np.all(res, axis=0))\n\n def test_posinf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array((1.,)) / 0.) 
== 1)\n\n def test_posinf_scalar(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array(1.,) / 0.) == 1)\n\n def test_neginf(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array((-1.,)) / 0.) == 1)\n\n def test_neginf_scalar(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array(-1.) / 0.) == 1)\n\n def test_ind(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n assert_all(np.isinf(np.array((0.,)) / 0.) == 0)\n\n\nclass TestIsposinf:\n\n def test_generic(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = isposinf(np.array((-1., 0, 1)) / 0.)\n assert_(vals[0] == 0)\n assert_(vals[1] == 0)\n assert_(vals[2] == 1)\n\n\nclass TestIsneginf:\n\n def test_generic(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = isneginf(np.array((-1., 0, 1)) / 0.)\n assert_(vals[0] == 1)\n assert_(vals[1] == 0)\n assert_(vals[2] == 0)\n\n\nclass TestNanToNum:\n\n def test_generic(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = nan_to_num(np.array((-1., 0, 1)) / 0.)\n assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))\n assert_(vals[1] == 0)\n assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))\n assert_equal(type(vals), np.ndarray)\n\n # perform the same tests but with nan, posinf and neginf keywords\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = nan_to_num(np.array((-1., 0, 1)) / 0.,\n nan=10, posinf=20, neginf=30)\n assert_equal(vals, [30, 10, 20])\n assert_all(np.isfinite(vals[[0, 2]]))\n assert_equal(type(vals), np.ndarray)\n\n # perform the same test but in-place\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = np.array((-1., 0, 1)) / 0.\n result = nan_to_num(vals, copy=False)\n\n assert_(result is vals)\n assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))\n assert_(vals[1] == 0)\n assert_all(vals[2] > 1e10) and 
assert_all(np.isfinite(vals[2]))\n assert_equal(type(vals), np.ndarray)\n\n # perform the same test but in-place\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = np.array((-1., 0, 1)) / 0.\n result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)\n\n assert_(result is vals)\n assert_equal(vals, [30, 10, 20])\n assert_all(np.isfinite(vals[[0, 2]]))\n assert_equal(type(vals), np.ndarray)\n\n def test_array(self):\n vals = nan_to_num([1])\n assert_array_equal(vals, np.array([1], int))\n assert_equal(type(vals), np.ndarray)\n vals = nan_to_num([1], nan=10, posinf=20, neginf=30)\n assert_array_equal(vals, np.array([1], int))\n assert_equal(type(vals), np.ndarray)\n\n def test_integer(self):\n vals = nan_to_num(1)\n assert_all(vals == 1)\n assert_equal(type(vals), np.int_)\n vals = nan_to_num(1, nan=10, posinf=20, neginf=30)\n assert_all(vals == 1)\n assert_equal(type(vals), np.int_)\n\n def test_float(self):\n vals = nan_to_num(1.0)\n assert_all(vals == 1.0)\n assert_equal(type(vals), np.float64)\n vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)\n assert_all(vals == 1.1)\n assert_equal(type(vals), np.float64)\n\n def test_complex_good(self):\n vals = nan_to_num(1 + 1j)\n assert_all(vals == 1 + 1j)\n assert_equal(type(vals), np.complex128)\n vals = nan_to_num(1 + 1j, nan=10, posinf=20, neginf=30)\n assert_all(vals == 1 + 1j)\n assert_equal(type(vals), np.complex128)\n\n def test_complex_bad(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n v = 1 + 1j\n v += np.array(0 + 1.j) / 0.\n vals = nan_to_num(v)\n # !! This is actually (unexpectedly) zero\n assert_all(np.isfinite(vals))\n assert_equal(type(vals), np.complex128)\n\n def test_complex_bad2(self):\n with np.errstate(divide='ignore', invalid='ignore'):\n v = 1 + 1j\n v += np.array(-1 + 1.j) / 0.\n vals = nan_to_num(v)\n assert_all(np.isfinite(vals))\n assert_equal(type(vals), np.complex128)\n # Fixme\n #assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))\n # !! 
This is actually (unexpectedly) positive\n # !! inf. Comment out for now, and see if it\n # !! changes\n #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))\n\n def test_do_not_rewrite_previous_keyword(self):\n # This is done to test that when, for instance, nan=np.inf then these\n # values are not rewritten by posinf keyword to the posinf value.\n with np.errstate(divide='ignore', invalid='ignore'):\n vals = nan_to_num(np.array((-1., 0, 1)) / 0., nan=np.inf, posinf=999)\n assert_all(np.isfinite(vals[[0, 2]]))\n assert_all(vals[0] < -1e10)\n assert_equal(vals[[1, 2]], [np.inf, 999])\n assert_equal(type(vals), np.ndarray)\n\n\nclass TestRealIfClose:\n\n def test_basic(self):\n a = np.random.rand(10)\n b = real_if_close(a + 1e-15j)\n assert_all(isrealobj(b))\n assert_array_equal(a, b)\n b = real_if_close(a + 1e-7j)\n assert_all(iscomplexobj(b))\n b = real_if_close(a + 1e-7j, tol=1e-6)\n assert_all(isrealobj(b))\n | .venv\Lib\site-packages\numpy\lib\tests\test_type_check.py | test_type_check.py | Python | 15,269 | 0.95 | 0.173362 | 0.053619 | node-utils | 887 | 2025-02-27T23:30:57.698590 | BSD-3-Clause | true | e4ef0a469cf1a521d4720a175bee8fef |
import numpy as np\nfrom numpy import fix, isneginf, isposinf\nfrom numpy.testing import assert_, assert_array_equal, assert_equal, assert_raises\n\n\nclass TestUfunclike:\n\n def test_isposinf(self):\n a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])\n out = np.zeros(a.shape, bool)\n tgt = np.array([True, False, False, False, False, False])\n\n res = isposinf(a)\n assert_equal(res, tgt)\n res = isposinf(a, out)\n assert_equal(res, tgt)\n assert_equal(out, tgt)\n\n a = a.astype(np.complex128)\n with assert_raises(TypeError):\n isposinf(a)\n\n def test_isneginf(self):\n a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])\n out = np.zeros(a.shape, bool)\n tgt = np.array([False, True, False, False, False, False])\n\n res = isneginf(a)\n assert_equal(res, tgt)\n res = isneginf(a, out)\n assert_equal(res, tgt)\n assert_equal(out, tgt)\n\n a = a.astype(np.complex128)\n with assert_raises(TypeError):\n isneginf(a)\n\n def test_fix(self):\n a = np.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])\n out = np.zeros(a.shape, float)\n tgt = np.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]])\n\n res = fix(a)\n assert_equal(res, tgt)\n res = fix(a, out)\n assert_equal(res, tgt)\n assert_equal(out, tgt)\n assert_equal(fix(3.14), 3)\n\n def test_fix_with_subclass(self):\n class MyArray(np.ndarray):\n def __new__(cls, data, metadata=None):\n res = np.array(data, copy=True).view(cls)\n res.metadata = metadata\n return res\n\n def __array_wrap__(self, obj, context=None, return_scalar=False):\n if not isinstance(obj, MyArray):\n obj = obj.view(MyArray)\n if obj.metadata is None:\n obj.metadata = self.metadata\n return obj\n\n def __array_finalize__(self, obj):\n self.metadata = getattr(obj, 'metadata', None)\n return self\n\n a = np.array([1.1, -1.1])\n m = MyArray(a, metadata='foo')\n f = fix(m)\n assert_array_equal(f, np.array([1, -1]))\n assert_(isinstance(f, MyArray))\n assert_equal(f.metadata, 'foo')\n\n # check 0d arrays don't decay to scalars\n m0d = m[0, 
...]\n m0d.metadata = 'bar'\n f0d = fix(m0d)\n assert_(isinstance(f0d, MyArray))\n assert_equal(f0d.metadata, 'bar')\n\n def test_scalar(self):\n x = np.inf\n actual = np.isposinf(x)\n expected = np.True_\n assert_equal(actual, expected)\n assert_equal(type(actual), type(expected))\n\n x = -3.4\n actual = np.fix(x)\n expected = np.float64(-3.0)\n assert_equal(actual, expected)\n assert_equal(type(actual), type(expected))\n\n out = np.array(0.0)\n actual = np.fix(x, out=out)\n assert_(actual is out)\n | .venv\Lib\site-packages\numpy\lib\tests\test_ufunclike.py | test_ufunclike.py | Python | 3,112 | 0.95 | 0.123711 | 0.012658 | vue-tools | 541 | 2024-03-22T06:21:11.325890 | BSD-3-Clause | true | bb59ea38c8541de49baaf139f6ae3c80 |
from io import StringIO\n\nimport pytest\n\nimport numpy as np\nimport numpy.lib._utils_impl as _utils_impl\nfrom numpy.testing import assert_raises_regex\n\n\ndef test_assert_raises_regex_context_manager():\n with assert_raises_regex(ValueError, 'no deprecation warning'):\n raise ValueError('no deprecation warning')\n\n\ndef test_info_method_heading():\n # info(class) should only print "Methods:" heading if methods exist\n\n class NoPublicMethods:\n pass\n\n class WithPublicMethods:\n def first_method():\n pass\n\n def _has_method_heading(cls):\n out = StringIO()\n np.info(cls, output=out)\n return 'Methods:' in out.getvalue()\n\n assert _has_method_heading(WithPublicMethods)\n assert not _has_method_heading(NoPublicMethods)\n\n\ndef test_drop_metadata():\n def _compare_dtypes(dt1, dt2):\n return np.can_cast(dt1, dt2, casting='no')\n\n # structured dtype\n dt = np.dtype([('l1', [('l2', np.dtype('S8', metadata={'msg': 'toto'}))])],\n metadata={'msg': 'titi'})\n dt_m = _utils_impl.drop_metadata(dt)\n assert _compare_dtypes(dt, dt_m) is True\n assert dt_m.metadata is None\n assert dt_m['l1'].metadata is None\n assert dt_m['l1']['l2'].metadata is None\n\n # alignment\n dt = np.dtype([('x', '<f8'), ('y', '<i4')],\n align=True,\n metadata={'msg': 'toto'})\n dt_m = _utils_impl.drop_metadata(dt)\n assert _compare_dtypes(dt, dt_m) is True\n assert dt_m.metadata is None\n\n # subdtype\n dt = np.dtype('8f',\n metadata={'msg': 'toto'})\n dt_m = _utils_impl.drop_metadata(dt)\n assert _compare_dtypes(dt, dt_m) is True\n assert dt_m.metadata is None\n\n # scalar\n dt = np.dtype('uint32',\n metadata={'msg': 'toto'})\n dt_m = _utils_impl.drop_metadata(dt)\n assert _compare_dtypes(dt, dt_m) is True\n assert dt_m.metadata is None\n\n\n@pytest.mark.parametrize("dtype",\n [np.dtype("i,i,i,i")[["f1", "f3"]],\n np.dtype("f8"),\n np.dtype("10i")])\ndef test_drop_metadata_identity_and_copy(dtype):\n # If there is no metadata, the identity is preserved:\n assert 
_utils_impl.drop_metadata(dtype) is dtype\n\n # If there is any, it is dropped (subforms are checked above)\n dtype = np.dtype(dtype, metadata={1: 2})\n assert _utils_impl.drop_metadata(dtype).metadata is None\n | .venv\Lib\site-packages\numpy\lib\tests\test_utils.py | test_utils.py | Python | 2,454 | 0.95 | 0.1375 | 0.114754 | awesome-app | 830 | 2024-10-22T12:55:02.035419 | Apache-2.0 | true | 2e1daca389219ab738fded568b8d4ee7 |
import os\nimport urllib.request as urllib_request\nfrom shutil import rmtree\nfrom tempfile import NamedTemporaryFile, mkdtemp, mkstemp\nfrom urllib.error import URLError\nfrom urllib.parse import urlparse\n\nimport pytest\n\nimport numpy.lib._datasource as datasource\nfrom numpy.testing import assert_, assert_equal, assert_raises\n\n\ndef urlopen_stub(url, data=None):\n '''Stub to replace urlopen for testing.'''\n if url == valid_httpurl():\n tmpfile = NamedTemporaryFile(prefix='urltmp_')\n return tmpfile\n else:\n raise URLError('Name or service not known')\n\n\n# setup and teardown\nold_urlopen = None\n\n\ndef setup_module():\n global old_urlopen\n\n old_urlopen = urllib_request.urlopen\n urllib_request.urlopen = urlopen_stub\n\n\ndef teardown_module():\n urllib_request.urlopen = old_urlopen\n\n\n# A valid website for more robust testing\nhttp_path = 'http://www.google.com/'\nhttp_file = 'index.html'\n\nhttp_fakepath = 'http://fake.abc.web/site/'\nhttp_fakefile = 'fake.txt'\n\nmalicious_files = ['/etc/shadow', '../../shadow',\n '..\\system.dat', 'c:\\windows\\system.dat']\n\nmagic_line = b'three is the magic number'\n\n\n# Utility functions used by many tests\ndef valid_textfile(filedir):\n # Generate and return a valid temporary file.\n fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)\n os.close(fd)\n return path\n\n\ndef invalid_textfile(filedir):\n # Generate and return an invalid filename.\n fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir)\n os.close(fd)\n os.remove(path)\n return path\n\n\ndef valid_httpurl():\n return http_path + http_file\n\n\ndef invalid_httpurl():\n return http_fakepath + http_fakefile\n\n\ndef valid_baseurl():\n return http_path\n\n\ndef invalid_baseurl():\n return http_fakepath\n\n\ndef valid_httpfile():\n return http_file\n\n\ndef invalid_httpfile():\n return http_fakefile\n\n\nclass TestDataSourceOpen:\n def setup_method(self):\n self.tmpdir = mkdtemp()\n self.ds = 
datasource.DataSource(self.tmpdir)\n\n def teardown_method(self):\n rmtree(self.tmpdir)\n del self.ds\n\n def test_ValidHTTP(self):\n fh = self.ds.open(valid_httpurl())\n assert_(fh)\n fh.close()\n\n def test_InvalidHTTP(self):\n url = invalid_httpurl()\n assert_raises(OSError, self.ds.open, url)\n try:\n self.ds.open(url)\n except OSError as e:\n # Regression test for bug fixed in r4342.\n assert_(e.errno is None)\n\n def test_InvalidHTTPCacheURLError(self):\n assert_raises(URLError, self.ds._cache, invalid_httpurl())\n\n def test_ValidFile(self):\n local_file = valid_textfile(self.tmpdir)\n fh = self.ds.open(local_file)\n assert_(fh)\n fh.close()\n\n def test_InvalidFile(self):\n invalid_file = invalid_textfile(self.tmpdir)\n assert_raises(OSError, self.ds.open, invalid_file)\n\n def test_ValidGzipFile(self):\n try:\n import gzip\n except ImportError:\n # We don't have the gzip capabilities to test.\n pytest.skip()\n # Test datasource's internal file_opener for Gzip files.\n filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')\n fp = gzip.open(filepath, 'w')\n fp.write(magic_line)\n fp.close()\n fp = self.ds.open(filepath)\n result = fp.readline()\n fp.close()\n assert_equal(magic_line, result)\n\n def test_ValidBz2File(self):\n try:\n import bz2\n except ImportError:\n # We don't have the bz2 capabilities to test.\n pytest.skip()\n # Test datasource's internal file_opener for BZip2 files.\n filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')\n fp = bz2.BZ2File(filepath, 'w')\n fp.write(magic_line)\n fp.close()\n fp = self.ds.open(filepath)\n result = fp.readline()\n fp.close()\n assert_equal(magic_line, result)\n\n\nclass TestDataSourceExists:\n def setup_method(self):\n self.tmpdir = mkdtemp()\n self.ds = datasource.DataSource(self.tmpdir)\n\n def teardown_method(self):\n rmtree(self.tmpdir)\n del self.ds\n\n def test_ValidHTTP(self):\n assert_(self.ds.exists(valid_httpurl()))\n\n def test_InvalidHTTP(self):\n assert_equal(self.ds.exists(invalid_httpurl()), 
False)\n\n def test_ValidFile(self):\n # Test valid file in destpath\n tmpfile = valid_textfile(self.tmpdir)\n assert_(self.ds.exists(tmpfile))\n # Test valid local file not in destpath\n localdir = mkdtemp()\n tmpfile = valid_textfile(localdir)\n assert_(self.ds.exists(tmpfile))\n rmtree(localdir)\n\n def test_InvalidFile(self):\n tmpfile = invalid_textfile(self.tmpdir)\n assert_equal(self.ds.exists(tmpfile), False)\n\n\nclass TestDataSourceAbspath:\n def setup_method(self):\n self.tmpdir = os.path.abspath(mkdtemp())\n self.ds = datasource.DataSource(self.tmpdir)\n\n def teardown_method(self):\n rmtree(self.tmpdir)\n del self.ds\n\n def test_ValidHTTP(self):\n scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())\n local_path = os.path.join(self.tmpdir, netloc,\n upath.strip(os.sep).strip('/'))\n assert_equal(local_path, self.ds.abspath(valid_httpurl()))\n\n def test_ValidFile(self):\n tmpfile = valid_textfile(self.tmpdir)\n tmpfilename = os.path.split(tmpfile)[-1]\n # Test with filename only\n assert_equal(tmpfile, self.ds.abspath(tmpfilename))\n # Test filename with complete path\n assert_equal(tmpfile, self.ds.abspath(tmpfile))\n\n def test_InvalidHTTP(self):\n scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())\n invalidhttp = os.path.join(self.tmpdir, netloc,\n upath.strip(os.sep).strip('/'))\n assert_(invalidhttp != self.ds.abspath(valid_httpurl()))\n\n def test_InvalidFile(self):\n invalidfile = valid_textfile(self.tmpdir)\n tmpfile = valid_textfile(self.tmpdir)\n tmpfilename = os.path.split(tmpfile)[-1]\n # Test with filename only\n assert_(invalidfile != self.ds.abspath(tmpfilename))\n # Test filename with complete path\n assert_(invalidfile != self.ds.abspath(tmpfile))\n\n def test_sandboxing(self):\n tmpfile = valid_textfile(self.tmpdir)\n tmpfilename = os.path.split(tmpfile)[-1]\n\n tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))\n\n assert_(tmp_path(valid_httpurl()).startswith(self.tmpdir))\n 
assert_(tmp_path(invalid_httpurl()).startswith(self.tmpdir))\n assert_(tmp_path(tmpfile).startswith(self.tmpdir))\n assert_(tmp_path(tmpfilename).startswith(self.tmpdir))\n for fn in malicious_files:\n assert_(tmp_path(http_path + fn).startswith(self.tmpdir))\n assert_(tmp_path(fn).startswith(self.tmpdir))\n\n def test_windows_os_sep(self):\n orig_os_sep = os.sep\n try:\n os.sep = '\\'\n self.test_ValidHTTP()\n self.test_ValidFile()\n self.test_InvalidHTTP()\n self.test_InvalidFile()\n self.test_sandboxing()\n finally:\n os.sep = orig_os_sep\n\n\nclass TestRepositoryAbspath:\n def setup_method(self):\n self.tmpdir = os.path.abspath(mkdtemp())\n self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)\n\n def teardown_method(self):\n rmtree(self.tmpdir)\n del self.repos\n\n def test_ValidHTTP(self):\n scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())\n local_path = os.path.join(self.repos._destpath, netloc,\n upath.strip(os.sep).strip('/'))\n filepath = self.repos.abspath(valid_httpfile())\n assert_equal(local_path, filepath)\n\n def test_sandboxing(self):\n tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))\n assert_(tmp_path(valid_httpfile()).startswith(self.tmpdir))\n for fn in malicious_files:\n assert_(tmp_path(http_path + fn).startswith(self.tmpdir))\n assert_(tmp_path(fn).startswith(self.tmpdir))\n\n def test_windows_os_sep(self):\n orig_os_sep = os.sep\n try:\n os.sep = '\\'\n self.test_ValidHTTP()\n self.test_sandboxing()\n finally:\n os.sep = orig_os_sep\n\n\nclass TestRepositoryExists:\n def setup_method(self):\n self.tmpdir = mkdtemp()\n self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)\n\n def teardown_method(self):\n rmtree(self.tmpdir)\n del self.repos\n\n def test_ValidFile(self):\n # Create local temp file\n tmpfile = valid_textfile(self.tmpdir)\n assert_(self.repos.exists(tmpfile))\n\n def test_InvalidFile(self):\n tmpfile = invalid_textfile(self.tmpdir)\n assert_equal(self.repos.exists(tmpfile), 
False)\n\n def test_RemoveHTTPFile(self):\n assert_(self.repos.exists(valid_httpurl()))\n\n def test_CachedHTTPFile(self):\n localfile = valid_httpurl()\n # Create a locally cached temp file with an URL based\n # directory structure. This is similar to what Repository.open\n # would do.\n scheme, netloc, upath, pms, qry, frg = urlparse(localfile)\n local_path = os.path.join(self.repos._destpath, netloc)\n os.mkdir(local_path, 0o0700)\n tmpfile = valid_textfile(local_path)\n assert_(self.repos.exists(tmpfile))\n\n\nclass TestOpenFunc:\n def setup_method(self):\n self.tmpdir = mkdtemp()\n\n def teardown_method(self):\n rmtree(self.tmpdir)\n\n def test_DataSourceOpen(self):\n local_file = valid_textfile(self.tmpdir)\n # Test case where destpath is passed in\n fp = datasource.open(local_file, destpath=self.tmpdir)\n assert_(fp)\n fp.close()\n # Test case where default destpath is used\n fp = datasource.open(local_file)\n assert_(fp)\n fp.close()\n\ndef test_del_attr_handling():\n # DataSource __del__ can be called\n # even if __init__ fails when the\n # Exception object is caught by the\n # caller as happens in refguide_check\n # is_deprecated() function\n\n ds = datasource.DataSource()\n # simulate failed __init__ by removing key attribute\n # produced within __init__ and expected by __del__\n del ds._istmpdest\n # should not raise an AttributeError if __del__\n # gracefully handles failed __init__:\n ds.__del__()\n | .venv\Lib\site-packages\numpy\lib\tests\test__datasource.py | test__datasource.py | Python | 10,933 | 0.95 | 0.201705 | 0.113553 | node-utils | 1,000 | 2023-09-19T15:22:59.470585 | GPL-3.0 | true | e465baf526c9257274432129e31019af |
import time\nfrom datetime import date\n\nimport numpy as np\nfrom numpy.lib._iotools import (\n LineSplitter,\n NameValidator,\n StringConverter,\n easy_dtype,\n flatten_dtype,\n has_nested_fields,\n)\nfrom numpy.testing import (\n assert_,\n assert_allclose,\n assert_equal,\n assert_raises,\n)\n\n\nclass TestLineSplitter:\n "Tests the LineSplitter class."\n\n def test_no_delimiter(self):\n "Test LineSplitter w/o delimiter"\n strg = " 1 2 3 4 5 # test"\n test = LineSplitter()(strg)\n assert_equal(test, ['1', '2', '3', '4', '5'])\n test = LineSplitter('')(strg)\n assert_equal(test, ['1', '2', '3', '4', '5'])\n\n def test_space_delimiter(self):\n "Test space delimiter"\n strg = " 1 2 3 4 5 # test"\n test = LineSplitter(' ')(strg)\n assert_equal(test, ['1', '2', '3', '4', '', '5'])\n test = LineSplitter(' ')(strg)\n assert_equal(test, ['1 2 3 4', '5'])\n\n def test_tab_delimiter(self):\n "Test tab delimiter"\n strg = " 1\t 2\t 3\t 4\t 5 6"\n test = LineSplitter('\t')(strg)\n assert_equal(test, ['1', '2', '3', '4', '5 6'])\n strg = " 1 2\t 3 4\t 5 6"\n test = LineSplitter('\t')(strg)\n assert_equal(test, ['1 2', '3 4', '5 6'])\n\n def test_other_delimiter(self):\n "Test LineSplitter on delimiter"\n strg = "1,2,3,4,,5"\n test = LineSplitter(',')(strg)\n assert_equal(test, ['1', '2', '3', '4', '', '5'])\n #\n strg = " 1,2,3,4,,5 # test"\n test = LineSplitter(',')(strg)\n assert_equal(test, ['1', '2', '3', '4', '', '5'])\n\n # gh-11028 bytes comment/delimiters should get encoded\n strg = b" 1,2,3,4,,5 % test"\n test = LineSplitter(delimiter=b',', comments=b'%')(strg)\n assert_equal(test, ['1', '2', '3', '4', '', '5'])\n\n def test_constant_fixed_width(self):\n "Test LineSplitter w/ fixed-width fields"\n strg = " 1 2 3 4 5 # test"\n test = LineSplitter(3)(strg)\n assert_equal(test, ['1', '2', '3', '4', '', '5', ''])\n #\n strg = " 1 3 4 5 6# test"\n test = LineSplitter(20)(strg)\n assert_equal(test, ['1 3 4 5 6'])\n #\n strg = " 1 3 4 5 6# test"\n test = 
LineSplitter(30)(strg)\n assert_equal(test, ['1 3 4 5 6'])\n\n def test_variable_fixed_width(self):\n strg = " 1 3 4 5 6# test"\n test = LineSplitter((3, 6, 6, 3))(strg)\n assert_equal(test, ['1', '3', '4 5', '6'])\n #\n strg = " 1 3 4 5 6# test"\n test = LineSplitter((6, 6, 9))(strg)\n assert_equal(test, ['1', '3 4', '5 6'])\n\n# -----------------------------------------------------------------------------\n\n\nclass TestNameValidator:\n\n def test_case_sensitivity(self):\n "Test case sensitivity"\n names = ['A', 'a', 'b', 'c']\n test = NameValidator().validate(names)\n assert_equal(test, ['A', 'a', 'b', 'c'])\n test = NameValidator(case_sensitive=False).validate(names)\n assert_equal(test, ['A', 'A_1', 'B', 'C'])\n test = NameValidator(case_sensitive='upper').validate(names)\n assert_equal(test, ['A', 'A_1', 'B', 'C'])\n test = NameValidator(case_sensitive='lower').validate(names)\n assert_equal(test, ['a', 'a_1', 'b', 'c'])\n\n # check exceptions\n assert_raises(ValueError, NameValidator, case_sensitive='foobar')\n\n def test_excludelist(self):\n "Test excludelist"\n names = ['dates', 'data', 'Other Data', 'mask']\n validator = NameValidator(excludelist=['dates', 'data', 'mask'])\n test = validator.validate(names)\n assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])\n\n def test_missing_names(self):\n "Test validate missing names"\n namelist = ('a', 'b', 'c')\n validator = NameValidator()\n assert_equal(validator(namelist), ['a', 'b', 'c'])\n namelist = ('', 'b', 'c')\n assert_equal(validator(namelist), ['f0', 'b', 'c'])\n namelist = ('a', 'b', '')\n assert_equal(validator(namelist), ['a', 'b', 'f0'])\n namelist = ('', 'f0', '')\n assert_equal(validator(namelist), ['f1', 'f0', 'f2'])\n\n def test_validate_nb_names(self):\n "Test validate nb names"\n namelist = ('a', 'b', 'c')\n validator = NameValidator()\n assert_equal(validator(namelist, nbfields=1), ('a',))\n assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),\n ['a', 'b', 'c', 'g0', 
'g1'])\n\n def test_validate_wo_names(self):\n "Test validate no names"\n namelist = None\n validator = NameValidator()\n assert_(validator(namelist) is None)\n assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])\n\n# -----------------------------------------------------------------------------\n\n\ndef _bytes_to_date(s):\n return date(*time.strptime(s, "%Y-%m-%d")[:3])\n\n\nclass TestStringConverter:\n "Test StringConverter"\n\n def test_creation(self):\n "Test creation of a StringConverter"\n converter = StringConverter(int, -99999)\n assert_equal(converter._status, 1)\n assert_equal(converter.default, -99999)\n\n def test_upgrade(self):\n "Tests the upgrade method."\n\n converter = StringConverter()\n assert_equal(converter._status, 0)\n\n # test int\n assert_equal(converter.upgrade('0'), 0)\n assert_equal(converter._status, 1)\n\n # On systems where long defaults to 32-bit, the statuses will be\n # offset by one, so we check for this here.\n import numpy._core.numeric as nx\n status_offset = int(nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize)\n\n # test int > 2**32\n assert_equal(converter.upgrade('17179869184'), 17179869184)\n assert_equal(converter._status, 1 + status_offset)\n\n # test float\n assert_allclose(converter.upgrade('0.'), 0.0)\n assert_equal(converter._status, 2 + status_offset)\n\n # test complex\n assert_equal(converter.upgrade('0j'), complex('0j'))\n assert_equal(converter._status, 3 + status_offset)\n\n # test str\n # note that the longdouble type has been skipped, so the\n # _status increases by 2. 
Everything should succeed with\n # unicode conversion (8).\n for s in ['a', b'a']:\n res = converter.upgrade(s)\n assert_(type(res) is str)\n assert_equal(res, 'a')\n assert_equal(converter._status, 8 + status_offset)\n\n def test_missing(self):\n "Tests the use of missing values."\n converter = StringConverter(missing_values=('missing',\n 'missed'))\n converter.upgrade('0')\n assert_equal(converter('0'), 0)\n assert_equal(converter(''), converter.default)\n assert_equal(converter('missing'), converter.default)\n assert_equal(converter('missed'), converter.default)\n try:\n converter('miss')\n except ValueError:\n pass\n\n def test_upgrademapper(self):\n "Tests updatemapper"\n dateparser = _bytes_to_date\n _original_mapper = StringConverter._mapper[:]\n try:\n StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))\n convert = StringConverter(dateparser, date(2000, 1, 1))\n test = convert('2001-01-01')\n assert_equal(test, date(2001, 1, 1))\n test = convert('2009-01-01')\n assert_equal(test, date(2009, 1, 1))\n test = convert('')\n assert_equal(test, date(2000, 1, 1))\n finally:\n StringConverter._mapper = _original_mapper\n\n def test_string_to_object(self):\n "Make sure that string-to-object functions are properly recognized"\n old_mapper = StringConverter._mapper[:] # copy of list\n conv = StringConverter(_bytes_to_date)\n assert_equal(conv._mapper, old_mapper)\n assert_(hasattr(conv, 'default'))\n\n def test_keep_default(self):\n "Make sure we don't lose an explicit default"\n converter = StringConverter(None, missing_values='',\n default=-999)\n converter.upgrade('3.14159265')\n assert_equal(converter.default, -999)\n assert_equal(converter.type, np.dtype(float))\n #\n converter = StringConverter(\n None, missing_values='', default=0)\n converter.upgrade('3.14159265')\n assert_equal(converter.default, 0)\n assert_equal(converter.type, np.dtype(float))\n\n def test_keep_default_zero(self):\n "Check that we don't lose a default of 0"\n converter = 
StringConverter(int, default=0,\n missing_values="N/A")\n assert_equal(converter.default, 0)\n\n def test_keep_missing_values(self):\n "Check that we're not losing missing values"\n converter = StringConverter(int, default=0,\n missing_values="N/A")\n assert_equal(\n converter.missing_values, {'', 'N/A'})\n\n def test_int64_dtype(self):\n "Check that int64 integer types can be specified"\n converter = StringConverter(np.int64, default=0)\n val = "-9223372036854775807"\n assert_(converter(val) == -9223372036854775807)\n val = "9223372036854775807"\n assert_(converter(val) == 9223372036854775807)\n\n def test_uint64_dtype(self):\n "Check that uint64 integer types can be specified"\n converter = StringConverter(np.uint64, default=0)\n val = "9223372043271415339"\n assert_(converter(val) == 9223372043271415339)\n\n\nclass TestMiscFunctions:\n\n def test_has_nested_dtype(self):\n "Test has_nested_dtype"\n ndtype = np.dtype(float)\n assert_equal(has_nested_fields(ndtype), False)\n ndtype = np.dtype([('A', '|S3'), ('B', float)])\n assert_equal(has_nested_fields(ndtype), False)\n ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])\n assert_equal(has_nested_fields(ndtype), True)\n\n def test_easy_dtype(self):\n "Test ndtype on dtypes"\n # Simple case\n ndtype = float\n assert_equal(easy_dtype(ndtype), np.dtype(float))\n # As string w/o names\n ndtype = "i4, f8"\n assert_equal(easy_dtype(ndtype),\n np.dtype([('f0', "i4"), ('f1', "f8")]))\n # As string w/o names but different default format\n assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),\n np.dtype([('field_000', "i4"), ('field_001', "f8")]))\n # As string w/ names\n ndtype = "i4, f8"\n assert_equal(easy_dtype(ndtype, names="a, b"),\n np.dtype([('a', "i4"), ('b', "f8")]))\n # As string w/ names (too many)\n ndtype = "i4, f8"\n assert_equal(easy_dtype(ndtype, names="a, b, c"),\n np.dtype([('a', "i4"), ('b', "f8")]))\n # As string w/ names (not enough)\n ndtype = "i4, f8"\n 
assert_equal(easy_dtype(ndtype, names=", b"),\n np.dtype([('f0', "i4"), ('b', "f8")]))\n # ... (with different default format)\n assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),\n np.dtype([('a', "i4"), ('f00', "f8")]))\n # As list of tuples w/o names\n ndtype = [('A', int), ('B', float)]\n assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))\n # As list of tuples w/ names\n assert_equal(easy_dtype(ndtype, names="a,b"),\n np.dtype([('a', int), ('b', float)]))\n # As list of tuples w/ not enough names\n assert_equal(easy_dtype(ndtype, names="a"),\n np.dtype([('a', int), ('f0', float)]))\n # As list of tuples w/ too many names\n assert_equal(easy_dtype(ndtype, names="a,b,c"),\n np.dtype([('a', int), ('b', float)]))\n # As list of types w/o names\n ndtype = (int, float, float)\n assert_equal(easy_dtype(ndtype),\n np.dtype([('f0', int), ('f1', float), ('f2', float)]))\n # As list of types w names\n ndtype = (int, float, float)\n assert_equal(easy_dtype(ndtype, names="a, b, c"),\n np.dtype([('a', int), ('b', float), ('c', float)]))\n # As simple dtype w/ names\n ndtype = np.dtype(float)\n assert_equal(easy_dtype(ndtype, names="a, b, c"),\n np.dtype([(_, float) for _ in ('a', 'b', 'c')]))\n # As simple dtype w/o names (but multiple fields)\n ndtype = np.dtype(float)\n assert_equal(\n easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),\n np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))\n\n def test_flatten_dtype(self):\n "Testing flatten_dtype"\n # Standard dtype\n dt = np.dtype([("a", "f8"), ("b", "f8")])\n dt_flat = flatten_dtype(dt)\n assert_equal(dt_flat, [float, float])\n # Recursive dtype\n dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])\n dt_flat = flatten_dtype(dt)\n assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])\n # dtype with shaped fields\n dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])\n dt_flat = flatten_dtype(dt)\n assert_equal(dt_flat, [float, int])\n dt_flat = 
flatten_dtype(dt, True)\n assert_equal(dt_flat, [float] * 2 + [int] * 3)\n # dtype w/ titles\n dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])\n dt_flat = flatten_dtype(dt)\n assert_equal(dt_flat, [float, float])\n | .venv\Lib\site-packages\numpy\lib\tests\test__iotools.py | test__iotools.py | Python | 14,125 | 0.95 | 0.1 | 0.121019 | python-kit | 904 | 2025-01-04T11:37:40.936391 | Apache-2.0 | true | d69abaaa392d67915ba0430b5ac3bd05 |
"""Tests for the NumpyVersion class.\n\n"""\nfrom numpy.lib import NumpyVersion\nfrom numpy.testing import assert_, assert_raises\n\n\ndef test_main_versions():\n assert_(NumpyVersion('1.8.0') == '1.8.0')\n for ver in ['1.9.0', '2.0.0', '1.8.1', '10.0.1']:\n assert_(NumpyVersion('1.8.0') < ver)\n\n for ver in ['1.7.0', '1.7.1', '0.9.9']:\n assert_(NumpyVersion('1.8.0') > ver)\n\n\ndef test_version_1_point_10():\n # regression test for gh-2998.\n assert_(NumpyVersion('1.9.0') < '1.10.0')\n assert_(NumpyVersion('1.11.0') < '1.11.1')\n assert_(NumpyVersion('1.11.0') == '1.11.0')\n assert_(NumpyVersion('1.99.11') < '1.99.12')\n\n\ndef test_alpha_beta_rc():\n assert_(NumpyVersion('1.8.0rc1') == '1.8.0rc1')\n for ver in ['1.8.0', '1.8.0rc2']:\n assert_(NumpyVersion('1.8.0rc1') < ver)\n\n for ver in ['1.8.0a2', '1.8.0b3', '1.7.2rc4']:\n assert_(NumpyVersion('1.8.0rc1') > ver)\n\n assert_(NumpyVersion('1.8.0b1') > '1.8.0a2')\n\n\ndef test_dev_version():\n assert_(NumpyVersion('1.9.0.dev-Unknown') < '1.9.0')\n for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev-ffffffff']:\n assert_(NumpyVersion('1.9.0.dev-f16acvda') < ver)\n\n assert_(NumpyVersion('1.9.0.dev-f16acvda') == '1.9.0.dev-11111111')\n\n\ndef test_dev_a_b_rc_mixed():\n assert_(NumpyVersion('1.9.0a2.dev-f16acvda') == '1.9.0a2.dev-11111111')\n assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')\n\n\ndef test_dev0_version():\n assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')\n for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:\n assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)\n\n assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')\n\n\ndef test_dev0_a_b_rc_mixed():\n assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')\n assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')\n\n\ndef test_raises():\n for ver in ['1.9', '1,9.0', '1.7.x']:\n assert_raises(ValueError, NumpyVersion, ver)\n | 
.venv\Lib\site-packages\numpy\lib\tests\test__version.py | test__version.py | Python | 2,063 | 0.95 | 0.28125 | 0.02381 | python-kit | 589 | 2024-01-30T04:41:13.943246 | Apache-2.0 | true | ecd132735dc295965034126258d76da8 |
NUMPY | .venv\Lib\site-packages\numpy\lib\tests\data\py2-np0-objarr.npy | py2-np0-objarr.npy | Other | 258 | 0.7 | 0 | 0 | vue-tools | 130 | 2024-03-01T23:26:50.941110 | GPL-3.0 | true | fd9274c0b0f7f9eeb933df64a8490889 |
NUMPY | .venv\Lib\site-packages\numpy\lib\tests\data\py2-objarr.npy | py2-objarr.npy | Other | 258 | 0.7 | 0 | 0 | vue-tools | 663 | 2024-12-19T21:52:06.710439 | MIT | true | eb31460bb4d6d38bd00c016e664e88f4 |
PK | .venv\Lib\site-packages\numpy\lib\tests\data\py2-objarr.npz | py2-objarr.npz | Other | 366 | 0.7 | 0 | 0 | vue-tools | 773 | 2024-08-29T09:25:38.313643 | MIT | true | fa746445f9cfd2450137cd66f1b5d4f6 |
NUMPY | .venv\Lib\site-packages\numpy\lib\tests\data\py3-objarr.npy | py3-objarr.npy | Other | 325 | 0.7 | 0 | 0 | awesome-app | 563 | 2025-07-03T21:00:36.282755 | GPL-3.0 | true | 97bd16c6401130e9a43208f2cf43493d |
PK | .venv\Lib\site-packages\numpy\lib\tests\data\py3-objarr.npz | py3-objarr.npz | Other | 453 | 0.7 | 0 | 0 | vue-tools | 790 | 2023-09-27T14:41:50.243608 | GPL-3.0 | true | 6cb135f375e1216284bc4bc12d6155d1 |
NUMPY | .venv\Lib\site-packages\numpy\lib\tests\data\python3.npy | python3.npy | Other | 96 | 0.5 | 0 | 0 | awesome-app | 738 | 2024-07-11T18:02:11.816424 | BSD-3-Clause | true | f6194a36c42fda0d01a86f3b766043dd |
NUMPY | .venv\Lib\site-packages\numpy\lib\tests\data\win64python2.npy | win64python2.npy | Other | 96 | 0.5 | 0 | 0 | react-lib | 375 | 2025-01-21T04:10:47.091976 | Apache-2.0 | true | 14a911bf052baffdc7b88590a86f5760 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_arraypad.cpython-313.pyc | test_arraypad.cpython-313.pyc | Other | 72,032 | 0.75 | 0.022117 | 0.004934 | node-utils | 544 | 2025-02-25T10:40:58.724698 | BSD-3-Clause | true | 1fdc6163ec68dbdad100f711fb727a68 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_arraysetops.cpython-313.pyc | test_arraysetops.cpython-313.pyc | Other | 57,902 | 0.75 | 0.020316 | 0.013857 | python-kit | 910 | 2024-10-19T06:12:25.550859 | Apache-2.0 | true | a3ebe256687a2bdc5c552ae80dcc5fce |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_arrayterator.cpython-313.pyc | test_arrayterator.cpython-313.pyc | Other | 2,654 | 0.8 | 0 | 0.045455 | react-lib | 271 | 2025-03-18T00:19:22.276078 | BSD-3-Clause | true | 5a9914508edd76f3ba5dca9e43f0c0b8 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_array_utils.cpython-313.pyc | test_array_utils.cpython-313.pyc | Other | 2,584 | 0.8 | 0 | 0 | vue-tools | 304 | 2024-10-15T04:52:03.108227 | Apache-2.0 | true | 3bb36ca38bc8ec1f80628d077b801253 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_format.cpython-313.pyc | test_format.cpython-313.pyc | Other | 54,947 | 0.75 | 0.007375 | 0.006144 | vue-tools | 284 | 2024-01-30T23:01:12.500771 | MIT | true | d192f24e821fcbff27086592efb11e45 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_histograms.cpython-313.pyc | test_histograms.cpython-313.pyc | Other | 50,208 | 0.95 | 0.019851 | 0.007692 | awesome-app | 585 | 2023-12-15T03:03:16.012503 | MIT | true | 05ca841f2daffdef9727f4aeb5f7c1ea |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_index_tricks.cpython-313.pyc | test_index_tricks.cpython-313.pyc | Other | 35,787 | 0.8 | 0 | 0 | awesome-app | 689 | 2024-09-28T12:04:31.140733 | MIT | true | dba1cc37f47b285f521bb9114ecded39 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_loadtxt.cpython-313.pyc | test_loadtxt.cpython-313.pyc | Other | 66,363 | 0.75 | 0.009324 | 0.068 | python-kit | 277 | 2025-05-25T16:11:18.570542 | BSD-3-Clause | true | 9145d95fa65392deab650930f557e29a |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_mixins.cpython-313.pyc | test_mixins.cpython-313.pyc | Other | 12,783 | 0.8 | 0.008929 | 0 | vue-tools | 795 | 2023-09-19T05:04:20.254136 | MIT | true | 271ee5a10dcddb356fc59f831e648f40 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_nanfunctions.cpython-313.pyc | test_nanfunctions.cpython-313.pyc | Other | 92,106 | 0.75 | 0.004552 | 0 | react-lib | 154 | 2025-03-10T01:35:00.873628 | MIT | true | d2ea25f616b31bf0c413ec28c278f9f7 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_packbits.cpython-313.pyc | test_packbits.cpython-313.pyc | Other | 22,929 | 0.8 | 0 | 0.015748 | awesome-app | 582 | 2024-01-23T19:22:36.591081 | BSD-3-Clause | true | 5b7d01a3fe97ae48ac8f36c4d224c1e8 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_polynomial.cpython-313.pyc | test_polynomial.cpython-313.pyc | Other | 22,323 | 0.8 | 0.014599 | 0 | node-utils | 707 | 2023-09-18T16:14:51.896680 | GPL-3.0 | true | be2bc9666f057110d2e5735f4ab574f2 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_recfunctions.cpython-313.pyc | test_recfunctions.cpython-313.pyc | Other | 56,943 | 0.75 | 0 | 0.007229 | node-utils | 499 | 2023-10-25T20:14:26.981604 | MIT | true | ce701e4739caf3981df05aa2aa13ea18 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_regression.cpython-313.pyc | test_regression.cpython-313.pyc | Other | 15,310 | 0.95 | 0.02 | 0 | react-lib | 10 | 2024-05-31T07:52:15.312850 | Apache-2.0 | true | 31934ed122c52f9b353145d480b7d57b |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_shape_base.cpython-313.pyc | test_shape_base.cpython-313.pyc | Other | 56,967 | 0.75 | 0.015748 | 0.004115 | vue-tools | 22 | 2023-12-16T14:05:58.528763 | BSD-3-Clause | true | 32b03e89e417660bbe2329a5a8feea02 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_stride_tricks.cpython-313.pyc | test_stride_tricks.cpython-313.pyc | Other | 30,040 | 0.8 | 0.00295 | 0.009119 | vue-tools | 11 | 2024-01-12T03:26:13.505245 | MIT | true | 5a93df75577372d2e31f4fcf03ac110c |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_twodim_base.cpython-313.pyc | test_twodim_base.cpython-313.pyc | Other | 29,203 | 0.95 | 0.009091 | 0.009804 | vue-tools | 592 | 2025-05-23T17:01:56.048298 | GPL-3.0 | true | 2ea8032c19233c9290a217a292e6bd92 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_type_check.cpython-313.pyc | test_type_check.cpython-313.pyc | Other | 32,478 | 0.8 | 0 | 0 | react-lib | 924 | 2024-12-15T22:09:33.745124 | MIT | true | ff0806a5ac8b3b8aecd99d6064e801a0 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_ufunclike.cpython-313.pyc | test_ufunclike.cpython-313.pyc | Other | 6,108 | 0.8 | 0 | 0 | react-lib | 338 | 2024-11-02T20:51:09.191950 | Apache-2.0 | true | e125abe737e6e7d05a2693e2be6cd450 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test_utils.cpython-313.pyc | test_utils.cpython-313.pyc | Other | 4,264 | 0.8 | 0 | 0 | awesome-app | 730 | 2025-04-02T20:06:02.082504 | BSD-3-Clause | true | c2791d8faf4a2fa2db8fff2e8557b23f |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test__datasource.cpython-313.pyc | test__datasource.cpython-313.pyc | Other | 21,042 | 0.8 | 0.007092 | 0.022727 | python-kit | 791 | 2025-03-30T14:00:49.405040 | BSD-3-Clause | true | b6408707dfd3b329c28c235ec9456401 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test__iotools.cpython-313.pyc | test__iotools.cpython-313.pyc | Other | 18,644 | 0.95 | 0.010417 | 0 | awesome-app | 886 | 2023-11-15T02:47:02.556036 | Apache-2.0 | true | 52d040a0418210918e8a373016ac043c |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\test__version.cpython-313.pyc | test__version.cpython-313.pyc | Other | 3,475 | 0.8 | 0.1 | 0 | python-kit | 56 | 2025-04-01T08:16:27.949198 | GPL-3.0 | true | 43806c6a298a35324e697cbe36a56ee9 |
\n\n | .venv\Lib\site-packages\numpy\lib\tests\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 190 | 0.7 | 0 | 0 | node-utils | 315 | 2024-12-12T10:06:34.279165 | Apache-2.0 | true | e50813ce529e22db52a1593c2f745db8 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\array_utils.cpython-313.pyc | array_utils.cpython-313.pyc | Other | 348 | 0.7 | 0 | 0 | awesome-app | 370 | 2023-11-03T04:12:15.253661 | GPL-3.0 | false | 48d7d78560af766d36a074e3fc09c6b7 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\format.cpython-313.pyc | format.cpython-313.pyc | Other | 786 | 0.7 | 0 | 0 | react-lib | 312 | 2025-04-03T02:54:02.159545 | Apache-2.0 | false | 5e30e4c27577225871bf47ff3a97f6a1 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\introspect.cpython-313.pyc | introspect.cpython-313.pyc | Other | 3,149 | 0.95 | 0.050633 | 0 | node-utils | 49 | 2024-07-16T10:08:43.571794 | GPL-3.0 | false | 9447aaa6ea0c19206cb786ddf4df3618 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\mixins.cpython-313.pyc | mixins.cpython-313.pyc | Other | 8,758 | 0.95 | 0.154472 | 0.018018 | vue-tools | 562 | 2023-12-11T06:17:50.265442 | BSD-3-Clause | false | 310fd435362ed2d4b32dbde7133e80d5 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\npyio.cpython-313.pyc | npyio.cpython-313.pyc | Other | 269 | 0.7 | 0 | 0 | awesome-app | 362 | 2024-09-03T08:11:04.357732 | MIT | false | 09edb7e09b5f7fc1361e10b1a8db4fef |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\recfunctions.cpython-313.pyc | recfunctions.cpython-313.pyc | Other | 62,042 | 0.75 | 0.033429 | 0.012712 | python-kit | 214 | 2023-10-03T08:10:51.157320 | MIT | false | 2dc914b53b2f069b6f0939f05b3f9c88 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\scimath.cpython-313.pyc | scimath.cpython-313.pyc | Other | 409 | 0.7 | 0 | 0 | vue-tools | 470 | 2025-05-25T01:38:31.969061 | BSD-3-Clause | false | 3360c9d48fa9f261ddf257b483f94487 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\stride_tricks.cpython-313.pyc | stride_tricks.cpython-313.pyc | Other | 297 | 0.7 | 0 | 0 | node-utils | 799 | 2023-11-20T06:21:24.451121 | GPL-3.0 | false | e59bd49157d98a223bfa228ee5fd94ce |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\user_array.cpython-313.pyc | user_array.cpython-313.pyc | Other | 260 | 0.7 | 0 | 0 | node-utils | 790 | 2025-05-23T19:36:24.693383 | GPL-3.0 | false | 05ff28199c9cbcf0df05b7a1f6c04125 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_arraypad_impl.cpython-313.pyc | _arraypad_impl.cpython-313.pyc | Other | 27,298 | 0.95 | 0.078261 | 0 | awesome-app | 129 | 2023-07-21T23:19:39.386666 | GPL-3.0 | false | ed0535d54c098c7250e1cadaeec9b72c |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_arraysetops_impl.cpython-313.pyc | _arraysetops_impl.cpython-313.pyc | Other | 41,155 | 0.95 | 0.076243 | 0.029716 | python-kit | 712 | 2024-11-08T09:28:52.496309 | MIT | false | c583c5923142d01ca1a900f40e510efb |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_arrayterator_impl.cpython-313.pyc | _arrayterator_impl.cpython-313.pyc | Other | 9,307 | 0.95 | 0.083333 | 0.013986 | vue-tools | 34 | 2023-11-04T13:02:31.894960 | MIT | false | 89d9eebc2f81d38dd21a60755f6e3648 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_array_utils_impl.cpython-313.pyc | _array_utils_impl.cpython-313.pyc | Other | 2,020 | 0.95 | 0 | 0 | python-kit | 559 | 2024-06-19T04:17:38.121221 | GPL-3.0 | false | fe637e109fcd3ceb7b57cda6bc04e4d8 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_datasource.cpython-313.pyc | _datasource.cpython-313.pyc | Other | 24,203 | 0.95 | 0.088937 | 0 | awesome-app | 129 | 2024-12-24T12:35:57.152001 | MIT | false | 54663b51f08b57fe5c54780d68439ff3 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_format_impl.cpython-313.pyc | _format_impl.cpython-313.pyc | Other | 35,160 | 0.95 | 0.061856 | 0 | vue-tools | 312 | 2024-03-29T01:31:29.279801 | Apache-2.0 | false | 309081e6e672b684884c8515b0a240de |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_histograms_impl.cpython-313.pyc | _histograms_impl.cpython-313.pyc | Other | 39,102 | 0.95 | 0.086667 | 0.009375 | vue-tools | 487 | 2023-08-01T05:14:25.115379 | Apache-2.0 | false | e7d9a772d05c8fdecf5cf48cbd7b8c06 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_index_tricks_impl.cpython-313.pyc | _index_tricks_impl.cpython-313.pyc | Other | 34,498 | 0.95 | 0.033738 | 0.001621 | awesome-app | 361 | 2024-09-09T12:34:42.178704 | GPL-3.0 | false | e2249e5d1f76b3366a915bc625b7d572 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_iotools.cpython-313.pyc | _iotools.cpython-313.pyc | Other | 34,043 | 0.95 | 0.061538 | 0.010917 | python-kit | 477 | 2024-09-15T13:30:48.503145 | GPL-3.0 | false | b8a1760677c32ab51916d58dd7216cef |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_nanfunctions_impl.cpython-313.pyc | _nanfunctions_impl.cpython-313.pyc | Other | 71,316 | 0.75 | 0.080026 | 0.006584 | awesome-app | 724 | 2023-08-07T10:02:27.715177 | MIT | false | 6b1783cc9ba885913d6aee5ba128a9a5 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_npyio_impl.cpython-313.pyc | _npyio_impl.cpython-313.pyc | Other | 94,166 | 0.75 | 0.052445 | 0.008684 | react-lib | 886 | 2024-07-18T10:01:57.050293 | GPL-3.0 | false | ac166fd98d246e4ec952a8187877e49a |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_polynomial_impl.cpython-313.pyc | _polynomial_impl.cpython-313.pyc | Other | 51,262 | 0.95 | 0.032287 | 0.005501 | python-kit | 456 | 2023-11-25T22:15:45.473349 | MIT | false | a8af414739eae273950a383894ee5f92 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_scimath_impl.cpython-313.pyc | _scimath_impl.cpython-313.pyc | Other | 16,901 | 0.95 | 0.044723 | 0.002309 | vue-tools | 12 | 2023-09-24T08:01:38.027722 | Apache-2.0 | false | 997bf784338961e0e8e35db8195f6b7a |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_shape_base_impl.cpython-313.pyc | _shape_base_impl.cpython-313.pyc | Other | 41,125 | 0.95 | 0.051777 | 0 | vue-tools | 573 | 2023-09-29T03:40:39.411567 | MIT | false | 79af3398f327c8b651555c77ae897237 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_stride_tricks_impl.cpython-313.pyc | _stride_tricks_impl.cpython-313.pyc | Other | 18,496 | 0.95 | 0.050343 | 0.005305 | vue-tools | 187 | 2024-02-04T17:07:05.272175 | BSD-3-Clause | false | 2c90e2b1254df7c84e370ab11346cbd3 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_twodim_base_impl.cpython-313.pyc | _twodim_base_impl.cpython-313.pyc | Other | 35,429 | 0.95 | 0.075433 | 0.006158 | react-lib | 512 | 2023-11-19T01:00:51.831513 | MIT | false | ba257a8795e0f072d58c54c0466b765f |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_type_check_impl.cpython-313.pyc | _type_check_impl.cpython-313.pyc | Other | 20,796 | 0.95 | 0.071168 | 0 | node-utils | 575 | 2024-11-04T14:36:14.642124 | BSD-3-Clause | false | 05e3b84c809c1df5468a2380e56aad2f |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_ufunclike_impl.cpython-313.pyc | _ufunclike_impl.cpython-313.pyc | Other | 6,750 | 0.95 | 0.082873 | 0 | python-kit | 907 | 2024-10-30T06:50:53.678617 | Apache-2.0 | false | 9c88bd5297dff22a561ca0d52d6308b9 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_user_array_impl.cpython-313.pyc | _user_array_impl.cpython-313.pyc | Other | 16,208 | 0.95 | 0.090909 | 0.013889 | python-kit | 445 | 2024-08-01T23:08:01.796303 | BSD-3-Clause | false | 4ce0aa205b3b6b60606951890b386020 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_utils_impl.cpython-313.pyc | _utils_impl.cpython-313.pyc | Other | 24,709 | 0.95 | 0.085417 | 0.01937 | awesome-app | 982 | 2024-11-27T15:16:47.710224 | GPL-3.0 | false | 683f7a4c62c6c101f66e263d6d99d8a9 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\_version.cpython-313.pyc | _version.cpython-313.pyc | Other | 6,715 | 0.95 | 0.020619 | 0 | awesome-app | 808 | 2023-12-08T03:14:31.360595 | MIT | false | cc89f31ed59325dd6f2c9e47f9dd5cf6 |
\n\n | .venv\Lib\site-packages\numpy\lib\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 2,998 | 0.95 | 0.09434 | 0 | vue-tools | 546 | 2025-05-28T04:58:53.839038 | Apache-2.0 | false | 08449038f87e252477b9730c61a06ede |
!<arch>\n/ -1 0 186 `\n | .venv\Lib\site-packages\numpy\linalg\lapack_lite.cp313-win_amd64.lib | lapack_lite.cp313-win_amd64.lib | Other | 2,084 | 0.8 | 0 | 0 | vue-tools | 626 | 2024-04-01T00:01:08.165433 | GPL-3.0 | false | b9da89ade641ca61624742341b046a88 |
MZ | .venv\Lib\site-packages\numpy\linalg\lapack_lite.cp313-win_amd64.pyd | lapack_lite.cp313-win_amd64.pyd | Other | 18,432 | 0.95 | 0.018519 | 0.037736 | react-lib | 823 | 2024-10-10T00:00:13.221786 | BSD-3-Clause | false | 36165f3e64566cbb0b4cb56b20d93db0 |
from typing import Final, TypedDict, type_check_only\n\nimport numpy as np\nfrom numpy._typing import NDArray\n\nfrom ._linalg import fortran_int\n\n###\n\n@type_check_only\nclass _GELSD(TypedDict):\n m: int\n n: int\n nrhs: int\n lda: int\n ldb: int\n rank: int\n lwork: int\n info: int\n\n@type_check_only\nclass _DGELSD(_GELSD):\n dgelsd_: int\n rcond: float\n\n@type_check_only\nclass _ZGELSD(_GELSD):\n zgelsd_: int\n\n@type_check_only\nclass _GEQRF(TypedDict):\n m: int\n n: int\n lda: int\n lwork: int\n info: int\n\n@type_check_only\nclass _DGEQRF(_GEQRF):\n dgeqrf_: int\n\n@type_check_only\nclass _ZGEQRF(_GEQRF):\n zgeqrf_: int\n\n@type_check_only\nclass _DORGQR(TypedDict):\n dorgqr_: int\n info: int\n\n@type_check_only\nclass _ZUNGQR(TypedDict):\n zungqr_: int\n info: int\n\n###\n\n_ilp64: Final[bool] = ...\n\ndef dgelsd(\n m: int,\n n: int,\n nrhs: int,\n a: NDArray[np.float64],\n lda: int,\n b: NDArray[np.float64],\n ldb: int,\n s: NDArray[np.float64],\n rcond: float,\n rank: int,\n work: NDArray[np.float64],\n lwork: int,\n iwork: NDArray[fortran_int],\n info: int,\n) -> _DGELSD: ...\ndef zgelsd(\n m: int,\n n: int,\n nrhs: int,\n a: NDArray[np.complex128],\n lda: int,\n b: NDArray[np.complex128],\n ldb: int,\n s: NDArray[np.float64],\n rcond: float,\n rank: int,\n work: NDArray[np.complex128],\n lwork: int,\n rwork: NDArray[np.float64],\n iwork: NDArray[fortran_int],\n info: int,\n) -> _ZGELSD: ...\n\n#\ndef dgeqrf(\n m: int,\n n: int,\n a: NDArray[np.float64], # in/out, shape: (lda, n)\n lda: int,\n tau: NDArray[np.float64], # out, shape: (min(m, n),)\n work: NDArray[np.float64], # out, shape: (max(1, lwork),)\n lwork: int,\n info: int, # out\n) -> _DGEQRF: ...\ndef zgeqrf(\n m: int,\n n: int,\n a: NDArray[np.complex128], # in/out, shape: (lda, n)\n lda: int,\n tau: NDArray[np.complex128], # out, shape: (min(m, n),)\n work: NDArray[np.complex128], # out, shape: (max(1, lwork),)\n lwork: int,\n info: int, # out\n) -> _ZGEQRF: ...\n\n#\ndef dorgqr(\n m: int, 
# >=0\n n: int, # m >= n >= 0\n k: int, # n >= k >= 0\n a: NDArray[np.float64], # in/out, shape: (lda, n)\n lda: int, # >= max(1, m)\n tau: NDArray[np.float64], # in, shape: (k,)\n work: NDArray[np.float64], # out, shape: (max(1, lwork),)\n lwork: int,\n info: int, # out\n) -> _DORGQR: ...\ndef zungqr(\n m: int,\n n: int,\n k: int,\n a: NDArray[np.complex128],\n lda: int,\n tau: NDArray[np.complex128],\n work: NDArray[np.complex128],\n lwork: int,\n info: int,\n) -> _ZUNGQR: ...\n\n#\ndef xerbla(srname: object, info: int) -> None: ...\n | .venv\Lib\site-packages\numpy\linalg\lapack_lite.pyi | lapack_lite.pyi | Other | 2,813 | 0.95 | 0.106383 | 0.040323 | react-lib | 50 | 2024-10-29T15:49:04.422790 | BSD-3-Clause | false | 1376d270289a87b52b72614d84598693 |
def __getattr__(attr_name):\n import warnings\n\n from numpy.linalg import _linalg\n ret = getattr(_linalg, attr_name, None)\n if ret is None:\n raise AttributeError(\n f"module 'numpy.linalg.linalg' has no attribute {attr_name}")\n warnings.warn(\n "The numpy.linalg.linalg has been made private and renamed to "\n "numpy.linalg._linalg. All public functions exported by it are "\n f"available from numpy.linalg. Please use numpy.linalg.{attr_name} "\n "instead.",\n DeprecationWarning,\n stacklevel=3\n )\n return ret\n | .venv\Lib\site-packages\numpy\linalg\linalg.py | linalg.py | Python | 602 | 0.85 | 0.117647 | 0 | vue-tools | 777 | 2023-07-21T04:34:32.324396 | Apache-2.0 | false | 3f15c93034dc47ae220ef1fb3f1ae31e |
from ._linalg import (\n LinAlgError,\n cholesky,\n cond,\n cross,\n det,\n diagonal,\n eig,\n eigh,\n eigvals,\n eigvalsh,\n inv,\n lstsq,\n matmul,\n matrix_norm,\n matrix_power,\n matrix_rank,\n matrix_transpose,\n multi_dot,\n norm,\n outer,\n pinv,\n qr,\n slogdet,\n solve,\n svd,\n svdvals,\n tensordot,\n tensorinv,\n tensorsolve,\n trace,\n vecdot,\n vector_norm,\n)\n\n__all__ = [\n "LinAlgError",\n "cholesky",\n "cond",\n "cross",\n "det",\n "diagonal",\n "eig",\n "eigh",\n "eigvals",\n "eigvalsh",\n "inv",\n "lstsq",\n "matmul",\n "matrix_norm",\n "matrix_power",\n "matrix_rank",\n "matrix_transpose",\n "multi_dot",\n "norm",\n "outer",\n "pinv",\n "qr",\n "slogdet",\n "solve",\n "svd",\n "svdvals",\n "tensordot",\n "tensorinv",\n "tensorsolve",\n "trace",\n "vecdot",\n "vector_norm",\n]\n | .venv\Lib\site-packages\numpy\linalg\linalg.pyi | linalg.pyi | Other | 1,001 | 0.85 | 0 | 0 | vue-tools | 468 | 2024-10-09T15:00:21.144292 | Apache-2.0 | false | 93d6e1a210375d9f17393dba3a1d2926 |
from collections.abc import Iterable\nfrom typing import (\n Any,\n NamedTuple,\n Never,\n SupportsIndex,\n SupportsInt,\n TypeAlias,\n TypeVar,\n overload,\n)\nfrom typing import Literal as L\n\nimport numpy as np\nfrom numpy import (\n complex128,\n complexfloating,\n float64,\n # other\n floating,\n int32,\n object_,\n signedinteger,\n timedelta64,\n unsignedinteger,\n # re-exports\n vecdot,\n)\nfrom numpy._core.fromnumeric import matrix_transpose\nfrom numpy._core.numeric import tensordot\nfrom numpy._typing import (\n ArrayLike,\n DTypeLike,\n NDArray,\n _ArrayLike,\n _ArrayLikeBool_co,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _ArrayLikeInt_co,\n _ArrayLikeObject_co,\n _ArrayLikeTD64_co,\n _ArrayLikeUInt_co,\n)\nfrom numpy.linalg import LinAlgError\n\n__all__ = [\n "matrix_power",\n "solve",\n "tensorsolve",\n "tensorinv",\n "inv",\n "cholesky",\n "eigvals",\n "eigvalsh",\n "pinv",\n "slogdet",\n "det",\n "svd",\n "svdvals",\n "eig",\n "eigh",\n "lstsq",\n "norm",\n "qr",\n "cond",\n "matrix_rank",\n "LinAlgError",\n "multi_dot",\n "trace",\n "diagonal",\n "cross",\n "outer",\n "tensordot",\n "matmul",\n "matrix_transpose",\n "matrix_norm",\n "vector_norm",\n "vecdot",\n]\n\n_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])\n\n_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"]\n\n###\n\nfortran_int = np.intc\n\nclass EigResult(NamedTuple):\n eigenvalues: NDArray[Any]\n eigenvectors: NDArray[Any]\n\nclass EighResult(NamedTuple):\n eigenvalues: NDArray[Any]\n eigenvectors: NDArray[Any]\n\nclass QRResult(NamedTuple):\n Q: NDArray[Any]\n R: NDArray[Any]\n\nclass SlogdetResult(NamedTuple):\n # TODO: `sign` and `logabsdet` are scalars for input 2D arrays and\n # a `(x.ndim - 2)`` dimensionl arrays otherwise\n sign: Any\n logabsdet: Any\n\nclass SVDResult(NamedTuple):\n U: NDArray[Any]\n S: NDArray[Any]\n Vh: NDArray[Any]\n\n@overload\ndef tensorsolve(\n a: _ArrayLikeInt_co,\n b: _ArrayLikeInt_co,\n axes: Iterable[int] | None = ...,\n) -> 
NDArray[float64]: ...\n@overload\ndef tensorsolve(\n a: _ArrayLikeFloat_co,\n b: _ArrayLikeFloat_co,\n axes: Iterable[int] | None = ...,\n) -> NDArray[floating]: ...\n@overload\ndef tensorsolve(\n a: _ArrayLikeComplex_co,\n b: _ArrayLikeComplex_co,\n axes: Iterable[int] | None = ...,\n) -> NDArray[complexfloating]: ...\n\n@overload\ndef solve(\n a: _ArrayLikeInt_co,\n b: _ArrayLikeInt_co,\n) -> NDArray[float64]: ...\n@overload\ndef solve(\n a: _ArrayLikeFloat_co,\n b: _ArrayLikeFloat_co,\n) -> NDArray[floating]: ...\n@overload\ndef solve(\n a: _ArrayLikeComplex_co,\n b: _ArrayLikeComplex_co,\n) -> NDArray[complexfloating]: ...\n\n@overload\ndef tensorinv(\n a: _ArrayLikeInt_co,\n ind: int = ...,\n) -> NDArray[float64]: ...\n@overload\ndef tensorinv(\n a: _ArrayLikeFloat_co,\n ind: int = ...,\n) -> NDArray[floating]: ...\n@overload\ndef tensorinv(\n a: _ArrayLikeComplex_co,\n ind: int = ...,\n) -> NDArray[complexfloating]: ...\n\n@overload\ndef inv(a: _ArrayLikeInt_co) -> NDArray[float64]: ...\n@overload\ndef inv(a: _ArrayLikeFloat_co) -> NDArray[floating]: ...\n@overload\ndef inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n# TODO: The supported input and output dtypes are dependent on the value of `n`.\n# For example: `n < 0` always casts integer types to float64\ndef matrix_power(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n n: SupportsIndex,\n) -> NDArray[Any]: ...\n\n@overload\ndef cholesky(a: _ArrayLikeInt_co, /, *, upper: bool = False) -> NDArray[float64]: ...\n@overload\ndef cholesky(a: _ArrayLikeFloat_co, /, *, upper: bool = False) -> NDArray[floating]: ...\n@overload\ndef cholesky(a: _ArrayLikeComplex_co, /, *, upper: bool = False) -> NDArray[complexfloating]: ...\n\n@overload\ndef outer(x1: _ArrayLike[Never], x2: _ArrayLike[Never]) -> NDArray[Any]: ...\n@overload\ndef outer(x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co) -> NDArray[np.bool]: ...\n@overload\ndef outer(x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co) -> 
NDArray[unsignedinteger]: ...\n@overload\ndef outer(x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co) -> NDArray[signedinteger]: ...\n@overload\ndef outer(x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co) -> NDArray[floating]: ...\n@overload\ndef outer(\n x1: _ArrayLikeComplex_co,\n x2: _ArrayLikeComplex_co,\n) -> NDArray[complexfloating]: ...\n@overload\ndef outer(\n x1: _ArrayLikeTD64_co,\n x2: _ArrayLikeTD64_co,\n out: None = ...,\n) -> NDArray[timedelta64]: ...\n@overload\ndef outer(x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co) -> NDArray[object_]: ...\n@overload\ndef outer(\n x1: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,\n x2: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,\n) -> _ArrayT: ...\n\n@overload\ndef qr(a: _ArrayLikeInt_co, mode: _ModeKind = ...) -> QRResult: ...\n@overload\ndef qr(a: _ArrayLikeFloat_co, mode: _ModeKind = ...) -> QRResult: ...\n@overload\ndef qr(a: _ArrayLikeComplex_co, mode: _ModeKind = ...) -> QRResult: ...\n\n@overload\ndef eigvals(a: _ArrayLikeInt_co) -> NDArray[float64] | NDArray[complex128]: ...\n@overload\ndef eigvals(a: _ArrayLikeFloat_co) -> NDArray[floating] | NDArray[complexfloating]: ...\n@overload\ndef eigvals(a: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef eigvalsh(a: _ArrayLikeInt_co, UPLO: L["L", "U", "l", "u"] = ...) -> NDArray[float64]: ...\n@overload\ndef eigvalsh(a: _ArrayLikeComplex_co, UPLO: L["L", "U", "l", "u"] = ...) 
-> NDArray[floating]: ...\n\n@overload\ndef eig(a: _ArrayLikeInt_co) -> EigResult: ...\n@overload\ndef eig(a: _ArrayLikeFloat_co) -> EigResult: ...\n@overload\ndef eig(a: _ArrayLikeComplex_co) -> EigResult: ...\n\n@overload\ndef eigh(\n a: _ArrayLikeInt_co,\n UPLO: L["L", "U", "l", "u"] = ...,\n) -> EighResult: ...\n@overload\ndef eigh(\n a: _ArrayLikeFloat_co,\n UPLO: L["L", "U", "l", "u"] = ...,\n) -> EighResult: ...\n@overload\ndef eigh(\n a: _ArrayLikeComplex_co,\n UPLO: L["L", "U", "l", "u"] = ...,\n) -> EighResult: ...\n\n@overload\ndef svd(\n a: _ArrayLikeInt_co,\n full_matrices: bool = ...,\n compute_uv: L[True] = ...,\n hermitian: bool = ...,\n) -> SVDResult: ...\n@overload\ndef svd(\n a: _ArrayLikeFloat_co,\n full_matrices: bool = ...,\n compute_uv: L[True] = ...,\n hermitian: bool = ...,\n) -> SVDResult: ...\n@overload\ndef svd(\n a: _ArrayLikeComplex_co,\n full_matrices: bool = ...,\n compute_uv: L[True] = ...,\n hermitian: bool = ...,\n) -> SVDResult: ...\n@overload\ndef svd(\n a: _ArrayLikeInt_co,\n full_matrices: bool = ...,\n compute_uv: L[False] = ...,\n hermitian: bool = ...,\n) -> NDArray[float64]: ...\n@overload\ndef svd(\n a: _ArrayLikeComplex_co,\n full_matrices: bool = ...,\n compute_uv: L[False] = ...,\n hermitian: bool = ...,\n) -> NDArray[floating]: ...\n\ndef svdvals(\n x: _ArrayLikeInt_co | _ArrayLikeFloat_co | _ArrayLikeComplex_co\n) -> NDArray[floating]: ...\n\n# TODO: Returns a scalar for 2D arrays and\n# a `(x.ndim - 2)`` dimensionl array otherwise\ndef cond(x: _ArrayLikeComplex_co, p: float | L["fro", "nuc"] | None = ...) 
-> Any: ...\n\n# TODO: Returns `int` for <2D arrays and `intp` otherwise\ndef matrix_rank(\n A: _ArrayLikeComplex_co,\n tol: _ArrayLikeFloat_co | None = ...,\n hermitian: bool = ...,\n *,\n rtol: _ArrayLikeFloat_co | None = ...,\n) -> Any: ...\n\n@overload\ndef pinv(\n a: _ArrayLikeInt_co,\n rcond: _ArrayLikeFloat_co = ...,\n hermitian: bool = ...,\n) -> NDArray[float64]: ...\n@overload\ndef pinv(\n a: _ArrayLikeFloat_co,\n rcond: _ArrayLikeFloat_co = ...,\n hermitian: bool = ...,\n) -> NDArray[floating]: ...\n@overload\ndef pinv(\n a: _ArrayLikeComplex_co,\n rcond: _ArrayLikeFloat_co = ...,\n hermitian: bool = ...,\n) -> NDArray[complexfloating]: ...\n\n# TODO: Returns a 2-tuple of scalars for 2D arrays and\n# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise\ndef slogdet(a: _ArrayLikeComplex_co) -> SlogdetResult: ...\n\n# TODO: Returns a 2-tuple of scalars for 2D arrays and\n# a 2-tuple of `(a.ndim - 2)`` dimensionl arrays otherwise\ndef det(a: _ArrayLikeComplex_co) -> Any: ...\n\n@overload\ndef lstsq(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, rcond: float | None = ...) -> tuple[\n NDArray[float64],\n NDArray[float64],\n int32,\n NDArray[float64],\n]: ...\n@overload\ndef lstsq(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, rcond: float | None = ...) -> tuple[\n NDArray[floating],\n NDArray[floating],\n int32,\n NDArray[floating],\n]: ...\n@overload\ndef lstsq(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, rcond: float | None = ...) -> tuple[\n NDArray[complexfloating],\n NDArray[floating],\n int32,\n NDArray[floating],\n]: ...\n\n@overload\ndef norm(\n x: ArrayLike,\n ord: float | L["fro", "nuc"] | None = ...,\n axis: None = ...,\n keepdims: bool = ...,\n) -> floating: ...\n@overload\ndef norm(\n x: ArrayLike,\n ord: float | L["fro", "nuc"] | None = ...,\n axis: SupportsInt | SupportsIndex | tuple[int, ...] 
= ...,\n keepdims: bool = ...,\n) -> Any: ...\n\n@overload\ndef matrix_norm(\n x: ArrayLike,\n /,\n *,\n ord: float | L["fro", "nuc"] | None = ...,\n keepdims: bool = ...,\n) -> floating: ...\n@overload\ndef matrix_norm(\n x: ArrayLike,\n /,\n *,\n ord: float | L["fro", "nuc"] | None = ...,\n keepdims: bool = ...,\n) -> Any: ...\n\n@overload\ndef vector_norm(\n x: ArrayLike,\n /,\n *,\n axis: None = ...,\n ord: float | None = ...,\n keepdims: bool = ...,\n) -> floating: ...\n@overload\ndef vector_norm(\n x: ArrayLike,\n /,\n *,\n axis: SupportsInt | SupportsIndex | tuple[int, ...] = ...,\n ord: float | None = ...,\n keepdims: bool = ...,\n) -> Any: ...\n\n# TODO: Returns a scalar or array\ndef multi_dot(\n arrays: Iterable[_ArrayLikeComplex_co | _ArrayLikeObject_co | _ArrayLikeTD64_co],\n *,\n out: NDArray[Any] | None = ...,\n) -> Any: ...\n\ndef diagonal(\n x: ArrayLike, # >= 2D array\n /,\n *,\n offset: SupportsIndex = ...,\n) -> NDArray[Any]: ...\n\ndef trace(\n x: ArrayLike, # >= 2D array\n /,\n *,\n offset: SupportsIndex = ...,\n dtype: DTypeLike = ...,\n) -> Any: ...\n\n@overload\ndef cross(\n x1: _ArrayLikeUInt_co,\n x2: _ArrayLikeUInt_co,\n /,\n *,\n axis: int = ...,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef cross(\n x1: _ArrayLikeInt_co,\n x2: _ArrayLikeInt_co,\n /,\n *,\n axis: int = ...,\n) -> NDArray[signedinteger]: ...\n@overload\ndef cross(\n x1: _ArrayLikeFloat_co,\n x2: _ArrayLikeFloat_co,\n /,\n *,\n axis: int = ...,\n) -> NDArray[floating]: ...\n@overload\ndef cross(\n x1: _ArrayLikeComplex_co,\n x2: _ArrayLikeComplex_co,\n /,\n *,\n axis: int = ...,\n) -> NDArray[complexfloating]: ...\n\n@overload\ndef matmul(\n x1: _ArrayLikeInt_co,\n x2: _ArrayLikeInt_co,\n) -> NDArray[signedinteger]: ...\n@overload\ndef matmul(\n x1: _ArrayLikeUInt_co,\n x2: _ArrayLikeUInt_co,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef matmul(\n x1: _ArrayLikeFloat_co,\n x2: _ArrayLikeFloat_co,\n) -> NDArray[floating]: ...\n@overload\ndef matmul(\n x1: 
_ArrayLikeComplex_co,\n x2: _ArrayLikeComplex_co,\n) -> NDArray[complexfloating]: ...\n | .venv\Lib\site-packages\numpy\linalg\_linalg.pyi | _linalg.pyi | Other | 11,623 | 0.95 | 0.170124 | 0.060948 | node-utils | 583 | 2024-03-20T20:44:09.855774 | Apache-2.0 | false | 0885a254f4940431712124c18b404800 |
!<arch>\n/ -1 0 194 `\n | .venv\Lib\site-packages\numpy\linalg\_umath_linalg.cp313-win_amd64.lib | _umath_linalg.cp313-win_amd64.lib | Other | 2,120 | 0.8 | 0 | 0 | react-lib | 4 | 2025-02-06T18:06:04.039460 | Apache-2.0 | false | be1df0919eeaaf7439188c943f6e9ec6 |
MZ | .venv\Lib\site-packages\numpy\linalg\_umath_linalg.cp313-win_amd64.pyd | _umath_linalg.cp313-win_amd64.pyd | Other | 111,616 | 0.75 | 0.015823 | 0 | node-utils | 292 | 2024-08-21T13:04:08.312979 | Apache-2.0 | false | dad8c35150bd1996d3e0f2fe484f0f1d |
from typing import Final\nfrom typing import Literal as L\n\nimport numpy as np\nfrom numpy._typing._ufunc import _GUFunc_Nin2_Nout1\n\n__version__: Final[str] = ...\n_ilp64: Final[bool] = ...\n\n###\n# 1 -> 1\n\n# (m,m) -> ()\ndet: Final[np.ufunc] = ...\n# (m,m) -> (m)\ncholesky_lo: Final[np.ufunc] = ...\ncholesky_up: Final[np.ufunc] = ...\neigvals: Final[np.ufunc] = ...\neigvalsh_lo: Final[np.ufunc] = ...\neigvalsh_up: Final[np.ufunc] = ...\n# (m,m) -> (m,m)\ninv: Final[np.ufunc] = ...\n# (m,n) -> (p)\nqr_r_raw: Final[np.ufunc] = ...\nsvd: Final[np.ufunc] = ...\n\n###\n# 1 -> 2\n\n# (m,m) -> (), ()\nslogdet: Final[np.ufunc] = ...\n# (m,m) -> (m), (m,m)\neig: Final[np.ufunc] = ...\neigh_lo: Final[np.ufunc] = ...\neigh_up: Final[np.ufunc] = ...\n\n###\n# 2 -> 1\n\n# (m,n), (n) -> (m,m)\nqr_complete: Final[_GUFunc_Nin2_Nout1[L["qr_complete"], L[2], None, L["(m,n),(n)->(m,m)"]]] = ...\n# (m,n), (k) -> (m,k)\nqr_reduced: Final[_GUFunc_Nin2_Nout1[L["qr_reduced"], L[2], None, L["(m,n),(k)->(m,k)"]]] = ...\n# (m,m), (m,n) -> (m,n)\nsolve: Final[_GUFunc_Nin2_Nout1[L["solve"], L[4], None, L["(m,m),(m,n)->(m,n)"]]] = ...\n# (m,m), (m) -> (m)\nsolve1: Final[_GUFunc_Nin2_Nout1[L["solve1"], L[4], None, L["(m,m),(m)->(m)"]]] = ...\n\n###\n# 1 -> 3\n\n# (m,n) -> (m,m), (p), (n,n)\nsvd_f: Final[np.ufunc] = ...\n# (m,n) -> (m,p), (p), (p,n)\nsvd_s: Final[np.ufunc] = ...\n\n###\n# 3 -> 4\n\n# (m,n), (m,k), () -> (n,k), (k), (), (p)\nlstsq: Final[np.ufunc] = ...\n | .venv\Lib\site-packages\numpy\linalg\_umath_linalg.pyi | _umath_linalg.pyi | Other | 1,470 | 0.95 | 0 | 0.469388 | vue-tools | 851 | 2024-12-01T07:39:15.199556 | MIT | false | 2913a60e751ab014be7186e4b18a6789 |
"""\n``numpy.linalg``\n================\n\nThe NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient\nlow level implementations of standard linear algebra algorithms. Those\nlibraries may be provided by NumPy itself using C versions of a subset of their\nreference implementations but, when possible, highly optimized libraries that\ntake advantage of specialized processor functionality are preferred. Examples\nof such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries\nare multithreaded and processor dependent, environmental variables and external\npackages such as threadpoolctl may be needed to control the number of threads\nor specify the processor architecture.\n\n- OpenBLAS: https://www.openblas.net/\n- threadpoolctl: https://github.com/joblib/threadpoolctl\n\nPlease note that the most-used linear algebra functions in NumPy are present in\nthe main ``numpy`` namespace rather than in ``numpy.linalg``. There are:\n``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``,\n``einsum_path`` and ``kron``.\n\nFunctions present in numpy.linalg are listed below.\n\n\nMatrix and vector products\n--------------------------\n\n cross\n multi_dot\n matrix_power\n tensordot\n matmul\n\nDecompositions\n--------------\n\n cholesky\n outer\n qr\n svd\n svdvals\n\nMatrix eigenvalues\n------------------\n\n eig\n eigh\n eigvals\n eigvalsh\n\nNorms and other numbers\n-----------------------\n\n norm\n matrix_norm\n vector_norm\n cond\n det\n matrix_rank\n slogdet\n trace (Array API compatible)\n\nSolving equations and inverting matrices\n----------------------------------------\n\n solve\n tensorsolve\n lstsq\n inv\n pinv\n tensorinv\n\nOther matrix operations\n-----------------------\n\n diagonal (Array API compatible)\n matrix_transpose (Array API compatible)\n\nExceptions\n----------\n\n LinAlgError\n\n"""\n# To get sub-modules\nfrom . 
import (\n _linalg,\n linalg, # deprecated in NumPy 2.0\n)\nfrom ._linalg import *\n\n__all__ = _linalg.__all__.copy() # noqa: PLE0605\n\nfrom numpy._pytesttester import PytestTester\n\ntest = PytestTester(__name__)\ndel PytestTester\n | .venv\Lib\site-packages\numpy\linalg\__init__.py | __init__.py | Python | 2,222 | 0.95 | 0 | 0.013333 | react-lib | 793 | 2024-05-26T03:26:14.248212 | GPL-3.0 | false | b0cd0cab2e60051e5736363a0bb14053 |
from . import _linalg as _linalg\nfrom . import _umath_linalg as _umath_linalg\nfrom . import linalg as linalg\nfrom ._linalg import (\n cholesky,\n cond,\n cross,\n det,\n diagonal,\n eig,\n eigh,\n eigvals,\n eigvalsh,\n inv,\n lstsq,\n matmul,\n matrix_norm,\n matrix_power,\n matrix_rank,\n matrix_transpose,\n multi_dot,\n norm,\n outer,\n pinv,\n qr,\n slogdet,\n solve,\n svd,\n svdvals,\n tensordot,\n tensorinv,\n tensorsolve,\n trace,\n vecdot,\n vector_norm,\n)\n\n__all__ = [\n "LinAlgError",\n "cholesky",\n "cond",\n "cross",\n "det",\n "diagonal",\n "eig",\n "eigh",\n "eigvals",\n "eigvalsh",\n "inv",\n "lstsq",\n "matmul",\n "matrix_norm",\n "matrix_power",\n "matrix_rank",\n "matrix_transpose",\n "multi_dot",\n "norm",\n "outer",\n "pinv",\n "qr",\n "slogdet",\n "solve",\n "svd",\n "svdvals",\n "tensordot",\n "tensorinv",\n "tensorsolve",\n "trace",\n "vecdot",\n "vector_norm",\n]\n\nclass LinAlgError(ValueError): ...\n | .venv\Lib\site-packages\numpy\linalg\__init__.pyi | __init__.pyi | Other | 1,133 | 0.85 | 0.013699 | 0 | react-lib | 473 | 2025-04-17T22:15:24.030635 | BSD-3-Clause | false | 247396d354ef444f2985136fd470011b |
"""Test deprecation and future warnings.\n\n"""\nimport numpy as np\nfrom numpy.testing import assert_warns\n\n\ndef test_qr_mode_full_future_warning():\n """Check mode='full' FutureWarning.\n\n In numpy 1.8 the mode options 'full' and 'economic' in linalg.qr were\n deprecated. The release date will probably be sometime in the summer\n of 2013.\n\n """\n a = np.eye(2)\n assert_warns(DeprecationWarning, np.linalg.qr, a, mode='full')\n assert_warns(DeprecationWarning, np.linalg.qr, a, mode='f')\n assert_warns(DeprecationWarning, np.linalg.qr, a, mode='economic')\n assert_warns(DeprecationWarning, np.linalg.qr, a, mode='e')\n | .venv\Lib\site-packages\numpy\linalg\tests\test_deprecations.py | test_deprecations.py | Python | 660 | 0.85 | 0.05 | 0 | vue-tools | 212 | 2024-05-30T14:05:29.961288 | MIT | true | 2a08aa64d05a819eaad56cbd033ea9d4 |
""" Test functions for linalg module\n\n"""\nimport itertools\nimport os\nimport subprocess\nimport sys\nimport textwrap\nimport threading\nimport traceback\n\nimport pytest\n\nimport numpy as np\nfrom numpy import (\n array,\n asarray,\n atleast_2d,\n cdouble,\n csingle,\n dot,\n double,\n identity,\n inf,\n linalg,\n matmul,\n multiply,\n single,\n)\nfrom numpy._core import swapaxes\nfrom numpy.exceptions import AxisError\nfrom numpy.linalg import LinAlgError, matrix_power, matrix_rank, multi_dot, norm\nfrom numpy.linalg._linalg import _multi_dot_matrix_chain_order\nfrom numpy.testing import (\n HAS_LAPACK64,\n IS_WASM,\n NOGIL_BUILD,\n assert_,\n assert_allclose,\n assert_almost_equal,\n assert_array_equal,\n assert_equal,\n assert_raises,\n assert_raises_regex,\n suppress_warnings,\n)\n\ntry:\n import numpy.linalg.lapack_lite\nexcept ImportError:\n # May be broken when numpy was built without BLAS/LAPACK present\n # If so, ensure we don't break the whole test suite - the `lapack_lite`\n # submodule should be removed, it's only used in two tests in this file.\n pass\n\n\ndef consistent_subclass(out, in_):\n # For ndarray subclass input, our output should have the same subclass\n # (non-ndarray input gets converted to ndarray).\n return type(out) is (type(in_) if isinstance(in_, np.ndarray)\n else np.ndarray)\n\n\nold_assert_almost_equal = assert_almost_equal\n\n\ndef assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):\n if asarray(a).dtype.type in (single, csingle):\n decimal = single_decimal\n else:\n decimal = double_decimal\n old_assert_almost_equal(a, b, decimal=decimal, **kw)\n\n\ndef get_real_dtype(dtype):\n return {single: single, double: double,\n csingle: single, cdouble: double}[dtype]\n\n\ndef get_complex_dtype(dtype):\n return {single: csingle, double: cdouble,\n csingle: csingle, cdouble: cdouble}[dtype]\n\n\ndef get_rtol(dtype):\n # Choose a safe rtol\n if dtype in (single, csingle):\n return 1e-5\n else:\n return 1e-11\n\n\n# 
used to categorize tests\nall_tags = {\n 'square', 'nonsquare', 'hermitian', # mutually exclusive\n 'generalized', 'size-0', 'strided' # optional additions\n}\n\n\nclass LinalgCase:\n def __init__(self, name, a, b, tags=set()):\n """\n A bundle of arguments to be passed to a test case, with an identifying\n name, the operands a and b, and a set of tags to filter the tests\n """\n assert_(isinstance(name, str))\n self.name = name\n self.a = a\n self.b = b\n self.tags = frozenset(tags) # prevent shared tags\n\n def check(self, do):\n """\n Run the function `do` on this test case, expanding arguments\n """\n do(self.a, self.b, tags=self.tags)\n\n def __repr__(self):\n return f'<LinalgCase: {self.name}>'\n\n\ndef apply_tag(tag, cases):\n """\n Add the given tag (a string) to each of the cases (a list of LinalgCase\n objects)\n """\n assert tag in all_tags, "Invalid tag"\n for case in cases:\n case.tags = case.tags | {tag}\n return cases\n\n\n#\n# Base test cases\n#\n\nnp.random.seed(1234)\n\nCASES = []\n\n# square test cases\nCASES += apply_tag('square', [\n LinalgCase("single",\n array([[1., 2.], [3., 4.]], dtype=single),\n array([2., 1.], dtype=single)),\n LinalgCase("double",\n array([[1., 2.], [3., 4.]], dtype=double),\n array([2., 1.], dtype=double)),\n LinalgCase("double_2",\n array([[1., 2.], [3., 4.]], dtype=double),\n array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),\n LinalgCase("csingle",\n array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),\n array([2. + 1j, 1. + 2j], dtype=csingle)),\n LinalgCase("cdouble",\n array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),\n array([2. + 1j, 1. + 2j], dtype=cdouble)),\n LinalgCase("cdouble_2",\n array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),\n array([[2. + 1j, 1. 
+ 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),\n LinalgCase("0x0",\n np.empty((0, 0), dtype=double),\n np.empty((0,), dtype=double),\n tags={'size-0'}),\n LinalgCase("8x8",\n np.random.rand(8, 8),\n np.random.rand(8)),\n LinalgCase("1x1",\n np.random.rand(1, 1),\n np.random.rand(1)),\n LinalgCase("nonarray",\n [[1, 2], [3, 4]],\n [2, 1]),\n])\n\n# non-square test-cases\nCASES += apply_tag('nonsquare', [\n LinalgCase("single_nsq_1",\n array([[1., 2., 3.], [3., 4., 6.]], dtype=single),\n array([2., 1.], dtype=single)),\n LinalgCase("single_nsq_2",\n array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),\n array([2., 1., 3.], dtype=single)),\n LinalgCase("double_nsq_1",\n array([[1., 2., 3.], [3., 4., 6.]], dtype=double),\n array([2., 1.], dtype=double)),\n LinalgCase("double_nsq_2",\n array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),\n array([2., 1., 3.], dtype=double)),\n LinalgCase("csingle_nsq_1",\n array(\n [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),\n array([2. + 1j, 1. + 2j], dtype=csingle)),\n LinalgCase("csingle_nsq_2",\n array(\n [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),\n array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),\n LinalgCase("cdouble_nsq_1",\n array(\n [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),\n array([2. + 1j, 1. + 2j], dtype=cdouble)),\n LinalgCase("cdouble_nsq_2",\n array(\n [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),\n array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),\n LinalgCase("cdouble_nsq_1_2",\n array(\n [[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),\n array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),\n LinalgCase("cdouble_nsq_2_2",\n array(\n [[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),\n array([[2. + 1j, 1. 
+ 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),\n LinalgCase("8x11",\n np.random.rand(8, 11),\n np.random.rand(8)),\n LinalgCase("1x5",\n np.random.rand(1, 5),\n np.random.rand(1)),\n LinalgCase("5x1",\n np.random.rand(5, 1),\n np.random.rand(5)),\n LinalgCase("0x4",\n np.random.rand(0, 4),\n np.random.rand(0),\n tags={'size-0'}),\n LinalgCase("4x0",\n np.random.rand(4, 0),\n np.random.rand(4),\n tags={'size-0'}),\n])\n\n# hermitian test-cases\nCASES += apply_tag('hermitian', [\n LinalgCase("hsingle",\n array([[1., 2.], [2., 1.]], dtype=single),\n None),\n LinalgCase("hdouble",\n array([[1., 2.], [2., 1.]], dtype=double),\n None),\n LinalgCase("hcsingle",\n array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),\n None),\n LinalgCase("hcdouble",\n array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),\n None),\n LinalgCase("hempty",\n np.empty((0, 0), dtype=double),\n None,\n tags={'size-0'}),\n LinalgCase("hnonarray",\n [[1, 2], [2, 1]],\n None),\n LinalgCase("matrix_b_only",\n array([[1., 2.], [2., 1.]]),\n None),\n LinalgCase("hmatrix_1x1",\n np.random.rand(1, 1),\n None),\n])\n\n\n#\n# Gufunc test cases\n#\ndef _make_generalized_cases():\n new_cases = []\n\n for case in CASES:\n if not isinstance(case.a, np.ndarray):\n continue\n\n a = np.array([case.a, 2 * case.a, 3 * case.a])\n if case.b is None:\n b = None\n elif case.b.ndim == 1:\n b = case.b\n else:\n b = np.array([case.b, 7 * case.b, 6 * case.b])\n new_case = LinalgCase(case.name + "_tile3", a, b,\n tags=case.tags | {'generalized'})\n new_cases.append(new_case)\n\n a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)\n if case.b is None:\n b = None\n elif case.b.ndim == 1:\n b = np.array([case.b] * 2 * 3 * a.shape[-1])\\n .reshape((3, 2) + case.a.shape[-2:])\n else:\n b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)\n new_case = LinalgCase(case.name + "_tile213", a, b,\n tags=case.tags | {'generalized'})\n new_cases.append(new_case)\n\n return new_cases\n\n\nCASES += 
_make_generalized_cases()\n\n\n#\n# Generate stride combination variations of the above\n#\ndef _stride_comb_iter(x):\n """\n Generate cartesian product of strides for all axes\n """\n\n if not isinstance(x, np.ndarray):\n yield x, "nop"\n return\n\n stride_set = [(1,)] * x.ndim\n stride_set[-1] = (1, 3, -4)\n if x.ndim > 1:\n stride_set[-2] = (1, 3, -4)\n if x.ndim > 2:\n stride_set[-3] = (1, -4)\n\n for repeats in itertools.product(*tuple(stride_set)):\n new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]\n slices = tuple(slice(None, None, repeat) for repeat in repeats)\n\n # new array with different strides, but same data\n xi = np.empty(new_shape, dtype=x.dtype)\n xi.view(np.uint32).fill(0xdeadbeef)\n xi = xi[slices]\n xi[...] = x\n xi = xi.view(x.__class__)\n assert_(np.all(xi == x))\n yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])\n\n # generate also zero strides if possible\n if x.ndim >= 1 and x.shape[-1] == 1:\n s = list(x.strides)\n s[-1] = 0\n xi = np.lib.stride_tricks.as_strided(x, strides=s)\n yield xi, "stride_xxx_0"\n if x.ndim >= 2 and x.shape[-2] == 1:\n s = list(x.strides)\n s[-2] = 0\n xi = np.lib.stride_tricks.as_strided(x, strides=s)\n yield xi, "stride_xxx_0_x"\n if x.ndim >= 2 and x.shape[:-2] == (1, 1):\n s = list(x.strides)\n s[-1] = 0\n s[-2] = 0\n xi = np.lib.stride_tricks.as_strided(x, strides=s)\n yield xi, "stride_xxx_0_0"\n\n\ndef _make_strided_cases():\n new_cases = []\n for case in CASES:\n for a, a_label in _stride_comb_iter(case.a):\n for b, b_label in _stride_comb_iter(case.b):\n new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b,\n tags=case.tags | {'strided'})\n new_cases.append(new_case)\n return new_cases\n\n\nCASES += _make_strided_cases()\n\n\n#\n# Test different routines against the above cases\n#\nclass LinalgTestCase:\n TEST_CASES = CASES\n\n def check_cases(self, require=set(), exclude=set()):\n """\n Run func on each of the cases with all of the tags in require, and none\n of the 
tags in exclude\n """\n for case in self.TEST_CASES:\n # filter by require and exclude\n if case.tags & require != require:\n continue\n if case.tags & exclude:\n continue\n\n try:\n case.check(self.do)\n except Exception as e:\n msg = f'In test case: {case!r}\n\n'\n msg += traceback.format_exc()\n raise AssertionError(msg) from e\n\n\nclass LinalgSquareTestCase(LinalgTestCase):\n\n def test_sq_cases(self):\n self.check_cases(require={'square'},\n exclude={'generalized', 'size-0'})\n\n def test_empty_sq_cases(self):\n self.check_cases(require={'square', 'size-0'},\n exclude={'generalized'})\n\n\nclass LinalgNonsquareTestCase(LinalgTestCase):\n\n def test_nonsq_cases(self):\n self.check_cases(require={'nonsquare'},\n exclude={'generalized', 'size-0'})\n\n def test_empty_nonsq_cases(self):\n self.check_cases(require={'nonsquare', 'size-0'},\n exclude={'generalized'})\n\n\nclass HermitianTestCase(LinalgTestCase):\n\n def test_herm_cases(self):\n self.check_cases(require={'hermitian'},\n exclude={'generalized', 'size-0'})\n\n def test_empty_herm_cases(self):\n self.check_cases(require={'hermitian', 'size-0'},\n exclude={'generalized'})\n\n\nclass LinalgGeneralizedSquareTestCase(LinalgTestCase):\n\n @pytest.mark.slow\n def test_generalized_sq_cases(self):\n self.check_cases(require={'generalized', 'square'},\n exclude={'size-0'})\n\n @pytest.mark.slow\n def test_generalized_empty_sq_cases(self):\n self.check_cases(require={'generalized', 'square', 'size-0'})\n\n\nclass LinalgGeneralizedNonsquareTestCase(LinalgTestCase):\n\n @pytest.mark.slow\n def test_generalized_nonsq_cases(self):\n self.check_cases(require={'generalized', 'nonsquare'},\n exclude={'size-0'})\n\n @pytest.mark.slow\n def test_generalized_empty_nonsq_cases(self):\n self.check_cases(require={'generalized', 'nonsquare', 'size-0'})\n\n\nclass HermitianGeneralizedTestCase(LinalgTestCase):\n\n @pytest.mark.slow\n def test_generalized_herm_cases(self):\n self.check_cases(require={'generalized', 'hermitian'},\n 
exclude={'size-0'})\n\n @pytest.mark.slow\n def test_generalized_empty_herm_cases(self):\n self.check_cases(require={'generalized', 'hermitian', 'size-0'},\n exclude={'none'})\n\n\ndef identity_like_generalized(a):\n a = asarray(a)\n if a.ndim >= 3:\n r = np.empty(a.shape, dtype=a.dtype)\n r[...] = identity(a.shape[-2])\n return r\n else:\n return identity(a.shape[0])\n\n\nclass SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):\n # kept apart from TestSolve for use for testing with matrices.\n def do(self, a, b, tags):\n x = linalg.solve(a, b)\n if np.array(b).ndim == 1:\n # When a is (..., M, M) and b is (M,), it is the same as when b is\n # (M, 1), except the result has shape (..., M)\n adotx = matmul(a, x[..., None])[..., 0]\n assert_almost_equal(np.broadcast_to(b, adotx.shape), adotx)\n else:\n adotx = matmul(a, x)\n assert_almost_equal(b, adotx)\n assert_(consistent_subclass(x, b))\n\n\nclass TestSolve(SolveCases):\n @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])\n def test_types(self, dtype):\n x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)\n assert_equal(linalg.solve(x, x).dtype, dtype)\n\n def test_1_d(self):\n class ArraySubclass(np.ndarray):\n pass\n a = np.arange(8).reshape(2, 2, 2)\n b = np.arange(2).view(ArraySubclass)\n result = linalg.solve(a, b)\n assert result.shape == (2, 2)\n\n # If b is anything other than 1-D it should be treated as a stack of\n # matrices\n b = np.arange(4).reshape(2, 2).view(ArraySubclass)\n result = linalg.solve(a, b)\n assert result.shape == (2, 2, 2)\n\n b = np.arange(2).reshape(1, 2).view(ArraySubclass)\n assert_raises(ValueError, linalg.solve, a, b)\n\n def test_0_size(self):\n class ArraySubclass(np.ndarray):\n pass\n # Test system of 0x0 matrices\n a = np.arange(8).reshape(2, 2, 2)\n b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)\n\n expected = linalg.solve(a, b)[:, 0:0, :]\n result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])\n assert_array_equal(result, expected)\n 
assert_(isinstance(result, ArraySubclass))\n\n # Test errors for non-square and only b's dimension being 0\n assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)\n assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])\n\n # Test broadcasting error\n b = np.arange(6).reshape(1, 3, 2) # broadcasting error\n assert_raises(ValueError, linalg.solve, a, b)\n assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])\n\n # Test zero "single equations" with 0x0 matrices.\n b = np.arange(2).view(ArraySubclass)\n expected = linalg.solve(a, b)[:, 0:0]\n result = linalg.solve(a[:, 0:0, 0:0], b[0:0])\n assert_array_equal(result, expected)\n assert_(isinstance(result, ArraySubclass))\n\n b = np.arange(3).reshape(1, 3)\n assert_raises(ValueError, linalg.solve, a, b)\n assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])\n assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)\n\n def test_0_size_k(self):\n # test zero multiple equation (K=0) case.\n class ArraySubclass(np.ndarray):\n pass\n a = np.arange(4).reshape(1, 2, 2)\n b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)\n\n expected = linalg.solve(a, b)[:, :, 0:0]\n result = linalg.solve(a, b[:, :, 0:0])\n assert_array_equal(result, expected)\n assert_(isinstance(result, ArraySubclass))\n\n # test both zero.\n expected = linalg.solve(a, b)[:, 0:0, 0:0]\n result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])\n assert_array_equal(result, expected)\n assert_(isinstance(result, ArraySubclass))\n\n\nclass InvCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):\n\n def do(self, a, b, tags):\n a_inv = linalg.inv(a)\n assert_almost_equal(matmul(a, a_inv),\n identity_like_generalized(a))\n assert_(consistent_subclass(a_inv, a))\n\n\nclass TestInv(InvCases):\n @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])\n def test_types(self, dtype):\n x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)\n assert_equal(linalg.inv(x).dtype, dtype)\n\n def test_0_size(self):\n # Check that all kinds 
of 0-sized arrays work\n class ArraySubclass(np.ndarray):\n pass\n a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)\n res = linalg.inv(a)\n assert_(res.dtype.type is np.float64)\n assert_equal(a.shape, res.shape)\n assert_(isinstance(res, ArraySubclass))\n\n a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)\n res = linalg.inv(a)\n assert_(res.dtype.type is np.complex64)\n assert_equal(a.shape, res.shape)\n assert_(isinstance(res, ArraySubclass))\n\n\nclass EigvalsCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):\n\n def do(self, a, b, tags):\n ev = linalg.eigvals(a)\n evalues, evectors = linalg.eig(a)\n assert_almost_equal(ev, evalues)\n\n\nclass TestEigvals(EigvalsCases):\n @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])\n def test_types(self, dtype):\n x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)\n assert_equal(linalg.eigvals(x).dtype, dtype)\n x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)\n assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))\n\n def test_0_size(self):\n # Check that all kinds of 0-sized arrays work\n class ArraySubclass(np.ndarray):\n pass\n a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)\n res = linalg.eigvals(a)\n assert_(res.dtype.type is np.float64)\n assert_equal((0, 1), res.shape)\n # This is just for documentation, it might make sense to change:\n assert_(isinstance(res, np.ndarray))\n\n a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)\n res = linalg.eigvals(a)\n assert_(res.dtype.type is np.complex64)\n assert_equal((0,), res.shape)\n # This is just for documentation, it might make sense to change:\n assert_(isinstance(res, np.ndarray))\n\n\nclass EigCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):\n\n def do(self, a, b, tags):\n res = linalg.eig(a)\n eigenvalues, eigenvectors = res.eigenvalues, res.eigenvectors\n assert_allclose(matmul(a, eigenvectors),\n np.asarray(eigenvectors) * np.asarray(eigenvalues)[..., None, :],\n 
rtol=get_rtol(eigenvalues.dtype))\n assert_(consistent_subclass(eigenvectors, a))\n\n\nclass TestEig(EigCases):\n @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])\n def test_types(self, dtype):\n x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)\n w, v = np.linalg.eig(x)\n assert_equal(w.dtype, dtype)\n assert_equal(v.dtype, dtype)\n\n x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)\n w, v = np.linalg.eig(x)\n assert_equal(w.dtype, get_complex_dtype(dtype))\n assert_equal(v.dtype, get_complex_dtype(dtype))\n\n def test_0_size(self):\n # Check that all kinds of 0-sized arrays work\n class ArraySubclass(np.ndarray):\n pass\n a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)\n res, res_v = linalg.eig(a)\n assert_(res_v.dtype.type is np.float64)\n assert_(res.dtype.type is np.float64)\n assert_equal(a.shape, res_v.shape)\n assert_equal((0, 1), res.shape)\n # This is just for documentation, it might make sense to change:\n assert_(isinstance(a, np.ndarray))\n\n a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)\n res, res_v = linalg.eig(a)\n assert_(res_v.dtype.type is np.complex64)\n assert_(res.dtype.type is np.complex64)\n assert_equal(a.shape, res_v.shape)\n assert_equal((0,), res.shape)\n # This is just for documentation, it might make sense to change:\n assert_(isinstance(a, np.ndarray))\n\n\nclass SVDBaseTests:\n hermitian = False\n\n @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])\n def test_types(self, dtype):\n x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)\n res = linalg.svd(x)\n U, S, Vh = res.U, res.S, res.Vh\n assert_equal(U.dtype, dtype)\n assert_equal(S.dtype, get_real_dtype(dtype))\n assert_equal(Vh.dtype, dtype)\n s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)\n assert_equal(s.dtype, get_real_dtype(dtype))\n\n\nclass SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):\n\n def do(self, a, b, tags):\n u, s, vt = linalg.svd(a, False)\n assert_allclose(a, 
matmul(np.asarray(u) * np.asarray(s)[..., None, :],\n np.asarray(vt)),\n rtol=get_rtol(u.dtype))\n assert_(consistent_subclass(u, a))\n assert_(consistent_subclass(vt, a))\n\n\nclass TestSVD(SVDCases, SVDBaseTests):\n def test_empty_identity(self):\n """ Empty input should put an identity matrix in u or vh """\n x = np.empty((4, 0))\n u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)\n assert_equal(u.shape, (4, 4))\n assert_equal(vh.shape, (0, 0))\n assert_equal(u, np.eye(4))\n\n x = np.empty((0, 4))\n u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)\n assert_equal(u.shape, (0, 0))\n assert_equal(vh.shape, (4, 4))\n assert_equal(vh, np.eye(4))\n\n def test_svdvals(self):\n x = np.array([[1, 0.5], [0.5, 1]])\n s_from_svd = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)\n s_from_svdvals = linalg.svdvals(x)\n assert_almost_equal(s_from_svd, s_from_svdvals)\n\n\nclass SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):\n\n def do(self, a, b, tags):\n u, s, vt = linalg.svd(a, False, hermitian=True)\n assert_allclose(a, matmul(np.asarray(u) * np.asarray(s)[..., None, :],\n np.asarray(vt)),\n rtol=get_rtol(u.dtype))\n\n def hermitian(mat):\n axes = list(range(mat.ndim))\n axes[-1], axes[-2] = axes[-2], axes[-1]\n return np.conj(np.transpose(mat, axes=axes))\n\n assert_almost_equal(np.matmul(u, hermitian(u)), np.broadcast_to(np.eye(u.shape[-1]), u.shape))\n assert_almost_equal(np.matmul(vt, hermitian(vt)), np.broadcast_to(np.eye(vt.shape[-1]), vt.shape))\n assert_equal(np.sort(s)[..., ::-1], s)\n assert_(consistent_subclass(u, a))\n assert_(consistent_subclass(vt, a))\n\n\nclass TestSVDHermitian(SVDHermitianCases, SVDBaseTests):\n hermitian = True\n\n\nclass CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):\n # cond(x, p) for p in (None, 2, -2)\n\n def do(self, a, b, tags):\n c = asarray(a) # a might be a matrix\n if 'size-0' in tags:\n assert_raises(LinAlgError, linalg.cond, c)\n return\n\n # 
+-2 norms\n s = linalg.svd(c, compute_uv=False)\n assert_almost_equal(\n linalg.cond(a), s[..., 0] / s[..., -1],\n single_decimal=5, double_decimal=11)\n assert_almost_equal(\n linalg.cond(a, 2), s[..., 0] / s[..., -1],\n single_decimal=5, double_decimal=11)\n assert_almost_equal(\n linalg.cond(a, -2), s[..., -1] / s[..., 0],\n single_decimal=5, double_decimal=11)\n\n # Other norms\n cinv = np.linalg.inv(c)\n assert_almost_equal(\n linalg.cond(a, 1),\n abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),\n single_decimal=5, double_decimal=11)\n assert_almost_equal(\n linalg.cond(a, -1),\n abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),\n single_decimal=5, double_decimal=11)\n assert_almost_equal(\n linalg.cond(a, np.inf),\n abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),\n single_decimal=5, double_decimal=11)\n assert_almost_equal(\n linalg.cond(a, -np.inf),\n abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),\n single_decimal=5, double_decimal=11)\n assert_almost_equal(\n linalg.cond(a, 'fro'),\n np.sqrt((abs(c)**2).sum(-1).sum(-1)\n * (abs(cinv)**2).sum(-1).sum(-1)),\n single_decimal=5, double_decimal=11)\n\n\nclass TestCond(CondCases):\n def test_basic_nonsvd(self):\n # Smoketest the non-svd norms\n A = array([[1., 0, 1], [0, -2., 0], [0, 0, 3.]])\n assert_almost_equal(linalg.cond(A, inf), 4)\n assert_almost_equal(linalg.cond(A, -inf), 2 / 3)\n assert_almost_equal(linalg.cond(A, 1), 4)\n assert_almost_equal(linalg.cond(A, -1), 0.5)\n assert_almost_equal(linalg.cond(A, 'fro'), np.sqrt(265 / 12))\n\n def test_singular(self):\n # Singular matrices have infinite condition number for\n # positive norms, and negative norms shouldn't raise\n # exceptions\n As = [np.zeros((2, 2)), np.ones((2, 2))]\n p_pos = [None, 1, 2, 'fro']\n p_neg = [-1, -2]\n for A, p in itertools.product(As, p_pos):\n # Inversion may not hit exact infinity, so just check the\n # number is large\n assert_(linalg.cond(A, p) > 1e15)\n for A, p in itertools.product(As, p_neg):\n linalg.cond(A, 
p)\n\n @pytest.mark.xfail(True, run=False,\n reason="Platform/LAPACK-dependent failure, "\n "see gh-18914")\n def test_nan(self):\n # nans should be passed through, not converted to infs\n ps = [None, 1, -1, 2, -2, 'fro']\n p_pos = [None, 1, 2, 'fro']\n\n A = np.ones((2, 2))\n A[0, 1] = np.nan\n for p in ps:\n c = linalg.cond(A, p)\n assert_(isinstance(c, np.float64))\n assert_(np.isnan(c))\n\n A = np.ones((3, 2, 2))\n A[1, 0, 1] = np.nan\n for p in ps:\n c = linalg.cond(A, p)\n assert_(np.isnan(c[1]))\n if p in p_pos:\n assert_(c[0] > 1e15)\n assert_(c[2] > 1e15)\n else:\n assert_(not np.isnan(c[0]))\n assert_(not np.isnan(c[2]))\n\n def test_stacked_singular(self):\n # Check behavior when only some of the stacked matrices are\n # singular\n np.random.seed(1234)\n A = np.random.rand(2, 2, 2, 2)\n A[0, 0] = 0\n A[1, 1] = 0\n\n for p in (None, 1, 2, 'fro', -1, -2):\n c = linalg.cond(A, p)\n assert_equal(c[0, 0], np.inf)\n assert_equal(c[1, 1], np.inf)\n assert_(np.isfinite(c[0, 1]))\n assert_(np.isfinite(c[1, 0]))\n\n\nclass PinvCases(LinalgSquareTestCase,\n LinalgNonsquareTestCase,\n LinalgGeneralizedSquareTestCase,\n LinalgGeneralizedNonsquareTestCase):\n\n def do(self, a, b, tags):\n a_ginv = linalg.pinv(a)\n # `a @ a_ginv == I` does not hold if a is singular\n dot = matmul\n assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)\n assert_(consistent_subclass(a_ginv, a))\n\n\nclass TestPinv(PinvCases):\n pass\n\n\nclass PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):\n\n def do(self, a, b, tags):\n a_ginv = linalg.pinv(a, hermitian=True)\n # `a @ a_ginv == I` does not hold if a is singular\n dot = matmul\n assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)\n assert_(consistent_subclass(a_ginv, a))\n\n\nclass TestPinvHermitian(PinvHermitianCases):\n pass\n\n\ndef test_pinv_rtol_arg():\n a = np.array([[1, 2, 3], [4, 1, 1], [2, 3, 1]])\n\n assert_almost_equal(\n np.linalg.pinv(a, 
rcond=0.5),\n np.linalg.pinv(a, rtol=0.5),\n )\n\n with pytest.raises(\n ValueError, match=r"`rtol` and `rcond` can't be both set."\n ):\n np.linalg.pinv(a, rcond=0.5, rtol=0.5)\n\n\nclass DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):\n\n def do(self, a, b, tags):\n d = linalg.det(a)\n res = linalg.slogdet(a)\n s, ld = res.sign, res.logabsdet\n if asarray(a).dtype.type in (single, double):\n ad = asarray(a).astype(double)\n else:\n ad = asarray(a).astype(cdouble)\n ev = linalg.eigvals(ad)\n assert_almost_equal(d, multiply.reduce(ev, axis=-1))\n assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))\n\n s = np.atleast_1d(s)\n ld = np.atleast_1d(ld)\n m = (s != 0)\n assert_almost_equal(np.abs(s[m]), 1)\n assert_equal(ld[~m], -inf)\n\n\nclass TestDet(DetCases):\n def test_zero(self):\n assert_equal(linalg.det([[0.0]]), 0.0)\n assert_equal(type(linalg.det([[0.0]])), double)\n assert_equal(linalg.det([[0.0j]]), 0.0)\n assert_equal(type(linalg.det([[0.0j]])), cdouble)\n\n assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))\n assert_equal(type(linalg.slogdet([[0.0]])[0]), double)\n assert_equal(type(linalg.slogdet([[0.0]])[1]), double)\n assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))\n assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)\n assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)\n\n @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])\n def test_types(self, dtype):\n x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)\n assert_equal(np.linalg.det(x).dtype, dtype)\n ph, s = np.linalg.slogdet(x)\n assert_equal(s.dtype, get_real_dtype(dtype))\n assert_equal(ph.dtype, dtype)\n\n def test_0_size(self):\n a = np.zeros((0, 0), dtype=np.complex64)\n res = linalg.det(a)\n assert_equal(res, 1.)\n assert_(res.dtype.type is np.complex64)\n res = linalg.slogdet(a)\n assert_equal(res, (1, 0))\n assert_(res[0].dtype.type is np.complex64)\n assert_(res[1].dtype.type is np.float32)\n\n a = np.zeros((0, 0), 
dtype=np.float64)\n res = linalg.det(a)\n assert_equal(res, 1.)\n assert_(res.dtype.type is np.float64)\n res = linalg.slogdet(a)\n assert_equal(res, (1, 0))\n assert_(res[0].dtype.type is np.float64)\n assert_(res[1].dtype.type is np.float64)\n\n\nclass LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):\n\n def do(self, a, b, tags):\n arr = np.asarray(a)\n m, n = arr.shape\n u, s, vt = linalg.svd(a, False)\n x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)\n if m == 0:\n assert_((x == 0).all())\n if m <= n:\n assert_almost_equal(b, dot(a, x))\n assert_equal(rank, m)\n else:\n assert_equal(rank, n)\n assert_almost_equal(sv, sv.__array_wrap__(s))\n if rank == n and m > n:\n expect_resids = (\n np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)\n expect_resids = np.asarray(expect_resids)\n if np.asarray(b).ndim == 1:\n expect_resids.shape = (1,)\n assert_equal(residuals.shape, expect_resids.shape)\n else:\n expect_resids = np.array([]).view(type(x))\n assert_almost_equal(residuals, expect_resids)\n assert_(np.issubdtype(residuals.dtype, np.floating))\n assert_(consistent_subclass(x, b))\n assert_(consistent_subclass(residuals, b))\n\n\nclass TestLstsq(LstsqCases):\n def test_rcond(self):\n a = np.array([[0., 1., 0., 1., 2., 0.],\n [0., 2., 0., 0., 1., 0.],\n [1., 0., 1., 0., 0., 4.],\n [0., 0., 0., 2., 3., 0.]]).T\n\n b = np.array([1, 0, 0, 0, 0, 0])\n\n x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)\n assert_(rank == 4)\n x, residuals, rank, s = linalg.lstsq(a, b)\n assert_(rank == 3)\n x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)\n assert_(rank == 3)\n\n @pytest.mark.parametrize(["m", "n", "n_rhs"], [\n (4, 2, 2),\n (0, 4, 1),\n (0, 4, 2),\n (4, 0, 1),\n (4, 0, 2),\n (4, 2, 0),\n (0, 0, 0)\n ])\n def test_empty_a_b(self, m, n, n_rhs):\n a = np.arange(m * n).reshape(m, n)\n b = np.ones((m, n_rhs))\n x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)\n if m == 0:\n assert_((x == 0).all())\n assert_equal(x.shape, (n, n_rhs))\n 
assert_equal(residuals.shape, ((n_rhs,) if m > n else (0,)))\n if m > n and n_rhs > 0:\n # residuals are exactly the squared norms of b's columns\n r = b - np.dot(a, x)\n assert_almost_equal(residuals, (r * r).sum(axis=-2))\n assert_equal(rank, min(m, n))\n assert_equal(s.shape, (min(m, n),))\n\n def test_incompatible_dims(self):\n # use modified version of docstring example\n x = np.array([0, 1, 2, 3])\n y = np.array([-1, 0.2, 0.9, 2.1, 3.3])\n A = np.vstack([x, np.ones(len(x))]).T\n with assert_raises_regex(LinAlgError, "Incompatible dimensions"):\n linalg.lstsq(A, y, rcond=None)\n\n\n@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])\nclass TestMatrixPower:\n\n rshft_0 = np.eye(4)\n rshft_1 = rshft_0[[3, 0, 1, 2]]\n rshft_2 = rshft_0[[2, 3, 0, 1]]\n rshft_3 = rshft_0[[1, 2, 3, 0]]\n rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3]\n noninv = array([[1, 0], [0, 0]])\n stacked = np.block([[[rshft_0]]] * 2)\n # FIXME the 'e' dtype might work in future\n dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]\n\n def test_large_power(self, dt):\n rshft = self.rshft_1.astype(dt)\n assert_equal(\n matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)\n assert_equal(\n matrix_power(rshft, 2**100 + 2**10 + 2**5 + 1), self.rshft_1)\n assert_equal(\n matrix_power(rshft, 2**100 + 2**10 + 2**5 + 2), self.rshft_2)\n assert_equal(\n matrix_power(rshft, 2**100 + 2**10 + 2**5 + 3), self.rshft_3)\n\n def test_power_is_zero(self, dt):\n def tz(M):\n mz = matrix_power(M, 0)\n assert_equal(mz, identity_like_generalized(M))\n assert_equal(mz.dtype, M.dtype)\n\n for mat in self.rshft_all:\n tz(mat.astype(dt))\n if dt != object:\n tz(self.stacked.astype(dt))\n\n def test_power_is_one(self, dt):\n def tz(mat):\n mz = matrix_power(mat, 1)\n assert_equal(mz, mat)\n assert_equal(mz.dtype, mat.dtype)\n\n for mat in self.rshft_all:\n tz(mat.astype(dt))\n if dt != object:\n tz(self.stacked.astype(dt))\n\n def test_power_is_two(self, dt):\n def 
tz(mat):\n mz = matrix_power(mat, 2)\n mmul = matmul if mat.dtype != object else dot\n assert_equal(mz, mmul(mat, mat))\n assert_equal(mz.dtype, mat.dtype)\n\n for mat in self.rshft_all:\n tz(mat.astype(dt))\n if dt != object:\n tz(self.stacked.astype(dt))\n\n def test_power_is_minus_one(self, dt):\n def tz(mat):\n invmat = matrix_power(mat, -1)\n mmul = matmul if mat.dtype != object else dot\n assert_almost_equal(\n mmul(invmat, mat), identity_like_generalized(mat))\n\n for mat in self.rshft_all:\n if dt not in self.dtnoinv:\n tz(mat.astype(dt))\n\n def test_exceptions_bad_power(self, dt):\n mat = self.rshft_0.astype(dt)\n assert_raises(TypeError, matrix_power, mat, 1.5)\n assert_raises(TypeError, matrix_power, mat, [1])\n\n def test_exceptions_non_square(self, dt):\n assert_raises(LinAlgError, matrix_power, np.array([1], dt), 1)\n assert_raises(LinAlgError, matrix_power, np.array([[1], [2]], dt), 1)\n assert_raises(LinAlgError, matrix_power, np.ones((4, 3, 2), dt), 1)\n\n @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")\n def test_exceptions_not_invertible(self, dt):\n if dt in self.dtnoinv:\n return\n mat = self.noninv.astype(dt)\n assert_raises(LinAlgError, matrix_power, mat, -1)\n\n\nclass TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):\n\n def do(self, a, b, tags):\n # note that eigenvalue arrays returned by eig must be sorted since\n # their order isn't guaranteed.\n ev = linalg.eigvalsh(a, 'L')\n evalues, evectors = linalg.eig(a)\n evalues.sort(axis=-1)\n assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))\n\n ev2 = linalg.eigvalsh(a, 'U')\n assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))\n\n\nclass TestEigvalsh:\n @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])\n def test_types(self, dtype):\n x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)\n w = np.linalg.eigvalsh(x)\n assert_equal(w.dtype, get_real_dtype(dtype))\n\n def test_invalid(self):\n x = np.array([[1, 0.5], [0.5, 1]], 
dtype=np.float32)
        # Anything other than (case-insensitive) 'L'/'U' must raise
        # ValueError; the spelled-out words "lower"/"upper" are rejected too.
        assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
        assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
        assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")

    def test_UPLO(self):
        # Only the selected triangle is referenced: the strictly-lower Klo
        # with UPLO='L' and the strictly-upper Kup with UPLO='U' both yield
        # eigenvalues [-1, 1]. Also checks UPLO is case-insensitive and
        # defaults to 'L'.
        Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
        Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
        tgt = np.array([-1, 1], dtype=np.double)
        rtol = get_rtol(np.double)

        # Check default is 'L'
        w = np.linalg.eigvalsh(Klo)
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'L'
        w = np.linalg.eigvalsh(Klo, UPLO='L')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'l'
        w = np.linalg.eigvalsh(Klo, UPLO='l')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'U'
        w = np.linalg.eigvalsh(Kup, UPLO='U')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'u'
        w = np.linalg.eigvalsh(Kup, UPLO='u')
        assert_allclose(w, tgt, rtol=rtol)

    def test_0_size(self):
        # Check that all kinds of 0-sized arrays work
        class ArraySubclass(np.ndarray):
            pass
        # Integer input promotes to float64 eigenvalues, even for an
        # empty stack of matrices; the subclass is not preserved.
        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res = linalg.eigvalsh(a)
        assert_(res.dtype.type is np.float64)
        assert_equal((0, 1), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))

        # complex64 input yields eigenvalues of its real counterpart, float32.
        a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
        res = linalg.eigvalsh(a)
        assert_(res.dtype.type is np.float32)
        assert_equal((0,), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(res, np.ndarray))


class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):

    def do(self, a, b, tags):
        # note that eigenvalue arrays returned by eig must be sorted since
        # their order isn't guaranteed.
        res = linalg.eigh(a)
        ev, evc = res.eigenvalues, res.eigenvectors
        evalues, evectors = linalg.eig(a)
        evalues.sort(axis=-1)
        assert_almost_equal(ev, evalues)

        # Eigenpair check: a @ v must equal lambda * v, column-wise.
        assert_allclose(matmul(a, evc),
                        np.asarray(ev)[..., None, :] * np.asarray(evc),
                        
rtol=get_rtol(ev.dtype))\n\n ev2, evc2 = linalg.eigh(a, 'U')\n assert_almost_equal(ev2, evalues)\n\n assert_allclose(matmul(a, evc2),\n np.asarray(ev2)[..., None, :] * np.asarray(evc2),\n rtol=get_rtol(ev.dtype), err_msg=repr(a))\n\n\nclass TestEigh:\n @pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])\n def test_types(self, dtype):\n x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)\n w, v = np.linalg.eigh(x)\n assert_equal(w.dtype, get_real_dtype(dtype))\n assert_equal(v.dtype, dtype)\n\n def test_invalid(self):\n x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)\n assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")\n assert_raises(ValueError, np.linalg.eigh, x, "lower")\n assert_raises(ValueError, np.linalg.eigh, x, "upper")\n\n def test_UPLO(self):\n Klo = np.array([[0, 0], [1, 0]], dtype=np.double)\n Kup = np.array([[0, 1], [0, 0]], dtype=np.double)\n tgt = np.array([-1, 1], dtype=np.double)\n rtol = get_rtol(np.double)\n\n # Check default is 'L'\n w, v = np.linalg.eigh(Klo)\n assert_allclose(w, tgt, rtol=rtol)\n # Check 'L'\n w, v = np.linalg.eigh(Klo, UPLO='L')\n assert_allclose(w, tgt, rtol=rtol)\n # Check 'l'\n w, v = np.linalg.eigh(Klo, UPLO='l')\n assert_allclose(w, tgt, rtol=rtol)\n # Check 'U'\n w, v = np.linalg.eigh(Kup, UPLO='U')\n assert_allclose(w, tgt, rtol=rtol)\n # Check 'u'\n w, v = np.linalg.eigh(Kup, UPLO='u')\n assert_allclose(w, tgt, rtol=rtol)\n\n def test_0_size(self):\n # Check that all kinds of 0-sized arrays work\n class ArraySubclass(np.ndarray):\n pass\n a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)\n res, res_v = linalg.eigh(a)\n assert_(res_v.dtype.type is np.float64)\n assert_(res.dtype.type is np.float64)\n assert_equal(a.shape, res_v.shape)\n assert_equal((0, 1), res.shape)\n # This is just for documentation, it might make sense to change:\n assert_(isinstance(a, np.ndarray))\n\n a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)\n res, res_v = linalg.eigh(a)\n 
assert_(res_v.dtype.type is np.complex64)
        assert_(res.dtype.type is np.float32)
        assert_equal(a.shape, res_v.shape)
        assert_equal((0,), res.shape)
        # This is just for documentation, it might make sense to change:
        assert_(isinstance(a, np.ndarray))


class _TestNormBase:
    # Subclasses supply these: `dt` is the dtype under test and `dec` the
    # decimal precision passed to assert_almost_equal.
    dt = None
    dec = None

    @staticmethod
    def check_dtype(x, res):
        # Result dtype contract for norm(): real dtype of the input for
        # inexact inputs, any floating type for integer inputs.
        if issubclass(x.dtype.type, np.inexact):
            assert_equal(res.dtype, x.real.dtype)
        else:
            # For integer input, don't have to test float precision of output.
            assert_(issubclass(res.dtype.type, np.floating))


class _TestNormGeneral(_TestNormBase):

    def test_empty(self):
        # The norm of an empty vector or matrix is 0.0.
        assert_equal(norm([]), 0.0)
        assert_equal(norm(array([], dtype=self.dt)), 0.0)
        assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)

    def test_vector_return_type(self):
        # The vector [1, 0, 1] has known norms for every `ord`; run it
        # through all integer and float typecodes, checking both the value
        # and the result dtype (via check_dtype).
        a = np.array([1, 0, 1])

        exact_types = np.typecodes['AllInteger']
        inexact_types = np.typecodes['AllFloat']

        all_types = exact_types + inexact_types

        for each_type in all_types:
            at = a.astype(each_type)

            # ord=-inf -> smallest absolute entry, 0 here.
            an = norm(at, -np.inf)
            self.check_dtype(at, an)
            assert_almost_equal(an, 0.0)

            with suppress_warnings() as sup:
                # ord=-1 divides by the zero entry; silence that warning.
                sup.filter(RuntimeWarning, "divide by zero encountered")
                an = norm(at, -1)
                self.check_dtype(at, an)
                assert_almost_equal(an, 0.0)

            # ord=0 -> count of nonzero entries.
            an = norm(at, 0)
            self.check_dtype(at, an)
            assert_almost_equal(an, 2)

            an = norm(at, 1)
            self.check_dtype(at, an)
            assert_almost_equal(an, 2.0)

            # p-norms: 2**(1/p), computed in the result's own precision so
            # low-precision floats compare exactly.
            an = norm(at, 2)
            self.check_dtype(at, an)
            assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 2.0))

            an = norm(at, 4)
            self.check_dtype(at, an)
            assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0 / 4.0))

            # ord=+inf -> largest absolute entry.
            an = norm(at, np.inf)
            self.check_dtype(at, an)
            assert_almost_equal(an, 1.0)

    def test_vector(self):
        # Sign patterns must not change any of the norms checked in _test.
        a = [1, 2, 3, 4]
        b = [-1, -2, -3, -4]
        c = [-1, 2, -3, 4]

        def _test(v):
            # Hand-computed norms of [1, 2, 3, 4] (up to signs).
            np.testing.assert_almost_equal(norm(v), 30 ** 0.5,
                                           decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, 
inf), 4.0,\n decimal=self.dec)\n np.testing.assert_almost_equal(norm(v, -inf), 1.0,\n decimal=self.dec)\n np.testing.assert_almost_equal(norm(v, 1), 10.0,\n decimal=self.dec)\n np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25,\n decimal=self.dec)\n np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5,\n decimal=self.dec)\n np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5),\n decimal=self.dec)\n np.testing.assert_almost_equal(norm(v, 0), 4,\n decimal=self.dec)\n\n for v in (a, b, c,):\n _test(v)\n\n for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),\n array(c, dtype=self.dt)):\n _test(v)\n\n def test_axis(self):\n # Vector norms.\n # Compare the use of `axis` with computing the norm of each row\n # or column separately.\n A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)\n for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]:\n expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]\n assert_almost_equal(norm(A, ord=order, axis=0), expected0)\n expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]\n assert_almost_equal(norm(A, ord=order, axis=1), expected1)\n\n # Matrix norms.\n B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)\n nd = B.ndim\n for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro']:\n for axis in itertools.combinations(range(-nd, nd), 2):\n row_axis, col_axis = axis\n if row_axis < 0:\n row_axis += nd\n if col_axis < 0:\n col_axis += nd\n if row_axis == col_axis:\n assert_raises(ValueError, norm, B, ord=order, axis=axis)\n else:\n n = norm(B, ord=order, axis=axis)\n\n # The logic using k_index only works for nd = 3.\n # This has to be changed if nd is increased.\n k_index = nd - (row_axis + col_axis)\n if row_axis < col_axis:\n expected = [norm(B[:].take(k, axis=k_index), ord=order)\n for k in range(B.shape[k_index])]\n else:\n expected = [norm(B[:].take(k, axis=k_index).T, ord=order)\n for k in range(B.shape[k_index])]\n assert_almost_equal(n, expected)\n\n def test_keepdims(self):\n A = 
np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)\n\n allclose_err = 'order {0}, axis = {1}'\n shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'\n\n # check the order=None, axis=None case\n expected = norm(A, ord=None, axis=None)\n found = norm(A, ord=None, axis=None, keepdims=True)\n assert_allclose(np.squeeze(found), expected,\n err_msg=allclose_err.format(None, None))\n expected_shape = (1, 1, 1)\n assert_(found.shape == expected_shape,\n shape_err.format(found.shape, expected_shape, None, None))\n\n # Vector norms.\n for order in [None, -1, 0, 1, 2, 3, np.inf, -np.inf]:\n for k in range(A.ndim):\n expected = norm(A, ord=order, axis=k)\n found = norm(A, ord=order, axis=k, keepdims=True)\n assert_allclose(np.squeeze(found), expected,\n err_msg=allclose_err.format(order, k))\n expected_shape = list(A.shape)\n expected_shape[k] = 1\n expected_shape = tuple(expected_shape)\n assert_(found.shape == expected_shape,\n shape_err.format(found.shape, expected_shape, order, k))\n\n # Matrix norms.\n for order in [None, -2, 2, -1, 1, np.inf, -np.inf, 'fro', 'nuc']:\n for k in itertools.permutations(range(A.ndim), 2):\n expected = norm(A, ord=order, axis=k)\n found = norm(A, ord=order, axis=k, keepdims=True)\n assert_allclose(np.squeeze(found), expected,\n err_msg=allclose_err.format(order, k))\n expected_shape = list(A.shape)\n expected_shape[k[0]] = 1\n expected_shape[k[1]] = 1\n expected_shape = tuple(expected_shape)\n assert_(found.shape == expected_shape,\n shape_err.format(found.shape, expected_shape, order, k))\n\n\nclass _TestNorm2D(_TestNormBase):\n # Define the part for 2d arrays separately, so we can subclass this\n # and run the tests using np.matrix in matrixlib.tests.test_matrix_linalg.\n array = np.array\n\n def test_matrix_empty(self):\n assert_equal(norm(self.array([[]], dtype=self.dt)), 0.0)\n\n def test_matrix_return_type(self):\n a = self.array([[1, 0, 1], [0, 1, 1]])\n\n exact_types = np.typecodes['AllInteger']\n\n # float32, 
complex64, float64, complex128 types are the only types\n # allowed by `linalg`, which performs the matrix operations used\n # within `norm`.\n inexact_types = 'fdFD'\n\n all_types = exact_types + inexact_types\n\n for each_type in all_types:\n at = a.astype(each_type)\n\n an = norm(at, -np.inf)\n self.check_dtype(at, an)\n assert_almost_equal(an, 2.0)\n\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, "divide by zero encountered")\n an = norm(at, -1)\n self.check_dtype(at, an)\n assert_almost_equal(an, 1.0)\n\n an = norm(at, 1)\n self.check_dtype(at, an)\n assert_almost_equal(an, 2.0)\n\n an = norm(at, 2)\n self.check_dtype(at, an)\n assert_almost_equal(an, 3.0**(1.0 / 2.0))\n\n an = norm(at, -2)\n self.check_dtype(at, an)\n assert_almost_equal(an, 1.0)\n\n an = norm(at, np.inf)\n self.check_dtype(at, an)\n assert_almost_equal(an, 2.0)\n\n an = norm(at, 'fro')\n self.check_dtype(at, an)\n assert_almost_equal(an, 2.0)\n\n an = norm(at, 'nuc')\n self.check_dtype(at, an)\n # Lower bar needed to support low precision floats.\n # They end up being off by 1 in the 7th place.\n np.testing.assert_almost_equal(an, 2.7320508075688772, decimal=6)\n\n def test_matrix_2x2(self):\n A = self.array([[1, 3], [5, 7]], dtype=self.dt)\n assert_almost_equal(norm(A), 84 ** 0.5)\n assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)\n assert_almost_equal(norm(A, 'nuc'), 10.0)\n assert_almost_equal(norm(A, inf), 12.0)\n assert_almost_equal(norm(A, -inf), 4.0)\n assert_almost_equal(norm(A, 1), 10.0)\n assert_almost_equal(norm(A, -1), 6.0)\n assert_almost_equal(norm(A, 2), 9.1231056256176615)\n assert_almost_equal(norm(A, -2), 0.87689437438234041)\n\n assert_raises(ValueError, norm, A, 'nofro')\n assert_raises(ValueError, norm, A, -3)\n assert_raises(ValueError, norm, A, 0)\n\n def test_matrix_3x3(self):\n # This test has been added because the 2x2 example\n # happened to have equal nuclear norm and induced 1-norm.\n # The 1/10 scaling factor accommodates the absolute tolerance\n 
# used in assert_almost_equal.\n A = (1 / 10) * \\n self.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)\n assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)\n assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)\n assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)\n assert_almost_equal(norm(A, inf), 1.1)\n assert_almost_equal(norm(A, -inf), 0.6)\n assert_almost_equal(norm(A, 1), 1.0)\n assert_almost_equal(norm(A, -1), 0.4)\n assert_almost_equal(norm(A, 2), 0.88722940323461277)\n assert_almost_equal(norm(A, -2), 0.19456584790481812)\n\n def test_bad_args(self):\n # Check that bad arguments raise the appropriate exceptions.\n\n A = self.array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)\n B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)\n\n # Using `axis=<integer>` or passing in a 1-D array implies vector\n # norms are being computed, so also using `ord='fro'`\n # or `ord='nuc'` or any other string raises a ValueError.\n assert_raises(ValueError, norm, A, 'fro', 0)\n assert_raises(ValueError, norm, A, 'nuc', 0)\n assert_raises(ValueError, norm, [3, 4], 'fro', None)\n assert_raises(ValueError, norm, [3, 4], 'nuc', None)\n assert_raises(ValueError, norm, [3, 4], 'test', None)\n\n # Similarly, norm should raise an exception when ord is any finite\n # number other than 1, 2, -1 or -2 when computing matrix norms.\n for order in [0, 3]:\n assert_raises(ValueError, norm, A, order, None)\n assert_raises(ValueError, norm, A, order, (0, 1))\n assert_raises(ValueError, norm, B, order, (1, 2))\n\n # Invalid axis\n assert_raises(AxisError, norm, B, None, 3)\n assert_raises(AxisError, norm, B, None, (2, 3))\n assert_raises(ValueError, norm, B, None, (0, 1, 2))\n\n\nclass _TestNorm(_TestNorm2D, _TestNormGeneral):\n pass\n\n\nclass TestNorm_NonSystematic:\n\n def test_longdouble_norm(self):\n # Non-regression test: p-norm of longdouble would previously raise\n # UnboundLocalError.\n x = np.arange(10, dtype=np.longdouble)\n old_assert_almost_equal(norm(x, ord=3), 
12.65, decimal=2)\n\n def test_intmin(self):\n # Non-regression test: p-norm of signed integer would previously do\n # float cast and abs in the wrong order.\n x = np.array([-2 ** 31], dtype=np.int32)\n old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)\n\n def test_complex_high_ord(self):\n # gh-4156\n d = np.empty((2,), dtype=np.clongdouble)\n d[0] = 6 + 7j\n d[1] = -6 + 7j\n res = 11.615898132184\n old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)\n d = d.astype(np.complex128)\n old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)\n d = d.astype(np.complex64)\n old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)\n\n\n# Separate definitions so we can use them for matrix tests.\nclass _TestNormDoubleBase(_TestNormBase):\n dt = np.double\n dec = 12\n\n\nclass _TestNormSingleBase(_TestNormBase):\n dt = np.float32\n dec = 6\n\n\nclass _TestNormInt64Base(_TestNormBase):\n dt = np.int64\n dec = 12\n\n\nclass TestNormDouble(_TestNorm, _TestNormDoubleBase):\n pass\n\n\nclass TestNormSingle(_TestNorm, _TestNormSingleBase):\n pass\n\n\nclass TestNormInt64(_TestNorm, _TestNormInt64Base):\n pass\n\n\nclass TestMatrixRank:\n\n def test_matrix_rank(self):\n # Full rank matrix\n assert_equal(4, matrix_rank(np.eye(4)))\n # rank deficient matrix\n I = np.eye(4)\n I[-1, -1] = 0.\n assert_equal(matrix_rank(I), 3)\n # All zeros - zero rank\n assert_equal(matrix_rank(np.zeros((4, 4))), 0)\n # 1 dimension - rank 1 unless all 0\n assert_equal(matrix_rank([1, 0, 0, 0]), 1)\n assert_equal(matrix_rank(np.zeros((4,))), 0)\n # accepts array-like\n assert_equal(matrix_rank([1]), 1)\n # greater than 2 dimensions treated as stacked matrices\n ms = np.array([I, np.eye(4), np.zeros((4, 4))])\n assert_equal(matrix_rank(ms), np.array([3, 4, 0]))\n # works on scalar\n assert_equal(matrix_rank(1), 1)\n\n with assert_raises_regex(\n ValueError, "`tol` and `rtol` can\'t be both set."\n ):\n matrix_rank(I, tol=0.01, rtol=0.01)\n\n def 
test_symmetric_rank(self):\n assert_equal(4, matrix_rank(np.eye(4), hermitian=True))\n assert_equal(1, matrix_rank(np.ones((4, 4)), hermitian=True))\n assert_equal(0, matrix_rank(np.zeros((4, 4)), hermitian=True))\n # rank deficient matrix\n I = np.eye(4)\n I[-1, -1] = 0.\n assert_equal(3, matrix_rank(I, hermitian=True))\n # manually supplied tolerance\n I[-1, -1] = 1e-8\n assert_equal(4, matrix_rank(I, hermitian=True, tol=0.99e-8))\n assert_equal(3, matrix_rank(I, hermitian=True, tol=1.01e-8))\n\n\ndef test_reduced_rank():\n # Test matrices with reduced rank\n rng = np.random.RandomState(20120714)\n for i in range(100):\n # Make a rank deficient matrix\n X = rng.normal(size=(40, 10))\n X[:, 0] = X[:, 1] + X[:, 2]\n # Assert that matrix_rank detected deficiency\n assert_equal(matrix_rank(X), 9)\n X[:, 3] = X[:, 4] + X[:, 5]\n assert_equal(matrix_rank(X), 8)\n\n\nclass TestQR:\n # Define the array class here, so run this on matrices elsewhere.\n array = np.array\n\n def check_qr(self, a):\n # This test expects the argument `a` to be an ndarray or\n # a subclass of an ndarray of inexact type.\n a_type = type(a)\n a_dtype = a.dtype\n m, n = a.shape\n k = min(m, n)\n\n # mode == 'complete'\n res = linalg.qr(a, mode='complete')\n Q, R = res.Q, res.R\n assert_(Q.dtype == a_dtype)\n assert_(R.dtype == a_dtype)\n assert_(isinstance(Q, a_type))\n assert_(isinstance(R, a_type))\n assert_(Q.shape == (m, m))\n assert_(R.shape == (m, n))\n assert_almost_equal(dot(Q, R), a)\n assert_almost_equal(dot(Q.T.conj(), Q), np.eye(m))\n assert_almost_equal(np.triu(R), R)\n\n # mode == 'reduced'\n q1, r1 = linalg.qr(a, mode='reduced')\n assert_(q1.dtype == a_dtype)\n assert_(r1.dtype == a_dtype)\n assert_(isinstance(q1, a_type))\n assert_(isinstance(r1, a_type))\n assert_(q1.shape == (m, k))\n assert_(r1.shape == (k, n))\n assert_almost_equal(dot(q1, r1), a)\n assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))\n assert_almost_equal(np.triu(r1), r1)\n\n # mode == 'r'\n r2 = linalg.qr(a, 
mode='r')\n assert_(r2.dtype == a_dtype)\n assert_(isinstance(r2, a_type))\n assert_almost_equal(r2, r1)\n\n @pytest.mark.parametrize(["m", "n"], [\n (3, 0),\n (0, 3),\n (0, 0)\n ])\n def test_qr_empty(self, m, n):\n k = min(m, n)\n a = np.empty((m, n))\n\n self.check_qr(a)\n\n h, tau = np.linalg.qr(a, mode='raw')\n assert_equal(h.dtype, np.double)\n assert_equal(tau.dtype, np.double)\n assert_equal(h.shape, (n, m))\n assert_equal(tau.shape, (k,))\n\n def test_mode_raw(self):\n # The factorization is not unique and varies between libraries,\n # so it is not possible to check against known values. Functional\n # testing is a possibility, but awaits the exposure of more\n # of the functions in lapack_lite. Consequently, this test is\n # very limited in scope. Note that the results are in FORTRAN\n # order, hence the h arrays are transposed.\n a = self.array([[1, 2], [3, 4], [5, 6]], dtype=np.double)\n\n # Test double\n h, tau = linalg.qr(a, mode='raw')\n assert_(h.dtype == np.double)\n assert_(tau.dtype == np.double)\n assert_(h.shape == (2, 3))\n assert_(tau.shape == (2,))\n\n h, tau = linalg.qr(a.T, mode='raw')\n assert_(h.dtype == np.double)\n assert_(tau.dtype == np.double)\n assert_(h.shape == (3, 2))\n assert_(tau.shape == (2,))\n\n def test_mode_all_but_economic(self):\n a = self.array([[1, 2], [3, 4]])\n b = self.array([[1, 2], [3, 4], [5, 6]])\n for dt in "fd":\n m1 = a.astype(dt)\n m2 = b.astype(dt)\n self.check_qr(m1)\n self.check_qr(m2)\n self.check_qr(m2.T)\n\n for dt in "fd":\n m1 = 1 + 1j * a.astype(dt)\n m2 = 1 + 1j * b.astype(dt)\n self.check_qr(m1)\n self.check_qr(m2)\n self.check_qr(m2.T)\n\n def check_qr_stacked(self, a):\n # This test expects the argument `a` to be an ndarray or\n # a subclass of an ndarray of inexact type.\n a_type = type(a)\n a_dtype = a.dtype\n m, n = a.shape[-2:]\n k = min(m, n)\n\n # mode == 'complete'\n q, r = linalg.qr(a, mode='complete')\n assert_(q.dtype == a_dtype)\n assert_(r.dtype == a_dtype)\n assert_(isinstance(q, 
a_type))\n assert_(isinstance(r, a_type))\n assert_(q.shape[-2:] == (m, m))\n assert_(r.shape[-2:] == (m, n))\n assert_almost_equal(matmul(q, r), a)\n I_mat = np.identity(q.shape[-1])\n stack_I_mat = np.broadcast_to(I_mat,\n q.shape[:-2] + (q.shape[-1],) * 2)\n assert_almost_equal(matmul(swapaxes(q, -1, -2).conj(), q), stack_I_mat)\n assert_almost_equal(np.triu(r[..., :, :]), r)\n\n # mode == 'reduced'\n q1, r1 = linalg.qr(a, mode='reduced')\n assert_(q1.dtype == a_dtype)\n assert_(r1.dtype == a_dtype)\n assert_(isinstance(q1, a_type))\n assert_(isinstance(r1, a_type))\n assert_(q1.shape[-2:] == (m, k))\n assert_(r1.shape[-2:] == (k, n))\n assert_almost_equal(matmul(q1, r1), a)\n I_mat = np.identity(q1.shape[-1])\n stack_I_mat = np.broadcast_to(I_mat,\n q1.shape[:-2] + (q1.shape[-1],) * 2)\n assert_almost_equal(matmul(swapaxes(q1, -1, -2).conj(), q1),\n stack_I_mat)\n assert_almost_equal(np.triu(r1[..., :, :]), r1)\n\n # mode == 'r'\n r2 = linalg.qr(a, mode='r')\n assert_(r2.dtype == a_dtype)\n assert_(isinstance(r2, a_type))\n assert_almost_equal(r2, r1)\n\n @pytest.mark.parametrize("size", [\n (3, 4), (4, 3), (4, 4),\n (3, 0), (0, 3)])\n @pytest.mark.parametrize("outer_size", [\n (2, 2), (2,), (2, 3, 4)])\n @pytest.mark.parametrize("dt", [\n np.single, np.double,\n np.csingle, np.cdouble])\n def test_stacked_inputs(self, outer_size, size, dt):\n\n rng = np.random.default_rng(123)\n A = rng.normal(size=outer_size + size).astype(dt)\n B = rng.normal(size=outer_size + size).astype(dt)\n self.check_qr_stacked(A)\n self.check_qr_stacked(A + 1.j * B)\n\n\nclass TestCholesky:\n\n @pytest.mark.parametrize(\n 'shape', [(1, 1), (2, 2), (3, 3), (50, 50), (3, 10, 10)]\n )\n @pytest.mark.parametrize(\n 'dtype', (np.float32, np.float64, np.complex64, np.complex128)\n )\n @pytest.mark.parametrize(\n 'upper', [False, True])\n def test_basic_property(self, shape, dtype, upper):\n np.random.seed(1)\n a = np.random.randn(*shape)\n if np.issubdtype(dtype, np.complexfloating):\n a = 
a + 1j * np.random.randn(*shape)\n\n t = list(range(len(shape)))\n t[-2:] = -1, -2\n\n a = np.matmul(a.transpose(t).conj(), a)\n a = np.asarray(a, dtype=dtype)\n\n c = np.linalg.cholesky(a, upper=upper)\n\n # Check A = L L^H or A = U^H U\n if upper:\n b = np.matmul(c.transpose(t).conj(), c)\n else:\n b = np.matmul(c, c.transpose(t).conj())\n\n atol = 500 * a.shape[0] * np.finfo(dtype).eps\n assert_allclose(b, a, atol=atol, err_msg=f'{shape} {dtype}\n{a}\n{c}')\n\n # Check diag(L or U) is real and positive\n d = np.diagonal(c, axis1=-2, axis2=-1)\n assert_(np.all(np.isreal(d)))\n assert_(np.all(d >= 0))\n\n def test_0_size(self):\n class ArraySubclass(np.ndarray):\n pass\n a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)\n res = linalg.cholesky(a)\n assert_equal(a.shape, res.shape)\n assert_(res.dtype.type is np.float64)\n # for documentation purpose:\n assert_(isinstance(res, np.ndarray))\n\n a = np.zeros((1, 0, 0), dtype=np.complex64).view(ArraySubclass)\n res = linalg.cholesky(a)\n assert_equal(a.shape, res.shape)\n assert_(res.dtype.type is np.complex64)\n assert_(isinstance(res, np.ndarray))\n\n def test_upper_lower_arg(self):\n # Explicit test of upper argument that also checks the default.\n a = np.array([[1 + 0j, 0 - 2j], [0 + 2j, 5 + 0j]])\n\n assert_equal(linalg.cholesky(a), linalg.cholesky(a, upper=False))\n\n assert_equal(\n linalg.cholesky(a, upper=True),\n linalg.cholesky(a).T.conj()\n )\n\n\nclass TestOuter:\n arr1 = np.arange(3)\n arr2 = np.arange(3)\n expected = np.array(\n [[0, 0, 0],\n [0, 1, 2],\n [0, 2, 4]]\n )\n\n assert_array_equal(np.linalg.outer(arr1, arr2), expected)\n\n with assert_raises_regex(\n ValueError, "Input arrays must be one-dimensional"\n ):\n np.linalg.outer(arr1[:, np.newaxis], arr2)\n\n\ndef test_byteorder_check():\n # Byte order check should pass for native order\n if sys.byteorder == 'little':\n native = '<'\n else:\n native = '>'\n\n for dtt in (np.float32, np.float64):\n arr = np.eye(4, dtype=dtt)\n n_arr = 
arr.view(arr.dtype.newbyteorder(native))\n sw_arr = arr.view(arr.dtype.newbyteorder("S")).byteswap()\n assert_equal(arr.dtype.byteorder, '=')\n for routine in (linalg.inv, linalg.det, linalg.pinv):\n # Normal call\n res = routine(arr)\n # Native but not '='\n assert_array_equal(res, routine(n_arr))\n # Swapped\n assert_array_equal(res, routine(sw_arr))\n\n\n@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")\ndef test_generalized_raise_multiloop():\n # It should raise an error even if the error doesn't occur in the\n # last iteration of the ufunc inner loop\n\n invertible = np.array([[1, 2], [3, 4]])\n non_invertible = np.array([[1, 1], [1, 1]])\n\n x = np.zeros([4, 4, 2, 2])[1::2]\n x[...] = invertible\n x[0, 0] = non_invertible\n\n assert_raises(np.linalg.LinAlgError, np.linalg.inv, x)\n\n\n@pytest.mark.skipif(\n threading.active_count() > 1,\n reason="skipping test that uses fork because there are multiple threads")\n@pytest.mark.skipif(\n NOGIL_BUILD,\n reason="Cannot safely use fork in tests on the free-threaded build")\ndef test_xerbla_override():\n # Check that our xerbla has been successfully linked in. 
If it is not,\n # the default xerbla routine is called, which prints a message to stdout\n # and may, or may not, abort the process depending on the LAPACK package.\n\n XERBLA_OK = 255\n\n try:\n pid = os.fork()\n except (OSError, AttributeError):\n # fork failed, or not running on POSIX\n pytest.skip("Not POSIX or fork failed.")\n\n if pid == 0:\n # child; close i/o file handles\n os.close(1)\n os.close(0)\n # Avoid producing core files.\n import resource\n resource.setrlimit(resource.RLIMIT_CORE, (0, 0))\n # These calls may abort.\n try:\n np.linalg.lapack_lite.xerbla()\n except ValueError:\n pass\n except Exception:\n os._exit(os.EX_CONFIG)\n\n try:\n a = np.array([[1.]])\n np.linalg.lapack_lite.dorgqr(\n 1, 1, 1, a,\n 0, # <- invalid value\n a, a, 0, 0)\n except ValueError as e:\n if "DORGQR parameter number 5" in str(e):\n # success, reuse error code to mark success as\n # FORTRAN STOP returns as success.\n os._exit(XERBLA_OK)\n\n # Did not abort, but our xerbla was not linked in.\n os._exit(os.EX_CONFIG)\n else:\n # parent\n pid, status = os.wait()\n if os.WEXITSTATUS(status) != XERBLA_OK:\n pytest.skip('Numpy xerbla not linked in.')\n\n\n@pytest.mark.skipif(IS_WASM, reason="Cannot start subprocess")\n@pytest.mark.slow\ndef test_sdot_bug_8577():\n # Regression test that loading certain other libraries does not\n # result to wrong results in float32 linear algebra.\n #\n # There's a bug gh-8577 on OSX that can trigger this, and perhaps\n # there are also other situations in which it occurs.\n #\n # Do the check in a separate process.\n\n bad_libs = ['PyQt5.QtWidgets', 'IPython']\n\n template = textwrap.dedent("""\n import sys\n {before}\n try:\n import {bad_lib}\n except ImportError:\n sys.exit(0)\n {after}\n x = np.ones(2, dtype=np.float32)\n sys.exit(0 if np.allclose(x.dot(x), 2.0) else 1)\n """)\n\n for bad_lib in bad_libs:\n code = template.format(before="import numpy as np", after="",\n bad_lib=bad_lib)\n subprocess.check_call([sys.executable, "-c", 
code])

        # Swapped import order
        code = template.format(after="import numpy as np", before="",
                               bad_lib=bad_lib)
        subprocess.check_call([sys.executable, "-c", code])


class TestMultiDot:

    def test_basic_function_with_three_arguments(self):
        # multi_dot with three arguments uses a fast hand coded algorithm to
        # determine the optimal order. Therefore test it separately.
        A = np.random.random((6, 2))
        B = np.random.random((2, 6))
        C = np.random.random((6, 2))

        assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
        assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))

    def test_basic_function_with_two_arguments(self):
        # separate code path with two arguments
        A = np.random.random((6, 2))
        B = np.random.random((2, 6))

        assert_almost_equal(multi_dot([A, B]), A.dot(B))
        assert_almost_equal(multi_dot([A, B]), np.dot(A, B))

    def test_basic_function_with_dynamic_programming_optimization(self):
        # multi_dot with four or more arguments uses the dynamic programming
        # optimization and therefore deserves a separate test.
        A = np.random.random((6, 2))
        B = np.random.random((2, 6))
        C = np.random.random((6, 2))
        D = np.random.random((2, 1))
        assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))

    def test_vector_as_first_argument(self):
        # The first argument can be 1-D; that dimension is dropped from
        # the result (shape (2,) below, not (1, 2)).
        A1d = np.random.random(2)  # 1-D
        B = np.random.random((2, 6))
        C = np.random.random((6, 2))
        D = np.random.random((2, 2))

        # the result should be 1-D
        assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))

    def test_vector_as_last_argument(self):
        # The last argument can be 1-D
        A = np.random.random((6, 2))
        B = np.random.random((2, 6))
        C = np.random.random((6, 2))
        D1d = np.random.random(2)  # 1-D

        # the result should be 1-D
        assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))

    def test_vector_as_first_and_last_argument(self):
        # The first and last arguments can be 1-D
        A1d = np.random.random(2)  # 1-D
        B = 
np.random.random((2, 6))\n C = np.random.random((6, 2))\n D1d = np.random.random(2) # 1-D\n\n # the result should be a scalar\n assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())\n\n def test_three_arguments_and_out(self):\n # multi_dot with three arguments uses a fast hand coded algorithm to\n # determine the optimal order. Therefore test it separately.\n A = np.random.random((6, 2))\n B = np.random.random((2, 6))\n C = np.random.random((6, 2))\n\n out = np.zeros((6, 2))\n ret = multi_dot([A, B, C], out=out)\n assert out is ret\n assert_almost_equal(out, A.dot(B).dot(C))\n assert_almost_equal(out, np.dot(A, np.dot(B, C)))\n\n def test_two_arguments_and_out(self):\n # separate code path with two arguments\n A = np.random.random((6, 2))\n B = np.random.random((2, 6))\n out = np.zeros((6, 6))\n ret = multi_dot([A, B], out=out)\n assert out is ret\n assert_almost_equal(out, A.dot(B))\n assert_almost_equal(out, np.dot(A, B))\n\n def test_dynamic_programming_optimization_and_out(self):\n # multi_dot with four or more arguments uses the dynamic programming\n # optimization and therefore deserve a separate test\n A = np.random.random((6, 2))\n B = np.random.random((2, 6))\n C = np.random.random((6, 2))\n D = np.random.random((2, 1))\n out = np.zeros((6, 1))\n ret = multi_dot([A, B, C, D], out=out)\n assert out is ret\n assert_almost_equal(out, A.dot(B).dot(C).dot(D))\n\n def test_dynamic_programming_logic(self):\n # Test for the dynamic programming part\n # This test is directly taken from Cormen page 376.\n arrays = [np.random.random((30, 35)),\n np.random.random((35, 15)),\n np.random.random((15, 5)),\n np.random.random((5, 10)),\n np.random.random((10, 20)),\n np.random.random((20, 25))]\n m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.],\n [0., 0., 2625., 4375., 7125., 10500.],\n [0., 0., 0., 750., 2500., 5375.],\n [0., 0., 0., 0., 1000., 3500.],\n [0., 0., 0., 0., 0., 5000.],\n [0., 0., 0., 0., 0., 0.]])\n s_expected = np.array([[0, 1, 1, 3, 3, 
3],\n [0, 0, 2, 3, 3, 3],\n [0, 0, 0, 3, 3, 3],\n [0, 0, 0, 0, 4, 5],\n [0, 0, 0, 0, 0, 5],\n [0, 0, 0, 0, 0, 0]], dtype=int)\n s_expected -= 1 # Cormen uses 1-based index, python does not.\n\n s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)\n\n # Only the upper triangular part (without the diagonal) is interesting.\n assert_almost_equal(np.triu(s[:-1, 1:]),\n np.triu(s_expected[:-1, 1:]))\n assert_almost_equal(np.triu(m), np.triu(m_expected))\n\n def test_too_few_input_arrays(self):\n assert_raises(ValueError, multi_dot, [])\n assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])\n\n\nclass TestTensorinv:\n\n @pytest.mark.parametrize("arr, ind", [\n (np.ones((4, 6, 8, 2)), 2),\n (np.ones((3, 3, 2)), 1),\n ])\n def test_non_square_handling(self, arr, ind):\n with assert_raises(LinAlgError):\n linalg.tensorinv(arr, ind=ind)\n\n @pytest.mark.parametrize("shape, ind", [\n # examples from docstring\n ((4, 6, 8, 3), 2),\n ((24, 8, 3), 1),\n ])\n def test_tensorinv_shape(self, shape, ind):\n a = np.eye(24)\n a.shape = shape\n ainv = linalg.tensorinv(a=a, ind=ind)\n expected = a.shape[ind:] + a.shape[:ind]\n actual = ainv.shape\n assert_equal(actual, expected)\n\n @pytest.mark.parametrize("ind", [\n 0, -2,\n ])\n def test_tensorinv_ind_limit(self, ind):\n a = np.eye(24)\n a.shape = (4, 6, 8, 3)\n with assert_raises(ValueError):\n linalg.tensorinv(a=a, ind=ind)\n\n def test_tensorinv_result(self):\n # mimic a docstring example\n a = np.eye(24)\n a.shape = (24, 8, 3)\n ainv = linalg.tensorinv(a, ind=1)\n b = np.ones(24)\n assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))\n\n\nclass TestTensorsolve:\n\n @pytest.mark.parametrize("a, axes", [\n (np.ones((4, 6, 8, 2)), None),\n (np.ones((3, 3, 2)), (0, 2)),\n ])\n def test_non_square_handling(self, a, axes):\n with assert_raises(LinAlgError):\n b = np.ones(a.shape[:2])\n linalg.tensorsolve(a, b, axes=axes)\n\n @pytest.mark.parametrize("shape",\n [(2, 3, 6), (3, 4, 4, 3), (0, 3, 3, 
0)],\n )\n def test_tensorsolve_result(self, shape):\n a = np.random.randn(*shape)\n b = np.ones(a.shape[:2])\n x = np.linalg.tensorsolve(a, b)\n assert_allclose(np.tensordot(a, x, axes=len(x.shape)), b)\n\n\ndef test_unsupported_commontype():\n # linalg gracefully handles unsupported type\n arr = np.array([[1, -2], [2, 5]], dtype='float16')\n with assert_raises_regex(TypeError, "unsupported in linalg"):\n linalg.cholesky(arr)\n\n\n#@pytest.mark.slow\n#@pytest.mark.xfail(not HAS_LAPACK64, run=False,\n# reason="Numpy not compiled with 64-bit BLAS/LAPACK")\n#@requires_memory(free_bytes=16e9)\n@pytest.mark.skip(reason="Bad memory reports lead to OOM in ci testing")\ndef test_blas64_dot():\n n = 2**32\n a = np.zeros([1, n], dtype=np.float32)\n b = np.ones([1, 1], dtype=np.float32)\n a[0, -1] = 1\n c = np.dot(b, a)\n assert_equal(c[0, -1], 1)\n\n\n@pytest.mark.xfail(not HAS_LAPACK64,\n reason="Numpy not compiled with 64-bit BLAS/LAPACK")\ndef test_blas64_geqrf_lwork_smoketest():\n # Smoke test LAPACK geqrf lwork call with 64-bit integers\n dtype = np.float64\n lapack_routine = np.linalg.lapack_lite.dgeqrf\n\n m = 2**32 + 1\n n = 2**32 + 1\n lda = m\n\n # Dummy arrays, not referenced by the lapack routine, so don't\n # need to be of the right size\n a = np.zeros([1, 1], dtype=dtype)\n work = np.zeros([1], dtype=dtype)\n tau = np.zeros([1], dtype=dtype)\n\n # Size query\n results = lapack_routine(m, n, a, lda, tau, work, -1, 0)\n assert_equal(results['info'], 0)\n assert_equal(results['m'], m)\n assert_equal(results['n'], m)\n\n # Should result to an integer of a reasonable size\n lwork = int(work.item())\n assert_(2**32 < lwork < 2**42)\n\n\ndef test_diagonal():\n # Here we only test if selected axes are compatible\n # with Array API (last two). 
Core implementation\n # of `diagonal` is tested in `test_multiarray.py`.\n x = np.arange(60).reshape((3, 4, 5))\n actual = np.linalg.diagonal(x)\n expected = np.array(\n [\n [0, 6, 12, 18],\n [20, 26, 32, 38],\n [40, 46, 52, 58],\n ]\n )\n assert_equal(actual, expected)\n\n\ndef test_trace():\n # Here we only test if selected axes are compatible\n # with Array API (last two). Core implementation\n # of `trace` is tested in `test_multiarray.py`.\n x = np.arange(60).reshape((3, 4, 5))\n actual = np.linalg.trace(x)\n expected = np.array([36, 116, 196])\n\n assert_equal(actual, expected)\n\n\ndef test_cross():\n x = np.arange(9).reshape((3, 3))\n actual = np.linalg.cross(x, x + 1)\n expected = np.array([\n [-1, 2, -1],\n [-1, 2, -1],\n [-1, 2, -1],\n ])\n\n assert_equal(actual, expected)\n\n # We test that lists are converted to arrays.\n u = [1, 2, 3]\n v = [4, 5, 6]\n actual = np.linalg.cross(u, v)\n expected = array([-3, 6, -3])\n\n assert_equal(actual, expected)\n\n with assert_raises_regex(\n ValueError,\n r"input arrays must be \(arrays of\) 3-dimensional vectors"\n ):\n x_2dim = x[:, 1:]\n np.linalg.cross(x_2dim, x_2dim)\n\n\ndef test_tensordot():\n # np.linalg.tensordot is just an alias for np.tensordot\n x = np.arange(6).reshape((2, 3))\n\n assert np.linalg.tensordot(x, x) == 55\n assert np.linalg.tensordot(x, x, axes=[(0, 1), (0, 1)]) == 55\n\n\ndef test_matmul():\n # np.linalg.matmul and np.matmul only differs in the number\n # of arguments in the signature\n x = np.arange(6).reshape((2, 3))\n actual = np.linalg.matmul(x, x.T)\n expected = np.array([[5, 14], [14, 50]])\n\n assert_equal(actual, expected)\n\n\ndef test_matrix_transpose():\n x = np.arange(6).reshape((2, 3))\n actual = np.linalg.matrix_transpose(x)\n expected = x.T\n\n assert_equal(actual, expected)\n\n with assert_raises_regex(\n ValueError, "array must be at least 2-dimensional"\n ):\n np.linalg.matrix_transpose(x[:, 0])\n\n\ndef test_matrix_norm():\n x = np.arange(9).reshape((3, 3))\n actual 
= np.linalg.matrix_norm(x)\n\n assert_almost_equal(actual, np.float64(14.2828), double_decimal=3)\n\n actual = np.linalg.matrix_norm(x, keepdims=True)\n\n assert_almost_equal(actual, np.array([[14.2828]]), double_decimal=3)\n\n\ndef test_matrix_norm_empty():\n for shape in [(0, 2), (2, 0), (0, 0)]:\n for dtype in [np.float64, np.float32, np.int32]:\n x = np.zeros(shape, dtype)\n assert_equal(np.linalg.matrix_norm(x, ord="fro"), 0)\n assert_equal(np.linalg.matrix_norm(x, ord="nuc"), 0)\n assert_equal(np.linalg.matrix_norm(x, ord=1), 0)\n assert_equal(np.linalg.matrix_norm(x, ord=2), 0)\n assert_equal(np.linalg.matrix_norm(x, ord=np.inf), 0)\n\ndef test_vector_norm():\n x = np.arange(9).reshape((3, 3))\n actual = np.linalg.vector_norm(x)\n\n assert_almost_equal(actual, np.float64(14.2828), double_decimal=3)\n\n actual = np.linalg.vector_norm(x, axis=0)\n\n assert_almost_equal(\n actual, np.array([6.7082, 8.124, 9.6436]), double_decimal=3\n )\n\n actual = np.linalg.vector_norm(x, keepdims=True)\n expected = np.full((1, 1), 14.2828, dtype='float64')\n assert_equal(actual.shape, expected.shape)\n assert_almost_equal(actual, expected, double_decimal=3)\n\n\ndef test_vector_norm_empty():\n for dtype in [np.float64, np.float32, np.int32]:\n x = np.zeros(0, dtype)\n assert_equal(np.linalg.vector_norm(x, ord=1), 0)\n assert_equal(np.linalg.vector_norm(x, ord=2), 0)\n assert_equal(np.linalg.vector_norm(x, ord=np.inf), 0)\n | .venv\Lib\site-packages\numpy\linalg\tests\test_linalg.py | test_linalg.py | Python | 86,742 | 0.75 | 0.13786 | 0.107715 | python-kit | 742 | 2024-11-14T17:01:37.108279 | BSD-3-Clause | true | bf22590d53398da048b4c3156bd760e6 |
""" Test functions for linalg module\n"""\n\nimport pytest\n\nimport numpy as np\nfrom numpy import arange, array, dot, float64, linalg, transpose\nfrom numpy.testing import (\n assert_,\n assert_array_almost_equal,\n assert_array_equal,\n assert_array_less,\n assert_equal,\n assert_raises,\n)\n\n\nclass TestRegression:\n\n def test_eig_build(self):\n # Ticket #652\n rva = array([1.03221168e+02 + 0.j,\n -1.91843603e+01 + 0.j,\n -6.04004526e-01 + 15.84422474j,\n -6.04004526e-01 - 15.84422474j,\n -1.13692929e+01 + 0.j,\n -6.57612485e-01 + 10.41755503j,\n -6.57612485e-01 - 10.41755503j,\n 1.82126812e+01 + 0.j,\n 1.06011014e+01 + 0.j,\n 7.80732773e+00 + 0.j,\n -7.65390898e-01 + 0.j,\n 1.51971555e-15 + 0.j,\n -1.51308713e-15 + 0.j])\n a = arange(13 * 13, dtype=float64)\n a.shape = (13, 13)\n a = a % 17\n va, ve = linalg.eig(a)\n va.sort()\n rva.sort()\n assert_array_almost_equal(va, rva)\n\n def test_eigh_build(self):\n # Ticket 662.\n rvals = [68.60568999, 89.57756725, 106.67185574]\n\n cov = array([[77.70273908, 3.51489954, 15.64602427],\n [ 3.51489954, 88.97013878, -1.07431931],\n [15.64602427, -1.07431931, 98.18223512]])\n\n vals, vecs = linalg.eigh(cov)\n assert_array_almost_equal(vals, rvals)\n\n def test_svd_build(self):\n # Ticket 627.\n a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])\n m, n = a.shape\n u, s, vh = linalg.svd(a)\n\n b = dot(transpose(u[:, n:]), a)\n\n assert_array_almost_equal(b, np.zeros((2, 2)))\n\n def test_norm_vector_badarg(self):\n # Regression for #786: Frobenius norm for vectors raises\n # ValueError.\n assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')\n\n def test_lapack_endian(self):\n # For bug #1482\n a = array([[ 5.7998084, -2.1825367],\n [-2.1825367, 9.85910595]], dtype='>f8')\n b = array(a, dtype='<f8')\n\n ap = linalg.cholesky(a)\n bp = linalg.cholesky(b)\n assert_array_equal(ap, bp)\n\n def test_large_svd_32bit(self):\n # See gh-4442, 64bit would require very large/slow matrices.\n x = np.eye(1000, 66)\n 
np.linalg.svd(x)\n\n def test_svd_no_uv(self):\n # gh-4733\n for shape in (3, 4), (4, 4), (4, 3):\n for t in float, complex:\n a = np.ones(shape, dtype=t)\n w = linalg.svd(a, compute_uv=False)\n c = np.count_nonzero(np.absolute(w) > 0.5)\n assert_equal(c, 1)\n assert_equal(np.linalg.matrix_rank(a), 1)\n assert_array_less(1, np.linalg.norm(a, ord=2))\n\n w_svdvals = linalg.svdvals(a)\n assert_array_almost_equal(w, w_svdvals)\n\n def test_norm_object_array(self):\n # gh-7575\n testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)\n\n norm = linalg.norm(testvector)\n assert_array_equal(norm, [0, 1])\n assert_(norm.dtype == np.dtype('float64'))\n\n norm = linalg.norm(testvector, ord=1)\n assert_array_equal(norm, [0, 1])\n assert_(norm.dtype != np.dtype('float64'))\n\n norm = linalg.norm(testvector, ord=2)\n assert_array_equal(norm, [0, 1])\n assert_(norm.dtype == np.dtype('float64'))\n\n assert_raises(ValueError, linalg.norm, testvector, ord='fro')\n assert_raises(ValueError, linalg.norm, testvector, ord='nuc')\n assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)\n assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)\n assert_raises(ValueError, linalg.norm, testvector, ord=0)\n assert_raises(ValueError, linalg.norm, testvector, ord=-1)\n assert_raises(ValueError, linalg.norm, testvector, ord=-2)\n\n testmatrix = np.array([[np.array([0, 1]), 0, 0],\n [0, 0, 0]], dtype=object)\n\n norm = linalg.norm(testmatrix)\n assert_array_equal(norm, [0, 1])\n assert_(norm.dtype == np.dtype('float64'))\n\n norm = linalg.norm(testmatrix, ord='fro')\n assert_array_equal(norm, [0, 1])\n assert_(norm.dtype == np.dtype('float64'))\n\n assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')\n assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)\n assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)\n assert_raises(ValueError, linalg.norm, testmatrix, ord=0)\n assert_raises(ValueError, linalg.norm, testmatrix, ord=1)\n 
assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)\n assert_raises(TypeError, linalg.norm, testmatrix, ord=2)\n assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)\n assert_raises(ValueError, linalg.norm, testmatrix, ord=3)\n\n def test_lstsq_complex_larger_rhs(self):\n # gh-9891\n size = 20\n n_rhs = 70\n G = np.random.randn(size, size) + 1j * np.random.randn(size, size)\n u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)\n b = G.dot(u)\n # This should work without segmentation fault.\n u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)\n # check results just in case\n assert_array_almost_equal(u_lstsq, u)\n\n @pytest.mark.parametrize("upper", [True, False])\n def test_cholesky_empty_array(self, upper):\n # gh-25840 - upper=True hung before.\n res = np.linalg.cholesky(np.zeros((0, 0)), upper=upper)\n assert res.size == 0\n\n @pytest.mark.parametrize("rtol", [0.0, [0.0] * 4, np.zeros((4,))])\n def test_matrix_rank_rtol_argument(self, rtol):\n # gh-25877\n x = np.zeros((4, 3, 2))\n res = np.linalg.matrix_rank(x, rtol=rtol)\n assert res.shape == (4,)\n\n def test_openblas_threading(self):\n # gh-27036\n # Test whether matrix multiplication involving a large matrix always\n # gives the same (correct) answer\n x = np.arange(500000, dtype=np.float64)\n src = np.vstack((x, -10 * x)).T\n matrix = np.array([[0, 1], [1, 0]])\n expected = np.vstack((-10 * x, x)).T # src @ matrix\n for i in range(200):\n result = src @ matrix\n mismatches = (~np.isclose(result, expected)).sum()\n if mismatches != 0:\n assert False, ("unexpected result from matmul, "\n "probably due to OpenBLAS threading issues")\n | .venv\Lib\site-packages\numpy\linalg\tests\test_regression.py | test_regression.py | Python | 6,885 | 0.95 | 0.110497 | 0.112583 | python-kit | 461 | 2025-05-22T05:41:03.634145 | MIT | true | e5032f829cee30f361559bdf0501fb62 |
\n\n | .venv\Lib\site-packages\numpy\linalg\tests\__pycache__\test_deprecations.cpython-313.pyc | test_deprecations.cpython-313.pyc | Other | 1,199 | 0.8 | 0 | 0 | vue-tools | 86 | 2025-04-28T13:03:30.572291 | GPL-3.0 | true | 8369cfb65d9f6b230e463c4b6e0dc0f5 |
\n\n | .venv\Lib\site-packages\numpy\linalg\tests\__pycache__\test_regression.cpython-313.pyc | test_regression.cpython-313.pyc | Other | 10,786 | 0.95 | 0.011494 | 0 | vue-tools | 744 | 2025-02-04T15:11:25.672141 | GPL-3.0 | true | 71c1f4a89ac797739d31ebab0b9d63a3 |
\n\n | .venv\Lib\site-packages\numpy\linalg\tests\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 193 | 0.7 | 0 | 0 | python-kit | 340 | 2024-08-05T20:58:10.894281 | Apache-2.0 | true | b403e743fffb1c53a44f939a0b4bea64 |
\n\n | .venv\Lib\site-packages\numpy\linalg\__pycache__\linalg.cpython-313.pyc | linalg.cpython-313.pyc | Other | 893 | 0.85 | 0 | 0 | node-utils | 846 | 2024-02-22T01:24:38.720034 | Apache-2.0 | false | ecf304c82fb81526cb2bee20504e13cf |
\n\n | .venv\Lib\site-packages\numpy\linalg\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 2,338 | 0.95 | 0 | 0 | python-kit | 453 | 2025-06-04T04:23:28.132771 | Apache-2.0 | false | e1f8bda2396b7eafd55d2bd123de024c |
.. -*- rest -*-\n\n==================================================\nAPI changes in the new masked array implementation\n==================================================\n\nMasked arrays are subclasses of ndarray\n---------------------------------------\n\nContrary to the original implementation, masked arrays are now regular\nndarrays::\n\n >>> x = masked_array([1,2,3],mask=[0,0,1])\n >>> print isinstance(x, numpy.ndarray)\n True\n\n\n``_data`` returns a view of the masked array\n--------------------------------------------\n\nMasked arrays are composed of a ``_data`` part and a ``_mask``. Accessing the\n``_data`` part will return a regular ndarray or any of its subclass, depending\non the initial data::\n\n >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])\n >>> print x._data\n [[1 2]\n [3 4]]\n >>> print type(x._data)\n <class 'numpy.matrixlib.defmatrix.matrix'>\n\n\nIn practice, ``_data`` is implemented as a property, not as an attribute.\nTherefore, you cannot access it directly, and some simple tests such as the\nfollowing one will fail::\n\n >>>x._data is x._data\n False\n\n\n``filled(x)`` can return a subclass of ndarray\n----------------------------------------------\nThe function ``filled(a)`` returns an array of the same type as ``a._data``::\n\n >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])\n >>> y = filled(x)\n >>> print type(y)\n <class 'numpy.matrixlib.defmatrix.matrix'>\n >>> print y\n matrix([[ 1, 2],\n [ 3, 999999]])\n\n\n``put``, ``putmask`` behave like their ndarray counterparts\n-----------------------------------------------------------\n\nPreviously, ``putmask`` was used like this::\n\n mask = [False,True,True]\n x = array([1,4,7],mask=mask)\n putmask(x,mask,[3])\n\nwhich translated to::\n\n x[~mask] = [3]\n\n(Note that a ``True``-value in a mask suppresses a value.)\n\nIn other words, the mask had the same length as ``x``, whereas\n``values`` had ``sum(~mask)`` elements.\n\nNow, the behaviour is 
similar to that of ``ndarray.putmask``, where\nthe mask and the values are both the same length as ``x``, i.e.\n\n::\n\n putmask(x,mask,[3,0,0])\n\n\n``fill_value`` is a property\n----------------------------\n\n``fill_value`` is no longer a method, but a property::\n\n >>> print x.fill_value\n 999999\n\n``cumsum`` and ``cumprod`` ignore missing values\n------------------------------------------------\n\nMissing values are assumed to be the identity element, i.e. 0 for\n``cumsum`` and 1 for ``cumprod``::\n\n >>> x = N.ma.array([1,2,3,4],mask=[False,True,False,False])\n >>> print x\n [1 -- 3 4]\n >>> print x.cumsum()\n [1 -- 4 8]\n >> print x.cumprod()\n [1 -- 3 12]\n\n``bool(x)`` raises a ValueError\n-------------------------------\n\nMasked arrays now behave like regular ``ndarrays``, in that they cannot be\nconverted to booleans:\n\n::\n\n >>> x = N.ma.array([1,2,3])\n >>> bool(x)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()\n\n\n==================================\nNew features (non exhaustive list)\n==================================\n\n``mr_``\n-------\n\n``mr_`` mimics the behavior of ``r_`` for masked arrays::\n\n >>> np.ma.mr_[3,4,5]\n masked_array(data = [3 4 5],\n mask = False,\n fill_value=999999)\n\n\n``anom``\n--------\n\nThe ``anom`` method returns the deviations from the average (anomalies).\n | .venv\Lib\site-packages\numpy\ma\API_CHANGES.txt | API_CHANGES.txt | Other | 3,540 | 0.85 | 0.044444 | 0 | node-utils | 745 | 2024-06-14T01:16:17.546369 | Apache-2.0 | false | e275133c8f869be496724fba9156453f |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.