ZTWHHH commited on
Commit
db528b9
·
verified ·
1 Parent(s): 8b79ecd

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +10 -0
  2. parrot/lib/python3.10/site-packages/scipy/cluster/__init__.py +31 -0
  3. parrot/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc +0 -0
  4. parrot/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc +0 -0
  5. parrot/lib/python3.10/site-packages/scipy/cluster/hierarchy.py +0 -0
  6. parrot/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py +0 -0
  7. parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc +0 -0
  10. parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py +145 -0
  13. parrot/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py +202 -0
  14. parrot/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py +1262 -0
  15. parrot/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py +435 -0
  16. parrot/lib/python3.10/site-packages/scipy/cluster/vq.py +835 -0
  17. parrot/lib/python3.10/site-packages/scipy/fft/__init__.py +114 -0
  18. parrot/lib/python3.10/site-packages/scipy/fft/_backend.py +196 -0
  19. parrot/lib/python3.10/site-packages/scipy/fft/_basic.py +1630 -0
  20. parrot/lib/python3.10/site-packages/scipy/fft/_basic_backend.py +180 -0
  21. parrot/lib/python3.10/site-packages/scipy/fft/_debug_backends.py +22 -0
  22. parrot/lib/python3.10/site-packages/scipy/fft/_fftlog.py +223 -0
  23. parrot/lib/python3.10/site-packages/scipy/fft/_fftlog_backend.py +199 -0
  24. parrot/lib/python3.10/site-packages/scipy/fft/_helper.py +379 -0
  25. parrot/lib/python3.10/site-packages/scipy/fft/_pocketfft/LICENSE.md +25 -0
  26. parrot/lib/python3.10/site-packages/scipy/fft/_realtransforms.py +693 -0
  27. parrot/lib/python3.10/site-packages/scipy/fft/_realtransforms_backend.py +63 -0
  28. parrot/lib/python3.10/site-packages/scipy/fft/tests/mock_backend.py +92 -0
  29. parrot/lib/python3.10/site-packages/scipy/fft/tests/test_backend.py +98 -0
  30. parrot/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so +3 -0
  31. parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav +3 -0
  32. parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav +3 -0
  33. parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav +3 -0
  34. parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav +3 -0
  35. parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav +3 -0
  36. parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav +3 -0
  37. parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav +3 -0
  38. parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/__init__.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/_geometric_slerp.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/_plotutils.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/_procrustes.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/ckdtree.cpython-310.pyc +0 -0
  43. parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/distance.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/kdtree.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/qhull.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/scipy/spatial/qhull_src/COPYING.txt +38 -0
  47. parrot/lib/python3.10/site-packages/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt +0 -0
  48. parrot/lib/python3.10/site-packages/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt +0 -0
  49. parrot/lib/python3.10/site-packages/scipy/spatial/transform/__init__.py +29 -0
  50. parrot/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1494,3 +1494,13 @@ vllm/lib/python3.10/site-packages/torchvision/image.so filter=lfs diff=lfs merge
1494
  vllm/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
1495
  vllm/lib/python3.10/site-packages/tokenizers/tokenizers.abi3.so filter=lfs diff=lfs merge=lfs -text
1496
  vllm/lib/python3.10/site-packages/rpds/rpds.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
1494
  vllm/lib/python3.10/site-packages/torchvision/_C.so filter=lfs diff=lfs merge=lfs -text
1495
  vllm/lib/python3.10/site-packages/tokenizers/tokenizers.abi3.so filter=lfs diff=lfs merge=lfs -text
1496
  vllm/lib/python3.10/site-packages/rpds/rpds.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1497
+ vllm/lib/python3.10/site-packages/imageio/plugins/__pycache__/_tifffile.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1498
+ vllm/lib/python3.10/site-packages/av.libs/libavutil-6eb452c3.so.59.39.100 filter=lfs diff=lfs merge=lfs -text
1499
+ vllm/lib/python3.10/site-packages/av.libs/libxml2-c46e7314.so.2.9.13 filter=lfs diff=lfs merge=lfs -text
1500
+ vllm/lib/python3.10/site-packages/av.libs/libgssapi_krb5-497db0c6.so.2.2 filter=lfs diff=lfs merge=lfs -text
1501
+ vllm/lib/python3.10/site-packages/charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1502
+ vllm/lib/python3.10/site-packages/av.libs/libmp3lame-3ecc6556.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
1503
+ vllm/lib/python3.10/site-packages/av.libs/libopus-21bd4123.so.0.10.1 filter=lfs diff=lfs merge=lfs -text
1504
+ parrot/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1505
+ parrot/lib/python3.10/site-packages/scipy/stats/_stats_pythran.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1506
+ parrot/lib/python3.10/site-packages/scipy/stats/tests/__pycache__/test_stats.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/scipy/cluster/__init__.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =========================================
3
+ Clustering package (:mod:`scipy.cluster`)
4
+ =========================================
5
+
6
+ .. currentmodule:: scipy.cluster
7
+
8
+ .. toctree::
9
+ :hidden:
10
+
11
+ cluster.vq
12
+ cluster.hierarchy
13
+
14
+ Clustering algorithms are useful in information theory, target detection,
15
+ communications, compression, and other areas. The `vq` module only
16
+ supports vector quantization and the k-means algorithms.
17
+
18
+ The `hierarchy` module provides functions for hierarchical and
19
+ agglomerative clustering. Its features include generating hierarchical
20
+ clusters from distance matrices,
21
+ calculating statistics on clusters, cutting linkages
22
+ to generate flat clusters, and visualizing clusters with dendrograms.
23
+
24
+ """
25
+ __all__ = ['vq', 'hierarchy']
26
+
27
+ from . import vq, hierarchy
28
+
29
+ from scipy._lib._testutils import PytestTester
30
+ test = PytestTester(__name__)
31
+ del PytestTester
parrot/lib/python3.10/site-packages/scipy/cluster/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/cluster/__pycache__/vq.cpython-310.pyc ADDED
Binary file (28.2 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/cluster/hierarchy.py ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/scipy/cluster/tests/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (171 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/hierarchy_test_data.cpython-310.pyc ADDED
Binary file (4.67 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_disjoint_set.cpython-310.pyc ADDED
Binary file (6.2 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_hierarchy.cpython-310.pyc ADDED
Binary file (41.8 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/cluster/tests/__pycache__/test_vq.cpython-310.pyc ADDED
Binary file (17.2 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/cluster/tests/hierarchy_test_data.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy import array
2
+
3
+
4
+ Q_X = array([[5.26563660e-01, 3.14160190e-01, 8.00656370e-02],
5
+ [7.50205180e-01, 4.60299830e-01, 8.98696460e-01],
6
+ [6.65461230e-01, 6.94011420e-01, 9.10465700e-01],
7
+ [9.64047590e-01, 1.43082200e-03, 7.39874220e-01],
8
+ [1.08159060e-01, 5.53028790e-01, 6.63804780e-02],
9
+ [9.31359130e-01, 8.25424910e-01, 9.52315440e-01],
10
+ [6.78086960e-01, 3.41903970e-01, 5.61481950e-01],
11
+ [9.82730940e-01, 7.04605210e-01, 8.70978630e-02],
12
+ [6.14691610e-01, 4.69989230e-02, 6.02406450e-01],
13
+ [5.80161260e-01, 9.17354970e-01, 5.88163850e-01],
14
+ [1.38246310e+00, 1.96358160e+00, 1.94437880e+00],
15
+ [2.10675860e+00, 1.67148730e+00, 1.34854480e+00],
16
+ [1.39880070e+00, 1.66142050e+00, 1.32224550e+00],
17
+ [1.71410460e+00, 1.49176380e+00, 1.45432170e+00],
18
+ [1.54102340e+00, 1.84374950e+00, 1.64658950e+00],
19
+ [2.08512480e+00, 1.84524350e+00, 2.17340850e+00],
20
+ [1.30748740e+00, 1.53801650e+00, 2.16007740e+00],
21
+ [1.41447700e+00, 1.99329070e+00, 1.99107420e+00],
22
+ [1.61943490e+00, 1.47703280e+00, 1.89788160e+00],
23
+ [1.59880600e+00, 1.54988980e+00, 1.57563350e+00],
24
+ [3.37247380e+00, 2.69635310e+00, 3.39981700e+00],
25
+ [3.13705120e+00, 3.36528090e+00, 3.06089070e+00],
26
+ [3.29413250e+00, 3.19619500e+00, 2.90700170e+00],
27
+ [2.65510510e+00, 3.06785900e+00, 2.97198540e+00],
28
+ [3.30941040e+00, 2.59283970e+00, 2.57714110e+00],
29
+ [2.59557220e+00, 3.33477370e+00, 3.08793190e+00],
30
+ [2.58206180e+00, 3.41615670e+00, 3.26441990e+00],
31
+ [2.71127000e+00, 2.77032450e+00, 2.63466500e+00],
32
+ [2.79617850e+00, 3.25473720e+00, 3.41801560e+00],
33
+ [2.64741750e+00, 2.54538040e+00, 3.25354110e+00]])
34
+
35
+ ytdist = array([662., 877., 255., 412., 996., 295., 468., 268., 400., 754.,
36
+ 564., 138., 219., 869., 669.])
37
+
38
+ linkage_ytdist_single = array([[2., 5., 138., 2.],
39
+ [3., 4., 219., 2.],
40
+ [0., 7., 255., 3.],
41
+ [1., 8., 268., 4.],
42
+ [6., 9., 295., 6.]])
43
+
44
+ linkage_ytdist_complete = array([[2., 5., 138., 2.],
45
+ [3., 4., 219., 2.],
46
+ [1., 6., 400., 3.],
47
+ [0., 7., 412., 3.],
48
+ [8., 9., 996., 6.]])
49
+
50
+ linkage_ytdist_average = array([[2., 5., 138., 2.],
51
+ [3., 4., 219., 2.],
52
+ [0., 7., 333.5, 3.],
53
+ [1., 6., 347.5, 3.],
54
+ [8., 9., 680.77777778, 6.]])
55
+
56
+ linkage_ytdist_weighted = array([[2., 5., 138., 2.],
57
+ [3., 4., 219., 2.],
58
+ [0., 7., 333.5, 3.],
59
+ [1., 6., 347.5, 3.],
60
+ [8., 9., 670.125, 6.]])
61
+
62
+ # the optimal leaf ordering of linkage_ytdist_single
63
+ linkage_ytdist_single_olo = array([[5., 2., 138., 2.],
64
+ [4., 3., 219., 2.],
65
+ [7., 0., 255., 3.],
66
+ [1., 8., 268., 4.],
67
+ [6., 9., 295., 6.]])
68
+
69
+ X = array([[1.43054825, -7.5693489],
70
+ [6.95887839, 6.82293382],
71
+ [2.87137846, -9.68248579],
72
+ [7.87974764, -6.05485803],
73
+ [8.24018364, -6.09495602],
74
+ [7.39020262, 8.54004355]])
75
+
76
+ linkage_X_centroid = array([[3., 4., 0.36265956, 2.],
77
+ [1., 5., 1.77045373, 2.],
78
+ [0., 2., 2.55760419, 2.],
79
+ [6., 8., 6.43614494, 4.],
80
+ [7., 9., 15.17363237, 6.]])
81
+
82
+ linkage_X_median = array([[3., 4., 0.36265956, 2.],
83
+ [1., 5., 1.77045373, 2.],
84
+ [0., 2., 2.55760419, 2.],
85
+ [6., 8., 6.43614494, 4.],
86
+ [7., 9., 15.17363237, 6.]])
87
+
88
+ linkage_X_ward = array([[3., 4., 0.36265956, 2.],
89
+ [1., 5., 1.77045373, 2.],
90
+ [0., 2., 2.55760419, 2.],
91
+ [6., 8., 9.10208346, 4.],
92
+ [7., 9., 24.7784379, 6.]])
93
+
94
+ # the optimal leaf ordering of linkage_X_ward
95
+ linkage_X_ward_olo = array([[4., 3., 0.36265956, 2.],
96
+ [5., 1., 1.77045373, 2.],
97
+ [2., 0., 2.55760419, 2.],
98
+ [6., 8., 9.10208346, 4.],
99
+ [7., 9., 24.7784379, 6.]])
100
+
101
+ inconsistent_ytdist = {
102
+ 1: array([[138., 0., 1., 0.],
103
+ [219., 0., 1., 0.],
104
+ [255., 0., 1., 0.],
105
+ [268., 0., 1., 0.],
106
+ [295., 0., 1., 0.]]),
107
+ 2: array([[138., 0., 1., 0.],
108
+ [219., 0., 1., 0.],
109
+ [237., 25.45584412, 2., 0.70710678],
110
+ [261.5, 9.19238816, 2., 0.70710678],
111
+ [233.66666667, 83.9424406, 3., 0.7306594]]),
112
+ 3: array([[138., 0., 1., 0.],
113
+ [219., 0., 1., 0.],
114
+ [237., 25.45584412, 2., 0.70710678],
115
+ [247.33333333, 25.38372182, 3., 0.81417007],
116
+ [239., 69.36377537, 4., 0.80733783]]),
117
+ 4: array([[138., 0., 1., 0.],
118
+ [219., 0., 1., 0.],
119
+ [237., 25.45584412, 2., 0.70710678],
120
+ [247.33333333, 25.38372182, 3., 0.81417007],
121
+ [235., 60.73302232, 5., 0.98793042]])}
122
+
123
+ fcluster_inconsistent = {
124
+ 0.8: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
125
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
126
+ 1.0: array([6, 2, 2, 4, 6, 2, 3, 7, 3, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 1,
127
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
128
+ 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
129
+ 1, 1, 1, 1, 1, 1, 1, 1, 1])}
130
+
131
+ fcluster_distance = {
132
+ 0.6: array([4, 4, 4, 4, 4, 4, 4, 5, 4, 4, 6, 6, 6, 6, 6, 7, 6, 6, 6, 6, 3,
133
+ 1, 1, 1, 2, 1, 1, 1, 1, 1]),
134
+ 1.0: array([2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 1,
135
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
136
+ 2.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
137
+ 1, 1, 1, 1, 1, 1, 1, 1, 1])}
138
+
139
+ fcluster_maxclust = {
140
+ 8.0: array([5, 5, 5, 5, 5, 5, 5, 6, 5, 5, 7, 7, 7, 7, 7, 8, 7, 7, 7, 7, 4,
141
+ 1, 1, 1, 3, 1, 1, 1, 1, 2]),
142
+ 4.0: array([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 2,
143
+ 1, 1, 1, 1, 1, 1, 1, 1, 1]),
144
+ 1.0: array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
145
+ 1, 1, 1, 1, 1, 1, 1, 1, 1])}
parrot/lib/python3.10/site-packages/scipy/cluster/tests/test_disjoint_set.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from pytest import raises as assert_raises
3
+ import numpy as np
4
+ from scipy.cluster.hierarchy import DisjointSet
5
+ import string
6
+
7
+
8
+ def generate_random_token():
9
+ k = len(string.ascii_letters)
10
+ tokens = list(np.arange(k, dtype=int))
11
+ tokens += list(np.arange(k, dtype=float))
12
+ tokens += list(string.ascii_letters)
13
+ tokens += [None for i in range(k)]
14
+ tokens = np.array(tokens, dtype=object)
15
+ rng = np.random.RandomState(seed=0)
16
+
17
+ while 1:
18
+ size = rng.randint(1, 3)
19
+ element = rng.choice(tokens, size)
20
+ if size == 1:
21
+ yield element[0]
22
+ else:
23
+ yield tuple(element)
24
+
25
+
26
+ def get_elements(n):
27
+ # dict is deterministic without difficulty of comparing numpy ints
28
+ elements = {}
29
+ for element in generate_random_token():
30
+ if element not in elements:
31
+ elements[element] = len(elements)
32
+ if len(elements) >= n:
33
+ break
34
+ return list(elements.keys())
35
+
36
+
37
+ def test_init():
38
+ n = 10
39
+ elements = get_elements(n)
40
+ dis = DisjointSet(elements)
41
+ assert dis.n_subsets == n
42
+ assert list(dis) == elements
43
+
44
+
45
+ def test_len():
46
+ n = 10
47
+ elements = get_elements(n)
48
+ dis = DisjointSet(elements)
49
+ assert len(dis) == n
50
+
51
+ dis.add("dummy")
52
+ assert len(dis) == n + 1
53
+
54
+
55
+ @pytest.mark.parametrize("n", [10, 100])
56
+ def test_contains(n):
57
+ elements = get_elements(n)
58
+ dis = DisjointSet(elements)
59
+ for x in elements:
60
+ assert x in dis
61
+
62
+ assert "dummy" not in dis
63
+
64
+
65
+ @pytest.mark.parametrize("n", [10, 100])
66
+ def test_add(n):
67
+ elements = get_elements(n)
68
+ dis1 = DisjointSet(elements)
69
+
70
+ dis2 = DisjointSet()
71
+ for i, x in enumerate(elements):
72
+ dis2.add(x)
73
+ assert len(dis2) == i + 1
74
+
75
+ # test idempotency by adding element again
76
+ dis2.add(x)
77
+ assert len(dis2) == i + 1
78
+
79
+ assert list(dis1) == list(dis2)
80
+
81
+
82
+ def test_element_not_present():
83
+ elements = get_elements(n=10)
84
+ dis = DisjointSet(elements)
85
+
86
+ with assert_raises(KeyError):
87
+ dis["dummy"]
88
+
89
+ with assert_raises(KeyError):
90
+ dis.merge(elements[0], "dummy")
91
+
92
+ with assert_raises(KeyError):
93
+ dis.connected(elements[0], "dummy")
94
+
95
+
96
+ @pytest.mark.parametrize("direction", ["forwards", "backwards"])
97
+ @pytest.mark.parametrize("n", [10, 100])
98
+ def test_linear_union_sequence(n, direction):
99
+ elements = get_elements(n)
100
+ dis = DisjointSet(elements)
101
+ assert elements == list(dis)
102
+
103
+ indices = list(range(n - 1))
104
+ if direction == "backwards":
105
+ indices = indices[::-1]
106
+
107
+ for it, i in enumerate(indices):
108
+ assert not dis.connected(elements[i], elements[i + 1])
109
+ assert dis.merge(elements[i], elements[i + 1])
110
+ assert dis.connected(elements[i], elements[i + 1])
111
+ assert dis.n_subsets == n - 1 - it
112
+
113
+ roots = [dis[i] for i in elements]
114
+ if direction == "forwards":
115
+ assert all(elements[0] == r for r in roots)
116
+ else:
117
+ assert all(elements[-2] == r for r in roots)
118
+ assert not dis.merge(elements[0], elements[-1])
119
+
120
+
121
+ @pytest.mark.parametrize("n", [10, 100])
122
+ def test_self_unions(n):
123
+ elements = get_elements(n)
124
+ dis = DisjointSet(elements)
125
+
126
+ for x in elements:
127
+ assert dis.connected(x, x)
128
+ assert not dis.merge(x, x)
129
+ assert dis.connected(x, x)
130
+ assert dis.n_subsets == len(elements)
131
+
132
+ assert elements == list(dis)
133
+ roots = [dis[x] for x in elements]
134
+ assert elements == roots
135
+
136
+
137
+ @pytest.mark.parametrize("order", ["ab", "ba"])
138
+ @pytest.mark.parametrize("n", [10, 100])
139
+ def test_equal_size_ordering(n, order):
140
+ elements = get_elements(n)
141
+ dis = DisjointSet(elements)
142
+
143
+ rng = np.random.RandomState(seed=0)
144
+ indices = np.arange(n)
145
+ rng.shuffle(indices)
146
+
147
+ for i in range(0, len(indices), 2):
148
+ a, b = elements[indices[i]], elements[indices[i + 1]]
149
+ if order == "ab":
150
+ assert dis.merge(a, b)
151
+ else:
152
+ assert dis.merge(b, a)
153
+
154
+ expected = elements[min(indices[i], indices[i + 1])]
155
+ assert dis[a] == expected
156
+ assert dis[b] == expected
157
+
158
+
159
+ @pytest.mark.parametrize("kmax", [5, 10])
160
+ def test_binary_tree(kmax):
161
+ n = 2**kmax
162
+ elements = get_elements(n)
163
+ dis = DisjointSet(elements)
164
+ rng = np.random.RandomState(seed=0)
165
+
166
+ for k in 2**np.arange(kmax):
167
+ for i in range(0, n, 2 * k):
168
+ r1, r2 = rng.randint(0, k, size=2)
169
+ a, b = elements[i + r1], elements[i + k + r2]
170
+ assert not dis.connected(a, b)
171
+ assert dis.merge(a, b)
172
+ assert dis.connected(a, b)
173
+
174
+ assert elements == list(dis)
175
+ roots = [dis[i] for i in elements]
176
+ expected_indices = np.arange(n) - np.arange(n) % (2 * k)
177
+ expected = [elements[i] for i in expected_indices]
178
+ assert roots == expected
179
+
180
+
181
+ @pytest.mark.parametrize("n", [10, 100])
182
+ def test_subsets(n):
183
+ elements = get_elements(n)
184
+ dis = DisjointSet(elements)
185
+
186
+ rng = np.random.RandomState(seed=0)
187
+ for i, j in rng.randint(0, n, (n, 2)):
188
+ x = elements[i]
189
+ y = elements[j]
190
+
191
+ expected = {element for element in dis if {dis[element]} == {dis[x]}}
192
+ assert dis.subset_size(x) == len(dis.subset(x))
193
+ assert expected == dis.subset(x)
194
+
195
+ expected = {dis[element]: set() for element in dis}
196
+ for element in dis:
197
+ expected[dis[element]].add(element)
198
+ expected = list(expected.values())
199
+ assert expected == dis.subsets()
200
+
201
+ dis.merge(x, y)
202
+ assert dis.subset(x) == dis.subset(y)
parrot/lib/python3.10/site-packages/scipy/cluster/tests/test_hierarchy.py ADDED
@@ -0,0 +1,1262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Author: Damian Eads
3
+ # Date: April 17, 2008
4
+ #
5
+ # Copyright (C) 2008 Damian Eads
6
+ #
7
+ # Redistribution and use in source and binary forms, with or without
8
+ # modification, are permitted provided that the following conditions
9
+ # are met:
10
+ #
11
+ # 1. Redistributions of source code must retain the above copyright
12
+ # notice, this list of conditions and the following disclaimer.
13
+ #
14
+ # 2. Redistributions in binary form must reproduce the above
15
+ # copyright notice, this list of conditions and the following
16
+ # disclaimer in the documentation and/or other materials provided
17
+ # with the distribution.
18
+ #
19
+ # 3. The name of the author may not be used to endorse or promote
20
+ # products derived from this software without specific prior
21
+ # written permission.
22
+ #
23
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
24
+ # OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25
+ # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26
+ # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
27
+ # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
29
+ # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
30
+ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31
+ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32
+ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33
+ # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34
+ import numpy as np
35
+ from numpy.testing import assert_allclose, assert_equal, assert_, assert_warns
36
+ import pytest
37
+ from pytest import raises as assert_raises
38
+
39
+ import scipy.cluster.hierarchy
40
+ from scipy.cluster.hierarchy import (
41
+ ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
42
+ num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
43
+ is_isomorphic, single, leaders,
44
+ correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
45
+ is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
46
+ set_link_color_palette, cut_tree, optimal_leaf_ordering,
47
+ _order_cluster_tree, _hierarchy, _LINKAGE_METHODS)
48
+ from scipy.spatial.distance import pdist
49
+ from scipy.cluster._hierarchy import Heap
50
+ from scipy.conftest import array_api_compatible
51
+ from scipy._lib._array_api import xp_assert_close, xp_assert_equal
52
+
53
+ from . import hierarchy_test_data
54
+
55
+
56
+ # Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
57
+ # check if it's available
58
+ try:
59
+ import matplotlib
60
+ # and set the backend to be Agg (no gui)
61
+ matplotlib.use('Agg')
62
+ # before importing pyplot
63
+ import matplotlib.pyplot as plt
64
+ have_matplotlib = True
65
+ except Exception:
66
+ have_matplotlib = False
67
+
68
+
69
+ pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")]
70
+ skip_xp_backends = pytest.mark.skip_xp_backends
71
+
72
+
73
+ class TestLinkage:
74
+
75
+ @skip_xp_backends(cpu_only=True)
76
+ def test_linkage_non_finite_elements_in_distance_matrix(self, xp):
77
+ # Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
78
+ # Exception expected.
79
+ y = xp.asarray([xp.nan] + [0.0]*5)
80
+ assert_raises(ValueError, linkage, y)
81
+
82
+ @skip_xp_backends(cpu_only=True)
83
+ def test_linkage_empty_distance_matrix(self, xp):
84
+ # Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
85
+ y = xp.zeros((0,))
86
+ assert_raises(ValueError, linkage, y)
87
+
88
+ @skip_xp_backends(cpu_only=True)
89
+ def test_linkage_tdist(self, xp):
90
+ for method in ['single', 'complete', 'average', 'weighted']:
91
+ self.check_linkage_tdist(method, xp)
92
+
93
+ def check_linkage_tdist(self, method, xp):
94
+ # Tests linkage(Y, method) on the tdist data set.
95
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), method)
96
+ expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
97
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10)
98
+
99
+ @skip_xp_backends(cpu_only=True)
100
+ def test_linkage_X(self, xp):
101
+ for method in ['centroid', 'median', 'ward']:
102
+ self.check_linkage_q(method, xp)
103
+
104
+ def check_linkage_q(self, method, xp):
105
+ # Tests linkage(Y, method) on the Q data set.
106
+ Z = linkage(xp.asarray(hierarchy_test_data.X), method)
107
+ expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
108
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
109
+
110
+ y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
111
+ metric="euclidean")
112
+ Z = linkage(xp.asarray(y), method)
113
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
114
+
115
+ @skip_xp_backends(cpu_only=True)
116
+ def test_compare_with_trivial(self, xp):
117
+ rng = np.random.RandomState(0)
118
+ n = 20
119
+ X = rng.rand(n, 2)
120
+ d = pdist(X)
121
+
122
+ for method, code in _LINKAGE_METHODS.items():
123
+ Z_trivial = _hierarchy.linkage(d, n, code)
124
+ Z = linkage(xp.asarray(d), method)
125
+ xp_assert_close(Z, xp.asarray(Z_trivial), rtol=1e-14, atol=1e-15)
126
+
127
+ @skip_xp_backends(cpu_only=True)
128
+ def test_optimal_leaf_ordering(self, xp):
129
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), optimal_ordering=True)
130
+ expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_single_olo')
131
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-10)
132
+
133
+
134
+ @skip_xp_backends(cpu_only=True)
135
+ class TestLinkageTies:
136
+
137
+ _expectations = {
138
+ 'single': np.array([[0, 1, 1.41421356, 2],
139
+ [2, 3, 1.41421356, 3]]),
140
+ 'complete': np.array([[0, 1, 1.41421356, 2],
141
+ [2, 3, 2.82842712, 3]]),
142
+ 'average': np.array([[0, 1, 1.41421356, 2],
143
+ [2, 3, 2.12132034, 3]]),
144
+ 'weighted': np.array([[0, 1, 1.41421356, 2],
145
+ [2, 3, 2.12132034, 3]]),
146
+ 'centroid': np.array([[0, 1, 1.41421356, 2],
147
+ [2, 3, 2.12132034, 3]]),
148
+ 'median': np.array([[0, 1, 1.41421356, 2],
149
+ [2, 3, 2.12132034, 3]]),
150
+ 'ward': np.array([[0, 1, 1.41421356, 2],
151
+ [2, 3, 2.44948974, 3]]),
152
+ }
153
+
154
+ def test_linkage_ties(self, xp):
155
+ for method in ['single', 'complete', 'average', 'weighted',
156
+ 'centroid', 'median', 'ward']:
157
+ self.check_linkage_ties(method, xp)
158
+
159
+ def check_linkage_ties(self, method, xp):
160
+ X = xp.asarray([[-1, -1], [0, 0], [1, 1]])
161
+ Z = linkage(X, method=method)
162
+ expectedZ = self._expectations[method]
163
+ xp_assert_close(Z, xp.asarray(expectedZ), atol=1e-06)
164
+
165
+
166
+ @skip_xp_backends(cpu_only=True)
167
+ class TestInconsistent:
168
+
169
+ def test_inconsistent_tdist(self, xp):
170
+ for depth in hierarchy_test_data.inconsistent_ytdist:
171
+ self.check_inconsistent_tdist(depth, xp)
172
+
173
+ def check_inconsistent_tdist(self, depth, xp):
174
+ Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
175
+ xp_assert_close(inconsistent(Z, depth),
176
+ xp.asarray(hierarchy_test_data.inconsistent_ytdist[depth]))
177
+
178
+
179
@skip_xp_backends(cpu_only=True)
class TestCopheneticDistance:
    """Tests for scipy.cluster.hierarchy.cophenet."""

    def test_linkage_cophenet_tdist_Z(self, xp):
        # cophenet(Z) alone returns the condensed cophenetic distances.
        expected = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
                               295, 138, 219, 295, 295])
        Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
        M = cophenet(Z)
        xp_assert_close(M, xp.asarray(expected, dtype=xp.float64), atol=1e-10)

    def test_linkage_cophenet_tdist_Z_Y(self, xp):
        # cophenet(Z, Y) additionally returns the cophenetic correlation c.
        Z = xp.asarray(hierarchy_test_data.linkage_ytdist_single)
        c, M = cophenet(Z, xp.asarray(hierarchy_test_data.ytdist))
        expected_M = xp.asarray([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
                                 295, 138, 219, 295, 295], dtype=xp.float64)
        expected_c = xp.asarray(0.639931296433393415057366837573,
                                dtype=xp.float64)[()]
        xp_assert_close(c, expected_c, atol=1e-10)
        xp_assert_close(M, expected_M, atol=1e-10)
199
+
200
+
201
class TestMLabLinkageConversion:
    """Round-trip tests for from_mlab_linkage / to_mlab_linkage."""

    def test_mlab_linkage_conversion_empty(self, xp):
        # An empty linkage converts to itself in both directions.
        empty = xp.asarray([], dtype=xp.float64)
        xp_assert_equal(from_mlab_linkage(empty), empty)
        xp_assert_equal(to_mlab_linkage(empty), empty)

    @skip_xp_backends(cpu_only=True)
    def test_mlab_linkage_conversion_single_row(self, xp):
        # One merge: the MATLAB form drops the cluster-size column and
        # uses 1-based cluster indices.
        Z = xp.asarray([[0., 1., 3., 2.]])
        Zm = xp.asarray([[1, 2, 3]])
        xp_assert_close(from_mlab_linkage(Zm), xp.asarray(Z, dtype=xp.float64),
                        rtol=1e-15)
        xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64),
                        rtol=1e-15)

    @skip_xp_backends(cpu_only=True)
    def test_mlab_linkage_conversion_multiple_rows(self, xp):
        # Five merges over six observations, checked in both directions.
        Zm = xp.asarray([[3, 6, 138], [4, 5, 219],
                         [1, 8, 255], [2, 9, 268], [7, 10, 295]])
        Z = xp.asarray([[2., 5., 138., 2.],
                        [3., 4., 219., 2.],
                        [0., 7., 255., 3.],
                        [1., 8., 268., 4.],
                        [6., 9., 295., 6.]],
                       dtype=xp.float64)
        xp_assert_close(from_mlab_linkage(Zm), Z, rtol=1e-15)
        xp_assert_close(to_mlab_linkage(Z), xp.asarray(Zm, dtype=xp.float64),
                        rtol=1e-15)
233
+
234
+
235
@skip_xp_backends(cpu_only=True)
class TestFcluster:
    """Tests for the fcluster / fclusterdata flat-clustering helpers."""

    def test_fclusterdata(self, xp):
        # Same criteria, same order as the original per-criterion loops.
        for criterion in ('inconsistent', 'distance', 'maxclust'):
            for t in getattr(hierarchy_test_data, 'fcluster_' + criterion):
                self.check_fclusterdata(t, criterion, xp)

    def check_fclusterdata(self, t, criterion, xp):
        # fclusterdata on a random 3-cluster data set; labellings need only
        # match up to relabelling, hence the is_isomorphic comparison.
        expected = xp.asarray(getattr(hierarchy_test_data,
                                      'fcluster_' + criterion)[t])
        X = xp.asarray(hierarchy_test_data.Q_X)
        T = fclusterdata(X, criterion=criterion, t=t)
        assert_(is_isomorphic(T, expected))

    def test_fcluster(self, xp):
        for criterion in ('inconsistent', 'distance', 'maxclust'):
            for t in getattr(hierarchy_test_data, 'fcluster_' + criterion):
                self.check_fcluster(t, criterion, xp)

    def check_fcluster(self, t, criterion, xp):
        # Same as check_fclusterdata but starting from a precomputed linkage.
        expected = xp.asarray(getattr(hierarchy_test_data,
                                      'fcluster_' + criterion)[t])
        Z = single(xp.asarray(hierarchy_test_data.Q_X))
        T = fcluster(Z, criterion=criterion, t=t)
        assert_(is_isomorphic(T, expected))

    def test_fcluster_monocrit(self, xp):
        for t in hierarchy_test_data.fcluster_distance:
            self.check_fcluster_monocrit(t, xp)
        for t in hierarchy_test_data.fcluster_maxclust:
            self.check_fcluster_maxclust_monocrit(t, xp)

    def check_fcluster_monocrit(self, t, xp):
        # 'monocrit' with maxdists(Z) should reproduce the 'distance' result.
        expected = xp.asarray(hierarchy_test_data.fcluster_distance[t])
        Z = single(xp.asarray(hierarchy_test_data.Q_X))
        T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
        assert_(is_isomorphic(T, expected))

    def check_fcluster_maxclust_monocrit(self, t, xp):
        # 'maxclust_monocrit' with maxdists(Z) should match 'maxclust'.
        expected = xp.asarray(hierarchy_test_data.fcluster_maxclust[t])
        Z = single(xp.asarray(hierarchy_test_data.Q_X))
        T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
        assert_(is_isomorphic(T, expected))
285
+
286
+
287
@skip_xp_backends(cpu_only=True)
class TestLeaders:
    """Tests for scipy.cluster.hierarchy.leaders."""

    def test_leaders_single(self, xp):
        # Leaders of a 3-cluster flat clustering produced by single linkage.
        distances = xp.asarray(pdist(hierarchy_test_data.Q_X))
        Z = linkage(distances)
        T = fcluster(Z, criterion='maxclust', t=3)
        T = xp.asarray(T, dtype=xp.int32)
        expected = (xp.asarray([53, 55, 56]), xp.asarray([2, 3, 1]))
        L = leaders(Z, T)
        assert_allclose(np.concatenate(L), np.concatenate(expected), rtol=1e-15)
301
+
302
+
303
@skip_xp_backends(np_only=True,
                  reasons=['`is_isomorphic` only supports NumPy backend'])
class TestIsIsomorphic:
    """Tests for scipy.cluster.hierarchy.is_isomorphic."""

    @skip_xp_backends(np_only=True,
                      reasons=['array-likes only supported for NumPy backend'])
    def test_array_like(self, xp):
        # Plain Python lists are accepted as inputs.
        assert is_isomorphic([1, 1, 1], [2, 2, 2])
        assert is_isomorphic([], [])

    def test_is_isomorphic_1(self, xp):
        # One flat cluster under two different labellings.
        left = xp.asarray([1, 1, 1])
        right = xp.asarray([2, 2, 2])
        assert is_isomorphic(left, right)
        assert is_isomorphic(right, left)

    def test_is_isomorphic_2(self, xp):
        # Two flat clusters under two different labellings.
        left = xp.asarray([1, 7, 1])
        right = xp.asarray([2, 3, 2])
        assert is_isomorphic(left, right)
        assert is_isomorphic(right, left)

    def test_is_isomorphic_3(self, xp):
        # Degenerate case: no observations at all.
        assert is_isomorphic(xp.asarray([]), xp.asarray([]))

    def test_is_isomorphic_4A(self, xp):
        # Three flat clusters, relabelled -- still isomorphic.
        left = xp.asarray([1, 2, 3])
        right = xp.asarray([1, 3, 2])
        assert is_isomorphic(left, right)
        assert is_isomorphic(right, left)

    def test_is_isomorphic_4B(self, xp):
        # Three flat clusters with incompatible assignments -- not isomorphic.
        left = xp.asarray([1, 2, 3, 3])
        right = xp.asarray([1, 3, 2, 3])
        assert is_isomorphic(left, right) is False
        assert is_isomorphic(right, left) is False

    def test_is_isomorphic_4C(self, xp):
        # Three flat clusters with arbitrary label values -- isomorphic.
        left = xp.asarray([7, 2, 3])
        right = xp.asarray([6, 3, 2])
        assert is_isomorphic(left, right)
        assert is_isomorphic(right, left)

    def test_is_isomorphic_5(self, xp):
        # Random labellings of 1000 observations into 2/3/5 clusters,
        # compared against a permuted copy of themselves.
        for nclusters in (2, 3, 5):
            self.help_is_isomorphic_randperm(1000, nclusters, xp=xp)

    def test_is_isomorphic_6(self, xp):
        # As test 5, but with a handful of labels corrupted so the two
        # labellings are no longer isomorphic.
        for nclusters in (2, 3, 5):
            self.help_is_isomorphic_randperm(1000, nclusters, True, 5, xp=xp)

    def test_is_isomorphic_7(self, xp):
        # Regression test for gh-6271: differing numbers of clusters.
        assert not is_isomorphic(xp.asarray([1, 2, 3]), xp.asarray([1, 1, 1]))

    def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0,
                                    *, xp):
        # Build a random labelling `a` and a relabelled copy `b`; when
        # `noniso` is set, corrupt `nerrors` entries of `b` to break the
        # isomorphism. Repeat three times with fresh random draws.
        for _ in range(3):
            a = (np.random.rand(nobs) * nclusters).astype(int)
            relabel = np.random.permutation(nclusters)
            b = relabel[a]
            if noniso:
                corrupt = np.random.permutation(nobs)[0:nerrors]
                b[corrupt] += 1
                b[corrupt] %= nclusters
            a = xp.asarray(a)
            b = xp.asarray(b)
            assert is_isomorphic(a, b) == (not noniso)
            assert is_isomorphic(b, a) == (not noniso)
392
+
393
+
394
@skip_xp_backends(cpu_only=True)
class TestIsValidLinkage:
    """Tests for scipy.cluster.hierarchy.is_valid_linkage."""

    def test_is_valid_linkage_various_size(self, xp):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            self.check_is_valid_linkage_various_size(nrow, ncol, valid, xp)

    def check_is_valid_linkage_various_size(self, nrow, ncol, valid, xp):
        # Slice a known-good 2x5 matrix down to various shapes; only
        # (n, 4)-shaped slices are valid linkage matrices.
        Z = xp.asarray([[0, 1, 3.0, 2, 5],
                        [3, 2, 4.0, 3, 3]], dtype=xp.float64)
        Z = Z[:nrow, :ncol]
        assert_(is_valid_linkage(Z) == valid)
        if not valid:
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_int_type(self, xp):
        # Integer dtype is rejected: linkage matrices must be floating point.
        Z = xp.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=xp.int64)
        assert_(is_valid_linkage(Z) is False)
        assert_raises(TypeError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_empty(self, xp):
        # A zero-row linkage is invalid.
        Z = xp.zeros((0, 4), dtype=xp.float64)
        assert_(is_valid_linkage(Z) is False)
        assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_4_and_up(self, xp):
        # Linkages computed from random problems of sizes 4..13 (step 3)
        # are all valid.
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            assert_(is_valid_linkage(linkage(y)) is True)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_is_valid_linkage_4_and_up_neg_index_left(self, xp):
        # A negative left cluster index invalidates the linkage.
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            Z = linkage(y)
            Z[n // 2, 0] = -2
            assert_(is_valid_linkage(Z) is False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_is_valid_linkage_4_and_up_neg_index_right(self, xp):
        # A negative right cluster index invalidates the linkage.
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            Z = linkage(y)
            Z[n // 2, 1] = -2
            assert_(is_valid_linkage(Z) is False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_is_valid_linkage_4_and_up_neg_dist(self, xp):
        # A negative merge distance invalidates the linkage.
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            Z = linkage(y)
            Z[n // 2, 2] = -0.5
            assert_(is_valid_linkage(Z) is False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_is_valid_linkage_4_and_up_neg_counts(self, xp):
        # A negative observation count invalidates the linkage.
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            Z = linkage(y)
            Z[n // 2, 3] = -2
            assert_(is_valid_linkage(Z) is False)
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
488
+
489
+
490
@skip_xp_backends(cpu_only=True)
class TestIsValidInconsistent:
    """Tests for scipy.cluster.hierarchy.is_valid_im."""

    def test_is_valid_im_int_type(self, xp):
        # Integer dtype is rejected: inconsistency matrices must be floats.
        R = xp.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=xp.int64)
        assert_(is_valid_im(R) is False)
        assert_raises(TypeError, is_valid_im, R, throw=True)

    def test_is_valid_im_various_size(self, xp):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            self.check_is_valid_im_various_size(nrow, ncol, valid, xp)

    def check_is_valid_im_various_size(self, nrow, ncol, valid, xp):
        # Slice a known-good 2x5 matrix; only (n, 4) slices are valid.
        R = xp.asarray([[0, 1, 3.0, 2, 5],
                        [3, 2, 4.0, 3, 3]], dtype=xp.float64)
        R = R[:nrow, :ncol]
        assert_(is_valid_im(R) == valid)
        if not valid:
            assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_empty(self, xp):
        # A zero-row inconsistency matrix is invalid.
        R = xp.zeros((0, 4), dtype=xp.float64)
        assert_(is_valid_im(R) is False)
        assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_4_and_up(self, xp):
        # Matrices computed from random problems of sizes 4..13 are valid.
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            assert_(is_valid_im(inconsistent(linkage(y))) is True)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_is_valid_im_4_and_up_neg_index_left(self, xp):
        # A negative link-height mean invalidates the matrix.
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            R = inconsistent(linkage(y))
            R[n // 2, 0] = -2.0
            assert_(is_valid_im(R) is False)
            assert_raises(ValueError, is_valid_im, R, throw=True)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_is_valid_im_4_and_up_neg_index_right(self, xp):
        # A negative link-height standard deviation invalidates the matrix.
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            R = inconsistent(linkage(y))
            R[n // 2, 1] = -2.0
            assert_(is_valid_im(R) is False)
            assert_raises(ValueError, is_valid_im, R, throw=True)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_is_valid_im_4_and_up_neg_dist(self, xp):
        # A negative link count invalidates the matrix.
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            R = inconsistent(linkage(y))
            R[n // 2, 2] = -0.5
            assert_(is_valid_im(R) is False)
            assert_raises(ValueError, is_valid_im, R, throw=True)
574
+
575
+
576
class TestNumObsLinkage:
    """Tests for scipy.cluster.hierarchy.num_obs_linkage."""

    @skip_xp_backends(cpu_only=True)
    def test_num_obs_linkage_empty(self, xp):
        # An empty linkage has no well-defined observation count: raises.
        Z = xp.zeros((0, 4), dtype=xp.float64)
        assert_raises(ValueError, num_obs_linkage, Z)

    def test_num_obs_linkage_1x4(self, xp):
        # One merge row corresponds to two original observations.
        Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64)
        assert_equal(num_obs_linkage(Z), 2)

    def test_num_obs_linkage_2x4(self, xp):
        # Two merge rows correspond to three original observations.
        Z = xp.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=xp.float64)
        assert_equal(num_obs_linkage(Z), 3)

    @skip_xp_backends(cpu_only=True)
    def test_num_obs_linkage_4_and_up(self, xp):
        # The observation count is recovered for random problems of
        # sizes 4..13 (step 3).
        for n in range(4, 15, 3):
            y = xp.asarray(np.random.rand(n * (n - 1) // 2))
            assert_equal(num_obs_linkage(linkage(y)), n)
604
+
605
+
606
@skip_xp_backends(cpu_only=True)
class TestLeavesList:
    """Tests for leaves_list and ClusterNode.pre_order."""

    def test_leaves_list_1x4(self, xp):
        # Two leaves, returned in index order.
        Z = xp.asarray([[0, 1, 3.0, 2]], dtype=xp.float64)
        to_tree(Z)
        assert_allclose(leaves_list(Z), [0, 1], rtol=1e-15)

    def test_leaves_list_2x4(self, xp):
        # Three leaves, returned in index order.
        Z = xp.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=xp.float64)
        to_tree(Z)
        assert_allclose(leaves_list(Z), [0, 1, 2], rtol=1e-15)

    def test_leaves_list_Q(self, xp):
        for method in ('single', 'complete', 'average', 'weighted', 'centroid',
                       'median', 'ward'):
            self.check_leaves_list_Q(method, xp)

    def check_leaves_list_Q(self, method, xp):
        # leaves_list must agree with a pre-order traversal of the tree.
        X = xp.asarray(hierarchy_test_data.Q_X)
        Z = linkage(X, method)
        root = to_tree(Z)
        assert_allclose(root.pre_order(), leaves_list(Z), rtol=1e-15)

    def test_Q_subtree_pre_order(self, xp):
        # pre_order() also works on sub-trees: the root's traversal is the
        # concatenation of its children's traversals.
        X = xp.asarray(hierarchy_test_data.Q_X)
        root = to_tree(linkage(X, 'single'))
        assert_allclose(root.pre_order(),
                        root.get_left().pre_order()
                        + root.get_right().pre_order(),
                        rtol=1e-15)
642
+
643
+
644
@skip_xp_backends(cpu_only=True)
class TestCorrespond:
    """Tests for scipy.cluster.hierarchy.correspond."""

    def test_correspond_empty(self, xp):
        # Empty linkage / empty condensed distance matrix: raises ValueError.
        y = xp.zeros((0,), dtype=xp.float64)
        Z = xp.zeros((0, 4), dtype=xp.float64)
        assert_raises(ValueError, correspond, Z, y)

    def test_correspond_2_and_up(self, xp):
        # A linkage always corresponds to the condensed distance matrix it
        # was computed from (sizes 2, 3 then 4..13 step 3, as before).
        for i in list(range(2, 4)) + list(range(4, 15, 3)):
            y = xp.asarray(np.random.rand(i * (i - 1) // 2))
            Z = linkage(y)
            assert_(correspond(Z, y))

    def test_correspond_4_and_up(self, xp):
        # Mismatched observation counts (differing by one, both orderings)
        # must not correspond.
        for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
                       list(zip(list(range(3, 5)), list(range(2, 4))))):
            y = xp.asarray(np.random.rand(i * (i - 1) // 2))
            y2 = xp.asarray(np.random.rand(j * (j - 1) // 2))
            Z = linkage(y)
            Z2 = linkage(y2)
            assert not correspond(Z, y2)
            assert not correspond(Z2, y)

    def test_correspond_4_and_up_2(self, xp):
        # Widely mismatched observation counts must not correspond.
        # Bug fix: the second operand of the concatenation previously
        # repeated the first zip verbatim, so the (large, small) pairings
        # were never generated; the ranges are now swapped, mirroring
        # test_correspond_4_and_up above.
        for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
                       list(zip(list(range(16, 21)), list(range(2, 7))))):
            y = xp.asarray(np.random.rand(i * (i - 1) // 2))
            y2 = xp.asarray(np.random.rand(j * (j - 1) // 2))
            Z = linkage(y)
            Z2 = linkage(y2)
            assert not correspond(Z, y2)
            assert not correspond(Z2, y)

    def test_num_obs_linkage_multi_matrix(self, xp):
        # num_obs_linkage recovers n for observation matrices of several sizes.
        for n in range(2, 10):
            Y = xp.asarray(pdist(np.random.rand(n, 4)))
            assert_equal(num_obs_linkage(linkage(Y)), n)
703
+
704
+
705
@skip_xp_backends(cpu_only=True)
class TestIsMonotonic:
    """Tests for scipy.cluster.hierarchy.is_monotonic."""

    def test_is_monotonic_empty(self, xp):
        # Monotonicity is undefined for an empty linkage: raises ValueError.
        Z = xp.zeros((0, 4), dtype=xp.float64)
        assert_raises(ValueError, is_monotonic, Z)

    def test_is_monotonic_1x4(self, xp):
        # A single merge is trivially monotonic.
        Z = xp.asarray([[0, 1, 0.3, 2]], dtype=xp.float64)
        assert is_monotonic(Z)

    def test_is_monotonic_2x4_T(self, xp):
        # Two merges with non-decreasing heights: monotonic.
        Z = xp.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 3]], dtype=xp.float64)
        assert is_monotonic(Z)

    def test_is_monotonic_2x4_F(self, xp):
        # Second merge lower than the first: not monotonic.
        Z = xp.asarray([[0, 1, 0.4, 2],
                        [2, 3, 0.3, 3]], dtype=xp.float64)
        assert not is_monotonic(Z)

    def test_is_monotonic_3x4_T(self, xp):
        # Three merges with increasing heights: monotonic.
        Z = xp.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.6, 4]], dtype=xp.float64)
        assert is_monotonic(Z)

    def test_is_monotonic_3x4_F1(self, xp):
        # Height dips at the second merge: not monotonic.
        Z = xp.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.2, 2],
                        [4, 5, 0.6, 4]], dtype=xp.float64)
        assert not is_monotonic(Z)

    def test_is_monotonic_3x4_F2(self, xp):
        # First merge higher than the final one: not monotonic.
        Z = xp.asarray([[0, 1, 0.8, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.6, 4]], dtype=xp.float64)
        assert not is_monotonic(Z)

    def test_is_monotonic_3x4_F3(self, xp):
        # Final merge lower than both earlier merges: not monotonic.
        Z = xp.asarray([[0, 1, 0.3, 2],
                        [2, 3, 0.4, 2],
                        [4, 5, 0.2, 4]], dtype=xp.float64)
        assert not is_monotonic(Z)

    def test_is_monotonic_tdist_linkage1(self, xp):
        # Single linkage output on the tdist data set is monotonic.
        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
        assert is_monotonic(Z)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_is_monotonic_tdist_linkage2(self, xp):
        # Zeroing one merge height breaks monotonicity.
        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
        Z[2, 2] = 0.0
        assert not is_monotonic(Z)

    def test_is_monotonic_Q_linkage(self, xp):
        # Single linkage on the Q data set is monotonic.
        Z = linkage(xp.asarray(hierarchy_test_data.Q_X), 'single')
        assert is_monotonic(Z)
780
+
781
+
782
@skip_xp_backends(cpu_only=True)
class TestMaxDists:
    """Tests for scipy.cluster.hierarchy.maxdists."""

    def test_maxdists_empty_linkage(self, xp):
        # An empty linkage has nothing to reduce over: raises ValueError.
        Z = xp.zeros((0, 4), dtype=xp.float64)
        assert_raises(ValueError, maxdists, Z)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_maxdists_one_cluster_linkage(self, xp):
        # Single-merge linkage must match the brute-force reference.
        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
        xp_assert_close(maxdists(Z), calculate_maximum_distances(Z, xp),
                        atol=1e-15)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_maxdists_Q_linkage(self, xp):
        for method in ('single', 'complete', 'ward', 'centroid', 'median'):
            self.check_maxdists_Q_linkage(method, xp)

    def check_maxdists_Q_linkage(self, method, xp):
        # maxdists on the Q data set matches the brute-force reference.
        Z = linkage(xp.asarray(hierarchy_test_data.Q_X), method)
        xp_assert_close(maxdists(Z), calculate_maximum_distances(Z, xp),
                        atol=1e-15)
814
+
815
+
816
class TestMaxInconsts:
    """Tests for scipy.cluster.hierarchy.maxinconsts."""

    @skip_xp_backends(cpu_only=True)
    def test_maxinconsts_empty_linkage(self, xp):
        # Empty linkage and inconsistency matrices: raises ValueError.
        Z = xp.zeros((0, 4), dtype=xp.float64)
        R = xp.zeros((0, 4), dtype=xp.float64)
        assert_raises(ValueError, maxinconsts, Z, R)

    def test_maxinconsts_difrow_linkage(self, xp):
        # Z and R with different numbers of rows: raises ValueError.
        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
        R = xp.asarray(np.random.rand(2, 4))
        assert_raises(ValueError, maxinconsts, Z, R)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_maxinconsts_one_cluster_linkage(self, xp):
        # Single-merge linkage matches the brute-force reference.
        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
        R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
        xp_assert_close(maxinconsts(Z, R),
                        calculate_maximum_inconsistencies(Z, R, xp=xp),
                        atol=1e-15)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_maxinconsts_Q_linkage(self, xp):
        for method in ('single', 'complete', 'ward', 'centroid', 'median'):
            self.check_maxinconsts_Q_linkage(method, xp)

    def check_maxinconsts_Q_linkage(self, method, xp):
        # maxinconsts on the Q data set matches the brute-force reference.
        Z = linkage(xp.asarray(hierarchy_test_data.Q_X), method)
        R = inconsistent(Z)
        xp_assert_close(maxinconsts(Z, R),
                        calculate_maximum_inconsistencies(Z, R, xp=xp),
                        atol=1e-15)
859
+
860
+
861
class TestMaxRStat:
    """Tests for scipy.cluster.hierarchy.maxRstat."""

    def test_maxRstat_invalid_index(self, xp):
        for i in [3.3, -1, 4]:
            self.check_maxRstat_invalid_index(i, xp)

    def check_maxRstat_invalid_index(self, i, xp):
        # Out-of-range integer indices raise ValueError; non-integer
        # indices raise TypeError.
        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
        R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
        if isinstance(i, int):
            assert_raises(ValueError, maxRstat, Z, R, i)
        else:
            assert_raises(TypeError, maxRstat, Z, R, i)

    @skip_xp_backends(cpu_only=True)
    def test_maxRstat_empty_linkage(self, xp):
        for i in range(4):
            self.check_maxRstat_empty_linkage(i, xp)

    def check_maxRstat_empty_linkage(self, i, xp):
        # Empty linkage raises ValueError for every column index.
        Z = xp.zeros((0, 4), dtype=xp.float64)
        R = xp.zeros((0, 4), dtype=xp.float64)
        assert_raises(ValueError, maxRstat, Z, R, i)

    def test_maxRstat_difrow_linkage(self, xp):
        for i in range(4):
            self.check_maxRstat_difrow_linkage(i, xp)

    def check_maxRstat_difrow_linkage(self, i, xp):
        # Z and R with different numbers of rows: raises ValueError.
        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
        R = xp.asarray(np.random.rand(2, 4))
        assert_raises(ValueError, maxRstat, Z, R, i)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_maxRstat_one_cluster_linkage(self, xp):
        for i in range(4):
            self.check_maxRstat_one_cluster_linkage(i, xp)

    def check_maxRstat_one_cluster_linkage(self, i, xp):
        # Single-merge linkage: maxRstat(Z, R, i) matches the reference.
        # Bug fix: the column index was previously hard-coded to 1, so the
        # caller's loop over i exercised only one column of R.
        Z = xp.asarray([[0, 1, 0.3, 4]], dtype=xp.float64)
        R = xp.asarray([[0, 0, 0, 0.3]], dtype=xp.float64)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i, xp)
        xp_assert_close(MD, expectedMD, atol=1e-15)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_maxRstat_Q_linkage(self, xp):
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            for i in range(4):
                self.check_maxRstat_Q_linkage(method, i, xp)

    def check_maxRstat_Q_linkage(self, method, i, xp):
        # Q data set: maxRstat(Z, R, i) matches the reference per column.
        # Bug fix: the column index was previously hard-coded to 1.
        X = xp.asarray(hierarchy_test_data.Q_X)
        Z = linkage(X, method)
        R = inconsistent(Z)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i, xp)
        xp_assert_close(MD, expectedMD, atol=1e-15)
930
+
931
+
932
+ @skip_xp_backends(cpu_only=True)
933
+ class TestDendrogram:
934
+
935
+ def test_dendrogram_single_linkage_tdist(self, xp):
936
+ # Tests dendrogram calculation on single linkage of the tdist data set.
937
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
938
+ R = dendrogram(Z, no_plot=True)
939
+ leaves = R["leaves"]
940
+ assert_equal(leaves, [2, 5, 1, 0, 3, 4])
941
+
942
+ def test_valid_orientation(self, xp):
943
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
944
+ assert_raises(ValueError, dendrogram, Z, orientation="foo")
945
+
946
+ def test_labels_as_array_or_list(self, xp):
947
+ # test for gh-12418
948
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
949
+ labels = [1, 3, 2, 6, 4, 5]
950
+ result1 = dendrogram(Z, labels=xp.asarray(labels), no_plot=True)
951
+ result2 = dendrogram(Z, labels=labels, no_plot=True)
952
+ assert result1 == result2
953
+
954
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
955
+ def test_valid_label_size(self, xp):
956
+ link = xp.asarray([
957
+ [0, 1, 1.0, 4],
958
+ [2, 3, 1.0, 5],
959
+ [4, 5, 2.0, 6],
960
+ ])
961
+ plt.figure()
962
+ with pytest.raises(ValueError) as exc_info:
963
+ dendrogram(link, labels=list(range(100)))
964
+ assert "Dimensions of Z and labels must be consistent."\
965
+ in str(exc_info.value)
966
+
967
+ with pytest.raises(
968
+ ValueError,
969
+ match="Dimensions of Z and labels must be consistent."):
970
+ dendrogram(link, labels=[])
971
+
972
+ plt.close()
973
+
974
+ @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
975
+ def test_dendrogram_plot(self, xp):
976
+ for orientation in ['top', 'bottom', 'left', 'right']:
977
+ self.check_dendrogram_plot(orientation, xp)
978
+
979
    def check_dendrogram_plot(self, orientation, xp):
        # Tests dendrogram plotting.
        # `expected` is the full dendrogram description (coordinates, leaf
        # order, colours) for single linkage of the tdist data set; the same
        # dict must come back regardless of how the plot is drawn.
        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
        expected = {'color_list': ['C1', 'C0', 'C0', 'C0', 'C0'],
                    'dcoord': [[0.0, 138.0, 138.0, 0.0],
                               [0.0, 219.0, 219.0, 0.0],
                               [0.0, 255.0, 255.0, 219.0],
                               [0.0, 268.0, 268.0, 255.0],
                               [138.0, 295.0, 295.0, 268.0]],
                    'icoord': [[5.0, 5.0, 15.0, 15.0],
                               [45.0, 45.0, 55.0, 55.0],
                               [35.0, 35.0, 50.0, 50.0],
                               [25.0, 25.0, 42.5, 42.5],
                               [10.0, 10.0, 33.75, 33.75]],
                    'ivl': ['2', '5', '1', '0', '3', '4'],
                    'leaves': [2, 5, 1, 0, 3, 4],
                    'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0', 'C0'],
                    }

        fig = plt.figure()
        ax = fig.add_subplot(221)

        # test that dendrogram accepts ax keyword
        R1 = dendrogram(Z, ax=ax, orientation=orientation)
        # normalise dcoord to an ndarray before comparing with `expected`
        R1['dcoord'] = np.asarray(R1['dcoord'])
        assert_equal(R1, expected)

        # test that dendrogram accepts and handle the leaf_font_size and
        # leaf_rotation keywords
        dendrogram(Z, ax=ax, orientation=orientation,
                   leaf_font_size=20, leaf_rotation=90)
        # tick labels live on the x axis for vertical orientations and on
        # the y axis for horizontal ones
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_rotation(), 90)
        assert_equal(testlabel.get_size(), 20)
        dendrogram(Z, ax=ax, orientation=orientation,
                   leaf_rotation=90)
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_rotation(), 90)
        dendrogram(Z, ax=ax, orientation=orientation,
                   leaf_font_size=20)
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_size(), 20)
        plt.close()

        # test plotting to gca (will import pylab)
        R2 = dendrogram(Z, orientation=orientation)
        plt.close()
        R2['dcoord'] = np.asarray(R2['dcoord'])
        assert_equal(R2, expected)
1040
+
1041
    @pytest.mark.skipif(not have_matplotlib, reason="no matplotlib")
    def test_dendrogram_truncate_mode(self, xp):
        # Truncated dendrograms ('lastp' and 'mtica') must collapse the
        # expected subtrees and report collapsed leaves as '(count)' labels.
        Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')

        # 'lastp' with p=2: only the last two merged clusters remain.
        R = dendrogram(Z, 2, 'lastp', show_contracted=True)
        plt.close()
        R['dcoord'] = np.asarray(R['dcoord'])
        assert_equal(R, {'color_list': ['C0'],
                         'dcoord': [[0.0, 295.0, 295.0, 0.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0]],
                         'ivl': ['(2)', '(4)'],
                         'leaves': [6, 9],
                         'leaves_color_list': ['C0', 'C0'],
                         })

        # 'mtica' keeps individual leaves but contracts one subtree of 2.
        R = dendrogram(Z, 2, 'mtica', show_contracted=True)
        plt.close()
        R['dcoord'] = np.asarray(R['dcoord'])
        assert_equal(R, {'color_list': ['C1', 'C0', 'C0', 'C0'],
                         'dcoord': [[0.0, 138.0, 138.0, 0.0],
                                    [0.0, 255.0, 255.0, 0.0],
                                    [0.0, 268.0, 268.0, 255.0],
                                    [138.0, 295.0, 295.0, 268.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0],
                                    [35.0, 35.0, 45.0, 45.0],
                                    [25.0, 25.0, 40.0, 40.0],
                                    [10.0, 10.0, 32.5, 32.5]],
                         'ivl': ['2', '5', '1', '0', '(2)'],
                         'leaves': [2, 5, 1, 0, 7],
                         'leaves_color_list': ['C1', 'C1', 'C0', 'C0', 'C0'],
                         })
1072
+
1073
+ def test_dendrogram_colors(self, xp):
1074
+ # Tests dendrogram plots with alternate colors
1075
+ Z = linkage(xp.asarray(hierarchy_test_data.ytdist), 'single')
1076
+
1077
+ set_link_color_palette(['c', 'm', 'y', 'k'])
1078
+ R = dendrogram(Z, no_plot=True,
1079
+ above_threshold_color='g', color_threshold=250)
1080
+ set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
1081
+
1082
+ color_list = R['color_list']
1083
+ assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
1084
+
1085
+ # reset color palette (global list)
1086
+ set_link_color_palette(None)
1087
+
1088
+ def test_dendrogram_leaf_colors_zero_dist(self, xp):
1089
+ # tests that the colors of leafs are correct for tree
1090
+ # with two identical points
1091
+ x = xp.asarray([[1, 0, 0],
1092
+ [0, 0, 1],
1093
+ [0, 2, 0],
1094
+ [0, 0, 1],
1095
+ [0, 1, 0],
1096
+ [0, 1, 0]])
1097
+ z = linkage(x, "single")
1098
+ d = dendrogram(z, no_plot=True)
1099
+ exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
1100
+ colors = d["leaves_color_list"]
1101
+ assert_equal(colors, exp_colors)
1102
+
1103
+ def test_dendrogram_leaf_colors(self, xp):
1104
+ # tests that the colors are correct for a tree
1105
+ # with two near points ((0, 0, 1.1) and (0, 0, 1))
1106
+ x = xp.asarray([[1, 0, 0],
1107
+ [0, 0, 1.1],
1108
+ [0, 2, 0],
1109
+ [0, 0, 1],
1110
+ [0, 1, 0],
1111
+ [0, 1, 0]])
1112
+ z = linkage(x, "single")
1113
+ d = dendrogram(z, no_plot=True)
1114
+ exp_colors = ['C0', 'C1', 'C1', 'C0', 'C2', 'C2']
1115
+ colors = d["leaves_color_list"]
1116
+ assert_equal(colors, exp_colors)
1117
+
1118
+
1119
def calculate_maximum_distances(Z, xp):
    """Reference implementation used to check ``maxdists``.

    For each non-singleton cluster ``i`` of linkage matrix ``Z``, compute
    the maximum linkage distance over that cluster and all its descendant
    clusters.
    """
    n = Z.shape[0] + 1
    maxima = xp.zeros((n - 1,), dtype=Z.dtype)
    candidates = xp.zeros((3,))
    for i in range(n - 1):
        candidates[:] = 0.0
        left, right = Z[i, 0], Z[i, 1]
        # Children >= n are previously formed clusters; fold in their maxima.
        if left >= n:
            candidates[0] = maxima[xp.asarray(left, dtype=xp.int64) - n]
        if right >= n:
            candidates[1] = maxima[xp.asarray(right, dtype=xp.int64) - n]
        candidates[2] = Z[i, 2]
        maxima[i] = xp.max(candidates)
    return maxima
1135
+
1136
+
1137
def calculate_maximum_inconsistencies(Z, R, k=3, xp=np):
    """Reference implementation used to check ``maxinconsts``.

    For each non-singleton cluster ``i``, compute the maximum of column
    ``k`` of the inconsistency matrix ``R`` over that cluster and all of
    its descendant clusters.
    """
    n = Z.shape[0] + 1
    # Result dtype follows promotion of the two input matrices.
    dtype = xp.result_type(Z, R)
    maxima = xp.zeros((n - 1,), dtype=dtype)
    candidates = xp.zeros((3,))
    for i in range(n - 1):
        candidates[:] = 0.0
        left, right = Z[i, 0], Z[i, 1]
        # Children >= n are previously formed clusters; fold in their maxima.
        if left >= n:
            candidates[0] = maxima[xp.asarray(left, dtype=xp.int64) - n]
        if right >= n:
            candidates[1] = maxima[xp.asarray(right, dtype=xp.int64) - n]
        candidates[2] = R[i, k]
        maxima[i] = xp.max(candidates)
    return maxima
1154
+
1155
+
1156
@skip_xp_backends(cpu_only=True)
def test_unsupported_uncondensed_distance_matrix_linkage_warning(xp):
    # Passing a square (uncondensed) distance matrix must raise a
    # ClusterWarning.
    with assert_warns(ClusterWarning):
        linkage(xp.asarray([[0, 1], [1, 0]]))
1159
+
1160
+
1161
def test_euclidean_linkage_value_error(xp):
    # Euclidean-only linkage methods must reject any non-euclidean metric.
    for method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
        with assert_raises(ValueError):
            linkage(xp.asarray([[1, 1], [1, 1]]),
                    method=method, metric='cityblock')
1165
+
1166
+
1167
@skip_xp_backends(cpu_only=True)
def test_2x2_linkage(xp):
    # A single condensed distance and the equivalent 2x2 observation
    # matrix must produce the same linkage result.
    from_distance = linkage(xp.asarray([1]),
                            method='single', metric='euclidean')
    from_observations = linkage(xp.asarray([[0, 1], [0, 0]]),
                                method='single', metric='euclidean')
    xp_assert_close(from_distance, from_observations, rtol=1e-15)
1172
+
1173
+
1174
@skip_xp_backends(cpu_only=True)
def test_node_compare(xp):
    # ClusterNode comparison operators on a Ward tree.
    np.random.seed(23)
    observations = xp.asarray(np.random.randn(50, 4))
    tree = to_tree(scipy.cluster.hierarchy.ward(observations))
    assert_(tree > tree.get_left())
    assert_(tree.get_right() > tree.get_left())
    assert_(tree.get_right() == tree.get_right())
    assert_(tree.get_right() != tree.get_left())
1186
+
1187
+
1188
@skip_xp_backends(np_only=True, reasons=['`cut_tree` uses non-standard indexing'])
def test_cut_tree(xp):
    # Validate cut_tree() against a full cut of a Ward linkage: columns of
    # the all-levels cut must match the per-``n_clusters`` and
    # per-``height`` cuts, in both argument orderings.
    np.random.seed(23)
    nobs = 50
    X = np.random.randn(nobs, 4)
    X = xp.asarray(X)
    Z = scipy.cluster.hierarchy.ward(X)
    cutree = cut_tree(Z)

    # cutree.dtype varies between int32 and int64 over platforms
    # First column: every observation is its own cluster; last: one cluster.
    xp_assert_close(cutree[:, 0], xp.arange(nobs), rtol=1e-15, check_dtype=False)
    xp_assert_close(cutree[:, -1], xp.zeros(nobs), rtol=1e-15, check_dtype=False)
    assert_equal(np.asarray(cutree).max(0), np.arange(nobs - 1, -1, -1))

    # Selecting by cluster count must match the corresponding column(s).
    xp_assert_close(cutree[:, [-5]], cut_tree(Z, n_clusters=5), rtol=1e-15)
    xp_assert_close(cutree[:, [-5, -10]], cut_tree(Z, n_clusters=[5, 10]), rtol=1e-15)
    xp_assert_close(cutree[:, [-10, -5]], cut_tree(Z, n_clusters=[10, 5]), rtol=1e-15)

    nodes = _order_cluster_tree(Z)
    heights = xp.asarray([node.dist for node in nodes])

    # Selecting by merge height must match the column located by bisecting
    # the sorted node heights.
    xp_assert_close(cutree[:, np.searchsorted(heights, [5])],
                    cut_tree(Z, height=5), rtol=1e-15)
    xp_assert_close(cutree[:, np.searchsorted(heights, [5, 10])],
                    cut_tree(Z, height=[5, 10]), rtol=1e-15)
    xp_assert_close(cutree[:, np.searchsorted(heights, [10, 5])],
                    cut_tree(Z, height=[10, 5]), rtol=1e-15)
1215
+
1216
+
1217
@skip_xp_backends(cpu_only=True)
def test_optimal_leaf_ordering(xp):
    # Ordering computed from a condensed distance vector ...
    y = xp.asarray(hierarchy_test_data.ytdist)
    Z = optimal_leaf_ordering(linkage(y), y)
    xp_assert_close(Z,
                    xp.asarray(hierarchy_test_data.linkage_ytdist_single_olo),
                    atol=1e-10)

    # ... and from a raw observation matrix with Ward linkage.
    observations = xp.asarray(hierarchy_test_data.X)
    Z = optimal_leaf_ordering(linkage(observations, 'ward'), observations)
    xp_assert_close(Z,
                    xp.asarray(hierarchy_test_data.linkage_X_ward_olo),
                    atol=1e-06)
1230
+
1231
+
1232
@skip_xp_backends(np_only=True, reasons=['`Heap` only supports NumPy backend'])
def test_Heap(xp):
    # Exercise the internal Heap used by linkage: minimum extraction,
    # removal, and re-prioritisation via change_value.
    values = xp.asarray([2, -1, 0, -1.5, 3])
    heap = Heap(values)

    # Initial minimum is -1.5, stored at key (index) 3.
    pair = heap.get_min()
    assert_equal(pair['key'], 3)
    assert_equal(pair['value'], -1.5)

    # After removing it, -1 at key 1 becomes the minimum.
    heap.remove_min()
    pair = heap.get_min()
    assert_equal(pair['key'], 1)
    assert_equal(pair['value'], -1)

    # Raising key 1 to 2.5 promotes 0 at key 2 to the minimum.
    heap.change_value(1, 2.5)
    pair = heap.get_min()
    assert_equal(pair['key'], 2)
    assert_equal(pair['value'], 0)

    heap.remove_min()
    heap.remove_min()

    # Only keys 1 and 4 remain; raising key 1 leaves key 4 (value 3) on top.
    heap.change_value(1, 10)
    pair = heap.get_min()
    assert_equal(pair['key'], 4)
    assert_equal(pair['value'], 3)

    # Final element is key 1 with its updated value.
    heap.remove_min()
    pair = heap.get_min()
    assert_equal(pair['key'], 1)
    assert_equal(pair['value'], 10)
parrot/lib/python3.10/site-packages/scipy/cluster/tests/test_vq.py ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ import sys
3
+ from copy import deepcopy
4
+
5
+ import numpy as np
6
+ from numpy.testing import (
7
+ assert_array_equal, assert_equal, assert_, suppress_warnings
8
+ )
9
+ import pytest
10
+ from pytest import raises as assert_raises
11
+
12
+ from scipy.cluster.vq import (kmeans, kmeans2, py_vq, vq, whiten,
13
+ ClusterError, _krandinit)
14
+ from scipy.cluster import _vq
15
+ from scipy.conftest import array_api_compatible
16
+ from scipy.sparse._sputils import matrix
17
+
18
+ from scipy._lib._array_api import (
19
+ SCIPY_ARRAY_API, copy, cov, xp_assert_close, xp_assert_equal
20
+ )
21
+
22
+ pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")]
23
+ skip_xp_backends = pytest.mark.skip_xp_backends
24
+
25
# 200 two-dimensional observations used as a shared fixture by the
# kmeans/kmeans2 tests below.
TESTDATA_2D = np.array([
    -2.2, 1.17, -1.63, 1.69, -2.04, 4.38, -3.09, 0.95, -1.7, 4.79, -1.68, 0.68,
    -2.26, 3.34, -2.29, 2.55, -1.72, -0.72, -1.99, 2.34, -2.75, 3.43, -2.45,
    2.41, -4.26, 3.65, -1.57, 1.87, -1.96, 4.03, -3.01, 3.86, -2.53, 1.28,
    -4.0, 3.95, -1.62, 1.25, -3.42, 3.17, -1.17, 0.12, -3.03, -0.27, -2.07,
    -0.55, -1.17, 1.34, -2.82, 3.08, -2.44, 0.24, -1.71, 2.48, -5.23, 4.29,
    -2.08, 3.69, -1.89, 3.62, -2.09, 0.26, -0.92, 1.07, -2.25, 0.88, -2.25,
    2.02, -4.31, 3.86, -2.03, 3.42, -2.76, 0.3, -2.48, -0.29, -3.42, 3.21,
    -2.3, 1.73, -2.84, 0.69, -1.81, 2.48, -5.24, 4.52, -2.8, 1.31, -1.67,
    -2.34, -1.18, 2.17, -2.17, 2.82, -1.85, 2.25, -2.45, 1.86, -6.79, 3.94,
    -2.33, 1.89, -1.55, 2.08, -1.36, 0.93, -2.51, 2.74, -2.39, 3.92, -3.33,
    2.99, -2.06, -0.9, -2.83, 3.35, -2.59, 3.05, -2.36, 1.85, -1.69, 1.8,
    -1.39, 0.66, -2.06, 0.38, -1.47, 0.44, -4.68, 3.77, -5.58, 3.44, -2.29,
    2.24, -1.04, -0.38, -1.85, 4.23, -2.88, 0.73, -2.59, 1.39, -1.34, 1.75,
    -1.95, 1.3, -2.45, 3.09, -1.99, 3.41, -5.55, 5.21, -1.73, 2.52, -2.17,
    0.85, -2.06, 0.49, -2.54, 2.07, -2.03, 1.3, -3.23, 3.09, -1.55, 1.44,
    -0.81, 1.1, -2.99, 2.92, -1.59, 2.18, -2.45, -0.73, -3.12, -1.3, -2.83,
    0.2, -2.77, 3.24, -1.98, 1.6, -4.59, 3.39, -4.85, 3.75, -2.25, 1.71, -3.28,
    3.38, -1.74, 0.88, -2.41, 1.92, -2.24, 1.19, -2.48, 1.06, -1.68, -0.62,
    -1.3, 0.39, -1.78, 2.35, -3.54, 2.44, -1.32, 0.66, -2.38, 2.76, -2.35,
    3.95, -1.86, 4.32, -2.01, -1.23, -1.79, 2.76, -2.13, -0.13, -5.25, 3.84,
    -2.24, 1.59, -4.85, 2.96, -2.41, 0.01, -0.43, 0.13, -3.92, 2.91, -1.75,
    -0.53, -1.69, 1.69, -1.09, 0.15, -2.11, 2.17, -1.53, 1.22, -2.1, -0.86,
    -2.56, 2.28, -3.02, 3.33, -1.12, 3.86, -2.18, -1.19, -3.03, 0.79, -0.83,
    0.97, -3.19, 1.45, -1.34, 1.28, -2.52, 4.22, -4.53, 3.22, -1.97, 1.75,
    -2.36, 3.19, -0.83, 1.53, -1.59, 1.86, -2.17, 2.3, -1.63, 2.71, -2.03,
    3.75, -2.57, -0.6, -1.47, 1.33, -1.95, 0.7, -1.65, 1.27, -1.42, 1.09, -3.0,
    3.87, -2.51, 3.06, -2.6, 0.74, -1.08, -0.03, -2.44, 1.31, -2.65, 2.99,
    -1.84, 1.65, -4.76, 3.75, -2.07, 3.98, -2.4, 2.67, -2.21, 1.49, -1.21,
    1.22, -5.29, 2.38, -2.85, 2.28, -5.6, 3.78, -2.7, 0.8, -1.81, 3.5, -3.75,
    4.17, -1.29, 2.99, -5.92, 3.43, -1.83, 1.23, -1.24, -1.04, -2.56, 2.37,
    -3.26, 0.39, -4.63, 2.51, -4.52, 3.04, -1.7, 0.36, -1.41, 0.04, -2.1, 1.0,
    -1.87, 3.78, -4.32, 3.59, -2.24, 1.38, -1.99, -0.22, -1.87, 1.95, -0.84,
    2.17, -5.38, 3.56, -1.27, 2.9, -1.79, 3.31, -5.47, 3.85, -1.44, 3.69,
    -2.02, 0.37, -1.29, 0.33, -2.34, 2.56, -1.74, -1.27, -1.97, 1.22, -2.51,
    -0.16, -1.64, -0.96, -2.99, 1.4, -1.53, 3.31, -2.24, 0.45, -2.46, 1.71,
    -2.88, 1.56, -1.63, 1.46, -1.41, 0.68, -1.96, 2.76, -1.61,
    2.11]).reshape((200, 2))


# Global data
# Small observation set with hand-computed k-means references.
X = np.array([[3.0, 3], [4, 3], [4, 2],
              [9, 2], [5, 1], [6, 2], [9, 4],
              [5, 2], [5, 4], [7, 4], [6, 5]])

# Expected centroids after one kmeans2 iteration starting at X[:3].
CODET1 = np.array([[3.0000, 3.0000],
                   [6.2000, 4.0000],
                   [5.8000, 1.8000]])

# Expected centroids after two iterations (also the kmeans fixed point).
CODET2 = np.array([[11.0/3, 8.0/3],
                   [6.7500, 4.2500],
                   [6.2500, 1.7500]])

# Expected cluster label for each row of X.
LABEL1 = np.array([0, 1, 2, 2, 2, 2, 1, 2, 1, 1, 1])
79
+
80
+
81
class TestWhiten:
    """Tests for `scipy.cluster.vq.whiten`."""

    def test_whiten(self, xp):
        # whiten() divides each column by its standard deviation.
        obs = xp.asarray([[0.98744510, 0.82766775],
                          [0.62093317, 0.19406729],
                          [0.87545741, 0.00735733],
                          [0.85124403, 0.26499712],
                          [0.45067590, 0.45464607]])
        expected = xp.asarray([[5.08738849, 2.97091878],
                               [3.19909255, 0.69660580],
                               [4.51041982, 0.02640918],
                               [4.38567074, 0.95120889],
                               [2.32191480, 1.63195503]])
        xp_assert_close(whiten(obs), expected, rtol=1e-5)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'])
    def test_whiten_zero_std(self, xp):
        # Zero-variance columns stay unscaled and emit one RuntimeWarning.
        obs = xp.asarray([[0., 1., 0.74109533],
                          [0., 1., 0.34243798],
                          [0., 1., 0.96785929]])
        expected = xp.asarray([[0., 1.0, 2.86666544],
                               [0., 1.0, 1.32460034],
                               [0., 1.0, 3.74382172]])
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')

            xp_assert_close(whiten(obs), expected, rtol=1e-5)

            assert_equal(len(caught), 1)
            assert_(issubclass(caught[-1].category, RuntimeWarning))

    def test_whiten_not_finite(self, xp):
        # Non-finite observations are rejected (check_finite defaults True).
        for bad_value in xp.nan, xp.inf, -xp.inf:
            obs = xp.asarray([[0.98744510, bad_value],
                              [0.62093317, 0.19406729],
                              [0.87545741, 0.00735733],
                              [0.85124403, 0.26499712],
                              [0.45067590, 0.45464607]])
            assert_raises(ValueError, whiten, obs)

    @pytest.mark.skipif(SCIPY_ARRAY_API,
                        reason='`np.matrix` unsupported in array API mode')
    def test_whiten_not_finite_matrix(self, xp):
        # Same rejection on the legacy np.matrix code path.
        for bad_value in np.nan, np.inf, -np.inf:
            obs = matrix([[0.98744510, bad_value],
                          [0.62093317, 0.19406729],
                          [0.87545741, 0.00735733],
                          [0.85124403, 0.26499712],
                          [0.45067590, 0.45464607]])
            assert_raises(ValueError, whiten, obs)
134
+
135
+
136
class TestVq:
    """Code-book assignment tests covering `py_vq`, the public `vq`
    wrapper, and the compiled `_vq` implementation."""

    @skip_xp_backends(cpu_only=True)
    def test_py_vq(self, xp):
        # Pure-Python vq reproduces the reference labels for the global
        # data set when the code book is seeded with its first three rows.
        initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
        # label1.dtype varies between int32 and int64 over platforms
        label1 = py_vq(xp.asarray(X), xp.asarray(initc))[0]
        xp_assert_equal(label1, xp.asarray(LABEL1, dtype=xp.int64),
                        check_dtype=False)

    @pytest.mark.skipif(SCIPY_ARRAY_API,
                        reason='`np.matrix` unsupported in array API mode')
    def test_py_vq_matrix(self, xp):
        # Same check on the legacy np.matrix code path.
        initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
        # label1.dtype varies between int32 and int64 over platforms
        label1 = py_vq(matrix(X), matrix(initc))[0]
        assert_array_equal(label1, LABEL1)

    @skip_xp_backends(np_only=True, reasons=['`_vq` only supports NumPy backend'])
    def test_vq(self, xp):
        # Compiled _vq.vq yields the reference labels; the public vq
        # wrapper runs without error on the same input.
        initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
        label1, _ = _vq.vq(xp.asarray(X), xp.asarray(initc))
        assert_array_equal(label1, LABEL1)
        _, _ = vq(xp.asarray(X), xp.asarray(initc))

    @pytest.mark.skipif(SCIPY_ARRAY_API,
                        reason='`np.matrix` unsupported in array API mode')
    def test_vq_matrix(self, xp):
        # np.matrix variant of test_vq.
        initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
        label1, _ = _vq.vq(matrix(X), matrix(initc))
        assert_array_equal(label1, LABEL1)
        _, _ = vq(matrix(X), matrix(initc))

    @skip_xp_backends(cpu_only=True)
    def test_vq_1d(self, xp):
        # Test special rank 1 vq algo, python implementation.
        data = X[:, 0]
        initc = data[:3]
        a, b = _vq.vq(data, initc)
        data = xp.asarray(data)
        initc = xp.asarray(initc)
        ta, tb = py_vq(data[:, np.newaxis], initc[:, np.newaxis])
        # ta.dtype varies between int32 and int64 over platforms
        xp_assert_equal(ta, xp.asarray(a, dtype=xp.int64), check_dtype=False)
        xp_assert_equal(tb, xp.asarray(b))

    @skip_xp_backends(np_only=True, reasons=['`_vq` only supports NumPy backend'])
    def test__vq_sametype(self, xp):
        # Observations and code book must share a floating dtype.
        a = xp.asarray([1.0, 2.0], dtype=xp.float64)
        b = a.astype(xp.float32)
        assert_raises(TypeError, _vq.vq, a, b)

    @skip_xp_backends(np_only=True, reasons=['`_vq` only supports NumPy backend'])
    def test__vq_invalid_type(self, xp):
        # Integer input is rejected by the compiled kernel.
        a = xp.asarray([1, 2], dtype=int)
        assert_raises(TypeError, _vq.vq, a, a)

    @skip_xp_backends(cpu_only=True)
    def test_vq_large_nfeat(self, xp):
        # C and Python implementations agree for wide feature vectors,
        # in both double and single precision.
        X = np.random.rand(20, 20)
        code_book = np.random.rand(3, 20)

        codes0, dis0 = _vq.vq(X, code_book)
        codes1, dis1 = py_vq(
            xp.asarray(X), xp.asarray(code_book)
        )
        xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
        # codes1.dtype varies between int32 and int64 over platforms
        xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)

        X = X.astype(np.float32)
        code_book = code_book.astype(np.float32)

        codes0, dis0 = _vq.vq(X, code_book)
        codes1, dis1 = py_vq(
            xp.asarray(X), xp.asarray(code_book)
        )
        xp_assert_close(dis1, xp.asarray(dis0, dtype=xp.float64), rtol=1e-5)
        # codes1.dtype varies between int32 and int64 over platforms
        xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)

    @skip_xp_backends(cpu_only=True)
    def test_vq_large_features(self, xp):
        # Large-magnitude features must not break the distance computation.
        X = np.random.rand(10, 5) * 1000000
        code_book = np.random.rand(2, 5) * 1000000

        codes0, dis0 = _vq.vq(X, code_book)
        codes1, dis1 = py_vq(
            xp.asarray(X), xp.asarray(code_book)
        )
        xp_assert_close(dis1, xp.asarray(dis0), rtol=1e-5)
        # codes1.dtype varies between int32 and int64 over platforms
        xp_assert_equal(codes1, xp.asarray(codes0, dtype=xp.int64), check_dtype=False)
229
+
230
+
231
# Whole class skipped on GPU for now;
# once pdist/cdist are hooked up for CuPy, more tests will work
@skip_xp_backends(cpu_only=True)
class TestKMean:
    """Behavioural and regression tests for `kmeans` and `kmeans2`.

    NOTE: several tests seed the global NumPy RNG immediately before the
    call under test, so the exact statement order is part of the test.
    """

    def test_large_features(self, xp):
        # Generate a data set with large values, and run kmeans on it to
        # (regression for 1077).
        d = 300
        n = 100

        m1 = np.random.randn(d)
        m2 = np.random.randn(d)
        x = 10000 * np.random.randn(n, d) - 20000 * m1
        y = 10000 * np.random.randn(n, d) + 20000 * m2

        data = np.empty((x.shape[0] + y.shape[0], d), np.float64)
        data[:x.shape[0]] = x
        data[x.shape[0]:] = y

        kmeans(xp.asarray(data), 2)

    def test_kmeans_simple(self, xp):
        # One iteration from a fixed initialisation reproduces CODET2.
        np.random.seed(54321)
        initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
        code1 = kmeans(xp.asarray(X), xp.asarray(initc), iter=1)[0]
        xp_assert_close(code1, xp.asarray(CODET2))

    @pytest.mark.skipif(SCIPY_ARRAY_API,
                        reason='`np.matrix` unsupported in array API mode')
    def test_kmeans_simple_matrix(self, xp):
        # np.matrix variant of test_kmeans_simple.
        np.random.seed(54321)
        initc = np.concatenate([[X[0]], [X[1]], [X[2]]])
        code1 = kmeans(matrix(X), matrix(initc), iter=1)[0]
        xp_assert_close(code1, CODET2)

    def test_kmeans_lost_cluster(self, xp):
        # This will cause kmeans to have a cluster with no points.
        data = xp.asarray(TESTDATA_2D)
        initk = xp.asarray([[-1.8127404, -0.67128041],
                            [2.04621601, 0.07401111],
                            [-2.31149087, -0.05160469]])

        kmeans(data, initk)
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       "One of the clusters is empty. Re-run kmeans with a "
                       "different initialization")
            kmeans2(data, initk, missing='warn')

        assert_raises(ClusterError, kmeans2, data, initk, missing='raise')

    def test_kmeans2_simple(self, xp):
        # kmeans2 reaches CODET1 after one iteration and CODET2 after two,
        # for plain arrays and (outside array-API mode) np.matrix.
        np.random.seed(12345678)
        initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
        arrays = [xp.asarray] if SCIPY_ARRAY_API else [np.asarray, matrix]
        for tp in arrays:
            code1 = kmeans2(tp(X), tp(initc), iter=1)[0]
            code2 = kmeans2(tp(X), tp(initc), iter=2)[0]

            xp_assert_close(code1, xp.asarray(CODET1))
            xp_assert_close(code2, xp.asarray(CODET2))

    @pytest.mark.skipif(SCIPY_ARRAY_API,
                        reason='`np.matrix` unsupported in array API mode')
    def test_kmeans2_simple_matrix(self, xp):
        # np.matrix variant of test_kmeans2_simple.
        np.random.seed(12345678)
        initc = xp.asarray(np.concatenate([[X[0]], [X[1]], [X[2]]]))
        code1 = kmeans2(matrix(X), matrix(initc), iter=1)[0]
        code2 = kmeans2(matrix(X), matrix(initc), iter=2)[0]

        xp_assert_close(code1, CODET1)
        xp_assert_close(code2, CODET2)

    def test_kmeans2_rank1(self, xp):
        # Smoke test: 1-D observations with an explicit initial code book.
        data = xp.asarray(TESTDATA_2D)
        data1 = data[:, 0]

        initc = data1[:3]
        code = copy(initc, xp=xp)
        kmeans2(data1, code, iter=1)[0]
        kmeans2(data1, code, iter=2)[0]

    def test_kmeans2_rank1_2(self, xp):
        # Smoke test: 1-D observations with an integer k.
        data = xp.asarray(TESTDATA_2D)
        data1 = data[:, 0]
        kmeans2(data1, 2, iter=1)

    def test_kmeans2_high_dim(self, xp):
        # test kmeans2 when the number of dimensions exceeds the number
        # of input points
        data = xp.asarray(TESTDATA_2D)
        data = xp.reshape(data, (20, 20))[:10, :]
        kmeans2(data, 2)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_kmeans2_init(self, xp):
        # Every initialisation method runs for both 2-D and 1-D data.
        np.random.seed(12345)
        data = xp.asarray(TESTDATA_2D)
        k = 3

        kmeans2(data, k, minit='points')
        kmeans2(data[:, 1], k, minit='points')  # special case (1-D)

        kmeans2(data, k, minit='++')
        kmeans2(data[:, 1], k, minit='++')  # special case (1-D)

        # minit='random' can give warnings, filter those
        with suppress_warnings() as sup:
            sup.filter(message="One of the clusters is empty. Re-run.")
            kmeans2(data, k, minit='random')
            kmeans2(data[:, 1], k, minit='random')  # special case (1-D)

    @pytest.mark.skipif(sys.platform == 'win32',
                        reason='Fails with MemoryError in Wine.')
    def test_krandinit(self, xp):
        # Random initialisation should roughly reproduce the data
        # covariance for a very large number of sampled centroids.
        data = xp.asarray(TESTDATA_2D)
        datas = [xp.reshape(data, (200, 2)),
                 xp.reshape(data, (20, 20))[:10, :]]
        k = int(1e6)
        for data in datas:
            rng = np.random.default_rng(1234)
            init = _krandinit(data, k, rng, xp)
            orig_cov = cov(data.T)
            init_cov = cov(init.T)
            xp_assert_close(orig_cov, init_cov, atol=1.1e-2)

    def test_kmeans2_empty(self, xp):
        # Regression test for gh-1032.
        assert_raises(ValueError, kmeans2, xp.asarray([]), 2)

    def test_kmeans_0k(self, xp):
        # Regression test for gh-1073: fail when k arg is 0.
        assert_raises(ValueError, kmeans, xp.asarray(X), 0)
        assert_raises(ValueError, kmeans2, xp.asarray(X), 0)
        assert_raises(ValueError, kmeans2, xp.asarray(X), xp.asarray([]))

    def test_kmeans_large_thres(self, xp):
        # Regression test for gh-1774
        x = xp.asarray([1, 2, 3, 4, 10], dtype=xp.float64)
        res = kmeans(x, 1, thresh=1e16)
        xp_assert_close(res[0], xp.asarray([4.], dtype=xp.float64))
        xp_assert_close(res[1], xp.asarray(2.3999999999999999, dtype=xp.float64)[()])

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_kmeans2_kpp_low_dim(self, xp):
        # Regression test for gh-11462
        prev_res = xp.asarray([[-1.95266667, 0.898],
                               [-3.153375, 3.3945]], dtype=xp.float64)
        np.random.seed(42)
        res, _ = kmeans2(xp.asarray(TESTDATA_2D), 2, minit='++')
        xp_assert_close(res, prev_res)

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_kmeans2_kpp_high_dim(self, xp):
        # Regression test for gh-11462
        n_dim = 100
        size = 10
        centers = np.vstack([5 * np.ones(n_dim),
                             -5 * np.ones(n_dim)])
        np.random.seed(42)
        data = np.vstack([
            np.random.multivariate_normal(centers[0], np.eye(n_dim), size=size),
            np.random.multivariate_normal(centers[1], np.eye(n_dim), size=size)
        ])

        data = xp.asarray(data)
        res, _ = kmeans2(data, 2, minit='++')
        xp_assert_equal(xp.sign(res), xp.sign(xp.asarray(centers)))

    def test_kmeans_diff_convergence(self, xp):
        # Regression test for gh-8727
        obs = xp.asarray([-3, -1, 0, 1, 1, 8], dtype=xp.float64)
        res = kmeans(obs, xp.asarray([-3., 0.99]))
        xp_assert_close(res[0], xp.asarray([-0.4, 8.], dtype=xp.float64))
        xp_assert_close(res[1], xp.asarray(1.0666666666666667, dtype=xp.float64)[()])

    @skip_xp_backends('jax.numpy',
                      reasons=['jax arrays do not support item assignment'],
                      cpu_only=True)
    def test_kmeans_and_kmeans2_random_seed(self, xp):
        # Identical seeds (int, RandomState, or Generator) must produce
        # identical results for every initialisation method.

        seed_list = [
            1234, np.random.RandomState(1234), np.random.default_rng(1234)
        ]

        for seed in seed_list:
            seed1 = deepcopy(seed)
            seed2 = deepcopy(seed)
            data = xp.asarray(TESTDATA_2D)
            # test for kmeans
            res1, _ = kmeans(data, 2, seed=seed1)
            res2, _ = kmeans(data, 2, seed=seed2)
            xp_assert_close(res1, res2)  # should be same results
            # test for kmeans2
            for minit in ["random", "points", "++"]:
                res1, _ = kmeans2(data, 2, minit=minit, seed=seed1)
                res2, _ = kmeans2(data, 2, minit=minit, seed=seed2)
                xp_assert_close(res1, res2)  # should be same results
parrot/lib/python3.10/site-packages/scipy/cluster/vq.py ADDED
@@ -0,0 +1,835 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ K-means clustering and vector quantization (:mod:`scipy.cluster.vq`)
3
+ ====================================================================
4
+
5
+ Provides routines for k-means clustering, generating code books
6
+ from k-means models and quantizing vectors by comparing them with
7
+ centroids in a code book.
8
+
9
+ .. autosummary::
10
+ :toctree: generated/
11
+
12
+ whiten -- Normalize a group of observations so each feature has unit variance
13
+ vq -- Calculate code book membership of a set of observation vectors
14
+ kmeans -- Perform k-means on a set of observation vectors forming k clusters
15
+ kmeans2 -- A different implementation of k-means with more methods
16
+ -- for initializing centroids
17
+
18
+ Background information
19
+ ----------------------
20
+ The k-means algorithm takes as input the number of clusters to
21
+ generate, k, and a set of observation vectors to cluster. It
22
+ returns a set of centroids, one for each of the k clusters. An
23
+ observation vector is classified with the cluster number or
24
+ centroid index of the centroid closest to it.
25
+
26
+ A vector v belongs to cluster i if it is closer to centroid i than
27
+ any other centroid. If v belongs to i, we say centroid i is the
28
+ dominating centroid of v. The k-means algorithm tries to
29
+ minimize distortion, which is defined as the sum of the squared distances
30
+ between each observation vector and its dominating centroid.
31
+ The minimization is achieved by iteratively reclassifying
32
+ the observations into clusters and recalculating the centroids until
33
+ a configuration is reached in which the centroids are stable. One can
34
+ also define a maximum number of iterations.
35
+
36
+ Since vector quantization is a natural application for k-means,
37
+ information theory terminology is often used. The centroid index
38
+ or cluster index is also referred to as a "code" and the table
39
+ mapping codes to centroids and, vice versa, is often referred to as a
40
+ "code book". The result of k-means, a set of centroids, can be
41
+ used to quantize vectors. Quantization aims to find an encoding of
42
+ vectors that reduces the expected distortion.
43
+
44
+ All routines expect obs to be an M by N array, where the rows are
45
+ the observation vectors. The codebook is a k by N array, where the
46
+ ith row is the centroid of code word i. The observation vectors
47
+ and centroids have the same feature dimension.
48
+
49
+ As an example, suppose we wish to compress a 24-bit color image
50
+ (each pixel is represented by one byte for red, one for blue, and
51
+ one for green) before sending it over the web. By using a smaller
52
+ 8-bit encoding, we can reduce the amount of data by two
53
+ thirds. Ideally, the colors for each of the 256 possible 8-bit
54
+ encoding values should be chosen to minimize distortion of the
55
+ color. Running k-means with k=256 generates a code book of 256
56
+ codes, which fills up all possible 8-bit sequences. Instead of
57
+ sending a 3-byte value for each pixel, the 8-bit centroid index
58
+ (or code word) of the dominating centroid is transmitted. The code
59
+ book is also sent over the wire so each 8-bit code can be
60
+ translated back to a 24-bit pixel value representation. If the
61
+ image of interest was of an ocean, we would expect many 24-bit
62
+ blues to be represented by 8-bit codes. If it was an image of a
63
+ human face, more flesh-tone colors would be represented in the
64
+ code book.
65
+
66
+ """
67
+ import warnings
68
+ import numpy as np
69
+ from collections import deque
70
+ from scipy._lib._array_api import (
71
+ _asarray, array_namespace, size, atleast_nd, copy, cov
72
+ )
73
+ from scipy._lib._util import check_random_state, rng_integers
74
+ from scipy.spatial.distance import cdist
75
+
76
+ from . import _vq
77
+
78
+ __docformat__ = 'restructuredtext'
79
+
80
+ __all__ = ['whiten', 'vq', 'kmeans', 'kmeans2']
81
+
82
+
83
class ClusterError(Exception):
    # Raised (via _missing_raise) when k-means produces an empty cluster
    # and missing='raise' was requested.
    pass
85
+
86
+
87
def whiten(obs, check_finite=True):
    """
    Normalize a group of observations on a per feature basis.

    Before running k-means, it is beneficial to rescale each feature
    dimension of the observation set by its standard deviation (i.e. "whiten"
    it - as in "white noise" where each frequency has equal power).
    Each feature is divided by its standard deviation across all observations
    to give it unit variance.

    Parameters
    ----------
    obs : ndarray
        Each row of the array is an observation.  The
        columns are the features seen during each observation.

        >>> #         f0    f1    f2
        >>> obs = [[  1.,   1.,   1.],  #o0
        ...        [  2.,   2.,   2.],  #o1
        ...        [  3.,   3.,   3.],  #o2
        ...        [  4.,   4.,   4.]]  #o3

    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    result : ndarray
        Contains the values in `obs` scaled by the standard deviation
        of each column.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.cluster.vq import whiten
    >>> features = np.array([[1.9, 2.3, 1.7],
    ...                      [1.5, 2.5, 2.2],
    ...                      [0.8, 0.6, 1.7,]])
    >>> whiten(features)
    array([[ 4.17944278,  2.69811351,  7.21248917],
           [ 3.29956009,  2.93273208,  9.33380951],
           [ 1.75976538,  0.7038557 ,  7.21248917]])

    """
    xp = array_namespace(obs)
    obs = _asarray(obs, check_finite=check_finite, xp=xp)
    # Per-column standard deviation over all observations.
    std_dev = xp.std(obs, axis=0)
    zero_std_mask = std_dev == 0
    if xp.any(zero_std_mask):
        # Avoid division by zero: constant columns are left unscaled
        # (divided by 1), and the caller is warned.
        std_dev[zero_std_mask] = 1.0
        warnings.warn("Some columns have standard deviation zero. "
                      "The values of these columns will not change.",
                      RuntimeWarning, stacklevel=2)
    return obs / std_dev
144
+
145
+
146
def vq(obs, code_book, check_finite=True):
    """
    Assign codes from a code book to observations.

    Assigns a code from a code book to each observation. Each
    observation vector in the 'M' by 'N' `obs` array is compared with the
    centroids in the code book and assigned the code of the closest
    centroid.

    The features in `obs` should have unit variance, which can be
    achieved by passing them through the whiten function. The code
    book can be created with the k-means algorithm or a different
    encoding algorithm.

    Parameters
    ----------
    obs : ndarray
        Each row of the 'M' x 'N' array is an observation. The columns are
        the "features" seen during each observation. The features must be
        whitened first using the whiten function or something equivalent.
    code_book : ndarray
        The code book is usually generated using the k-means algorithm.
        Each row of the array holds a different code, and the columns are
        the features of the code.

        >>> #              f0    f1    f2   f3
        >>> code_book = [
        ...             [  1.,   2.,   3.,   4.],  #c0
        ...             [  1.,   2.,   3.,   4.],  #c1
        ...             [  1.,   2.,   3.,   4.]]  #c2

    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    code : ndarray
        A length M array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between the observation and its nearest
        code.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.cluster.vq import vq
    >>> code_book = np.array([[1., 1., 1.],
    ...                       [2., 2., 2.]])
    >>> features = np.array([[1.9, 2.3, 1.7],
    ...                      [1.5, 2.5, 2.2],
    ...                      [0.8, 0.6, 1.7]])
    >>> vq(features, code_book)
    (array([1, 1, 0], dtype=int32), array([0.43588989, 0.73484692, 0.83066239]))

    """
    xp = array_namespace(obs, code_book)
    obs = _asarray(obs, xp=xp, check_finite=check_finite)
    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)
    # Promote both inputs to a common dtype before dispatching.
    ct = xp.result_type(obs, code_book)

    c_obs = xp.astype(obs, ct, copy=False)
    c_code_book = xp.astype(code_book, ct, copy=False)

    if xp.isdtype(ct, kind='real floating'):
        # Fast path: the compiled _vq implementation only handles real
        # floating dtypes and NumPy arrays, so round-trip through NumPy
        # and convert the results back to the caller's namespace.
        c_obs = np.asarray(c_obs)
        c_code_book = np.asarray(c_code_book)
        result = _vq.vq(c_obs, c_code_book)
        return xp.asarray(result[0]), xp.asarray(result[1])
    # Fallback for all other dtypes; inputs were already validated above,
    # so skip the finiteness check.
    return py_vq(obs, code_book, check_finite=False)
218
+
219
+
220
def py_vq(obs, code_book, check_finite=True):
    """ Python version of vq algorithm.

    The algorithm computes the Euclidean distance between each
    observation and every frame in the code_book.

    Parameters
    ----------
    obs : ndarray
        Expects a rank 2 array. Each row is one observation.
    code_book : ndarray
        Code book to use. Same format as obs. Should have same number of
        features (e.g., columns) as obs.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    code : ndarray
        code[i] gives the label of the ith observation; its code is
        code_book[code[i]].
    min_dist : ndarray
        min_dist[i] gives the distance between the ith observation and its
        corresponding code.

    Notes
    -----
    This function is slower than the C version but works for
    all input types. If the inputs have the wrong types for the
    C versions of the function, this one is called as a last resort.

    It is about 20 times slower than the C version.

    """
    xp = array_namespace(obs, code_book)
    obs = _asarray(obs, xp=xp, check_finite=check_finite)
    code_book = _asarray(code_book, xp=xp, check_finite=check_finite)

    if obs.ndim != code_book.ndim:
        raise ValueError("Observation and code_book should have the same rank")

    if obs.ndim == 1:
        # Promote 1-D inputs to column vectors so cdist sees 2-D arrays.
        obs = obs[:, xp.newaxis]
        code_book = code_book[:, xp.newaxis]

    # Once `cdist` has array API support, this `xp.asarray` call can be removed
    dist = xp.asarray(cdist(obs, code_book))
    # Nearest centroid index and its distance, per observation.
    code = xp.argmin(dist, axis=1)
    min_dist = xp.min(dist, axis=1)
    return code, min_dist
273
+
274
+
275
def _kmeans(obs, guess, thresh=1e-5, xp=None):
    """ "raw" version of k-means.

    Returns
    -------
    code_book
        The lowest distortion codebook found.
    avg_dist
        The average distance a observation is from a code in the book.
        Lower means the code_book matches the data better.

    See Also
    --------
    kmeans : wrapper around k-means

    Examples
    --------
    Note: not whitened in this example.

    >>> import numpy as np
    >>> from scipy.cluster.vq import _kmeans
    >>> features  = np.array([[ 1.9,2.3],
    ...                       [ 1.5,2.5],
    ...                       [ 0.8,0.6],
    ...                       [ 0.4,1.8],
    ...                       [ 1.0,1.0]])
    >>> book = np.array((features[0],features[2]))
    >>> _kmeans(features,book)
    (array([[ 1.7       ,  2.4       ],
           [ 0.73333333,  1.13333333]]), 0.40563916697728591)

    """
    xp = np if xp is None else xp
    code_book = guess
    diff = xp.inf
    # Rolling window of the last two average distortions; the loop stops
    # when their absolute difference drops to `thresh` or below.
    prev_avg_dists = deque([diff], maxlen=2)
    while diff > thresh:
        # compute membership and distances between obs and code_book
        obs_code, distort = vq(obs, code_book, check_finite=False)
        prev_avg_dists.append(xp.mean(distort, axis=-1))
        # recalc code_book as centroids of associated obs
        # (_vq.update_cluster_means is compiled and needs NumPy arrays,
        # so convert there and back around the call)
        obs = np.asarray(obs)
        obs_code = np.asarray(obs_code)
        code_book, has_members = _vq.update_cluster_means(obs, obs_code,
                                                          code_book.shape[0])
        obs = xp.asarray(obs)
        obs_code = xp.asarray(obs_code)
        code_book = xp.asarray(code_book)
        has_members = xp.asarray(has_members)
        # Drop centroids that attracted no observations.
        code_book = code_book[has_members]
        diff = xp.abs(prev_avg_dists[0] - prev_avg_dists[1])

    return code_book, prev_avg_dists[1]
328
+
329
+
330
def kmeans(obs, k_or_guess, iter=20, thresh=1e-5, check_finite=True,
           *, seed=None):
    """
    Performs k-means on a set of observation vectors forming k clusters.

    The k-means algorithm adjusts the classification of the observations
    into clusters and updates the cluster centroids until the position of
    the centroids is stable over successive iterations. In this
    implementation of the algorithm, the stability of the centroids is
    determined by comparing the absolute value of the change in the average
    Euclidean distance between the observations and their corresponding
    centroids against a threshold. This yields
    a code book mapping centroids to codes and vice versa.

    Parameters
    ----------
    obs : ndarray
        Each row of the M by N array is an observation vector. The
        columns are the features seen during each observation.
        The features must be whitened first with the `whiten` function.

    k_or_guess : int or ndarray
        The number of centroids to generate. A code is assigned to
        each centroid, which is also the row index of the centroid
        in the code_book matrix generated.

        The initial k centroids are chosen by randomly selecting
        observations from the observation matrix. Alternatively,
        passing a k by N array specifies the initial k centroids.

    iter : int, optional
        The number of times to run k-means, returning the codebook
        with the lowest distortion. This argument is ignored if
        initial centroids are specified with an array for the
        ``k_or_guess`` parameter. This parameter does not represent the
        number of iterations of the k-means algorithm.

    thresh : float, optional
        Terminates the k-means algorithm if the change in
        distortion since the last k-means iteration is less than
        or equal to threshold.

    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        Seed for initializing the pseudo-random number generator.
        If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        The default is None.

    Returns
    -------
    codebook : ndarray
        A k by N array of k centroids. The ith centroid
        codebook[i] is represented with the code i. The centroids
        and codes generated represent the lowest distortion seen,
        not necessarily the globally minimal distortion.
        Note that the number of centroids is not necessarily the same as the
        ``k_or_guess`` parameter, because centroids assigned to no observations
        are removed during iterations.

    distortion : float
        The mean (non-squared) Euclidean distance between the observations
        passed and the centroids generated. Note the difference to the standard
        definition of distortion in the context of the k-means algorithm, which
        is the sum of the squared distances.

    See Also
    --------
    kmeans2 : a different implementation of k-means clustering
        with more methods for generating initial centroids but without
        using a distortion change threshold as a stopping criterion.

    whiten : must be called prior to passing an observation matrix
        to kmeans.

    Notes
    -----
    For more functionalities or optimal performance, you can use
    `sklearn.cluster.KMeans <https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html>`_.
    `This <https://hdbscan.readthedocs.io/en/latest/performance_and_scalability.html#comparison-of-high-performance-implementations>`_
    is a benchmark result of several implementations.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.cluster.vq import vq, kmeans, whiten
    >>> import matplotlib.pyplot as plt
    >>> features  = np.array([[ 1.9,2.3],
    ...                       [ 1.5,2.5],
    ...                       [ 0.8,0.6],
    ...                       [ 0.4,1.8],
    ...                       [ 0.1,0.1],
    ...                       [ 0.2,1.8],
    ...                       [ 2.0,0.5],
    ...                       [ 0.3,1.5],
    ...                       [ 1.0,1.0]])
    >>> whitened = whiten(features)
    >>> book = np.array((whitened[0],whitened[2]))
    >>> kmeans(whitened,book)
    (array([[ 2.3110306 ,  2.86287398],    # random
           [ 0.93218041,  1.24398691]]), 0.85684700941625547)

    >>> codes = 3
    >>> kmeans(whitened,codes)
    (array([[ 2.3110306 ,  2.86287398],    # random
           [ 1.32544402,  0.65607529],
           [ 0.40782893,  2.02786907]]), 0.5196582527686241)

    >>> # Create 50 datapoints in two clusters a and b
    >>> pts = 50
    >>> rng = np.random.default_rng()
    >>> a = rng.multivariate_normal([0, 0], [[4, 1], [1, 4]], size=pts)
    >>> b = rng.multivariate_normal([30, 10],
    ...                             [[10, 2], [2, 1]],
    ...                             size=pts)
    >>> features = np.concatenate((a, b))
    >>> # Whiten data
    >>> whitened = whiten(features)
    >>> # Find 2 clusters in the data
    >>> codebook, distortion = kmeans(whitened, 2)
    >>> # Plot whitened data and cluster centers in red
    >>> plt.scatter(whitened[:, 0], whitened[:, 1])
    >>> plt.scatter(codebook[:, 0], codebook[:, 1], c='r')
    >>> plt.show()

    """
    if isinstance(k_or_guess, int):
        xp = array_namespace(obs)
    else:
        xp = array_namespace(obs, k_or_guess)
    obs = _asarray(obs, xp=xp, check_finite=check_finite)
    guess = _asarray(k_or_guess, xp=xp, check_finite=check_finite)
    if iter < 1:
        raise ValueError("iter must be at least 1, got %s" % iter)

    # Determine whether a count (scalar) or an initial guess (array) was passed.
    if size(guess) != 1:
        if size(guess) < 1:
            raise ValueError("Asked for 0 clusters. Initial book was %s" %
                             guess)
        # An explicit initial codebook: run a single k-means pass from it.
        return _kmeans(obs, guess, thresh=thresh, xp=xp)

    # k_or_guess is a scalar, now verify that it's an integer
    k = int(guess)
    if k != guess:
        raise ValueError("If k_or_guess is a scalar, it must be an integer.")
    if k < 1:
        raise ValueError("Asked for %d clusters." % k)

    rng = check_random_state(seed)

    # initialize best distance value to a large value
    best_dist = xp.inf
    # Run k-means `iter` times from random starts and keep the best result.
    for i in range(iter):
        # the initial code book is randomly selected from observations
        guess = _kpoints(obs, k, rng, xp)
        book, dist = _kmeans(obs, guess, thresh=thresh, xp=xp)
        if dist < best_dist:
            best_book = book
            best_dist = dist
    return best_book, best_dist
500
+
501
+
502
+ def _kpoints(data, k, rng, xp):
503
+ """Pick k points at random in data (one row = one observation).
504
+
505
+ Parameters
506
+ ----------
507
+ data : ndarray
508
+ Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
509
+ dimensional data, rank 2 multidimensional data, in which case one
510
+ row is one observation.
511
+ k : int
512
+ Number of samples to generate.
513
+ rng : `numpy.random.Generator` or `numpy.random.RandomState`
514
+ Random number generator.
515
+
516
+ Returns
517
+ -------
518
+ x : ndarray
519
+ A 'k' by 'N' containing the initial centroids
520
+
521
+ """
522
+ idx = rng.choice(data.shape[0], size=int(k), replace=False)
523
+ # convert to array with default integer dtype (avoids numpy#25607)
524
+ idx = xp.asarray(idx, dtype=xp.asarray([1]).dtype)
525
+ return xp.take(data, idx, axis=0)
526
+
527
+
528
def _krandinit(data, k, rng, xp):
    """Returns k samples of a random variable whose parameters depend on data.

    More precisely, it returns k observations sampled from a Gaussian random
    variable whose mean and covariances are the ones estimated from the data.

    Parameters
    ----------
    data : ndarray
        Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
        data, rank 2 multidimensional data, in which case one
        row is one observation.
    k : int
        Number of samples to generate.
    rng : `numpy.random.Generator` or `numpy.random.RandomState`
        Random number generator.

    Returns
    -------
    x : ndarray
        A 'k' by 'N' containing the initial centroids

    """
    mu = xp.mean(data, axis=0)
    k = np.asarray(k)

    if data.ndim == 1:
        # 1-D data: scale standard normal samples by the estimated
        # standard deviation.
        _cov = cov(data)
        x = rng.standard_normal(size=k)
        x = xp.asarray(x)
        x *= xp.sqrt(_cov)
    elif data.shape[1] > data.shape[0]:
        # initialize when the covariance matrix is rank deficient
        # (more features than observations): sample in the row space of
        # the centered data via its SVD instead of a Cholesky factor.
        _, s, vh = xp.linalg.svd(data - mu, full_matrices=False)
        x = rng.standard_normal(size=(k, size(s)))
        x = xp.asarray(x)
        sVh = s[:, None] * vh / xp.sqrt(data.shape[0] - xp.asarray(1.))
        x = x @ sVh
    else:
        # Full-rank case: color standard normal samples with the
        # Cholesky factor of the estimated covariance.
        _cov = atleast_nd(cov(data.T), ndim=2)

        # k rows, d cols (one row = one obs)
        # Generate k sample of a random variable ~ Gaussian(mu, cov)
        x = rng.standard_normal(size=(k, size(mu)))
        x = xp.asarray(x)
        x = x @ xp.linalg.cholesky(_cov).T

    x += mu
    return x
577
+
578
+
579
+ def _kpp(data, k, rng, xp):
580
+ """ Picks k points in the data based on the kmeans++ method.
581
+
582
+ Parameters
583
+ ----------
584
+ data : ndarray
585
+ Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
586
+ data, rank 2 multidimensional data, in which case one
587
+ row is one observation.
588
+ k : int
589
+ Number of samples to generate.
590
+ rng : `numpy.random.Generator` or `numpy.random.RandomState`
591
+ Random number generator.
592
+
593
+ Returns
594
+ -------
595
+ init : ndarray
596
+ A 'k' by 'N' containing the initial centroids.
597
+
598
+ References
599
+ ----------
600
+ .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
601
+ careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
602
+ on Discrete Algorithms, 2007.
603
+ """
604
+
605
+ ndim = len(data.shape)
606
+ if ndim == 1:
607
+ data = data[:, None]
608
+
609
+ dims = data.shape[1]
610
+
611
+ init = xp.empty((int(k), dims))
612
+
613
+ for i in range(k):
614
+ if i == 0:
615
+ init[i, :] = data[rng_integers(rng, data.shape[0]), :]
616
+
617
+ else:
618
+ D2 = cdist(init[:i,:], data, metric='sqeuclidean').min(axis=0)
619
+ probs = D2/D2.sum()
620
+ cumprobs = probs.cumsum()
621
+ r = rng.uniform()
622
+ cumprobs = np.asarray(cumprobs)
623
+ init[i, :] = data[np.searchsorted(cumprobs, r), :]
624
+
625
+ if ndim == 1:
626
+ init = init[:, 0]
627
+ return init
628
+
629
+
630
# Map of supported `minit` names (see kmeans2) to their initializers.
_valid_init_meth = {'random': _krandinit, 'points': _kpoints, '++': _kpp}
631
+
632
+
633
+ def _missing_warn():
634
+ """Print a warning when called."""
635
+ warnings.warn("One of the clusters is empty. "
636
+ "Re-run kmeans with a different initialization.",
637
+ stacklevel=3)
638
+
639
+
640
def _missing_raise():
    """Raise a ClusterError when called."""
    message = ("One of the clusters is empty. "
               "Re-run kmeans with a different initialization.")
    raise ClusterError(message)
644
+
645
+
646
# Map of supported `missing` names (see kmeans2) to empty-cluster handlers.
_valid_miss_meth = {'warn': _missing_warn, 'raise': _missing_raise}
647
+
648
+
649
def kmeans2(data, k, iter=10, thresh=1e-5, minit='random',
            missing='warn', check_finite=True, *, seed=None):
    """
    Classify a set of observations into k clusters using the k-means algorithm.

    The algorithm attempts to minimize the Euclidean distance between
    observations and centroids. Several initialization methods are
    included.

    Parameters
    ----------
    data : ndarray
        A 'M' by 'N' array of 'M' observations in 'N' dimensions or a length
        'M' array of 'M' 1-D observations.
    k : int or ndarray
        The number of clusters to form as well as the number of
        centroids to generate. If `minit` initialization string is
        'matrix', or if a ndarray is given instead, it is
        interpreted as initial cluster to use instead.
    iter : int, optional
        Number of iterations of the k-means algorithm to run. Note
        that this differs in meaning from the iters parameter to
        the kmeans function.
    thresh : float, optional
        (not used yet)
    minit : str, optional
        Method for initialization. Available methods are 'random',
        'points', '++' and 'matrix':

        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.

        'points': choose k observations (rows) at random from data for
        the initial centroids.

        '++': choose k observations accordingly to the kmeans++ method
        (careful seeding)

        'matrix': interpret the k parameter as a k by M (or length k
        array for 1-D data) array of initial centroids.
    missing : str, optional
        Method to deal with empty clusters. Available methods are
        'warn' and 'raise':

        'warn': give a warning and continue.

        'raise': raise an ClusterError and terminate the algorithm.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True
    seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
        Seed for initializing the pseudo-random number generator.
        If `seed` is None (or `numpy.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        The default is None.

    Returns
    -------
    centroid : ndarray
        A 'k' by 'N' array of centroids found at the last iteration of
        k-means.
    label : ndarray
        label[i] is the code or index of the centroid the
        ith observation is closest to.

    See Also
    --------
    kmeans

    References
    ----------
    .. [1] D. Arthur and S. Vassilvitskii, "k-means++: the advantages of
       careful seeding", Proceedings of the Eighteenth Annual ACM-SIAM Symposium
       on Discrete Algorithms, 2007.

    Examples
    --------
    >>> from scipy.cluster.vq import kmeans2
    >>> import matplotlib.pyplot as plt
    >>> import numpy as np

    Create z, an array with shape (100, 2) containing a mixture of samples
    from three multivariate normal distributions.

    >>> rng = np.random.default_rng()
    >>> a = rng.multivariate_normal([0, 6], [[2, 1], [1, 1.5]], size=45)
    >>> b = rng.multivariate_normal([2, 0], [[1, -1], [-1, 3]], size=30)
    >>> c = rng.multivariate_normal([6, 4], [[5, 0], [0, 1.2]], size=25)
    >>> z = np.concatenate((a, b, c))
    >>> rng.shuffle(z)

    Compute three clusters.

    >>> centroid, label = kmeans2(z, 3, minit='points')
    >>> centroid
    array([[ 2.22274463, -0.61666946],  # may vary
           [ 0.54069047,  5.86541444],
           [ 6.73846769,  4.01991898]])

    How many points are in each cluster?

    >>> counts = np.bincount(label)
    >>> counts
    array([29, 51, 20])  # may vary

    Plot the clusters.

    >>> w0 = z[label == 0]
    >>> w1 = z[label == 1]
    >>> w2 = z[label == 2]
    >>> plt.plot(w0[:, 0], w0[:, 1], 'o', alpha=0.5, label='cluster 0')
    >>> plt.plot(w1[:, 0], w1[:, 1], 'd', alpha=0.5, label='cluster 1')
    >>> plt.plot(w2[:, 0], w2[:, 1], 's', alpha=0.5, label='cluster 2')
    >>> plt.plot(centroid[:, 0], centroid[:, 1], 'k*', label='centroids')
    >>> plt.axis('equal')
    >>> plt.legend(shadow=True)
    >>> plt.show()

    """
    if int(iter) < 1:
        raise ValueError("Invalid iter (%s), "
                         "must be a positive integer." % iter)
    try:
        miss_meth = _valid_miss_meth[missing]
    except KeyError as e:
        raise ValueError(f"Unknown missing method {missing!r}") from e

    if isinstance(k, int):
        xp = array_namespace(data)
    else:
        xp = array_namespace(data, k)
    data = _asarray(data, xp=xp, check_finite=check_finite)
    # `k` may be mutated below when it is an initial-centroid array, so copy.
    code_book = copy(k, xp=xp)
    if data.ndim == 1:
        d = 1
    elif data.ndim == 2:
        d = data.shape[1]
    else:
        raise ValueError("Input of rank > 2 is not supported.")

    if size(data) < 1 or size(code_book) < 1:
        raise ValueError("Empty input is not supported.")

    # If k is not a single value, it should be compatible with data's shape
    if minit == 'matrix' or size(code_book) > 1:
        # `k` supplied the initial centroids directly; validate its shape.
        if data.ndim != code_book.ndim:
            raise ValueError("k array doesn't match data rank")
        nc = code_book.shape[0]
        if data.ndim > 1 and code_book.shape[1] != d:
            raise ValueError("k array doesn't match data dimension")
    else:
        # `k` is a cluster count; generate the initial code book with the
        # requested initialization method.
        nc = int(code_book)

        if nc < 1:
            raise ValueError("Cannot ask kmeans2 for %d clusters"
                             " (k was %s)" % (nc, code_book))
        elif nc != code_book:
            warnings.warn("k was not an integer, was converted.", stacklevel=2)

        try:
            init_meth = _valid_init_meth[minit]
        except KeyError as e:
            raise ValueError(f"Unknown init method {minit!r}") from e
        else:
            rng = check_random_state(seed)
            code_book = init_meth(data, code_book, rng, xp)

    # The compiled _vq routines need NumPy arrays.
    data = np.asarray(data)
    code_book = np.asarray(code_book)
    for i in range(iter):
        # Compute the nearest neighbor for each obs using the current code book
        label = vq(data, code_book, check_finite=check_finite)[0]
        # Update the code book by computing centroids
        new_code_book, has_members = _vq.update_cluster_means(data, label, nc)
        if not has_members.all():
            miss_meth()
            # Set the empty clusters to their previous positions
            new_code_book[~has_members] = code_book[~has_members]
        code_book = new_code_book

    return xp.asarray(code_book), xp.asarray(label)
parrot/lib/python3.10/site-packages/scipy/fft/__init__.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ==============================================
3
+ Discrete Fourier transforms (:mod:`scipy.fft`)
4
+ ==============================================
5
+
6
+ .. currentmodule:: scipy.fft
7
+
8
+ Fast Fourier Transforms (FFTs)
9
+ ==============================
10
+
11
+ .. autosummary::
12
+ :toctree: generated/
13
+
14
+ fft - Fast (discrete) Fourier Transform (FFT)
15
+ ifft - Inverse FFT
16
+ fft2 - 2-D FFT
17
+ ifft2 - 2-D inverse FFT
18
+ fftn - N-D FFT
19
+ ifftn - N-D inverse FFT
20
+ rfft - FFT of strictly real-valued sequence
21
+ irfft - Inverse of rfft
22
+ rfft2 - 2-D FFT of real sequence
23
+ irfft2 - Inverse of rfft2
24
+ rfftn - N-D FFT of real sequence
25
+ irfftn - Inverse of rfftn
26
+ hfft - FFT of a Hermitian sequence (real spectrum)
27
+ ihfft - Inverse of hfft
28
+ hfft2 - 2-D FFT of a Hermitian sequence
29
+ ihfft2 - Inverse of hfft2
30
+ hfftn - N-D FFT of a Hermitian sequence
31
+ ihfftn - Inverse of hfftn
32
+
33
+ Discrete Sin and Cosine Transforms (DST and DCT)
34
+ ================================================
35
+
36
+ .. autosummary::
37
+ :toctree: generated/
38
+
39
+ dct - Discrete cosine transform
40
+ idct - Inverse discrete cosine transform
41
+ dctn - N-D Discrete cosine transform
42
+ idctn - N-D Inverse discrete cosine transform
43
+ dst - Discrete sine transform
44
+ idst - Inverse discrete sine transform
45
+ dstn - N-D Discrete sine transform
46
+ idstn - N-D Inverse discrete sine transform
47
+
48
+ Fast Hankel Transforms
49
+ ======================
50
+
51
+ .. autosummary::
52
+ :toctree: generated/
53
+
54
+ fht - Fast Hankel transform
55
+ ifht - Inverse of fht
56
+
57
+ Helper functions
58
+ ================
59
+
60
+ .. autosummary::
61
+ :toctree: generated/
62
+
63
+ fftshift - Shift the zero-frequency component to the center of the spectrum
64
+ ifftshift - The inverse of `fftshift`
65
+ fftfreq - Return the Discrete Fourier Transform sample frequencies
66
+ rfftfreq - DFT sample frequencies (for usage with rfft, irfft)
67
+ fhtoffset - Compute an optimal offset for the Fast Hankel Transform
68
+ next_fast_len - Find the optimal length to zero-pad an FFT for speed
69
+ prev_fast_len - Find the maximum slice length that results in a fast FFT
70
+ set_workers - Context manager to set default number of workers
71
+ get_workers - Get the current default number of workers
72
+
73
+ Backend control
74
+ ===============
75
+
76
+ .. autosummary::
77
+ :toctree: generated/
78
+
79
+ set_backend - Context manager to set the backend within a fixed scope
80
+ skip_backend - Context manager to skip a backend within a fixed scope
81
+ set_global_backend - Sets the global fft backend
82
+ register_backend - Register a backend for permanent use
83
+
84
+ """
85
+
86
+ from ._basic import (
87
+ fft, ifft, fft2, ifft2, fftn, ifftn,
88
+ rfft, irfft, rfft2, irfft2, rfftn, irfftn,
89
+ hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn)
90
+ from ._realtransforms import dct, idct, dst, idst, dctn, idctn, dstn, idstn
91
+ from ._fftlog import fht, ifht, fhtoffset
92
+ from ._helper import (
93
+ next_fast_len, prev_fast_len, fftfreq,
94
+ rfftfreq, fftshift, ifftshift)
95
+ from ._backend import (set_backend, skip_backend, set_global_backend,
96
+ register_backend)
97
+ from ._pocketfft.helper import set_workers, get_workers
98
+
99
+ __all__ = [
100
+ 'fft', 'ifft', 'fft2', 'ifft2', 'fftn', 'ifftn',
101
+ 'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',
102
+ 'hfft', 'ihfft', 'hfft2', 'ihfft2', 'hfftn', 'ihfftn',
103
+ 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',
104
+ 'next_fast_len', 'prev_fast_len',
105
+ 'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn',
106
+ 'fht', 'ifht',
107
+ 'fhtoffset',
108
+ 'set_backend', 'skip_backend', 'set_global_backend', 'register_backend',
109
+ 'get_workers', 'set_workers']
110
+
111
+
112
+ from scipy._lib._testutils import PytestTester
113
+ test = PytestTester(__name__)
114
+ del PytestTester
parrot/lib/python3.10/site-packages/scipy/fft/_backend.py ADDED
@@ -0,0 +1,196 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import scipy._lib.uarray as ua
2
+ from . import _basic_backend
3
+ from . import _realtransforms_backend
4
+ from . import _fftlog_backend
5
+
6
+
7
+ class _ScipyBackend:
8
+ """The default backend for fft calculations
9
+
10
+ Notes
11
+ -----
12
+ We use the domain ``numpy.scipy`` rather than ``scipy`` because ``uarray``
13
+ treats the domain as a hierarchy. This means the user can install a single
14
+ backend for ``numpy`` and have it implement ``numpy.scipy.fft`` as well.
15
+ """
16
+ __ua_domain__ = "numpy.scipy.fft"
17
+
18
+ @staticmethod
19
+ def __ua_function__(method, args, kwargs):
20
+
21
+ fn = getattr(_basic_backend, method.__name__, None)
22
+ if fn is None:
23
+ fn = getattr(_realtransforms_backend, method.__name__, None)
24
+ if fn is None:
25
+ fn = getattr(_fftlog_backend, method.__name__, None)
26
+ if fn is None:
27
+ return NotImplemented
28
+ return fn(*args, **kwargs)
29
+
30
+
31
# Mapping of recognised backend-name strings to their implementing classes;
# consulted by ``_backend_from_arg`` when the caller passes a ``str``.
_named_backends = {
    'scipy': _ScipyBackend,
}
34
+
35
+
36
+ def _backend_from_arg(backend):
37
+ """Maps strings to known backends and validates the backend"""
38
+
39
+ if isinstance(backend, str):
40
+ try:
41
+ backend = _named_backends[backend]
42
+ except KeyError as e:
43
+ raise ValueError(f'Unknown backend {backend}') from e
44
+
45
+ if backend.__ua_domain__ != 'numpy.scipy.fft':
46
+ raise ValueError('Backend does not implement "numpy.scipy.fft"')
47
+
48
+ return backend
49
+
50
+
51
def set_global_backend(backend, coerce=False, only=False, try_last=False):
    """Sets the global fft backend

    This utility method replaces the default backend for permanent use.
    It is tried automatically in the list of backends (first outside any
    :obj:`set_backend` context manager) unless the ``only`` flag is set
    on a backend.

    Parameters
    ----------
    backend : {object, 'scipy'}
        The backend to use: either a ``str`` naming a known backend
        ({'scipy'}) or an object that implements the uarray protocol.
    coerce : bool
        Whether to coerce input types when trying this backend.
    only : bool
        If ``True``, no more backends will be tried if this fails.
        Implied by ``coerce=True``.
    try_last : bool
        If ``True``, the global backend is tried after registered backends.

    Raises
    ------
    ValueError: If the backend does not implement ``numpy.scipy.fft``.

    Notes
    -----
    This will overwrite the previously set global backend, which, by
    default, is the SciPy implementation.

    Examples
    --------
    We can set the global fft backend:

    >>> from scipy.fft import fft, set_global_backend
    >>> set_global_backend("scipy")  # Sets global backend (default is "scipy").
    >>> fft([1])  # Calls the global backend
    array([1.+0.j])
    """
    # Validate first so an invalid argument never reaches uarray.
    ua.set_global_backend(_backend_from_arg(backend), coerce=coerce,
                          only=only, try_last=try_last)
93
+
94
+
95
def register_backend(backend):
    """
    Register a backend for permanent use.

    Registered backends have the lowest priority and will be tried after
    the global backend.

    Parameters
    ----------
    backend : {object, 'scipy'}
        The backend to use: either a ``str`` naming a known backend
        ({'scipy'}) or an object that implements the uarray protocol.

    Raises
    ------
    ValueError: If the backend does not implement ``numpy.scipy.fft``.

    Examples
    --------
    We can register a new fft backend:

    >>> from scipy.fft import fft, register_backend, set_global_backend
    >>> class NoopBackend:  # Define an invalid Backend
    ...     __ua_domain__ = "numpy.scipy.fft"
    ...     def __ua_function__(self, func, args, kwargs):
    ...         return NotImplemented
    >>> set_global_backend(NoopBackend())  # Set the invalid backend as global
    >>> register_backend("scipy")  # Register a new backend
    # The registered backend is called because
    # the global backend returns `NotImplemented`
    >>> fft([1])
    array([1.+0.j])
    >>> set_global_backend("scipy")  # Restore global backend to default

    """
    # Validate first so an invalid argument never reaches uarray.
    ua.register_backend(_backend_from_arg(backend))
133
+
134
+
135
def set_backend(backend, coerce=False, only=False):
    """Context manager to set the backend within a fixed scope.

    Upon entering the ``with`` statement, the given backend is added to
    the list of available backends with the highest priority. Upon exit,
    the backend is reset to the state before entering the scope.

    Parameters
    ----------
    backend : {object, 'scipy'}
        The backend to use: either a ``str`` naming a known backend
        ({'scipy'}) or an object that implements the uarray protocol.
    coerce : bool, optional
        Whether to allow expensive conversions for the ``x`` parameter,
        e.g., copying a NumPy array to the GPU for a CuPy backend.
        Implies ``only``.
    only : bool, optional
        If only is ``True`` and this backend returns ``NotImplemented``,
        then a BackendNotImplemented error will be raised immediately,
        ignoring any lower priority backends.

    Examples
    --------
    >>> import scipy.fft as fft
    >>> with fft.set_backend('scipy', only=True):
    ...     fft.fft([1])  # Always calls the scipy implementation
    array([1.+0.j])
    """
    validated = _backend_from_arg(backend)
    return ua.set_backend(validated, coerce=coerce, only=only)
165
+
166
+
167
def skip_backend(backend):
    """Context manager to skip a backend within a fixed scope.

    Within the context of a ``with`` statement, the given backend will
    not be called. This covers backends registered both locally and
    globally. Upon exit, the backend will again be considered.

    Parameters
    ----------
    backend : {object, 'scipy'}
        The backend to skip: either a ``str`` naming a known backend
        ({'scipy'}) or an object that implements the uarray protocol.

    Examples
    --------
    >>> import scipy.fft as fft
    >>> fft.fft([1])  # Calls default SciPy backend
    array([1.+0.j])
    >>> with fft.skip_backend('scipy'):  # We explicitly skip the SciPy backend
    ...     fft.fft([1])  # leaving no implementation available
    Traceback (most recent call last):
        ...
    BackendNotImplementedError: No selected backends had an implementation ...
    """
    # Validate first so an invalid argument never reaches uarray.
    return ua.skip_backend(_backend_from_arg(backend))
194
+
195
+
196
+ set_global_backend('scipy', try_last=True)
parrot/lib/python3.10/site-packages/scipy/fft/_basic.py ADDED
@@ -0,0 +1,1630 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from scipy._lib.uarray import generate_multimethod, Dispatchable
2
+ import numpy as np
3
+
4
+
5
+ def _x_replacer(args, kwargs, dispatchables):
6
+ """
7
+ uarray argument replacer to replace the transform input array (``x``)
8
+ """
9
+ if len(args) > 0:
10
+ return (dispatchables[0],) + args[1:], kwargs
11
+ kw = kwargs.copy()
12
+ kw['x'] = dispatchables[0]
13
+ return args, kw
14
+
15
+
16
def _dispatch(func):
    """Decorator creating a uarray multimethod from *func*.

    The multimethod dispatches on ``x`` (via ``_x_replacer``) in the
    ``numpy.scipy.fft`` domain.
    """
    multimethod = generate_multimethod(func, _x_replacer,
                                       domain="numpy.scipy.fft")
    return multimethod
21
+
22
+
23
@_dispatch
def fft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
        plan=None):
    """
    Compute the 1-D discrete Fourier Transform.

    This function computes the 1-D *n*-point discrete Fourier Transform
    (DFT) with the efficient Fast Fourier Transform (FFT) algorithm.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output. Shorter input is
        zero-padded, longer input is cropped. Defaults to the input
        length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT. Default is the last axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode. "backward" (default) leaves the forward
        transform unscaled and scales `ifft` by ``1/n``; "forward"
        instead applies the ``1/n`` factor on the forward transform;
        "ortho" scales both directions by ``1/sqrt(n)``.

        .. versionadded:: 1.6.0
           ``norm={"forward", "backward"}`` options were added

    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. The memory referenced by ``x`` may then be reused in any
        way, so do not rely on its contents after the transform.
    workers : int, optional
        Maximum number of workers to use for parallel computation of
        independent 1-D FFTs within ``x`` (so ``x`` must be at least
        2-D to benefit). If negative, the value wraps around from
        ``os.cpu_count()``.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan
        provided by downstream FFT vendors. It is currently not used in
        SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.

    Raises
    ------
    IndexError
        if `axes` is larger than the last axis of `x`.

    See Also
    --------
    ifft : The inverse of `fft`.
    fft2 : The 2-D FFT.
    fftn : The N-D FFT.
    rfftn : The N-D FFT of real input.
    fftfreq : Frequency bins for given FFT parameters.
    next_fast_len : Size to pad input to for most efficient transforms

    Notes
    -----
    The transform is most efficient when `n` is a power of 2; for poorly
    factorizable sizes, Bluestein's algorithm keeps the cost at
    O(`n` log `n`). Further gains may come from zero-padding via
    `next_fast_len`. At ``y[n/2]`` the result reaches the Nyquist
    frequency and wraps around to the negative-frequency terms; use
    `fftshift` to center the zero-frequency component. Real input
    automatically takes a faster "real FFT" path; see `rfft` to also
    halve the output, or `dct` for real symmetric data.
    """
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
166
+
167
+
168
@_dispatch
def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the 1-D inverse discrete Fourier Transform.

    This function computes the inverse of the 1-D *n*-point discrete
    Fourier transform computed by `fft`, so ``ifft(fft(x)) == x`` to
    within numerical accuracy. The input must be ordered as `fft`
    output: the zero-frequency term first, then the positive-frequency
    terms, then the negative-frequency terms in increasing order
    starting from the most negative frequency. For even `n`, ``x[n//2]``
    holds the aliased sum of the positive and negative Nyquist terms.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output. Shorter input is
        zero-padded at the end, longer input is cropped. Defaults to the
        input length along `axis`. See notes about padding issues.
    axis : int, optional
        Axis over which to compute the inverse DFT. Default is the last.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan
        provided by downstream FFT vendors. It is currently not used in
        SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.

    Raises
    ------
    IndexError
        If `axes` is larger than the last axis of `x`.

    See Also
    --------
    fft : The 1-D (forward) FFT, of which `ifft` is the inverse.
    ifft2 : The 2-D inverse FFT.
    ifftn : The N-D inverse FFT.

    Notes
    -----
    When `n` exceeds the input size, zeros are appended at the end; this
    common approach can give surprising results, so perform any other
    padding before calling `ifft`. As with `fft`, all floating point
    types are supported and real input is optimized.
    """
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
272
+
273
+
274
@_dispatch
def rfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the 1-D discrete Fourier Transform for real input.

    This function computes the 1-D *n*-point discrete Fourier Transform
    (DFT) of a real-valued array by means of the Fast Fourier Transform
    (FFT).

    Parameters
    ----------
    x : array_like
        Input array
    n : int, optional
        Number of points along the transformation axis in the input to
        use. Shorter input is zero-padded, longer input is cropped.
        Defaults to the input length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT. Default is the last axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan
        provided by downstream FFT vendors. It is currently not used in
        SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axis`.
        The transformed axis has length ``(n/2)+1`` for even `n` and
        ``(n+1)/2`` for odd `n`.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    irfft : The inverse of `rfft`.
    fft : The 1-D FFT of general (complex) input.
    fftn : The N-D FFT.
    rfft2 : The 2-D FFT of real input.
    rfftn : The N-D FFT of real input.

    Notes
    -----
    The DFT of purely real input is Hermitian-symmetric, so the
    redundant negative-frequency terms are not computed and the output
    axis has length ``n//2 + 1``. ``X[0]`` is the real zero-frequency
    term; for even `n`, ``A[-1]`` is the purely real Nyquist term, while
    for odd `n` it is the (generally complex) largest positive
    frequency. Any imaginary part of the input is silently discarded.
    """
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
366
+
367
+
368
@_dispatch
def irfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Computes the inverse of `rfft`.

    This function computes the inverse of the 1-D *n*-point discrete
    Fourier Transform of real input computed by `rfft`, so
    ``irfft(rfft(x), len(x)) == x`` to within numerical accuracy. The
    input is expected in `rfft` order: the real zero-frequency term
    followed by the complex positive-frequency terms in order of
    increasing frequency; the negative frequencies are taken to be the
    complex conjugates of the corresponding positive-frequency terms.

    Parameters
    ----------
    x : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output; ``n//2+1`` input
        points are necessary (longer input is cropped, shorter is
        zero-padded). Defaults to ``2*(m-1)``, where ``m`` is the input
        length along `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT. Default is the last.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan
        provided by downstream FFT vendors. It is currently not used in
        SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along `axis`.
        The transformed axis has length `n`, or ``2*(m-1)`` if `n` is
        not given. To get an odd number of output points, `n` must be
        specified.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `x`.

    See Also
    --------
    rfft : The 1-D FFT of real input, of which `irfft` is inverse.
    fft : The 1-D FFT.
    irfft2 : The inverse of the 2-D FFT of real input.
    irfftn : The inverse of the N-D FFT of real input.

    Notes
    -----
    An `n` that pads or truncates adds/removes values at the high
    frequencies, which allows Fourier interpolation resampling via
    ``a_resamp = irfft(rfft(a), m)``. The default `n` assumes an even
    output length (by Hermitian symmetry the last imaginary component
    is ignored); supply the correct real input length explicitly to
    avoid losing information.
    """
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
467
+
468
+
469
@_dispatch
def hfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the FFT of a signal that has Hermitian symmetry, i.e., a
    real spectrum.

    Parameters
    ----------
    x : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output; ``n//2 + 1`` input
        points are necessary (longer input is cropped, shorter is
        zero-padded). Defaults to ``2*(m-1)``, where ``m`` is the input
        length along `axis`.
    axis : int, optional
        Axis over which to compute the FFT. Default is the last axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See `fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan
        provided by downstream FFT vendors. It is currently not used in
        SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along `axis`.
        The transformed axis has length `n`, or ``2*m - 2`` if `n` is
        not given. For an odd number of output points, `n` must be
        specified, for instance as ``2*m - 1`` in the typical case.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    rfft : Compute the 1-D FFT for real input.
    ihfft : The inverse of `hfft`.
    hfftn : Compute the N-D FFT of a Hermitian signal.

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here the signal has Hermitian symmetry in the time
    domain and is real in the frequency domain. So here it's `hfft`, for
    which you must supply the length of the result if it is to be odd:
    * even: ``ihfft(hfft(a, 2*len(a) - 2) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1) == a``, within roundoff error.
    """
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
550
+
551
+
552
@_dispatch
def ihfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the inverse FFT of a signal that has Hermitian symmetry.

    Parameters
    ----------
    x : array_like
        Input array.
    n : int, optional
        Number of points along the transformation axis of the input to
        use. If `n` is smaller than the input length, the input is
        cropped; if larger, the input is zero-padded. If `n` is not
        given, the length of the input along `axis` is used.
    axis : int, optional
        Axis over which to compute the inverse FFT. Defaults to the
        last axis.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See `fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT
        vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not
        specified. The length of the transformed axis is ``n//2 + 1``.

    See Also
    --------
    hfft, irfft

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here, the signal has Hermitian symmetry in the time
    domain and is real in the frequency domain. So, here, it's `hfft`,
    for which you must supply the length of the result if it is to be
    odd:

    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.

    Examples
    --------
    >>> from scipy.fft import ifft, ihfft
    >>> import numpy as np
    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
    >>> ifft(spectrum)
    array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary
    >>> ihfft(spectrum)
    array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary
    """
    # uarray dispatch stub: only declares `x` as the dispatchable
    # argument; the active backend supplies the implementation.
    return (Dispatchable(x, np.ndarray),)
617
+
618
+
619
@_dispatch
def fftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the N-D discrete Fourier Transform.

    This function computes the N-D discrete Fourier Transform over any
    number of axes in an M-D array by means of the Fast Fourier
    Transform (FFT).

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output (``s[0]``
        refers to axis 0, ``s[1]`` to axis 1, etc.). This corresponds
        to ``n`` for ``fft(x, n)``. Along any axis, if the given shape
        is smaller than that of the input, the input is cropped; if
        larger, the input is padded with zeros. If `s` is not given,
        the shape of the input along the axes specified by `axes` is
        used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not
        specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT
        vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `x`, as
        explained in the parameters section above.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of
        `x`.

    See Also
    --------
    ifftn : The inverse of `fftn`, the inverse N-D FFT.
    fft : The 1-D FFT, with definitions and conventions used.
    rfftn : The N-D FFT of real input.
    fft2 : The 2-D FFT.
    fftshift : Shifts zero-frequency terms to centre of array.

    Notes
    -----
    The output, analogously to `fft`, contains the term for zero
    frequency in the low-order corner of all axes, the positive
    frequency terms in the first half of all axes, the term for the
    Nyquist frequency in the middle of all axes and the negative
    frequency terms in the second half of all axes, in order of
    decreasingly negative frequency.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.mgrid[:3, :3, :3][0]
    >>> scipy.fft.fftn(x, axes=(1, 2))
    array([[[ 0.+0.j,   0.+0.j,   0.+0.j], # may vary
            [ 0.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j]],
           [[ 9.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j]],
           [[18.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j]]])
    >>> scipy.fft.fftn(x, (2, 2), axes=(0, 1))
    array([[[ 2.+0.j,  2.+0.j,  2.+0.j], # may vary
            [ 0.+0.j,  0.+0.j,  0.+0.j]],
           [[-2.+0.j, -2.+0.j, -2.+0.j],
            [ 0.+0.j,  0.+0.j,  0.+0.j]]])
    """
    # uarray dispatch stub; the registered backend does the real work.
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
722
+
723
+
724
@_dispatch
def ifftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the N-D inverse discrete Fourier Transform.

    This function computes the inverse of the N-D discrete Fourier
    Transform over any number of axes in an M-D array by means of the
    Fast Fourier Transform (FFT). In other words,
    ``ifftn(fftn(x)) == x`` to within numerical accuracy.

    The input, analogously to `ifft`, should be ordered in the same way
    as is returned by `fftn`, i.e., it should have the term for zero
    frequency in all axes in the low-order corner, the positive
    frequency terms in the first half of all axes, the term for the
    Nyquist frequency in the middle of all axes and the negative
    frequency terms in the second half of all axes, in order of
    decreasingly negative frequency.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output (``s[0]``
        refers to axis 0, ``s[1]`` to axis 1, etc.). This corresponds
        to ``n`` for ``ifft(x, n)``. Along any axis, if the given shape
        is smaller than that of the input, the input is cropped; if
        larger, the input is padded with zeros. If `s` is not given,
        the shape of the input along the axes specified by `axes` is
        used. See notes for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the IFFT. If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not
        specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT
        vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `x`, as
        explained in the parameters section above.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of
        `x`.

    See Also
    --------
    fftn : The forward N-D FFT, of which `ifftn` is the inverse.
    ifft : The 1-D inverse FFT.
    ifft2 : The 2-D inverse FFT.
    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to
        beginning of array.

    Notes
    -----
    Zero-padding, analogously with `ifft`, is performed by appending
    zeros to the input along the specified dimension. Although this is
    the common approach, it might lead to surprising results. If
    another form of zero padding is desired, it must be performed
    before `ifftn` is called.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.eye(4)
    >>> scipy.fft.ifftn(scipy.fft.fftn(x, axes=(0,)), axes=(1,))
    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])
    """
    # uarray dispatch stub; only `x` participates in backend selection.
    return Dispatchable(x, np.ndarray),
826
+
827
+
828
@_dispatch
def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the 2-D discrete Fourier Transform.

    This function computes the N-D discrete Fourier Transform over any
    axes in an M-D array by means of the Fast Fourier Transform (FFT).
    By default, the transform is computed over the last two axes of the
    input array, i.e., a 2-dimensional FFT.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output (``s[0]``
        refers to axis 0, ``s[1]`` to axis 1, etc.). This corresponds
        to ``n`` for ``fft(x, n)``. Along each axis, if the given shape
        is smaller than that of the input, the input is cropped; if
        larger, the input is padded with zeros. If `s` is not given,
        the shape of the input along the axes specified by `axes` is
        used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last two
        axes are used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT
        vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not
        given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given
        and ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of
        `x`.

    See Also
    --------
    ifft2 : The inverse 2-D FFT.
    fft : The 1-D FFT.
    fftn : The N-D FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.
        For 2-D input, swaps first and third quadrants, and second and
        fourth quadrants.

    Notes
    -----
    `fft2` is just `fftn` with a different default for `axes`.

    The output, analogously to `fft`, contains the term for zero
    frequency in the low-order corner of the transformed axes, the
    positive frequency terms in the first half of these axes, the term
    for the Nyquist frequency in the middle of the axes and the
    negative frequency terms in the second half of the axes, in order
    of decreasingly negative frequency.

    See `fftn` for details and a plotting example, and `fft` for
    definitions and conventions used.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.mgrid[:5, :5][0]
    >>> scipy.fft.fft2(x)
    array([[ 50.  +0.j        ,   0.  +0.j        ,   0.  +0.j        , # may vary
              0.  +0.j        ,   0.  +0.j        ],
           [-12.5+17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
              0.  +0.j        ,   0.  +0.j        ],
           [-12.5 +4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
              0.  +0.j        ,   0.  +0.j        ],
           [-12.5 -4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
              0.  +0.j        ,   0.  +0.j        ],
           [-12.5-17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
              0.  +0.j        ,   0.  +0.j        ]])
    """
    # uarray dispatch stub: the backend implementation is selected
    # based on the array type of `x`.
    return (Dispatchable(x, np.ndarray),)
925
+
926
+
927
@_dispatch
def ifft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the 2-D inverse discrete Fourier Transform.

    This function computes the inverse of the 2-D discrete Fourier
    Transform over any number of axes in an M-D array by means of the
    Fast Fourier Transform (FFT). In other words,
    ``ifft2(fft2(x)) == x`` to within numerical accuracy. By default,
    the inverse transform is computed over the last two axes of the
    input array.

    The input, analogously to `ifft`, should be ordered in the same way
    as is returned by `fft2`, i.e., it should have the term for zero
    frequency in the low-order corner of the two axes, the positive
    frequency terms in the first half of these axes, the term for the
    Nyquist frequency in the middle of the axes and the negative
    frequency terms in the second half of both axes, in order of
    decreasingly negative frequency.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each axis) of the output (``s[0]`` refers to
        axis 0, ``s[1]`` to axis 1, etc.). This corresponds to `n` for
        ``ifft(x, n)``. Along each axis, if the given shape is smaller
        than that of the input, the input is cropped; if larger, the
        input is padded with zeros. If `s` is not given, the shape of
        the input along the axes specified by `axes` is used. See notes
        for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last two
        axes are used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT
        vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not
        given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given
        and ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of
        `x`.

    See Also
    --------
    fft2 : The forward 2-D FFT, of which `ifft2` is the inverse.
    ifftn : The inverse of the N-D FFT.
    fft : The 1-D FFT.
    ifft : The 1-D inverse FFT.

    Notes
    -----
    `ifft2` is just `ifftn` with a different default for `axes`.

    See `ifftn` for details and a plotting example, and `fft` for
    definition and conventions used.

    Zero-padding, analogously with `ifft`, is performed by appending
    zeros to the input along the specified dimension. Although this is
    the common approach, it might lead to surprising results. If
    another form of zero padding is desired, it must be performed
    before `ifft2` is called.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = 4 * np.eye(4)
    >>> scipy.fft.ifft2(x)
    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])
    """
    # uarray dispatch stub; real computation lives in the backend.
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
1021
+
1022
+
1023
@_dispatch
def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the N-D discrete Fourier Transform for real input.

    This function computes the N-D discrete Fourier Transform over any
    number of axes in an M-D real array by means of the Fast Fourier
    Transform (FFT). By default, all axes are transformed, with the
    real transform performed over the last axis, while the remaining
    transforms are complex.

    Parameters
    ----------
    x : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the
        input (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        The final element of `s` corresponds to `n` for ``rfft(x, n)``,
        while for the remaining axes, it corresponds to `n` for
        ``fft(x, n)``. Along any axis, if the given shape is smaller
        than that of the input, the input is cropped; if larger, the
        input is padded with zeros. If `s` is not given, the shape of
        the input along the axes specified by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not
        specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT
        vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `x`, as
        explained in the parameters section above. The length of the
        last axis transformed will be ``s[-1]//2+1``, while the
        remaining transformed axes will have lengths according to `s`,
        or unchanged from the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of
        `x`.

    See Also
    --------
    irfftn : The inverse of `rfftn`, i.e., the inverse of the N-D FFT
        of real input.
    fft : The 1-D FFT, with definitions and conventions used.
    rfft : The 1-D FFT of real input.
    fftn : The N-D FFT.
    rfft2 : The 2-D FFT of real input.

    Notes
    -----
    The transform for real input is performed over the last
    transformation axis, as by `rfft`, then the transform over the
    remaining axes is performed as by `fftn`. The order of the output
    is as for `rfft` for the final transformation axis, and as for
    `fftn` for the remaining transformation axes.

    See `fft` for details, definitions and conventions used.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.ones((2, 2, 2))
    >>> scipy.fft.rfftn(x)
    array([[[8.+0.j,  0.+0.j], # may vary
            [0.+0.j,  0.+0.j]],
           [[0.+0.j,  0.+0.j],
            [0.+0.j,  0.+0.j]]])

    >>> scipy.fft.rfftn(x, axes=(2, 0))
    array([[[4.+0.j,  0.+0.j], # may vary
            [4.+0.j,  0.+0.j]],
           [[0.+0.j,  0.+0.j],
            [0.+0.j,  0.+0.j]]])
    """
    # uarray dispatch stub: only marks `x` for backend dispatch.
    return Dispatchable(x, np.ndarray),
1121
+
1122
+
1123
@_dispatch
def rfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the 2-D FFT of a real array.

    Parameters
    ----------
    x : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT
        vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    irfft2 : The inverse of the 2-D FFT of real input.
    rfft : The 1-D FFT of real input.
    rfftn : Compute the N-D discrete Fourier Transform for real input.

    Notes
    -----
    This is really just `rfftn` with different default behavior.
    For more details see `rfftn`.
    """
    # Thin dispatch stub over `rfftn`-style behavior; backend selected
    # from the type of `x`.
    return (Dispatchable(x, np.ndarray),)
1171
+
1172
+
1173
@_dispatch
def irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the inverse of `rfftn`.

    This function computes the inverse of the N-D discrete Fourier
    Transform for real input over any number of axes in an M-D array by
    means of the Fast Fourier Transform (FFT). In other words,
    ``irfftn(rfftn(x), x.shape) == x`` to within numerical accuracy.
    (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`, and
    for the same reason.)

    The input should be ordered in the same way as is returned by
    `rfftn`, i.e., as for `irfft` for the final transformation axis,
    and as for `ifftn` along all the other axes.

    Parameters
    ----------
    x : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output (``s[0]``
        refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
        number of input points used along this axis, except for the
        last axis, where ``s[-1]//2+1`` points of the input are used.
        Along any axis, if the shape indicated by `s` is smaller than
        that of the input, the input is cropped; if larger, the input
        is padded with zeros. If `s` is not given, the shape of the
        input along the axes specified by axes is used. Except for the
        last axis which is taken to be ``2*(m-1)``, where ``m`` is the
        length of the input along that axis.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT. If not given, the
        last `len(s)` axes are used, or all axes if `s` is also not
        specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT
        vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `x`, as
        explained in the parameters section above. The length of each
        transformed axis is as given by the corresponding element of
        `s`, or the length of the input in every axis except for the
        last one if `s` is not given. In the final transformed axis the
        length of the output when `s` is not given is ``2*(m-1)``,
        where ``m`` is the length of the final transformed axis of the
        input. To get an odd number of output points in the final axis,
        `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of
        `x`.

    See Also
    --------
    rfftn : The forward N-D FFT of real input, of which `ifftn` is the
        inverse.
    fft : The 1-D FFT, with definitions and conventions used.
    irfft : The inverse of the 1-D FFT of real input.
    irfft2 : The inverse of the 2-D FFT of real input.

    Notes
    -----
    See `fft` for definitions and conventions used.

    See `rfft` for definitions and conventions used for real input.

    The default value of `s` assumes an even output length in the final
    transformation axis. When performing the final complex to real
    transformation, the Hermitian symmetry requires that the last
    imaginary component along that axis must be 0 and so it is ignored.
    To avoid losing information, the correct length of the real input
    *must* be given.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.zeros((3, 2, 2))
    >>> x[0, 0, 0] = 3 * 2 * 2
    >>> scipy.fft.irfftn(x)
    array([[[1.,  1.],
            [1.,  1.]],
           [[1.,  1.],
            [1.,  1.]],
           [[1.,  1.],
            [1.,  1.]]])
    """
    # uarray dispatch stub; backend implementations consume `s`, `axes`
    # and the remaining keyword arguments.
    dispatchables = (Dispatchable(x, np.ndarray),)
    return dispatchables
1278
+
1279
+
1280
@_dispatch
def irfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the inverse of `rfft2`.

    Parameters
    ----------
    x : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the real output to the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse fft. Default is the
        last two axes.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is
        False. See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for a precomputed plan provided by downstream FFT
        vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    rfft2 : The 2-D FFT of real input.
    irfft : The inverse of the 1-D FFT of real input.
    irfftn : The inverse of the N-D FFT of real input.

    Notes
    -----
    This is really `irfftn` with different defaults.
    For more details see `irfftn`.
    """
    # Thin dispatch stub over `irfftn`-style behavior.
    return Dispatchable(x, np.ndarray),
1328
+
1329
+
1330
@_dispatch
def hfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the N-D FFT of Hermitian symmetric complex input, i.e., a
    signal with a real spectrum.

    This function computes the N-D discrete Fourier Transform for a
    Hermitian symmetric complex input over any number of axes in an
    M-D array by means of the Fast Fourier Transform (FFT), so that
    ``ihfftn(hfftn(x, s)) == x`` to within numerical accuracy. (``s``
    here is ``x.shape`` with ``s[-1] = x.shape[-1] * 2 - 1``; this is
    necessary for the same reason ``x.shape`` would be necessary for
    `irfft`.)

    Parameters
    ----------
    x : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also
        the number of input points used along each axis, except for the
        last axis, where ``s[-1]//2+1`` points of the input are used.
        Along any axis, if the shape indicated by `s` is smaller than that
        of the input, the input is cropped; if it is larger, the input is
        padded with zeros. If `s` is not given, the shape of the input
        along the axes specified by `axes` is used, except for the last
        axis which is taken to be ``2*(m-1)`` where ``m`` is the length of
        the input along that axis.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT. If not given, the last
        `len(s)` axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for passing in a precomputed plan provided by downstream
        FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `x`, as
        explained in the parameters section above. The length of each
        transformed axis is as given by the corresponding element of `s`,
        or the length of the input in every axis except the last one when
        `s` is not given. In the final transformed axis the length of the
        output when `s` is not given is ``2*(m-1)`` where ``m`` is the
        length of the final transformed axis of the input. To get an odd
        number of output points in the final axis, `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    ihfftn : The inverse N-D FFT with real spectrum. Inverse of `hfftn`.
    fft : The 1-D FFT, with definitions and conventions used.
    rfft : Forward FFT of real input.

    Notes
    -----
    For a 1-D signal ``x`` to have a real spectrum, it must satisfy the
    Hermitian property ``x[i] == np.conj(x[-i])`` for all ``i``. This
    generalizes into higher dimensions by reflecting over each axis in
    turn: ``x[i, j, k, ...] == np.conj(x[-i, -j, -k, ...])``. This should
    not be confused with a Hermitian matrix, for which the transpose is
    its own conjugate.

    The default value of `s` assumes an even output length in the final
    transformation axis. When performing the final complex-to-real
    transformation, the Hermitian symmetry requires that the last
    imaginary component along that axis must be 0 and so it is ignored.
    To avoid losing information, the correct length of the real input
    *must* be given.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.ones((3, 2, 2))
    >>> scipy.fft.hfftn(x)
    array([[[12.,  0.],
            [ 0.,  0.]],
           [[ 0.,  0.],
            [ 0.,  0.]],
           [[ 0.,  0.],
            [ 0.,  0.]]])

    """
    # Multimethod stub: the return value marks `x` as the dispatchable
    # argument; the registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
1441
+
1442
+
1443
@_dispatch
def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the 2-D FFT of a Hermitian complex array.

    Parameters
    ----------
    x : array
        Input array, taken to be Hermitian complex.
    s : sequence of ints, optional
        Shape of the real output.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See `fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for passing in a precomputed plan provided by downstream
        FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The real result of the 2-D Hermitian complex real FFT.

    See Also
    --------
    hfftn : Compute the N-D discrete Fourier Transform for Hermitian
            complex input.

    Notes
    -----
    This is really just `hfftn` with different default behavior.
    For more details see `hfftn`.

    """
    # Multimethod stub: the return value marks `x` as the dispatchable
    # argument; the registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
1489
+
1490
+
1491
@_dispatch
def ihfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the N-D inverse discrete Fourier Transform for a real
    spectrum.

    This function computes the N-D inverse discrete Fourier Transform
    over any number of axes in an M-D real array by means of the Fast
    Fourier Transform (FFT). By default, all axes are transformed, with
    the real transform performed over the last axis, while the remaining
    transforms are complex.

    Parameters
    ----------
    x : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). Along any
        axis, if the given shape is smaller than that of the input, the
        input is cropped; if it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes
        specified by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last
        ``len(s)`` axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for passing in a precomputed plan provided by downstream
        FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `x`, as
        explained in the parameters section above. The length of the last
        axis transformed will be ``s[-1]//2+1``, while the remaining
        transformed axes will have lengths according to `s`, or unchanged
        from the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    hfftn : The forward N-D FFT of Hermitian input.
    hfft : The 1-D FFT of Hermitian input.
    fft : The 1-D FFT, with definitions and conventions used.
    fftn : The N-D FFT.
    hfft2 : The 2-D FFT of Hermitian input.

    Notes
    -----
    The transform for real input is performed over the last transformation
    axis, as by `ihfft`, then the transform over the remaining axes is
    performed as by `ifftn`. The order of the output is the positive part
    of the Hermitian output signal, in the same format as `rfft`.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.ones((2, 2, 2))
    >>> scipy.fft.ihfftn(x)
    array([[[1.+0.j,  0.+0.j], # may vary
            [0.+0.j,  0.+0.j]],
           [[0.+0.j,  0.+0.j],
            [0.+0.j,  0.+0.j]]])
    >>> scipy.fft.ihfftn(x, axes=(2, 0))
    array([[[1.+0.j,  0.+0.j], # may vary
            [1.+0.j,  0.+0.j]],
           [[0.+0.j,  0.+0.j],
            [0.+0.j,  0.+0.j]]])

    """
    # Multimethod stub: the return value marks `x` as the dispatchable
    # argument; the registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
1583
+
1584
+
1585
@_dispatch
def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the 2-D inverse FFT of a real spectrum.

    Parameters
    ----------
    x : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the real input to the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse FFT. Default is the
        last two axes.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        Reserved for passing in a precomputed plan provided by downstream
        FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    ihfftn : Compute the inverse of the N-D FFT of Hermitian input.

    Notes
    -----
    This is really `ihfftn` with different defaults.
    For more details see `ihfftn`.

    """
    # Multimethod stub: the return value marks `x` as the dispatchable
    # argument; the registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
parrot/lib/python3.10/site-packages/scipy/fft/_basic_backend.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from scipy._lib._array_api import (
2
+ array_namespace, is_numpy, xp_unsupported_param_msg, is_complex
3
+ )
4
+ from . import _pocketfft
5
+ import numpy as np
6
+
7
+
8
+ def _validate_fft_args(workers, plan, norm):
9
+ if workers is not None:
10
+ raise ValueError(xp_unsupported_param_msg("workers"))
11
+ if plan is not None:
12
+ raise ValueError(xp_unsupported_param_msg("plan"))
13
+ if norm is None:
14
+ norm = 'backward'
15
+ return norm
16
+
17
+
18
+ # pocketfft is used whenever SCIPY_ARRAY_API is not set,
19
+ # or x is a NumPy array or array-like.
20
+ # When SCIPY_ARRAY_API is set, we try to use xp.fft for CuPy arrays,
21
+ # PyTorch arrays and other array API standard supporting objects.
22
+ # If xp.fft does not exist, we attempt to convert to np and back to use pocketfft.
23
+
24
def _execute_1D(func_str, pocketfft_func, x, n, axis, norm, overwrite_x, workers, plan):
    """Run a 1-D transform on `x` with the appropriate backend.

    NumPy arrays (and array-likes) go straight to pocketfft. Other array
    API namespaces use their own ``xp.fft`` module when available, and
    otherwise round-trip through NumPy + pocketfft.
    """
    xp = array_namespace(x)

    if is_numpy(xp):
        return pocketfft_func(np.asarray(x), n=n, axis=axis, norm=norm,
                              overwrite_x=overwrite_x, workers=workers,
                              plan=plan)

    # pocketfft-only arguments are invalid from here on.
    norm = _validate_fft_args(workers, plan, norm)

    fft_module = getattr(xp, 'fft', None)
    if fft_module is not None:
        return getattr(fft_module, func_str)(x, n=n, axis=axis, norm=norm)

    # No native fft in this namespace: convert to NumPy, transform with
    # pocketfft, then convert the result back.
    result = pocketfft_func(np.asarray(x), n=n, axis=axis, norm=norm)
    return xp.asarray(result)
40
+
41
+
42
def _execute_nD(func_str, pocketfft_func, x, s, axes, norm, overwrite_x, workers, plan):
    """Run an N-D transform on `x` with the appropriate backend.

    Mirrors `_execute_1D`: pocketfft for NumPy input, ``xp.fft`` for other
    namespaces that provide it, and a NumPy round-trip otherwise.
    """
    xp = array_namespace(x)

    if is_numpy(xp):
        return pocketfft_func(np.asarray(x), s=s, axes=axes, norm=norm,
                              overwrite_x=overwrite_x, workers=workers,
                              plan=plan)

    # pocketfft-only arguments are invalid from here on.
    norm = _validate_fft_args(workers, plan, norm)

    fft_module = getattr(xp, 'fft', None)
    if fft_module is not None:
        return getattr(fft_module, func_str)(x, s=s, axes=axes, norm=norm)

    # No native fft in this namespace: convert to NumPy, transform with
    # pocketfft, then convert the result back.
    result = pocketfft_func(np.asarray(x), s=s, axes=axes, norm=norm)
    return xp.asarray(result)
58
+
59
+
60
def fft(x, n=None, axis=-1, norm=None,
        overwrite_x=False, workers=None, *, plan=None):
    """Backend entry point for the 1-D FFT."""
    return _execute_1D('fft', _pocketfft.fft, x, n, axis, norm,
                       overwrite_x, workers, plan)
64
+
65
+
66
def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """Backend entry point for the 1-D inverse FFT."""
    return _execute_1D('ifft', _pocketfft.ifft, x, n, axis, norm,
                       overwrite_x, workers, plan)
70
+
71
+
72
def rfft(x, n=None, axis=-1, norm=None,
         overwrite_x=False, workers=None, *, plan=None):
    """Backend entry point for the 1-D FFT of real input."""
    return _execute_1D('rfft', _pocketfft.rfft, x, n, axis, norm,
                       overwrite_x, workers, plan)
76
+
77
+
78
def irfft(x, n=None, axis=-1, norm=None,
          overwrite_x=False, workers=None, *, plan=None):
    """Backend entry point for the inverse of `rfft`."""
    return _execute_1D('irfft', _pocketfft.irfft, x, n, axis, norm,
                       overwrite_x, workers, plan)
82
+
83
+
84
def hfft(x, n=None, axis=-1, norm=None,
         overwrite_x=False, workers=None, *, plan=None):
    """Backend entry point for the 1-D FFT of Hermitian input."""
    return _execute_1D('hfft', _pocketfft.hfft, x, n, axis, norm,
                       overwrite_x, workers, plan)
88
+
89
+
90
def ihfft(x, n=None, axis=-1, norm=None,
          overwrite_x=False, workers=None, *, plan=None):
    """Backend entry point for the inverse of `hfft`."""
    return _execute_1D('ihfft', _pocketfft.ihfft, x, n, axis, norm,
                       overwrite_x, workers, plan)
94
+
95
+
96
def fftn(x, s=None, axes=None, norm=None,
         overwrite_x=False, workers=None, *, plan=None):
    """Backend entry point for the N-D FFT."""
    return _execute_nD('fftn', _pocketfft.fftn, x, s, axes, norm,
                       overwrite_x, workers, plan)
100
+
101
+
102
+
103
def ifftn(x, s=None, axes=None, norm=None,
          overwrite_x=False, workers=None, *, plan=None):
    """Backend entry point for the N-D inverse FFT."""
    return _execute_nD('ifftn', _pocketfft.ifftn, x, s, axes, norm,
                       overwrite_x, workers, plan)
107
+
108
+
109
def fft2(x, s=None, axes=(-2, -1), norm=None,
         overwrite_x=False, workers=None, *, plan=None):
    """2-D FFT: `fftn` with the last two axes as the default."""
    return fftn(x, s=s, axes=axes, norm=norm,
                overwrite_x=overwrite_x, workers=workers, plan=plan)
112
+
113
+
114
def ifft2(x, s=None, axes=(-2, -1), norm=None,
          overwrite_x=False, workers=None, *, plan=None):
    """2-D inverse FFT: `ifftn` with the last two axes as the default."""
    return ifftn(x, s=s, axes=axes, norm=norm,
                 overwrite_x=overwrite_x, workers=workers, plan=plan)
117
+
118
+
119
def rfftn(x, s=None, axes=None, norm=None,
          overwrite_x=False, workers=None, *, plan=None):
    """Backend entry point for the N-D FFT of real input."""
    return _execute_nD('rfftn', _pocketfft.rfftn, x, s, axes, norm,
                       overwrite_x, workers, plan)
123
+
124
+
125
def rfft2(x, s=None, axes=(-2, -1), norm=None,
          overwrite_x=False, workers=None, *, plan=None):
    """2-D real FFT: `rfftn` with the last two axes as the default."""
    return rfftn(x, s=s, axes=axes, norm=norm,
                 overwrite_x=overwrite_x, workers=workers, plan=plan)
128
+
129
+
130
def irfftn(x, s=None, axes=None, norm=None,
           overwrite_x=False, workers=None, *, plan=None):
    """Backend entry point for the inverse of `rfftn`."""
    return _execute_nD('irfftn', _pocketfft.irfftn, x, s, axes, norm,
                       overwrite_x, workers, plan)
134
+
135
+
136
def irfft2(x, s=None, axes=(-2, -1), norm=None,
           overwrite_x=False, workers=None, *, plan=None):
    """2-D inverse real FFT: `irfftn` with the last two axes as the default."""
    return irfftn(x, s=s, axes=axes, norm=norm,
                  overwrite_x=overwrite_x, workers=workers, plan=plan)
139
+
140
+
141
+ def _swap_direction(norm):
142
+ if norm in (None, 'backward'):
143
+ norm = 'forward'
144
+ elif norm == 'forward':
145
+ norm = 'backward'
146
+ elif norm != 'ortho':
147
+ raise ValueError('Invalid norm value %s; should be "backward", '
148
+ '"ortho", or "forward".' % norm)
149
+ return norm
150
+
151
+
152
def hfftn(x, s=None, axes=None, norm=None,
          overwrite_x=False, workers=None, *, plan=None):
    """Hermitian N-D FFT: pocketfft for NumPy input, else via `irfftn`."""
    xp = array_namespace(x)
    if is_numpy(xp):
        return _pocketfft.hfftn(np.asarray(x), s, axes, norm,
                                overwrite_x, workers, plan=plan)
    # hfftn(x) == irfftn(conj(x)) with the normalization direction swapped.
    if is_complex(x, xp):
        x = xp.conj(x)
    return irfftn(x, s, axes, _swap_direction(norm),
                  overwrite_x, workers, plan=plan)
162
+
163
+
164
def hfft2(x, s=None, axes=(-2, -1), norm=None,
          overwrite_x=False, workers=None, *, plan=None):
    """2-D Hermitian FFT: `hfftn` with the last two axes as the default."""
    return hfftn(x, s=s, axes=axes, norm=norm,
                 overwrite_x=overwrite_x, workers=workers, plan=plan)
167
+
168
+
169
def ihfftn(x, s=None, axes=None, norm=None,
           overwrite_x=False, workers=None, *, plan=None):
    """Inverse Hermitian N-D FFT: pocketfft for NumPy input, else via `rfftn`."""
    xp = array_namespace(x)
    if is_numpy(xp):
        return _pocketfft.ihfftn(np.asarray(x), s, axes, norm,
                                 overwrite_x, workers, plan=plan)
    # ihfftn(x) == conj(rfftn(x)) with the normalization direction swapped.
    result = rfftn(x, s, axes, _swap_direction(norm),
                   overwrite_x, workers, plan=plan)
    return xp.conj(result)
177
+
178
def ihfft2(x, s=None, axes=(-2, -1), norm=None,
           overwrite_x=False, workers=None, *, plan=None):
    """2-D inverse Hermitian FFT: `ihfftn` with the last two axes as default."""
    return ihfftn(x, s=s, axes=axes, norm=norm,
                  overwrite_x=overwrite_x, workers=workers, plan=plan)
parrot/lib/python3.10/site-packages/scipy/fft/_debug_backends.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
class NumPyBackend:
    """uarray backend that delegates ``scipy.fft`` calls to ``numpy.fft``.

    Looks up the multimethod's name in ``numpy.fft``; returns
    ``NotImplemented`` for methods numpy.fft does not provide, so uarray
    can fall through to another backend.
    """
    __ua_domain__ = "numpy.scipy.fft"

    @staticmethod
    def __ua_function__(method, args, kwargs):
        # numpy.fft has no overwrite_x concept, so silently drop it.
        kwargs.pop("overwrite_x", None)

        impl = getattr(np.fft, method.__name__, None)
        if impl is None:
            return NotImplemented
        return impl(*args, **kwargs)
14
+
15
+
16
class EchoBackend:
    """Debugging uarray backend that prints every dispatched call.

    It handles nothing (implicitly returns ``None``); it only echoes the
    method, positional arguments, and keyword arguments, one per line.
    """
    __ua_domain__ = "numpy.scipy.fft"

    @staticmethod
    def __ua_function__(method, args, kwargs):
        for part in (method, args, kwargs):
            print(part)
parrot/lib/python3.10/site-packages/scipy/fft/_fftlog.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Fast Hankel transforms using the FFTLog algorithm.
2
+
3
+ The implementation closely follows the Fortran code of Hamilton (2000).
4
+
5
+ added: 14/11/2020 Nicolas Tessore <n.tessore@ucl.ac.uk>
6
+ """
7
+
8
+ from ._basic import _dispatch
9
+ from scipy._lib.uarray import Dispatchable
10
+ from ._fftlog_backend import fhtoffset
11
+ import numpy as np
12
+
13
+ __all__ = ['fht', 'ifht', 'fhtoffset']
14
+
15
+
16
@_dispatch
def fht(a, dln, mu, offset=0.0, bias=0.0):
    r'''Compute the fast Hankel transform.

    Computes the discrete Hankel transform of a logarithmically spaced
    periodic sequence using the FFTLog algorithm [1]_, [2]_.

    Parameters
    ----------
    a : array_like (..., n)
        Real periodic input array, uniformly logarithmically spaced. For
        multidimensional input, the transform is performed over the last
        axis.
    dln : float
        Uniform logarithmic spacing of the input array.
    mu : float
        Order of the Hankel transform, any positive or negative real number.
    offset : float, optional
        Offset of the uniform logarithmic spacing of the output array.
    bias : float, optional
        Exponent of power law bias, any positive or negative real number.

    Returns
    -------
    A : array_like (..., n)
        The transformed output array, which is real, periodic, uniformly
        logarithmically spaced, and of the same shape as the input array.

    See Also
    --------
    ifht : The inverse of `fht`.
    fhtoffset : Return an optimal offset for `fht`.

    Notes
    -----
    This function computes a discrete version of the Hankel transform

    .. math::

        A(k) = \int_{0}^{\infty} \! a(r) \, J_\mu(kr) \, k \, dr \;,

    where :math:`J_\mu` is the Bessel function of order :math:`\mu`, which
    may be any real number. Note that the numerical Hankel transform uses
    an integrand of :math:`k \, dr`, while the mathematical Hankel
    transform is commonly defined using :math:`r \, dr`.

    The input array `a` is a periodic sequence of length :math:`n`,
    uniformly logarithmically spaced with spacing `dln`,

    .. math::

        a_j = a(r_j) \;, \quad
        r_j = r_c \exp[(j-j_c) \, \mathtt{dln}]

    centred about the point :math:`r_c`, with central index
    :math:`j_c = (n-1)/2` (half-integral if :math:`n` is even, so that
    :math:`r_c` falls between two input elements). The output array `A` is
    a periodic sequence of the same length and spacing, centred about a
    point :math:`k_c`.

    The centre points :math:`r_c` and :math:`k_c` may be chosen
    arbitrarily, but it would be usual to choose the product
    :math:`k_c r_c = k_j r_{n-1-j} = k_{n-1-j} r_j` to be unity. This can
    be changed using the `offset` parameter, which controls the
    logarithmic offset :math:`\log(k_c) = \mathtt{offset} - \log(r_c)` of
    the output array. Choosing an optimal value for `offset` may reduce
    ringing of the discrete Hankel transform.

    If the `bias` parameter is nonzero, this function computes a discrete
    version of the biased Hankel transform

    .. math::

        A(k) = \int_{0}^{\infty} \! a_q(r) \, (kr)^q \, J_\mu(kr) \, k \, dr

    where :math:`q` is the value of `bias`, and a power law bias
    :math:`a_q(r) = a(r) \, (kr)^{-q}` is applied to the input sequence.
    Biasing the transform can help approximate the continuous transform of
    :math:`a(r)` if there is a value :math:`q` such that :math:`a_q(r)` is
    close to a periodic sequence, in which case the resulting :math:`A(k)`
    will be close to the continuous transform.

    References
    ----------
    .. [1] Talman J. D., 1978, J. Comp. Phys., 29, 35
    .. [2] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191)

    '''
    # Multimethod stub: the return value marks `a` as the dispatchable
    # argument; the registered backend performs the actual transform.
    return (Dispatchable(a, np.ndarray),)
174
+
175
+
176
@_dispatch
def ifht(A, dln, mu, offset=0.0, bias=0.0):
    r"""Compute the inverse fast Hankel transform.

    Computes the discrete inverse Hankel transform of a logarithmically
    spaced periodic sequence. This is the inverse operation to `fht`.

    Parameters
    ----------
    A : array_like (..., n)
        Real periodic input array, uniformly logarithmically spaced. For
        multidimensional input, the transform is performed over the last
        axis.
    dln : float
        Uniform logarithmic spacing of the input array.
    mu : float
        Order of the Hankel transform, any positive or negative real number.
    offset : float, optional
        Offset of the uniform logarithmic spacing of the output array.
    bias : float, optional
        Exponent of power law bias, any positive or negative real number.

    Returns
    -------
    a : array_like (..., n)
        The transformed output array, which is real, periodic, uniformly
        logarithmically spaced, and of the same shape as the input array.

    See Also
    --------
    fht : Definition of the fast Hankel transform.
    fhtoffset : Return an optimal offset for `ifht`.

    Notes
    -----
    This function computes a discrete version of the Hankel transform

    .. math::

        a(r) = \int_{0}^{\infty} \! A(k) \, J_\mu(kr) \, r \, dk \;,

    where :math:`J_\mu` is the Bessel function of order :math:`\mu`, which
    may be any real number. Note that the numerical inverse Hankel
    transform uses an integrand of :math:`r \, dk`, while the mathematical
    inverse Hankel transform is commonly defined using :math:`k \, dk`.

    See `fht` for further details.
    """
    # Multimethod stub: the return value marks `A` as the dispatchable
    # argument; the registered backend performs the actual transform.
    return (Dispatchable(A, np.ndarray),)
parrot/lib/python3.10/site-packages/scipy/fft/_fftlog_backend.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from warnings import warn
3
+ from ._basic import rfft, irfft
4
+ from ..special import loggamma, poch
5
+
6
+ from scipy._lib._array_api import array_namespace, copy
7
+
8
+ __all__ = ['fht', 'ifht', 'fhtoffset']
9
+
10
+ # constants
11
+ LN_2 = np.log(2)
12
+
13
+
14
def fht(a, dln, mu, offset=0.0, bias=0.0):
    """Backend implementation of the fast Hankel transform (FFTLog)."""
    xp = array_namespace(a)
    a = xp.asarray(a)

    # Length of the transform axis (last axis).
    num = a.shape[-1]

    if bias != 0:
        # Remove the power-law bias from the input:
        # a_q(r) = a(r) (r/r_c)^{-q}
        centre = (num-1)/2
        idx = xp.arange(num, dtype=xp.float64)
        a = a * xp.exp(-bias*(idx - centre)*dln)

    # FFTLog coefficient array for this geometry.
    coeff = xp.asarray(fhtcoeff(num, dln, mu, offset=offset, bias=bias))

    A = _fhtq(a, coeff, xp=xp)

    if bias != 0:
        # Restore the bias on the output:
        # A(k) = A_q(k) (k/k_c)^{-q} (k_c r_c)^{-q}
        A *= xp.exp(-bias*((idx - centre)*dln + offset))

    return A
40
+
41
+
42
def ifht(A, dln, mu, offset=0.0, bias=0.0):
    """Backend implementation of the inverse fast Hankel transform."""
    xp = array_namespace(A)
    A = xp.asarray(A)

    # Length of the transform axis (last axis).
    num = A.shape[-1]

    if bias != 0:
        # Apply the power-law bias to the input:
        # A_q(k) = A(k) (k/k_c)^{q} (k_c r_c)^{q}
        centre = (num-1)/2
        idx = xp.arange(num, dtype=xp.float64)
        A = A * xp.exp(bias*((idx - centre)*dln + offset))

    # FFTLog coefficient array for the inverse transform.
    coeff = xp.asarray(fhtcoeff(num, dln, mu, offset=offset, bias=bias,
                                inverse=True))

    a = _fhtq(A, coeff, inverse=True, xp=xp)

    if bias != 0:
        # Remove the bias from the output:
        # a(r) = a_q(r) (r/r_c)^{q}
        a /= xp.exp(-bias*(idx - centre)*dln)

    return a
68
+
69
+
70
def fhtcoeff(n, dln, mu, offset=0.0, bias=0.0, inverse=False):
    """Compute the coefficient array for a fast Hankel transform.

    Parameters
    ----------
    n : int
        Number of samples in the transform.
    dln : float
        Uniform logarithmic spacing of the transform.
    mu : float
        Order of the Hankel transform.
    offset : float
        Logarithmic offset of the output array.
    bias : float
        Exponent of the power-law bias.
    inverse : bool
        If True, coefficients are intended for the inverse transform;
        this only affects which singular case is warned about and
        patched below.

    Returns
    -------
    u : ndarray of complex, shape (n//2 + 1,)
        Coefficients that multiply the real FFT of the input in `_fhtq`.
    """
    lnkr, q = offset, bias

    # Hankel transform coefficients
    # u_m = (kr)^{-i 2m pi/(n dlnr)} U_mu(q + i 2m pi/(n dlnr))
    # with U_mu(x) = 2^x Gamma((mu+1+x)/2)/Gamma((mu+1-x)/2)
    xp = (mu+1+q)/2
    xm = (mu+1-q)/2
    # Frequencies of the rfft output bins.
    y = np.linspace(0, np.pi*(n//2)/(n*dln), n//2+1)
    u = np.empty(n//2+1, dtype=complex)
    v = np.empty(n//2+1, dtype=complex)
    # Evaluate loggamma at xm + iy (into v) and xp + iy (into u),
    # reusing u as the argument buffer to avoid extra allocations.
    u.imag[:] = y
    u.real[:] = xm
    loggamma(u, out=v)
    u.real[:] = xp
    loggamma(u, out=u)
    # Phase factor 2y(ln 2 - lnkr); combined below into
    # u = exp(loggamma(xp+iy) - loggamma(xm+iy) + q ln 2 + 2iy(ln 2 - lnkr)).
    y *= 2*(LN_2 - lnkr)
    u.real -= v.real
    u.real += LN_2*q
    u.imag += v.imag
    u.imag += y
    np.exp(u, out=u)

    # fix last coefficient to be real
    u.imag[-1] = 0

    # deal with special cases
    if not np.isfinite(u[0]):
        # write u_0 = 2^q Gamma(xp)/Gamma(xm) = 2^q poch(xm, xp-xm)
        # poch() handles special cases for negative integers correctly
        u[0] = 2**q * poch(xm, xp-xm)
        # the coefficient may be inf or 0, meaning the transform or the
        # inverse transform, respectively, is singular

    # check for singular transform or singular inverse transform
    if np.isinf(u[0]) and not inverse:
        warn('singular transform; consider changing the bias', stacklevel=3)
        # fix coefficient to obtain (potentially correct) transform anyway
        u = copy(u)
        u[0] = 0
    elif u[0] == 0 and inverse:
        warn('singular inverse transform; consider changing the bias', stacklevel=3)
        # fix coefficient to obtain (potentially correct) inverse anyway
        u = copy(u)
        u[0] = np.inf

    return u
118
+
119
+
120
def fhtoffset(dln, mu, initial=0.0, bias=0.0):
    """Return optimal offset for a fast Hankel transform.

    Returns an offset close to `initial` that fulfils the low-ringing
    condition of [1]_ for the fast Hankel transform `fht` with logarithmic
    spacing `dln`, order `mu` and bias `bias`.

    Parameters
    ----------
    dln : float
        Uniform logarithmic spacing of the transform.
    mu : float
        Order of the Hankel transform, any positive or negative real number.
    initial : float, optional
        Initial value for the offset. Returns the closest value that
        fulfils the low-ringing condition.
    bias : float, optional
        Exponent of power law bias, any positive or negative real number.

    Returns
    -------
    offset : float
        Optimal offset of the uniform logarithmic spacing of the transform
        that fulfils a low-ringing condition.

    Examples
    --------
    >>> from scipy.fft import fhtoffset
    >>> dln = 0.1
    >>> mu = 2.0
    >>> initial = 0.5
    >>> bias = 0.0
    >>> offset = fhtoffset(dln, mu, initial, bias)
    >>> offset
    0.5454581477676637

    See Also
    --------
    fht : Definition of the fast Hankel transform.

    References
    ----------
    .. [1] Hamilton A. J. S., 2000, MNRAS, 312, 257 (astro-ph/9905191)

    """
    lnkr, q = initial, bias

    half_p = (mu+1+q)/2
    half_m = (mu+1-q)/2
    nyquist = np.pi/(2*dln)
    # Combined phase of loggamma at the Nyquist frequency.
    phase = loggamma(half_p + 1j*nyquist).imag + loggamma(half_m + 1j*nyquist).imag
    # Shift lnkr to the nearest value satisfying the low-ringing condition.
    arg = (LN_2 - lnkr)/dln + phase/np.pi
    return lnkr + (arg - np.round(arg))*dln
175
+
176
+
177
+ def _fhtq(a, u, inverse=False, *, xp=None):
178
+ """Compute the biased fast Hankel transform.
179
+
180
+ This is the basic FFTLog routine.
181
+ """
182
+ if xp is None:
183
+ xp = np
184
+
185
+ # size of transform
186
+ n = a.shape[-1]
187
+
188
+ # biased fast Hankel transform via real FFT
189
+ A = rfft(a, axis=-1)
190
+ if not inverse:
191
+ # forward transform
192
+ A *= u
193
+ else:
194
+ # backward transform
195
+ A /= xp.conj(u)
196
+ A = irfft(A, n, axis=-1)
197
+ A = xp.flip(A, axis=-1)
198
+
199
+ return A
parrot/lib/python3.10/site-packages/scipy/fft/_helper.py ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import update_wrapper, lru_cache
2
+ import inspect
3
+
4
+ from ._pocketfft import helper as _helper
5
+
6
+ import numpy as np
7
+ from scipy._lib._array_api import array_namespace
8
+
9
+
10
def next_fast_len(target, real=False):
    """Find the next fast size of input data to ``fft``, for zero-padding, etc.

    SciPy's FFT algorithms gain their speed by a recursive divide and conquer
    strategy. This relies on efficient functions for small prime factors of the
    input length. Thus, the transforms are fastest when using composites of the
    prime factors handled by the fft implementation. If there are efficient
    functions for all radices <= `n`, then the result will be a number `x`
    >= ``target`` with only prime factors < `n`. (Also known as `n`-smooth
    numbers)

    Parameters
    ----------
    target : int
        Length to start searching from. Must be a positive integer.
    real : bool, optional
        True if the FFT involves real input or output (e.g., `rfft` or `hfft`
        but not `fft`). Defaults to False.

    Returns
    -------
    out : int
        The smallest fast length greater than or equal to ``target``.

    Notes
    -----
    The result of this function may change in future as performance
    considerations change, for example, if new prime factors are added.

    Calling `fft` or `ifft` with real input data performs an ``'R2C'``
    transform internally.

    Examples
    --------
    On a particular machine, an FFT of prime length takes 11.4 ms:

    >>> from scipy import fft
    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> min_len = 93059  # prime length is worst case for speed
    >>> a = rng.standard_normal(min_len)
    >>> b = fft.fft(a)

    Zero-padding to the next regular length reduces computation time to
    1.6 ms, a speedup of 7.3 times:

    >>> fft.next_fast_len(min_len, real=True)
    93312
    >>> b = fft.fft(a, 93312)

    Rounding up to the next power of 2 is not optimal, taking 3.0 ms to
    compute; 1.9 times longer than the size given by ``next_fast_len``:

    >>> b = fft.fft(a, 131072)

    """
    # Intentionally empty: this def only carries the docstring and signature.
    # The name is immediately rebound below to the C implementation.
    pass


# Directly wrap the c-function good_size but take the docstring etc., from the
# next_fast_len function above
# NOTE: the signature is captured before rebinding so the wrapped C function
# still introspects like the Python stub; lru_cache is used in its bare
# callable form (Python >= 3.8), giving an unbounded cache.
_sig = inspect.signature(next_fast_len)
next_fast_len = update_wrapper(lru_cache(_helper.good_size), next_fast_len)
next_fast_len.__wrapped__ = _helper.good_size
next_fast_len.__signature__ = _sig
75
+
76
+
77
def prev_fast_len(target, real=False):
    """Find the previous fast size of input data to ``fft``.
    Useful for discarding a minimal number of samples before FFT.

    SciPy's FFT algorithms gain their speed by a recursive divide and conquer
    strategy. This relies on efficient functions for small prime factors of the
    input length. Thus, the transforms are fastest when using composites of the
    prime factors handled by the fft implementation. If there are efficient
    functions for all radices <= `n`, then the result will be a number `x`
    <= ``target`` with only prime factors <= `n`. (Also known as `n`-smooth
    numbers)

    Parameters
    ----------
    target : int
        Maximum length to search until. Must be a positive integer.
    real : bool, optional
        True if the FFT involves real input or output (e.g., `rfft` or `hfft`
        but not `fft`). Defaults to False.

    Returns
    -------
    out : int
        The largest fast length less than or equal to ``target``.

    Notes
    -----
    The result of this function may change in future as performance
    considerations change, for example, if new prime factors are added.

    Calling `fft` or `ifft` with real input data performs an ``'R2C'``
    transform internally.

    In the current implementation, prev_fast_len assumes radices of
    2,3,5,7,11 for complex FFT and 2,3,5 for real FFT.

    Examples
    --------
    On a particular machine, an FFT of prime length takes 16.2 ms:

    >>> from scipy import fft
    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> max_len = 93059  # prime length is worst case for speed
    >>> a = rng.standard_normal(max_len)
    >>> b = fft.fft(a)

    Performing FFT on the maximum fast length less than max_len
    reduces the computation time to 1.5 ms, a speedup of 10.5 times:

    >>> fft.prev_fast_len(max_len, real=True)
    92160
    >>> c = fft.fft(a[:92160]) # discard last 899 samples

    """
    # Intentionally empty: this def only carries the docstring and signature.
    # The name is immediately rebound below to the C implementation.
    pass


# Directly wrap the c-function prev_good_size but take the docstring etc.,
# from the prev_fast_len function above
# NOTE: the signature is captured before rebinding so the wrapped C function
# still introspects like the Python stub above.
_sig_prev_fast_len = inspect.signature(prev_fast_len)
prev_fast_len = update_wrapper(lru_cache()(_helper.prev_good_size), prev_fast_len)
prev_fast_len.__wrapped__ = _helper.prev_good_size
prev_fast_len.__signature__ = _sig_prev_fast_len
141
+
142
+
143
+ def _init_nd_shape_and_axes(x, shape, axes):
144
+ """Handle shape and axes arguments for N-D transforms.
145
+
146
+ Returns the shape and axes in a standard form, taking into account negative
147
+ values and checking for various potential errors.
148
+
149
+ Parameters
150
+ ----------
151
+ x : array_like
152
+ The input array.
153
+ shape : int or array_like of ints or None
154
+ The shape of the result. If both `shape` and `axes` (see below) are
155
+ None, `shape` is ``x.shape``; if `shape` is None but `axes` is
156
+ not None, then `shape` is ``numpy.take(x.shape, axes, axis=0)``.
157
+ If `shape` is -1, the size of the corresponding dimension of `x` is
158
+ used.
159
+ axes : int or array_like of ints or None
160
+ Axes along which the calculation is computed.
161
+ The default is over all axes.
162
+ Negative indices are automatically converted to their positive
163
+ counterparts.
164
+
165
+ Returns
166
+ -------
167
+ shape : tuple
168
+ The shape of the result as a tuple of integers.
169
+ axes : list
170
+ Axes along which the calculation is computed, as a list of integers.
171
+
172
+ """
173
+ x = np.asarray(x)
174
+ return _helper._init_nd_shape_and_axes(x, shape, axes)
175
+
176
+
177
def fftfreq(n, d=1.0, *, xp=None, device=None):
    """Return the Discrete Fourier Transform sample frequencies.

    The returned float array `f` holds the frequency bin centers in cycles
    per unit of the sample spacing, with zero at the start. For instance,
    if the sample spacing is in seconds, the frequency unit is cycles/second.

    Given a window length `n` and a sample spacing `d`::

      f = [0, 1, ...,   n/2-1,     -n/2, ..., -1] / (d*n)   if n is even
      f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n)   if n is odd

    Parameters
    ----------
    n : int
        Window length.
    d : scalar, optional
        Sample spacing (inverse of the sampling rate). Defaults to 1.
    xp : array_namespace, optional
        The namespace for the return array. Default is None, where NumPy is used.
    device : device, optional
        The device for the return array.
        Only valid when `xp.fft.fftfreq` implements the device parameter.

    Returns
    -------
    f : ndarray
        Array of length `n` containing the sample frequencies.

    Examples
    --------
    >>> import numpy as np
    >>> import scipy.fft
    >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)
    >>> fourier = scipy.fft.fft(signal)
    >>> n = signal.size
    >>> timestep = 0.1
    >>> freq = scipy.fft.fftfreq(n, d=timestep)
    >>> freq
    array([ 0.  ,  1.25,  2.5 , ..., -3.75, -2.5 , -1.25])

    """
    # Delegate to the namespace's own fft module only for non-NumPy
    # namespaces: numpy does not yet support the `device` keyword, so the
    # name check should be removed once it does.
    use_namespace = (
        xp is not None and hasattr(xp, 'fft') and xp.__name__ != 'numpy'
    )
    if use_namespace:
        return xp.fft.fftfreq(n, d=d, device=device)
    if device is not None:
        raise ValueError('device parameter is not supported for input array type')
    return np.fft.fftfreq(n, d=d)
227
+
228
+
229
def rfftfreq(n, d=1.0, *, xp=None, device=None):
    """Return the Discrete Fourier Transform sample frequencies
    (for usage with rfft, irfft).

    The returned float array `f` holds the frequency bin centers in cycles
    per unit of the sample spacing, with zero at the start. For instance,
    if the sample spacing is in seconds, the frequency unit is cycles/second.

    Given a window length `n` and a sample spacing `d`::

      f = [0, 1, ...,     n/2-1,     n/2] / (d*n)   if n is even
      f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n)   if n is odd

    Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)
    the Nyquist frequency component is considered to be positive.

    Parameters
    ----------
    n : int
        Window length.
    d : scalar, optional
        Sample spacing (inverse of the sampling rate). Defaults to 1.
    xp : array_namespace, optional
        The namespace for the return array. Default is None, where NumPy is used.
    device : device, optional
        The device for the return array.
        Only valid when `xp.fft.rfftfreq` implements the device parameter.

    Returns
    -------
    f : ndarray
        Array of length ``n//2 + 1`` containing the sample frequencies.

    Examples
    --------
    >>> import numpy as np
    >>> import scipy.fft
    >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)
    >>> fourier = scipy.fft.rfft(signal)
    >>> n = signal.size
    >>> sample_rate = 100
    >>> freq = scipy.fft.fftfreq(n, d=1./sample_rate)
    >>> freq
    array([  0.,  10.,  20., ..., -30., -20., -10.])
    >>> freq = scipy.fft.rfftfreq(n, d=1./sample_rate)
    >>> freq
    array([ 0., 10., 20., 30., 40., 50.])

    """
    # Delegate to the namespace's own fft module only for non-NumPy
    # namespaces: numpy does not yet support the `device` keyword, so the
    # name check should be removed once it does.
    use_namespace = (
        xp is not None and hasattr(xp, 'fft') and xp.__name__ != 'numpy'
    )
    if use_namespace:
        return xp.fft.rfftfreq(n, d=d, device=device)
    if device is not None:
        raise ValueError('device parameter is not supported for input array type')
    return np.fft.rfftfreq(n, d=d)
286
+
287
+
288
+ def fftshift(x, axes=None):
289
+ """Shift the zero-frequency component to the center of the spectrum.
290
+
291
+ This function swaps half-spaces for all axes listed (defaults to all).
292
+ Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
293
+
294
+ Parameters
295
+ ----------
296
+ x : array_like
297
+ Input array.
298
+ axes : int or shape tuple, optional
299
+ Axes over which to shift. Default is None, which shifts all axes.
300
+
301
+ Returns
302
+ -------
303
+ y : ndarray
304
+ The shifted array.
305
+
306
+ See Also
307
+ --------
308
+ ifftshift : The inverse of `fftshift`.
309
+
310
+ Examples
311
+ --------
312
+ >>> import numpy as np
313
+ >>> freqs = np.fft.fftfreq(10, 0.1)
314
+ >>> freqs
315
+ array([ 0., 1., 2., ..., -3., -2., -1.])
316
+ >>> np.fft.fftshift(freqs)
317
+ array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
318
+
319
+ Shift the zero-frequency component only along the second axis:
320
+
321
+ >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
322
+ >>> freqs
323
+ array([[ 0., 1., 2.],
324
+ [ 3., 4., -4.],
325
+ [-3., -2., -1.]])
326
+ >>> np.fft.fftshift(freqs, axes=(1,))
327
+ array([[ 2., 0., 1.],
328
+ [-4., 3., 4.],
329
+ [-1., -3., -2.]])
330
+
331
+ """
332
+ xp = array_namespace(x)
333
+ if hasattr(xp, 'fft'):
334
+ return xp.fft.fftshift(x, axes=axes)
335
+ x = np.asarray(x)
336
+ y = np.fft.fftshift(x, axes=axes)
337
+ return xp.asarray(y)
338
+
339
+
340
+ def ifftshift(x, axes=None):
341
+ """The inverse of `fftshift`. Although identical for even-length `x`, the
342
+ functions differ by one sample for odd-length `x`.
343
+
344
+ Parameters
345
+ ----------
346
+ x : array_like
347
+ Input array.
348
+ axes : int or shape tuple, optional
349
+ Axes over which to calculate. Defaults to None, which shifts all axes.
350
+
351
+ Returns
352
+ -------
353
+ y : ndarray
354
+ The shifted array.
355
+
356
+ See Also
357
+ --------
358
+ fftshift : Shift zero-frequency component to the center of the spectrum.
359
+
360
+ Examples
361
+ --------
362
+ >>> import numpy as np
363
+ >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)
364
+ >>> freqs
365
+ array([[ 0., 1., 2.],
366
+ [ 3., 4., -4.],
367
+ [-3., -2., -1.]])
368
+ >>> np.fft.ifftshift(np.fft.fftshift(freqs))
369
+ array([[ 0., 1., 2.],
370
+ [ 3., 4., -4.],
371
+ [-3., -2., -1.]])
372
+
373
+ """
374
+ xp = array_namespace(x)
375
+ if hasattr(xp, 'fft'):
376
+ return xp.fft.ifftshift(x, axes=axes)
377
+ x = np.asarray(x)
378
+ y = np.fft.ifftshift(x, axes=axes)
379
+ return xp.asarray(y)
parrot/lib/python3.10/site-packages/scipy/fft/_pocketfft/LICENSE.md ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (C) 2010-2019 Max-Planck-Society
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without modification,
5
+ are permitted provided that the following conditions are met:
6
+
7
+ * Redistributions of source code must retain the above copyright notice, this
8
+ list of conditions and the following disclaimer.
9
+ * Redistributions in binary form must reproduce the above copyright notice, this
10
+ list of conditions and the following disclaimer in the documentation and/or
11
+ other materials provided with the distribution.
12
+ * Neither the name of the copyright holder nor the names of its contributors may
13
+ be used to endorse or promote products derived from this software without
14
+ specific prior written permission.
15
+
16
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
20
+ ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
23
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
parrot/lib/python3.10/site-packages/scipy/fft/_realtransforms.py ADDED
@@ -0,0 +1,693 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._basic import _dispatch
2
+ from scipy._lib.uarray import Dispatchable
3
+ import numpy as np
4
+
5
+ __all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']
6
+
7
+
8
@_dispatch
def dctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
         workers=None, *, orthogonalize=None):
    """
    Return multidimensional Discrete Cosine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` (see below) are None,
        `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
        ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros.
        If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length
        ``s[i]``.
        If any element of `s` is -1, the size of the corresponding dimension of
        `x` is used.
    axes : int or array_like of ints or None, optional
        Axes over which the DCT is computed. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized DCT variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idctn : Inverse multidimensional DCT

    Notes
    -----
    For full details of the DCT types and normalization modes, as well as
    references, see `dct`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fft import dctn, idctn
    >>> rng = np.random.default_rng()
    >>> y = rng.standard_normal((16, 16))
    >>> np.allclose(y, idctn(dctn(y)))
    True

    """
    # This body is not the transform: under uarray dispatch (`_dispatch`) it
    # only marks `x` as the dispatchable array argument for the multimethod.
    return (Dispatchable(x, np.ndarray),)
71
+
72
+
73
@_dispatch
def idctn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
          workers=None, orthogonalize=None):
    """
    Return multidimensional Inverse Discrete Cosine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` (see below) are
        None, `s` is ``x.shape``; if `s` is None but `axes` is
        not None, then `s` is ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros.
        If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length
        ``s[i]``.
        If any element of `s` is -1, the size of the corresponding dimension of
        `x` is used.
    axes : int or array_like of ints or None, optional
        Axes over which the IDCT is computed. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized IDCT variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    dctn : multidimensional DCT

    Notes
    -----
    For full details of the IDCT types and normalization modes, as well as
    references, see `idct`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fft import dctn, idctn
    >>> rng = np.random.default_rng()
    >>> y = rng.standard_normal((16, 16))
    >>> np.allclose(y, idctn(dctn(y)))
    True

    """
    # This body is not the transform: under uarray dispatch (`_dispatch`) it
    # only marks `x` as the dispatchable array argument for the multimethod.
    return (Dispatchable(x, np.ndarray),)
136
+
137
+
138
@_dispatch
def dstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
         workers=None, orthogonalize=None):
    """
    Return multidimensional Discrete Sine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` (see below) are None,
        `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
        ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros.
        If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length
        ``s[i]``.
        If any element of `s` is -1, the size of the corresponding dimension
        of `x` is used.
    axes : int or array_like of ints or None, optional
        Axes over which the DST is computed. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized DST variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idstn : Inverse multidimensional DST

    Notes
    -----
    For full details of the DST types and normalization modes, as well as
    references, see `dst`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fft import dstn, idstn
    >>> rng = np.random.default_rng()
    >>> y = rng.standard_normal((16, 16))
    >>> np.allclose(y, idstn(dstn(y)))
    True

    """
    # This body is not the transform: under uarray dispatch (`_dispatch`) it
    # only marks `x` as the dispatchable array argument for the multimethod.
    return (Dispatchable(x, np.ndarray),)
201
+
202
+
203
@_dispatch
def idstn(x, type=2, s=None, axes=None, norm=None, overwrite_x=False,
          workers=None, orthogonalize=None):
    """
    Return multidimensional Inverse Discrete Sine Transform along the specified axes.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DST (see Notes). Default type is 2.
    s : int or array_like of ints or None, optional
        The shape of the result. If both `s` and `axes` (see below) are None,
        `s` is ``x.shape``; if `s` is None but `axes` is not None, then `s` is
        ``numpy.take(x.shape, axes, axis=0)``.
        If ``s[i] > x.shape[i]``, the ith dimension of the input is padded with zeros.
        If ``s[i] < x.shape[i]``, the ith dimension of the input is truncated to length
        ``s[i]``.
        If any element of `s` is -1, the size of the corresponding dimension of
        `x` is used.
    axes : int or array_like of ints or None, optional
        Axes over which the IDST is computed. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized IDST variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    dstn : multidimensional DST

    Notes
    -----
    For full details of the IDST types and normalization modes, as well as
    references, see `idst`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.fft import dstn, idstn
    >>> rng = np.random.default_rng()
    >>> y = rng.standard_normal((16, 16))
    >>> np.allclose(y, idstn(dstn(y)))
    True

    """
    # This body is not the transform: under uarray dispatch (`_dispatch`) it
    # only marks `x` as the dispatchable array argument for the multimethod.
    return (Dispatchable(x, np.ndarray),)
266
+
267
+
268
@_dispatch
def dct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None,
        orthogonalize=None):
    r"""Return the Discrete Cosine Transform of arbitrary type sequence x.

    Parameters
    ----------
    x : array_like
        The input array.
    type : {1, 2, 3, 4}, optional
        Type of the DCT (see Notes). Default type is 2.
    n : int, optional
        Length of the transform. If ``n < x.shape[axis]``, `x` is
        truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
        default results in ``n = x.shape[axis]``.
    axis : int, optional
        Axis along which the dct is computed; the default is over the
        last axis (i.e., ``axis=-1``).
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see Notes). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    orthogonalize : bool, optional
        Whether to use the orthogonalized DCT variant (see Notes).
        Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.

        .. versionadded:: 1.8.0

    Returns
    -------
    y : ndarray of real
        The transformed input array.

    See Also
    --------
    idct : Inverse DCT

    Notes
    -----
    For a single dimension array ``x``, ``dct(x, norm='ortho')`` is equal to
    MATLAB ``dct(x)``.

    .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the direct
                 correspondence with the direct Fourier transform. To recover
                 it you must specify ``orthogonalize=False``.

    For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same
    overall factor in both directions. By default, the transform is also
    orthogonalized which for types 1, 2 and 3 means the transform definition is
    modified to give orthogonality of the DCT matrix (see below).

    For ``norm="backward"``, there is no scaling on `dct` and the `idct` is
    scaled by ``1/N`` where ``N`` is the "logical" size of the DCT. For
    ``norm="forward"`` the ``1/N`` normalization is applied to the forward
    `dct` instead and the `idct` is unnormalized.

    There are, theoretically, 8 types of the DCT, only the first 4 types are
    implemented in SciPy. 'The' DCT generally refers to DCT type 2, and 'the'
    Inverse DCT generally refers to DCT type 3.

    **Type I**

    There are several definitions of the DCT-I; we use the following
    (for ``norm="backward"``)

    .. math::

       y_k = x_0 + (-1)^k x_{N-1} + 2 \sum_{n=1}^{N-2} x_n \cos\left(
       \frac{\pi k n}{N-1} \right)

    If ``orthogonalize=True``, ``x[0]`` and ``x[N-1]`` are multiplied by a
    scaling factor of :math:`\sqrt{2}`, and ``y[0]`` and ``y[N-1]`` are divided
    by :math:`\sqrt{2}`. When combined with ``norm="ortho"``, this makes the
    corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``).

    .. note::
       The DCT-I is only supported for input size > 1.

    **Type II**

    There are several definitions of the DCT-II; we use the following
    (for ``norm="backward"``)

    .. math::

       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi k(2n+1)}{2N} \right)

    If ``orthogonalize=True``, ``y[0]`` is divided by :math:`\sqrt{2}` which,
    when combined with ``norm="ortho"``, makes the corresponding matrix of
    coefficients orthonormal (``O @ O.T = np.eye(N)``).

    **Type III**

    There are several definitions, we use the following (for
    ``norm="backward"``)

    .. math::

       y_k = x_0 + 2 \sum_{n=1}^{N-1} x_n \cos\left(\frac{\pi(2k+1)n}{2N}\right)

    If ``orthogonalize=True``, ``x[0]`` terms are multiplied by
    :math:`\sqrt{2}` which, when combined with ``norm="ortho"``, makes the
    corresponding matrix of coefficients orthonormal (``O @ O.T = np.eye(N)``).

    The (unnormalized) DCT-III is the inverse of the (unnormalized) DCT-II, up
    to a factor `2N`. The orthonormalized DCT-III is exactly the inverse of
    the orthonormalized DCT-II.

    **Type IV**

    There are several definitions of the DCT-IV; we use the following
    (for ``norm="backward"``)

    .. math::

       y_k = 2 \sum_{n=0}^{N-1} x_n \cos\left(\frac{\pi(2k+1)(2n+1)}{4N} \right)

    ``orthogonalize`` has no effect here, as the DCT-IV matrix is already
    orthogonal up to a scale factor of ``2N``.

    References
    ----------
    .. [1] 'A Fast Cosine Transform in One and Two Dimensions', by J.
           Makhoul, `IEEE Transactions on acoustics, speech and signal
           processing` vol. 28(1), pp. 27-34,
           :doi:`10.1109/TASSP.1980.1163351` (1980).
    .. [2] Wikipedia, "Discrete cosine transform",
           https://en.wikipedia.org/wiki/Discrete_cosine_transform

    Examples
    --------
    The Type 1 DCT is equivalent to the FFT (though faster) for real,
    even-symmetrical inputs. The output is also real and even-symmetrical.
    Half of the FFT input is used to generate half of the FFT output:

    >>> from scipy.fft import fft, dct
    >>> import numpy as np
    >>> fft(np.array([4., 3., 5., 10., 5., 3.])).real
    array([ 30.,  -8.,   6.,  -2.,   6.,  -8.])
    >>> dct(np.array([4., 3., 5., 10.]), 1)
    array([ 30.,  -8.,   6.,  -2.])

    """
    # This body is not the transform: under uarray dispatch (`_dispatch`) it
    # only marks `x` as the dispatchable array argument for the multimethod.
    return (Dispatchable(x, np.ndarray),)
416
+
417
+
418
+ @_dispatch
419
+ def idct(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False,
420
+ workers=None, orthogonalize=None):
421
+ """
422
+ Return the Inverse Discrete Cosine Transform of an arbitrary type sequence.
423
+
424
+ Parameters
425
+ ----------
426
+ x : array_like
427
+ The input array.
428
+ type : {1, 2, 3, 4}, optional
429
+ Type of the DCT (see Notes). Default type is 2.
430
+ n : int, optional
431
+ Length of the transform. If ``n < x.shape[axis]``, `x` is
432
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
433
+ default results in ``n = x.shape[axis]``.
434
+ axis : int, optional
435
+ Axis along which the idct is computed; the default is over the
436
+ last axis (i.e., ``axis=-1``).
437
+ norm : {"backward", "ortho", "forward"}, optional
438
+ Normalization mode (see Notes). Default is "backward".
439
+ overwrite_x : bool, optional
440
+ If True, the contents of `x` can be destroyed; the default is False.
441
+ workers : int, optional
442
+ Maximum number of workers to use for parallel computation. If negative,
443
+ the value wraps around from ``os.cpu_count()``.
444
+ See :func:`~scipy.fft.fft` for more details.
445
+ orthogonalize : bool, optional
446
+ Whether to use the orthogonalized IDCT variant (see Notes).
447
+ Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
448
+
449
+ .. versionadded:: 1.8.0
450
+
451
+ Returns
452
+ -------
453
+ idct : ndarray of real
454
+ The transformed input array.
455
+
456
+ See Also
457
+ --------
458
+ dct : Forward DCT
459
+
460
+ Notes
461
+ -----
462
+ For a single dimension array `x`, ``idct(x, norm='ortho')`` is equal to
463
+ MATLAB ``idct(x)``.
464
+
465
+ .. warning:: For ``type in {1, 2, 3}``, ``norm="ortho"`` breaks the direct
466
+ correspondence with the inverse direct Fourier transform. To
467
+ recover it you must specify ``orthogonalize=False``.
468
+
469
+ For ``norm="ortho"`` both the `dct` and `idct` are scaled by the same
470
+ overall factor in both directions. By default, the transform is also
471
+ orthogonalized which for types 1, 2 and 3 means the transform definition is
472
+ modified to give orthogonality of the IDCT matrix (see `dct` for the full
473
+ definitions).
474
+
475
+ 'The' IDCT is the IDCT-II, which is the same as the normalized DCT-III.
476
+
477
+ The IDCT is equivalent to a normal DCT except for the normalization and
478
+ type. DCT type 1 and 4 are their own inverse and DCTs 2 and 3 are each
479
+ other's inverses.
480
+
481
+ Examples
482
+ --------
483
+ The Type 1 DCT is equivalent to the DFT for real, even-symmetrical
484
+ inputs. The output is also real and even-symmetrical. Half of the IFFT
485
+ input is used to generate half of the IFFT output:
486
+
487
+ >>> from scipy.fft import ifft, idct
488
+ >>> import numpy as np
489
+ >>> ifft(np.array([ 30., -8., 6., -2., 6., -8.])).real
490
+ array([ 4., 3., 5., 10., 5., 3.])
491
+ >>> idct(np.array([ 30., -8., 6., -2.]), 1)
492
+ array([ 4., 3., 5., 10.])
493
+
494
+ """
495
+ return (Dispatchable(x, np.ndarray),)
496
+
497
+
498
+ @_dispatch
499
+ def dst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False, workers=None,
500
+ orthogonalize=None):
501
+ r"""
502
+ Return the Discrete Sine Transform of arbitrary type sequence x.
503
+
504
+ Parameters
505
+ ----------
506
+ x : array_like
507
+ The input array.
508
+ type : {1, 2, 3, 4}, optional
509
+ Type of the DST (see Notes). Default type is 2.
510
+ n : int, optional
511
+ Length of the transform. If ``n < x.shape[axis]``, `x` is
512
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
513
+ default results in ``n = x.shape[axis]``.
514
+ axis : int, optional
515
+ Axis along which the dst is computed; the default is over the
516
+ last axis (i.e., ``axis=-1``).
517
+ norm : {"backward", "ortho", "forward"}, optional
518
+ Normalization mode (see Notes). Default is "backward".
519
+ overwrite_x : bool, optional
520
+ If True, the contents of `x` can be destroyed; the default is False.
521
+ workers : int, optional
522
+ Maximum number of workers to use for parallel computation. If negative,
523
+ the value wraps around from ``os.cpu_count()``.
524
+ See :func:`~scipy.fft.fft` for more details.
525
+ orthogonalize : bool, optional
526
+ Whether to use the orthogonalized DST variant (see Notes).
527
+ Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
528
+
529
+ .. versionadded:: 1.8.0
530
+
531
+ Returns
532
+ -------
533
+ dst : ndarray of reals
534
+ The transformed input array.
535
+
536
+ See Also
537
+ --------
538
+ idst : Inverse DST
539
+
540
+ Notes
541
+ -----
542
+ .. warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct
543
+ correspondence with the direct Fourier transform. To recover
544
+ it you must specify ``orthogonalize=False``.
545
+
546
+ For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same
547
+ overall factor in both directions. By default, the transform is also
548
+ orthogonalized which for types 2 and 3 means the transform definition is
549
+ modified to give orthogonality of the DST matrix (see below).
550
+
551
+ For ``norm="backward"``, there is no scaling on the `dst` and the `idst` is
552
+ scaled by ``1/N`` where ``N`` is the "logical" size of the DST.
553
+
554
+ There are, theoretically, 8 types of the DST for different combinations of
555
+ even/odd boundary conditions and boundary off sets [1]_, only the first
556
+ 4 types are implemented in SciPy.
557
+
558
+ **Type I**
559
+
560
+ There are several definitions of the DST-I; we use the following for
561
+ ``norm="backward"``. DST-I assumes the input is odd around :math:`n=-1` and
562
+ :math:`n=N`.
563
+
564
+ .. math::
565
+
566
+ y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(n+1)}{N+1}\right)
567
+
568
+ Note that the DST-I is only supported for input size > 1.
569
+ The (unnormalized) DST-I is its own inverse, up to a factor :math:`2(N+1)`.
570
+ The orthonormalized DST-I is exactly its own inverse.
571
+
572
+ ``orthogonalize`` has no effect here, as the DST-I matrix is already
573
+ orthogonal up to a scale factor of ``2N``.
574
+
575
+ **Type II**
576
+
577
+ There are several definitions of the DST-II; we use the following for
578
+ ``norm="backward"``. DST-II assumes the input is odd around :math:`n=-1/2` and
579
+ :math:`n=N-1/2`; the output is odd around :math:`k=-1` and even around :math:`k=N-1`
580
+
581
+ .. math::
582
+
583
+ y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(k+1)(2n+1)}{2N}\right)
584
+
585
+ If ``orthogonalize=True``, ``y[-1]`` is divided :math:`\sqrt{2}` which, when
586
+ combined with ``norm="ortho"``, makes the corresponding matrix of
587
+ coefficients orthonormal (``O @ O.T = np.eye(N)``).
588
+
589
+ **Type III**
590
+
591
+ There are several definitions of the DST-III, we use the following (for
592
+ ``norm="backward"``). DST-III assumes the input is odd around :math:`n=-1` and
593
+ even around :math:`n=N-1`
594
+
595
+ .. math::
596
+
597
+ y_k = (-1)^k x_{N-1} + 2 \sum_{n=0}^{N-2} x_n \sin\left(
598
+ \frac{\pi(2k+1)(n+1)}{2N}\right)
599
+
600
+ If ``orthogonalize=True``, ``x[-1]`` is multiplied by :math:`\sqrt{2}`
601
+ which, when combined with ``norm="ortho"``, makes the corresponding matrix
602
+ of coefficients orthonormal (``O @ O.T = np.eye(N)``).
603
+
604
+ The (unnormalized) DST-III is the inverse of the (unnormalized) DST-II, up
605
+ to a factor :math:`2N`. The orthonormalized DST-III is exactly the inverse of the
606
+ orthonormalized DST-II.
607
+
608
+ **Type IV**
609
+
610
+ There are several definitions of the DST-IV, we use the following (for
611
+ ``norm="backward"``). DST-IV assumes the input is odd around :math:`n=-0.5` and
612
+ even around :math:`n=N-0.5`
613
+
614
+ .. math::
615
+
616
+ y_k = 2 \sum_{n=0}^{N-1} x_n \sin\left(\frac{\pi(2k+1)(2n+1)}{4N}\right)
617
+
618
+ ``orthogonalize`` has no effect here, as the DST-IV matrix is already
619
+ orthogonal up to a scale factor of ``2N``.
620
+
621
+ The (unnormalized) DST-IV is its own inverse, up to a factor :math:`2N`. The
622
+ orthonormalized DST-IV is exactly its own inverse.
623
+
624
+ References
625
+ ----------
626
+ .. [1] Wikipedia, "Discrete sine transform",
627
+ https://en.wikipedia.org/wiki/Discrete_sine_transform
628
+
629
+ """
630
+ return (Dispatchable(x, np.ndarray),)
631
+
632
+
633
+ @_dispatch
634
+ def idst(x, type=2, n=None, axis=-1, norm=None, overwrite_x=False,
635
+ workers=None, orthogonalize=None):
636
+ """
637
+ Return the Inverse Discrete Sine Transform of an arbitrary type sequence.
638
+
639
+ Parameters
640
+ ----------
641
+ x : array_like
642
+ The input array.
643
+ type : {1, 2, 3, 4}, optional
644
+ Type of the DST (see Notes). Default type is 2.
645
+ n : int, optional
646
+ Length of the transform. If ``n < x.shape[axis]``, `x` is
647
+ truncated. If ``n > x.shape[axis]``, `x` is zero-padded. The
648
+ default results in ``n = x.shape[axis]``.
649
+ axis : int, optional
650
+ Axis along which the idst is computed; the default is over the
651
+ last axis (i.e., ``axis=-1``).
652
+ norm : {"backward", "ortho", "forward"}, optional
653
+ Normalization mode (see Notes). Default is "backward".
654
+ overwrite_x : bool, optional
655
+ If True, the contents of `x` can be destroyed; the default is False.
656
+ workers : int, optional
657
+ Maximum number of workers to use for parallel computation. If negative,
658
+ the value wraps around from ``os.cpu_count()``.
659
+ See :func:`~scipy.fft.fft` for more details.
660
+ orthogonalize : bool, optional
661
+ Whether to use the orthogonalized IDST variant (see Notes).
662
+ Defaults to ``True`` when ``norm="ortho"`` and ``False`` otherwise.
663
+
664
+ .. versionadded:: 1.8.0
665
+
666
+ Returns
667
+ -------
668
+ idst : ndarray of real
669
+ The transformed input array.
670
+
671
+ See Also
672
+ --------
673
+ dst : Forward DST
674
+
675
+ Notes
676
+ -----
677
+ .. warning:: For ``type in {2, 3}``, ``norm="ortho"`` breaks the direct
678
+ correspondence with the inverse direct Fourier transform.
679
+
680
+ For ``norm="ortho"`` both the `dst` and `idst` are scaled by the same
681
+ overall factor in both directions. By default, the transform is also
682
+ orthogonalized which for types 2 and 3 means the transform definition is
683
+ modified to give orthogonality of the DST matrix (see `dst` for the full
684
+ definitions).
685
+
686
+ 'The' IDST is the IDST-II, which is the same as the normalized DST-III.
687
+
688
+ The IDST is equivalent to a normal DST except for the normalization and
689
+ type. DST type 1 and 4 are their own inverse and DSTs 2 and 3 are each
690
+ other's inverses.
691
+
692
+ """
693
+ return (Dispatchable(x, np.ndarray),)
parrot/lib/python3.10/site-packages/scipy/fft/_realtransforms_backend.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from scipy._lib._array_api import array_namespace
2
+ import numpy as np
3
+ from . import _pocketfft
4
+
5
+ __all__ = ['dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn']
6
+
7
+
8
+ def _execute(pocketfft_func, x, type, s, axes, norm,
9
+ overwrite_x, workers, orthogonalize):
10
+ xp = array_namespace(x)
11
+ x = np.asarray(x)
12
+ y = pocketfft_func(x, type, s, axes, norm,
13
+ overwrite_x=overwrite_x, workers=workers,
14
+ orthogonalize=orthogonalize)
15
+ return xp.asarray(y)
16
+
17
+
18
+ def dctn(x, type=2, s=None, axes=None, norm=None,
19
+ overwrite_x=False, workers=None, *, orthogonalize=None):
20
+ return _execute(_pocketfft.dctn, x, type, s, axes, norm,
21
+ overwrite_x, workers, orthogonalize)
22
+
23
+
24
+ def idctn(x, type=2, s=None, axes=None, norm=None,
25
+ overwrite_x=False, workers=None, *, orthogonalize=None):
26
+ return _execute(_pocketfft.idctn, x, type, s, axes, norm,
27
+ overwrite_x, workers, orthogonalize)
28
+
29
+
30
+ def dstn(x, type=2, s=None, axes=None, norm=None,
31
+ overwrite_x=False, workers=None, orthogonalize=None):
32
+ return _execute(_pocketfft.dstn, x, type, s, axes, norm,
33
+ overwrite_x, workers, orthogonalize)
34
+
35
+
36
+ def idstn(x, type=2, s=None, axes=None, norm=None,
37
+ overwrite_x=False, workers=None, *, orthogonalize=None):
38
+ return _execute(_pocketfft.idstn, x, type, s, axes, norm,
39
+ overwrite_x, workers, orthogonalize)
40
+
41
+
42
+ def dct(x, type=2, n=None, axis=-1, norm=None,
43
+ overwrite_x=False, workers=None, orthogonalize=None):
44
+ return _execute(_pocketfft.dct, x, type, n, axis, norm,
45
+ overwrite_x, workers, orthogonalize)
46
+
47
+
48
+ def idct(x, type=2, n=None, axis=-1, norm=None,
49
+ overwrite_x=False, workers=None, orthogonalize=None):
50
+ return _execute(_pocketfft.idct, x, type, n, axis, norm,
51
+ overwrite_x, workers, orthogonalize)
52
+
53
+
54
+ def dst(x, type=2, n=None, axis=-1, norm=None,
55
+ overwrite_x=False, workers=None, orthogonalize=None):
56
+ return _execute(_pocketfft.dst, x, type, n, axis, norm,
57
+ overwrite_x, workers, orthogonalize)
58
+
59
+
60
+ def idst(x, type=2, n=None, axis=-1, norm=None,
61
+ overwrite_x=False, workers=None, orthogonalize=None):
62
+ return _execute(_pocketfft.idst, x, type, n, axis, norm,
63
+ overwrite_x, workers, orthogonalize)
parrot/lib/python3.10/site-packages/scipy/fft/tests/mock_backend.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import scipy.fft
3
+
4
+ class _MockFunction:
5
+ def __init__(self, return_value = None):
6
+ self.number_calls = 0
7
+ self.return_value = return_value
8
+ self.last_args = ([], {})
9
+
10
+ def __call__(self, *args, **kwargs):
11
+ self.number_calls += 1
12
+ self.last_args = (args, kwargs)
13
+ return self.return_value
14
+
15
+
16
+ fft = _MockFunction(np.random.random(10))
17
+ fft2 = _MockFunction(np.random.random(10))
18
+ fftn = _MockFunction(np.random.random(10))
19
+
20
+ ifft = _MockFunction(np.random.random(10))
21
+ ifft2 = _MockFunction(np.random.random(10))
22
+ ifftn = _MockFunction(np.random.random(10))
23
+
24
+ rfft = _MockFunction(np.random.random(10))
25
+ rfft2 = _MockFunction(np.random.random(10))
26
+ rfftn = _MockFunction(np.random.random(10))
27
+
28
+ irfft = _MockFunction(np.random.random(10))
29
+ irfft2 = _MockFunction(np.random.random(10))
30
+ irfftn = _MockFunction(np.random.random(10))
31
+
32
+ hfft = _MockFunction(np.random.random(10))
33
+ hfft2 = _MockFunction(np.random.random(10))
34
+ hfftn = _MockFunction(np.random.random(10))
35
+
36
+ ihfft = _MockFunction(np.random.random(10))
37
+ ihfft2 = _MockFunction(np.random.random(10))
38
+ ihfftn = _MockFunction(np.random.random(10))
39
+
40
+ dct = _MockFunction(np.random.random(10))
41
+ idct = _MockFunction(np.random.random(10))
42
+ dctn = _MockFunction(np.random.random(10))
43
+ idctn = _MockFunction(np.random.random(10))
44
+
45
+ dst = _MockFunction(np.random.random(10))
46
+ idst = _MockFunction(np.random.random(10))
47
+ dstn = _MockFunction(np.random.random(10))
48
+ idstn = _MockFunction(np.random.random(10))
49
+
50
+ fht = _MockFunction(np.random.random(10))
51
+ ifht = _MockFunction(np.random.random(10))
52
+
53
+
54
+ __ua_domain__ = "numpy.scipy.fft"
55
+
56
+
57
+ _implements = {
58
+ scipy.fft.fft: fft,
59
+ scipy.fft.fft2: fft2,
60
+ scipy.fft.fftn: fftn,
61
+ scipy.fft.ifft: ifft,
62
+ scipy.fft.ifft2: ifft2,
63
+ scipy.fft.ifftn: ifftn,
64
+ scipy.fft.rfft: rfft,
65
+ scipy.fft.rfft2: rfft2,
66
+ scipy.fft.rfftn: rfftn,
67
+ scipy.fft.irfft: irfft,
68
+ scipy.fft.irfft2: irfft2,
69
+ scipy.fft.irfftn: irfftn,
70
+ scipy.fft.hfft: hfft,
71
+ scipy.fft.hfft2: hfft2,
72
+ scipy.fft.hfftn: hfftn,
73
+ scipy.fft.ihfft: ihfft,
74
+ scipy.fft.ihfft2: ihfft2,
75
+ scipy.fft.ihfftn: ihfftn,
76
+ scipy.fft.dct: dct,
77
+ scipy.fft.idct: idct,
78
+ scipy.fft.dctn: dctn,
79
+ scipy.fft.idctn: idctn,
80
+ scipy.fft.dst: dst,
81
+ scipy.fft.idst: idst,
82
+ scipy.fft.dstn: dstn,
83
+ scipy.fft.idstn: idstn,
84
+ scipy.fft.fht: fht,
85
+ scipy.fft.ifht: ifht
86
+ }
87
+
88
+
89
+ def __ua_function__(method, args, kwargs):
90
+ fn = _implements.get(method)
91
+ return (fn(*args, **kwargs) if fn is not None
92
+ else NotImplemented)
parrot/lib/python3.10/site-packages/scipy/fft/tests/test_backend.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+
3
+ import numpy as np
4
+ import scipy.fft
5
+ from scipy.fft import _fftlog, _pocketfft, set_backend
6
+ from scipy.fft.tests import mock_backend
7
+
8
+ from numpy.testing import assert_allclose, assert_equal
9
+ import pytest
10
+
11
+ fnames = ('fft', 'fft2', 'fftn',
12
+ 'ifft', 'ifft2', 'ifftn',
13
+ 'rfft', 'rfft2', 'rfftn',
14
+ 'irfft', 'irfft2', 'irfftn',
15
+ 'dct', 'idct', 'dctn', 'idctn',
16
+ 'dst', 'idst', 'dstn', 'idstn',
17
+ 'fht', 'ifht')
18
+
19
+ np_funcs = (np.fft.fft, np.fft.fft2, np.fft.fftn,
20
+ np.fft.ifft, np.fft.ifft2, np.fft.ifftn,
21
+ np.fft.rfft, np.fft.rfft2, np.fft.rfftn,
22
+ np.fft.irfft, np.fft.irfft2, np.fft.irfftn,
23
+ np.fft.hfft, _pocketfft.hfft2, _pocketfft.hfftn, # np has no hfftn
24
+ np.fft.ihfft, _pocketfft.ihfft2, _pocketfft.ihfftn,
25
+ _pocketfft.dct, _pocketfft.idct, _pocketfft.dctn, _pocketfft.idctn,
26
+ _pocketfft.dst, _pocketfft.idst, _pocketfft.dstn, _pocketfft.idstn,
27
+ # must provide required kwargs for fht, ifht
28
+ partial(_fftlog.fht, dln=2, mu=0.5),
29
+ partial(_fftlog.ifht, dln=2, mu=0.5))
30
+
31
+ funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn,
32
+ scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn,
33
+ scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn,
34
+ scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn,
35
+ scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn,
36
+ scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn,
37
+ scipy.fft.dct, scipy.fft.idct, scipy.fft.dctn, scipy.fft.idctn,
38
+ scipy.fft.dst, scipy.fft.idst, scipy.fft.dstn, scipy.fft.idstn,
39
+ # must provide required kwargs for fht, ifht
40
+ partial(scipy.fft.fht, dln=2, mu=0.5),
41
+ partial(scipy.fft.ifht, dln=2, mu=0.5))
42
+
43
+ mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn,
44
+ mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn,
45
+ mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn,
46
+ mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn,
47
+ mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn,
48
+ mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn,
49
+ mock_backend.dct, mock_backend.idct,
50
+ mock_backend.dctn, mock_backend.idctn,
51
+ mock_backend.dst, mock_backend.idst,
52
+ mock_backend.dstn, mock_backend.idstn,
53
+ mock_backend.fht, mock_backend.ifht)
54
+
55
+
56
+ @pytest.mark.parametrize("func, np_func, mock", zip(funcs, np_funcs, mocks))
57
+ def test_backend_call(func, np_func, mock):
58
+ x = np.arange(20).reshape((10,2))
59
+ answer = np_func(x.astype(np.float64))
60
+ assert_allclose(func(x), answer, atol=1e-10)
61
+
62
+ with set_backend(mock_backend, only=True):
63
+ mock.number_calls = 0
64
+ y = func(x)
65
+ assert_equal(y, mock.return_value)
66
+ assert_equal(mock.number_calls, 1)
67
+
68
+ assert_allclose(func(x), answer, atol=1e-10)
69
+
70
+
71
+ plan_funcs = (scipy.fft.fft, scipy.fft.fft2, scipy.fft.fftn,
72
+ scipy.fft.ifft, scipy.fft.ifft2, scipy.fft.ifftn,
73
+ scipy.fft.rfft, scipy.fft.rfft2, scipy.fft.rfftn,
74
+ scipy.fft.irfft, scipy.fft.irfft2, scipy.fft.irfftn,
75
+ scipy.fft.hfft, scipy.fft.hfft2, scipy.fft.hfftn,
76
+ scipy.fft.ihfft, scipy.fft.ihfft2, scipy.fft.ihfftn)
77
+
78
+ plan_mocks = (mock_backend.fft, mock_backend.fft2, mock_backend.fftn,
79
+ mock_backend.ifft, mock_backend.ifft2, mock_backend.ifftn,
80
+ mock_backend.rfft, mock_backend.rfft2, mock_backend.rfftn,
81
+ mock_backend.irfft, mock_backend.irfft2, mock_backend.irfftn,
82
+ mock_backend.hfft, mock_backend.hfft2, mock_backend.hfftn,
83
+ mock_backend.ihfft, mock_backend.ihfft2, mock_backend.ihfftn)
84
+
85
+
86
+ @pytest.mark.parametrize("func, mock", zip(plan_funcs, plan_mocks))
87
+ def test_backend_plan(func, mock):
88
+ x = np.arange(20).reshape((10, 2))
89
+
90
+ with pytest.raises(NotImplementedError, match='precomputed plan'):
91
+ func(x, plan='foo')
92
+
93
+ with set_backend(mock_backend, only=True):
94
+ mock.number_calls = 0
95
+ y = func(x, plan='foo')
96
+ assert_equal(y, mock.return_value)
97
+ assert_equal(mock.number_calls, 1)
98
+ assert_equal(mock.last_args[1]['plan'], 'foo')
parrot/lib/python3.10/site-packages/scipy/fftpack/convolve.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73f5380c42f41477b0daffdba65f1550dc74ac7c617ae7aaa11b8c8fd30e0a18
3
+ size 272968
parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes-early-eof-no-data.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:617d60f2a7423801b5eaf5fd1baab84ac7c28f6655935f7d8f30d0f12d335982
3
+ size 72
parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-44100Hz-le-1ch-4bytes.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6a4c2be981dcf7ada79c54dd558a08073e742305fe3bc665e877d8820ec1229
3
+ size 17720
parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-48000Hz-2ch-64bit-float-le-wavex.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12a6019c4813c532af69302d740e47225e3b0821488def77cb885ac11e8fcaed
3
+ size 7792
parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-24bit-inconsistent.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b76320ae2de1e892d00de92bc0884304e686e3a394cc7ca7533d2929bbcea4d5
3
+ size 90
parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-36bit.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a22315b1057dfaa181cff670b1f024800f416573635db1f8cf1086bef753d116
3
+ size 120
parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-3ch-5S-45bit.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7bded7a0facf1890c887c9ceea68a2fff562639c95966b783a73d0a03375763b
3
+ size 134
parrot/lib/python3.10/site-packages/scipy/io/tests/data/test-8000Hz-le-4ch-9S-12bit.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d45ebb87cb6bdb1cf40b92b6d53f72f60b29706034aa748ecec976be302b00cb
3
+ size 116
parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.96 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/_geometric_slerp.cpython-310.pyc ADDED
Binary file (7.2 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/_plotutils.cpython-310.pyc ADDED
Binary file (6.66 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/_procrustes.cpython-310.pyc ADDED
Binary file (4.25 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/ckdtree.cpython-310.pyc ADDED
Binary file (586 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/distance.cpython-310.pyc ADDED
Binary file (80.5 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/kdtree.cpython-310.pyc ADDED
Binary file (665 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/spatial/__pycache__/qhull.cpython-310.pyc ADDED
Binary file (650 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/spatial/qhull_src/COPYING.txt ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Qhull, Copyright (c) 1993-2019
2
+
3
+ C.B. Barber
4
+ Arlington, MA
5
+
6
+ and
7
+
8
+ The National Science and Technology Research Center for
9
+ Computation and Visualization of Geometric Structures
10
+ (The Geometry Center)
11
+ University of Minnesota
12
+
13
+ email: qhull@qhull.org
14
+
15
+ This software includes Qhull from C.B. Barber and The Geometry Center.
16
+ Qhull is copyrighted as noted above. Qhull is free software and may
17
+ be obtained via http from www.qhull.org. It may be freely copied, modified,
18
+ and redistributed under the following conditions:
19
+
20
+ 1. All copyright notices must remain intact in all files.
21
+
22
+ 2. A copy of this text file must be distributed along with any copies
23
+ of Qhull that you redistribute; this includes copies that you have
24
+ modified, or copies of programs or other software products that
25
+ include Qhull.
26
+
27
+ 3. If you modify Qhull, you must include a notice giving the
28
+ name of the person performing the modification, the date of
29
+ modification, and the reason for such modification.
30
+
31
+ 4. When distributing modified versions of Qhull, or other software
32
+ products that include Qhull, you must provide notice that the original
33
+ source code may be obtained as noted above.
34
+
35
+ 5. There is no warranty or other guarantee of fitness for Qhull, it is
36
+ provided solely "as is". Bug reports or fixes may be sent to
37
+ qhull_bug@qhull.org; the authors may or may not act on them as
38
+ they desire.
parrot/lib/python3.10/site-packages/scipy/spatial/tests/data/pdist-cosine-ml-iris.txt ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/scipy/spatial/tests/data/pdist-minkowski-3.2-ml-iris.txt ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/scipy/spatial/transform/__init__.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Spatial Transformations (:mod:`scipy.spatial.transform`)
3
+ ========================================================
4
+
5
+ .. currentmodule:: scipy.spatial.transform
6
+
7
+ This package implements various spatial transformations. For now,
8
+ only rotations are supported.
9
+
10
+ Rotations in 3 dimensions
11
+ -------------------------
12
+ .. autosummary::
13
+ :toctree: generated/
14
+
15
+ Rotation
16
+ Slerp
17
+ RotationSpline
18
+ """
19
+ from ._rotation import Rotation, Slerp
20
+ from ._rotation_spline import RotationSpline
21
+
22
+ # Deprecated namespaces, to be removed in v2.0.0
23
+ from . import rotation
24
+
25
+ __all__ = ['Rotation', 'Slerp', 'RotationSpline']
26
+
27
+ from scipy._lib._testutils import PytestTester
28
+ test = PytestTester(__name__)
29
+ del PytestTester
parrot/lib/python3.10/site-packages/scipy/spatial/transform/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (860 Bytes). View file