ADAPT-Chase commited on
Commit
9c6d378
·
verified ·
1 Parent(s): 9000bd0

Add files using upload-large-folder tool

Browse files
Files changed (20) hide show
  1. tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/__init__.cpython-312.pyc +0 -0
  2. tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/_backends.cpython-312.pyc +0 -0
  3. tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/_torch_specific.cpython-312.pyc +0 -0
  4. tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/array_api.cpython-312.pyc +0 -0
  5. tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/einops.cpython-312.pyc +0 -0
  6. tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/packing.cpython-312.pyc +0 -0
  7. tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/parsing.cpython-312.pyc +0 -0
  8. tool_server/.venv/lib/python3.12/site-packages/einops/layers/oneflow.py +54 -0
  9. tool_server/.venv/lib/python3.12/site-packages/einops/layers/paddle.py +58 -0
  10. tool_server/.venv/lib/python3.12/site-packages/einops/layers/tensorflow.py +103 -0
  11. tool_server/.venv/lib/python3.12/site-packages/einops/layers/torch.py +67 -0
  12. tool_server/.venv/lib/python3.12/site-packages/einops/tests/__init__.py +109 -0
  13. tool_server/.venv/lib/python3.12/site-packages/einops/tests/run_tests.py +85 -0
  14. tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_einsum.py +352 -0
  15. tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_examples.py +297 -0
  16. tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_layers.py +469 -0
  17. tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_ops.py +651 -0
  18. tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_other.py +291 -0
  19. tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_packing.py +309 -0
  20. tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_parsing.py +126 -0
tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (754 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/_backends.cpython-312.pyc ADDED
Binary file (41.6 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/_torch_specific.cpython-312.pyc ADDED
Binary file (6.15 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/array_api.cpython-312.pyc ADDED
Binary file (6.45 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/einops.cpython-312.pyc ADDED
Binary file (39.5 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/packing.cpython-312.pyc ADDED
Binary file (8.63 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/einops/__pycache__/parsing.cpython-312.pyc ADDED
Binary file (7.57 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/einops/layers/oneflow.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Dict, cast
2
+
3
+ import oneflow as flow
4
+
5
+ from . import RearrangeMixin, ReduceMixin
6
+ from ._einmix import _EinmixMixin
7
+
8
+ __author__ = "Tianhe Ren & Depeng Liang"
9
+
10
+
11
+ class Rearrange(RearrangeMixin, flow.nn.Module):
12
+ def forward(self, input):
13
+ return self._apply_recipe(input)
14
+
15
+
16
+ class Reduce(ReduceMixin, flow.nn.Module):
17
+ def forward(self, input):
18
+ return self._apply_recipe(input)
19
+
20
+
21
+ class EinMix(_EinmixMixin, flow.nn.Module):
22
+ def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
23
+ self.weight = flow.nn.Parameter(
24
+ flow.zeros(weight_shape).uniform_(-weight_bound, weight_bound), requires_grad=True
25
+ )
26
+ if bias_shape is not None:
27
+ self.bias = flow.nn.Parameter(flow.zeros(bias_shape).uniform_(-bias_bound, bias_bound), requires_grad=True)
28
+ else:
29
+ self.bias = None
30
+
31
+ def _create_rearrange_layers(
32
+ self,
33
+ pre_reshape_pattern: Optional[str],
34
+ pre_reshape_lengths: Optional[Dict],
35
+ post_reshape_pattern: Optional[str],
36
+ post_reshape_lengths: Optional[Dict],
37
+ ):
38
+ self.pre_rearrange = None
39
+ if pre_reshape_pattern is not None:
40
+ self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
41
+
42
+ self.post_rearrange = None
43
+ if post_reshape_pattern is not None:
44
+ self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
45
+
46
+ def forward(self, input):
47
+ if self.pre_rearrange is not None:
48
+ input = self.pre_rearrange(input)
49
+ result = flow.einsum(self.einsum_pattern, input, self.weight)
50
+ if self.bias is not None:
51
+ result += self.bias
52
+ if self.post_rearrange is not None:
53
+ result = self.post_rearrange(result)
54
+ return result
tool_server/.venv/lib/python3.12/site-packages/einops/layers/paddle.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Dict, cast
2
+
3
+ import paddle
4
+
5
+ from . import RearrangeMixin, ReduceMixin
6
+ from ._einmix import _EinmixMixin
7
+
8
+ __author__ = "PaddlePaddle"
9
+
10
+
11
+ class Rearrange(RearrangeMixin, paddle.nn.Layer):
12
+ def forward(self, input):
13
+ return self._apply_recipe(input)
14
+
15
+
16
+ class Reduce(ReduceMixin, paddle.nn.Layer):
17
+ def forward(self, input):
18
+ return self._apply_recipe(input)
19
+
20
+
21
+ class EinMix(_EinmixMixin, paddle.nn.Layer):
22
+ def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
23
+ self.weight = self.create_parameter(
24
+ weight_shape, default_initializer=paddle.nn.initializer.Uniform(-weight_bound, weight_bound)
25
+ )
26
+
27
+ if bias_shape is not None:
28
+ self.bias = self.create_parameter(
29
+ bias_shape, default_initializer=paddle.nn.initializer.Uniform(-bias_bound, bias_bound)
30
+ )
31
+ else:
32
+ self.bias = None
33
+
34
+ def _create_rearrange_layers(
35
+ self,
36
+ pre_reshape_pattern: Optional[str],
37
+ pre_reshape_lengths: Optional[Dict],
38
+ post_reshape_pattern: Optional[str],
39
+ post_reshape_lengths: Optional[Dict],
40
+ ):
41
+ self.pre_rearrange = None
42
+ if pre_reshape_pattern is not None:
43
+ self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
44
+
45
+ self.post_rearrange = None
46
+ if post_reshape_pattern is not None:
47
+ self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
48
+
49
+ def forward(self, input):
50
+ if self.pre_rearrange is not None:
51
+ input = self.pre_rearrange(input)
52
+
53
+ result = paddle.einsum(self.einsum_pattern, input, self.weight)
54
+ if self.bias is not None:
55
+ result += self.bias
56
+ if self.post_rearrange is not None:
57
+ result = self.post_rearrange(result)
58
+ return result
tool_server/.venv/lib/python3.12/site-packages/einops/layers/tensorflow.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Comment about tensorflow layers:
3
+ unfortunately instructions on creation of TF layers change constantly,
4
+ and changed way too many times at this point to remember what-compatible-where.
5
+
6
+ Layers in einops==0.7.0 (and several prior versions)
7
+ are compatible with TF 2.13
8
+
9
+ Layers in einops==0.8.0 were re-implemented
10
+ according to official instructions for TF 2.16
11
+
12
+ """
13
+
14
+ from typing import Optional, Dict, cast
15
+
16
+ import tensorflow as tf
17
+ from tensorflow.keras.layers import Layer
18
+
19
+
20
+ from . import RearrangeMixin, ReduceMixin
21
+ from ._einmix import _EinmixMixin
22
+
23
+
24
+ __author__ = "Alex Rogozhnikov"
25
+
26
+
27
+ class Rearrange(RearrangeMixin, Layer):
28
+ def build(self, input_shape):
29
+ pass # layer does not have any parameters to be initialized
30
+
31
+ def call(self, inputs):
32
+ return self._apply_recipe(inputs)
33
+
34
+ def get_config(self):
35
+ return {"pattern": self.pattern, **self.axes_lengths}
36
+
37
+
38
+ class Reduce(ReduceMixin, Layer):
39
+ def build(self, input_shape):
40
+ pass # layer does not have any parameters to be initialized
41
+
42
+ def call(self, inputs):
43
+ return self._apply_recipe(inputs)
44
+
45
+ def get_config(self):
46
+ return {"pattern": self.pattern, "reduction": self.reduction, **self.axes_lengths}
47
+
48
+
49
+ class EinMix(_EinmixMixin, Layer):
50
+ def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
51
+ # this method is called in __init__,
52
+ # but we postpone actual creation to build(), as TF instruction suggests
53
+ self._params = [weight_shape, weight_bound, bias_shape, bias_bound]
54
+
55
+ def _create_rearrange_layers(
56
+ self,
57
+ pre_reshape_pattern: Optional[str],
58
+ pre_reshape_lengths: Optional[Dict],
59
+ post_reshape_pattern: Optional[str],
60
+ post_reshape_lengths: Optional[Dict],
61
+ ):
62
+ self.pre_rearrange = None
63
+ if pre_reshape_pattern is not None:
64
+ self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
65
+
66
+ self.post_rearrange = None
67
+ if post_reshape_pattern is not None:
68
+ self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
69
+
70
+ def build(self, input_shape):
71
+ [weight_shape, weight_bound, bias_shape, bias_bound] = self._params
72
+ self.weight = self.add_weight(
73
+ shape=weight_shape,
74
+ initializer=tf.random_uniform_initializer(-weight_bound, weight_bound),
75
+ trainable=True,
76
+ )
77
+
78
+ if bias_shape is not None:
79
+ self.bias = self.add_weight(
80
+ shape=bias_shape,
81
+ initializer=tf.random_uniform_initializer(-bias_bound, bias_bound),
82
+ trainable=True,
83
+ )
84
+ else:
85
+ self.bias = None
86
+
87
+ def call(self, inputs):
88
+ if self.pre_rearrange is not None:
89
+ inputs = self.pre_rearrange(inputs)
90
+ result = tf.einsum(self.einsum_pattern, inputs, self.weight)
91
+ if self.bias is not None:
92
+ result = result + self.bias
93
+ if self.post_rearrange is not None:
94
+ result = self.post_rearrange(result)
95
+ return result
96
+
97
+ def get_config(self):
98
+ return {
99
+ "pattern": self.pattern,
100
+ "weight_shape": self.weight_shape,
101
+ "bias_shape": self.bias_shape,
102
+ **self.axes_lengths,
103
+ }
tool_server/.venv/lib/python3.12/site-packages/einops/layers/torch.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Dict, cast
2
+
3
+ import torch
4
+
5
+ from . import RearrangeMixin, ReduceMixin
6
+ from ._einmix import _EinmixMixin
7
+ from .._torch_specific import apply_for_scriptable_torch
8
+
9
+ __author__ = "Alex Rogozhnikov"
10
+
11
+
12
+ class Rearrange(RearrangeMixin, torch.nn.Module):
13
+ def forward(self, input):
14
+ recipe = self._multirecipe[input.ndim]
15
+ return apply_for_scriptable_torch(recipe, input, reduction_type="rearrange", axes_dims=self._axes_lengths)
16
+
17
+ def _apply_recipe(self, x):
18
+ # overriding parent method to prevent it's scripting
19
+ pass
20
+
21
+
22
+ class Reduce(ReduceMixin, torch.nn.Module):
23
+ def forward(self, input):
24
+ recipe = self._multirecipe[input.ndim]
25
+ return apply_for_scriptable_torch(recipe, input, reduction_type=self.reduction, axes_dims=self._axes_lengths)
26
+
27
+ def _apply_recipe(self, x):
28
+ # overriding parent method to prevent it's scripting
29
+ pass
30
+
31
+
32
+ class EinMix(_EinmixMixin, torch.nn.Module):
33
+ def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
34
+ self.weight = torch.nn.Parameter(
35
+ torch.zeros(weight_shape).uniform_(-weight_bound, weight_bound), requires_grad=True
36
+ )
37
+ if bias_shape is not None:
38
+ self.bias = torch.nn.Parameter(
39
+ torch.zeros(bias_shape).uniform_(-bias_bound, bias_bound), requires_grad=True
40
+ )
41
+ else:
42
+ self.bias = None
43
+
44
+ def _create_rearrange_layers(
45
+ self,
46
+ pre_reshape_pattern: Optional[str],
47
+ pre_reshape_lengths: Optional[Dict],
48
+ post_reshape_pattern: Optional[str],
49
+ post_reshape_lengths: Optional[Dict],
50
+ ):
51
+ self.pre_rearrange = None
52
+ if pre_reshape_pattern is not None:
53
+ self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))
54
+
55
+ self.post_rearrange = None
56
+ if post_reshape_pattern is not None:
57
+ self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))
58
+
59
+ def forward(self, input):
60
+ if self.pre_rearrange is not None:
61
+ input = self.pre_rearrange(input)
62
+ result = torch.einsum(self.einsum_pattern, input, self.weight)
63
+ if self.bias is not None:
64
+ result += self.bias
65
+ if self.post_rearrange is not None:
66
+ result = self.post_rearrange(result)
67
+ return result
tool_server/.venv/lib/python3.12/site-packages/einops/tests/__init__.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Common utils for testing.
3
+ These functions allow testing only some frameworks, not all.
4
+ """
5
+
6
+ import logging
7
+ import os
8
+ from functools import lru_cache
9
+ from typing import List, Tuple
10
+
11
+ from einops import _backends
12
+ import warnings
13
+
14
+ __author__ = "Alex Rogozhnikov"
15
+
16
+
17
+ # minimize noise in tests logging
18
+ logging.getLogger("tensorflow").disabled = True
19
+ logging.getLogger("matplotlib").disabled = True
20
+
21
+ FLOAT_REDUCTIONS = ("min", "max", "sum", "mean", "prod") # not includes any/all
22
+
23
+
24
+ def find_names_of_all_frameworks() -> List[str]:
25
+ backend_subclasses = []
26
+ backends = _backends.AbstractBackend.__subclasses__()
27
+ while backends:
28
+ backend = backends.pop()
29
+ backends += backend.__subclasses__()
30
+ backend_subclasses.append(backend)
31
+ return [b.framework_name for b in backend_subclasses]
32
+
33
+
34
+ ENVVAR_NAME = "EINOPS_TEST_BACKENDS"
35
+
36
+
37
+ def unparse_backends(backend_names: List[str]) -> Tuple[str, str]:
38
+ _known_backends = find_names_of_all_frameworks()
39
+ for backend_name in backend_names:
40
+ if backend_name not in _known_backends:
41
+ raise RuntimeError(f"Unknown framework: {backend_name}")
42
+ return ENVVAR_NAME, ",".join(backend_names)
43
+
44
+
45
+ @lru_cache(maxsize=1)
46
+ def parse_backends_to_test() -> List[str]:
47
+ if ENVVAR_NAME not in os.environ:
48
+ raise RuntimeError(f"Testing frameworks were not specified, env var {ENVVAR_NAME} not set")
49
+ parsed_backends = os.environ[ENVVAR_NAME].split(",")
50
+ _known_backends = find_names_of_all_frameworks()
51
+ for backend_name in parsed_backends:
52
+ if backend_name not in _known_backends:
53
+ raise RuntimeError(f"Unknown framework: {backend_name}")
54
+
55
+ return parsed_backends
56
+
57
+
58
+ def is_backend_tested(backend: str) -> bool:
59
+ """Used to skip test if corresponding backend is not tested"""
60
+ if backend not in find_names_of_all_frameworks():
61
+ raise RuntimeError(f"Unknown framework {backend}")
62
+ return backend in parse_backends_to_test()
63
+
64
+
65
+ def collect_test_backends(symbolic=False, layers=False) -> List[_backends.AbstractBackend]:
66
+ """
67
+ :param symbolic: symbolic or imperative frameworks?
68
+ :param layers: layers or operations?
69
+ :return: list of backends satisfying set conditions
70
+ """
71
+ if not symbolic:
72
+ if not layers:
73
+ backend_types = [
74
+ _backends.NumpyBackend,
75
+ _backends.JaxBackend,
76
+ _backends.TorchBackend,
77
+ _backends.TensorflowBackend,
78
+ _backends.OneFlowBackend,
79
+ _backends.PaddleBackend,
80
+ _backends.CupyBackend,
81
+ ]
82
+ else:
83
+ backend_types = [
84
+ _backends.TorchBackend,
85
+ _backends.OneFlowBackend,
86
+ _backends.PaddleBackend,
87
+ ]
88
+ else:
89
+ if not layers:
90
+ backend_types = [
91
+ _backends.PyTensorBackend,
92
+ ]
93
+ else:
94
+ backend_types = [
95
+ _backends.TFKerasBackend,
96
+ ]
97
+
98
+ backend_names_to_test = parse_backends_to_test()
99
+ result = []
100
+ for backend_type in backend_types:
101
+ if backend_type.framework_name not in backend_names_to_test:
102
+ continue
103
+ try:
104
+ result.append(backend_type())
105
+ except ImportError:
106
+ # problem with backend installation fails a specific test function,
107
+ # but will be skipped in all other test cases
108
+ warnings.warn("backend could not be initialized for tests: {}".format(backend_type))
109
+ return result
tool_server/.venv/lib/python3.12/site-packages/einops/tests/run_tests.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Runs tests that are appropriate for framework.
3
+ """
4
+
5
+ import os
6
+ import sys
7
+ from subprocess import Popen
8
+ from pathlib import Path
9
+
10
+ __author__ = "Alex Rogozhnikov"
11
+
12
+
13
+ def run(cmd, **env):
14
+ # keeps printing output when testing
15
+ cmd = cmd.split(" ") if isinstance(cmd, str) else cmd
16
+ print("running:", cmd)
17
+ p = Popen(cmd, cwd=str(Path(__file__).parent), env={**os.environ, **env})
18
+ p.communicate()
19
+ return p.returncode
20
+
21
+
22
+ def main():
23
+ _executable, *args = sys.argv
24
+ frameworks = [x for x in args if x != "--pip-install"]
25
+ pip_install_is_set = "--pip-install" in args
26
+ framework_name2installation = {
27
+ "numpy": ["numpy"],
28
+ "torch": ["torch --index-url https://download.pytorch.org/whl/cpu"],
29
+ "jax": ["jax[cpu]", "flax"],
30
+ "tensorflow": ["tensorflow"],
31
+ "cupy": ["cupy"],
32
+ # switch to stable paddlepaddle, because of https://github.com/PaddlePaddle/Paddle/issues/63927
33
+ # "paddle": ["paddlepaddle==0.0.0 -f https://www.paddlepaddle.org.cn/whl/linux/cpu-mkl/develop.html"],
34
+ "paddle": ["paddlepaddle"],
35
+ "oneflow": ["oneflow==0.9.0"],
36
+ "pytensor": ["pytensor"],
37
+ }
38
+
39
+ usage = f"""
40
+ Usage: python -m einops.tests.run_tests <frameworks> [--pip-install]
41
+ Example: python -m einops.tests.run_tests numpy pytorch --pip-install
42
+
43
+ Available frameworks: {list(framework_name2installation)}
44
+ When --pip-install is set, auto-installs requirements with pip.
45
+ (make sure which pip points to right pip)
46
+ """
47
+ if len(frameworks) == 0:
48
+ print(usage)
49
+ return
50
+ else:
51
+ synonyms = {
52
+ "tf": "tensorflow",
53
+ "pytorch": "torch",
54
+ "paddlepaddle": "paddle",
55
+ }
56
+ frameworks = [synonyms.get(f, f) for f in frameworks]
57
+ wrong_frameworks = [f for f in frameworks if f not in framework_name2installation]
58
+ if wrong_frameworks:
59
+ print(usage)
60
+ raise RuntimeError(f"Unrecognized frameworks: {wrong_frameworks}")
61
+
62
+ if pip_install_is_set:
63
+ print("Install testing infra")
64
+ other_dependencies = ["pytest"]
65
+ assert 0 == run("pip install {} --progress-bar off -q".format(" ".join(other_dependencies)))
66
+
67
+ for framework in frameworks:
68
+ print(f"Installing {framework}")
69
+ pip_instructions = framework_name2installation[framework]
70
+ assert 0 == run("pip install {} --progress-bar off -q".format(" ".join(pip_instructions)))
71
+
72
+ # we need to inform testing script which frameworks to use
73
+ # this is done by setting an envvar EINOPS_TEST_BACKENDS
74
+ from einops.tests import unparse_backends
75
+
76
+ envvar_name, envvar_value = unparse_backends(backend_names=frameworks)
77
+ return_code = run(
78
+ "python -m pytest .",
79
+ **{envvar_name: envvar_value},
80
+ )
81
+ assert return_code == 0
82
+
83
+
84
+ if __name__ == "__main__":
85
+ main()
tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_einsum.py ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Callable
2
+ from einops.tests import collect_test_backends
3
+ from einops.einops import _compactify_pattern_for_einsum, einsum, EinopsError
4
+ import numpy as np
5
+ import pytest
6
+ import string
7
+
8
+
9
+ class Arguments:
10
+ def __init__(self, *args: Any, **kargs: Any):
11
+ self.args = args
12
+ self.kwargs = kargs
13
+
14
+ def __call__(self, function: Callable):
15
+ return function(*self.args, **self.kwargs)
16
+
17
+
18
+ test_layer_cases = [
19
+ (
20
+ Arguments("b c_in h w -> w c_out h b", "c_in c_out", bias_shape=None, c_out=13, c_in=12),
21
+ (2, 12, 3, 4),
22
+ (4, 13, 3, 2),
23
+ ),
24
+ (
25
+ Arguments("b c_in h w -> w c_out h b", "c_in c_out", bias_shape="c_out", c_out=13, c_in=12),
26
+ (2, 12, 3, 4),
27
+ (4, 13, 3, 2),
28
+ ),
29
+ (
30
+ Arguments("b c_in h w -> w c_in h b", "", bias_shape=None, c_in=12),
31
+ (2, 12, 3, 4),
32
+ (4, 12, 3, 2),
33
+ ),
34
+ (
35
+ Arguments("b c_in h w -> b c_out", "c_in h w c_out", bias_shape=None, c_in=12, h=3, w=4, c_out=5),
36
+ (2, 12, 3, 4),
37
+ (2, 5),
38
+ ),
39
+ (
40
+ Arguments("b t head c_in -> b t head c_out", "head c_in c_out", bias_shape=None, head=4, c_in=5, c_out=6),
41
+ (2, 3, 4, 5),
42
+ (2, 3, 4, 6),
43
+ ),
44
+ ]
45
+
46
+
47
+ # Each of the form:
48
+ # (Arguments, true_einsum_pattern, in_shapes, out_shape)
49
+ test_functional_cases = [
50
+ (
51
+ # Basic:
52
+ "b c h w, b w -> b h",
53
+ "abcd,ad->ac",
54
+ ((2, 3, 4, 5), (2, 5)),
55
+ (2, 4),
56
+ ),
57
+ (
58
+ # Three tensors:
59
+ "b c h w, b w, b c -> b h",
60
+ "abcd,ad,ab->ac",
61
+ ((2, 3, 40, 5), (2, 5), (2, 3)),
62
+ (2, 40),
63
+ ),
64
+ (
65
+ # Ellipsis, and full names:
66
+ "... one two three, three four five -> ... two five",
67
+ "...abc,cde->...be",
68
+ ((32, 5, 2, 3, 4), (4, 5, 6)),
69
+ (32, 5, 3, 6),
70
+ ),
71
+ (
72
+ # Ellipsis at the end:
73
+ "one two three ..., three four five -> two five ...",
74
+ "abc...,cde->be...",
75
+ ((2, 3, 4, 32, 5), (4, 5, 6)),
76
+ (3, 6, 32, 5),
77
+ ),
78
+ (
79
+ # Ellipsis on multiple tensors:
80
+ "... one two three, ... three four five -> ... two five",
81
+ "...abc,...cde->...be",
82
+ ((32, 5, 2, 3, 4), (32, 5, 4, 5, 6)),
83
+ (32, 5, 3, 6),
84
+ ),
85
+ (
86
+ # One tensor, and underscores:
87
+ "first_tensor second_tensor -> first_tensor",
88
+ "ab->a",
89
+ ((5, 4),),
90
+ (5,),
91
+ ),
92
+ (
93
+ # Trace (repeated index)
94
+ "i i -> ",
95
+ "aa->",
96
+ ((5, 5),),
97
+ (),
98
+ ),
99
+ (
100
+ # Too many spaces in string:
101
+ " one two , three four->two four ",
102
+ "ab,cd->bd",
103
+ ((2, 3), (4, 5)),
104
+ (3, 5),
105
+ ),
106
+ # The following tests were inspired by numpy's einsum tests
107
+ # https://github.com/numpy/numpy/blob/v1.23.0/numpy/core/tests/test_einsum.py
108
+ (
109
+ # Trace with other indices
110
+ "i middle i -> middle",
111
+ "aba->b",
112
+ ((5, 10, 5),),
113
+ (10,),
114
+ ),
115
+ (
116
+ # Ellipsis in the middle:
117
+ "i ... i -> ...",
118
+ "a...a->...",
119
+ ((5, 3, 2, 1, 4, 5),),
120
+ (3, 2, 1, 4),
121
+ ),
122
+ (
123
+ # Product of first and last axes:
124
+ "i ... i -> i ...",
125
+ "a...a->a...",
126
+ ((5, 3, 2, 1, 4, 5),),
127
+ (5, 3, 2, 1, 4),
128
+ ),
129
+ (
130
+ # Triple diagonal
131
+ "one one one -> one",
132
+ "aaa->a",
133
+ ((5, 5, 5),),
134
+ (5,),
135
+ ),
136
+ (
137
+ # Axis swap:
138
+ "i j k -> j i k",
139
+ "abc->bac",
140
+ ((1, 2, 3),),
141
+ (2, 1, 3),
142
+ ),
143
+ (
144
+ # Identity:
145
+ "... -> ...",
146
+ "...->...",
147
+ ((5, 4, 3, 2, 1),),
148
+ (5, 4, 3, 2, 1),
149
+ ),
150
+ (
151
+ # Elementwise product of three tensors
152
+ "..., ..., ... -> ...",
153
+ "...,...,...->...",
154
+ ((3, 2), (3, 2), (3, 2)),
155
+ (3, 2),
156
+ ),
157
+ (
158
+ # Basic summation:
159
+ "index ->",
160
+ "a->",
161
+ ((10,)),
162
+ (()),
163
+ ),
164
+ ]
165
+
166
+
167
+ def test_layer():
168
+ for backend in collect_test_backends(layers=True, symbolic=False):
169
+ if backend.framework_name in ["tensorflow", "torch", "oneflow", "paddle"]:
170
+ layer_type = backend.layers().EinMix
171
+ for args, in_shape, out_shape in test_layer_cases:
172
+ layer = args(layer_type)
173
+ print("Running", layer.einsum_pattern, "for", backend.framework_name)
174
+ input = np.random.uniform(size=in_shape).astype("float32")
175
+ input_framework = backend.from_numpy(input)
176
+ output_framework = layer(input_framework)
177
+ output = backend.to_numpy(output_framework)
178
+ assert output.shape == out_shape
179
+
180
+
181
+ valid_backends_functional = [
182
+ "tensorflow",
183
+ "torch",
184
+ "jax",
185
+ "numpy",
186
+ "oneflow",
187
+ "cupy",
188
+ "tensorflow.keras",
189
+ "paddle",
190
+ "pytensor",
191
+ ]
192
+
193
+
194
+ def test_functional():
195
+ # Functional tests:
196
+ backends = filter(lambda x: x.framework_name in valid_backends_functional, collect_test_backends())
197
+ for backend in backends:
198
+ for einops_pattern, true_pattern, in_shapes, out_shape in test_functional_cases:
199
+ print(f"Running '{einops_pattern}' for {backend.framework_name}")
200
+
201
+ # Create pattern:
202
+ predicted_pattern = _compactify_pattern_for_einsum(einops_pattern)
203
+ assert predicted_pattern == true_pattern
204
+
205
+ # Generate example data:
206
+ rstate = np.random.RandomState(0)
207
+ in_arrays = [rstate.uniform(size=shape).astype("float32") for shape in in_shapes]
208
+ in_arrays_framework = [backend.from_numpy(array) for array in in_arrays]
209
+
210
+ # Loop over whether we call it manually with the backend,
211
+ # or whether we use `einops.einsum`.
212
+ for do_manual_call in [True, False]:
213
+ # Actually run einsum:
214
+ if do_manual_call:
215
+ out_array = backend.einsum(predicted_pattern, *in_arrays_framework)
216
+ else:
217
+ out_array = einsum(*in_arrays_framework, einops_pattern)
218
+
219
+ # Check shape:
220
+ if tuple(out_array.shape) != out_shape:
221
+ raise ValueError(f"Expected output shape {out_shape} but got {out_array.shape}")
222
+
223
+ # Check values:
224
+ true_out_array = np.einsum(true_pattern, *in_arrays)
225
+ predicted_out_array = backend.to_numpy(out_array)
226
+ np.testing.assert_array_almost_equal(predicted_out_array, true_out_array, decimal=5)
227
+
228
+
229
+ def test_functional_symbolic():
230
+ backends = filter(
231
+ lambda x: x.framework_name in valid_backends_functional, collect_test_backends(symbolic=True, layers=False)
232
+ )
233
+ for backend in backends:
234
+ for einops_pattern, true_pattern, in_shapes, out_shape in test_functional_cases:
235
+ print(f"Running '{einops_pattern}' for symbolic {backend.framework_name}")
236
+ # Create pattern:
237
+ predicted_pattern = _compactify_pattern_for_einsum(einops_pattern)
238
+ assert predicted_pattern == true_pattern
239
+
240
+ rstate = np.random.RandomState(0)
241
+ in_syms = [backend.create_symbol(in_shape) for in_shape in in_shapes]
242
+ in_data = [rstate.uniform(size=in_shape).astype("float32") for in_shape in in_shapes]
243
+
244
+ expected_out_data = np.einsum(true_pattern, *in_data)
245
+
246
+ for do_manual_call in [True, False]:
247
+ if do_manual_call:
248
+ predicted_out_symbol = backend.einsum(predicted_pattern, *in_syms)
249
+ else:
250
+ predicted_out_symbol = einsum(*in_syms, einops_pattern)
251
+
252
+ predicted_out_data = backend.eval_symbol(
253
+ predicted_out_symbol,
254
+ list(zip(in_syms, in_data)),
255
+ )
256
+ if predicted_out_data.shape != out_shape:
257
+ raise ValueError(f"Expected output shape {out_shape} but got {predicted_out_data.shape}")
258
+ np.testing.assert_array_almost_equal(predicted_out_data, expected_out_data, decimal=5)
259
+
260
+
261
+ def test_functional_errors():
262
+ # Specific backend does not matter, as errors are raised
263
+ # during the pattern creation.
264
+
265
+ rstate = np.random.RandomState(0)
266
+
267
+ def create_tensor(*shape):
268
+ return rstate.uniform(size=shape).astype("float32")
269
+
270
+ # raise NotImplementedError("Singleton () axes are not yet supported in einsum.")
271
+ with pytest.raises(NotImplementedError, match="^Singleton"):
272
+ einsum(
273
+ create_tensor(5, 1),
274
+ "i () -> i",
275
+ )
276
+
277
+ # raise NotImplementedError("Shape rearrangement is not yet supported in einsum.")
278
+ with pytest.raises(NotImplementedError, match="^Shape rearrangement"):
279
+ einsum(
280
+ create_tensor(5, 1),
281
+ "a b -> (a b)",
282
+ )
283
+
284
+ with pytest.raises(NotImplementedError, match="^Shape rearrangement"):
285
+ einsum(
286
+ create_tensor(10, 1),
287
+ "(a b) -> a b",
288
+ )
289
+
290
+ # raise RuntimeError("Encountered empty axis name in einsum.")
291
+ # raise RuntimeError("Axis name in einsum must be a string.")
292
+ # ^ Not tested, these are just a failsafe in case an unexpected error occurs.
293
+
294
+ # raise NotImplementedError("Anonymous axes are not yet supported in einsum.")
295
+ with pytest.raises(NotImplementedError, match="^Anonymous axes"):
296
+ einsum(
297
+ create_tensor(5, 1),
298
+ "i 2 -> i",
299
+ )
300
+
301
+ # ParsedExpression error:
302
+ with pytest.raises(EinopsError, match="^Invalid axis identifier"):
303
+ einsum(
304
+ create_tensor(5, 1),
305
+ "i 2j -> i",
306
+ )
307
+
308
+ # raise ValueError("Einsum pattern must contain '->'.")
309
+ with pytest.raises(ValueError, match="^Einsum pattern"):
310
+ einsum(
311
+ create_tensor(5, 3, 2),
312
+ "i j k",
313
+ )
314
+
315
+ # raise RuntimeError("Too many axes in einsum.")
316
+ with pytest.raises(RuntimeError, match="^Too many axes"):
317
+ einsum(
318
+ create_tensor(1),
319
+ " ".join(string.ascii_letters) + " extra ->",
320
+ )
321
+
322
+ # raise RuntimeError("Unknown axis on right side of einsum.")
323
+ with pytest.raises(RuntimeError, match="^Unknown axis"):
324
+ einsum(
325
+ create_tensor(5, 1),
326
+ "i j -> k",
327
+ )
328
+
329
+ # raise ValueError(
330
+ # "The last argument passed to `einops.einsum` must be a string,"
331
+ # " representing the einsum pattern."
332
+ # )
333
+ with pytest.raises(ValueError, match="^The last argument"):
334
+ einsum(
335
+ "i j k -> i",
336
+ create_tensor(5, 4, 3),
337
+ )
338
+
339
+ # raise ValueError(
340
+ # "`einops.einsum` takes at minimum two arguments: the tensors,"
341
+ # " followed by the pattern."
342
+ # )
343
+ with pytest.raises(ValueError, match="^`einops.einsum` takes"):
344
+ einsum(
345
+ "i j k -> i",
346
+ )
347
+ with pytest.raises(ValueError, match="^`einops.einsum` takes"):
348
+ einsum(
349
+ create_tensor(5, 1),
350
+ )
351
+
352
+ # TODO: Include check for giving normal einsum pattern rather than einops.
tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_examples.py ADDED
@@ -0,0 +1,297 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy
2
+ import pytest
3
+
4
+ from einops import rearrange, parse_shape, reduce
5
+ from einops.tests import is_backend_tested
6
+ from einops.tests.test_ops import imp_op_backends
7
+
8
+
9
def test_rearrange_examples():
    """Replay documentation-style rearrange/reduce recipes on every imperative backend.

    Each helper takes a (10, 20, 30, 40) array, applies one recipe, checks the
    resulting shape, and returns the tensor so numpy and backend results can be
    compared element-wise. Recipes are also re-run on strided (non-contiguous)
    views of a larger array.
    """

    def example_transpose(x):
        out = rearrange(x, "b c h w -> b h w c")
        assert tuple(out.shape) == (10, 30, 40, 20)
        return out

    def example_flatten(x):
        # view / reshape
        out = rearrange(x, "b c h w -> b (c h w)")
        assert tuple(out.shape) == (10, 20 * 30 * 40)
        return out

    def example_depth_to_space(x):
        out = rearrange(x, "b (c h1 w1) h w -> b c (h h1) (w w1)", h1=2, w1=2)
        assert tuple(out.shape) == (10, 5, 30 * 2, 40 * 2)
        return out

    def example_space_to_depth(x):
        out = rearrange(x, "b c (h h1) (w w1) -> b (h1 w1 c) h w", h1=2, w1=2)
        assert tuple(out.shape) == (10, 20 * 4, 30 // 2, 40 // 2)
        return out

    def example_simple_transposition(x):
        out = rearrange(x, "b1 sound b2 letter -> b1 b2 sound letter")
        assert tuple(out.shape) == (10, 30, 20, 40)
        return out

    def example_parse_shape(x):
        # recover axis sizes of the original tensor via parse_shape
        flat = rearrange(x, "b c h w -> (b h w) c")
        flat = flat[:, ::2]  # stand-in for a dot-product; just halves the second axis
        assert tuple(flat.shape) == (10 * 30 * 40, 10)

        out = rearrange(flat, "(b h w) c2 -> b c2 h w", **parse_shape(x, "b _ h w"))
        assert tuple(out.shape) == (10, 10, 30, 40)
        return out

    def example_split_groups(x):
        # split of embedding into groups
        first, second = rearrange(x, "b (c g) h w -> g b c h w", g=2)
        assert tuple(first.shape) == (10, 10, 30, 40)
        assert tuple(second.shape) == (10, 10, 30, 40)
        return first + second  # a single tensor is expected as output

    def example_max_pooling(x):
        out = reduce(x, "b c (h h1) (w w1) -> b c h w", reduction="max", h1=2, w1=2)
        assert tuple(out.shape) == (10, 20, 30 // 2, 40 // 2)
        return out

    def example_squeeze_unsqueeze(x):
        out = reduce(x, "b c h w -> b c () ()", reduction="max")
        assert tuple(out.shape) == (10, 20, 1, 1)
        out = rearrange(out, "b c () () -> c b")
        assert tuple(out.shape) == (20, 10)
        return out

    def example_stack(x):
        # +0 works around https://github.com/tensorflow/tensorflow/issues/23185
        pieces = list(x + 0)
        stacked = rearrange(pieces, "b c h w -> b h w c")
        assert tuple(stacked.shape) == (10, 30, 40, 20)
        return stacked

    def example_concatenate(x):
        # +0 works around https://github.com/tensorflow/tensorflow/issues/23185
        pieces = list(x + 0)
        joined = rearrange(pieces, "b c h w -> h (b w) c")
        assert tuple(joined.shape) == (30, 10 * 40, 20)
        return joined

    def shufflenet(x, convolve, c1, c2):
        # channel reordering as used in ShuffleNet
        x = convolve(x)
        x = rearrange(x, "b (c1 c2) h w-> b (c2 c1) h w", c1=c1, c2=c2)
        x = convolve(x)
        return x

    def convolve_strided_1d(x, stride, usual_convolution):
        x = rearrange(x, "b c t1 t2 -> b c (t1 t2)")  # collapse to a single time axis
        x = rearrange(x, "b c (t stride) -> (stride b) c t", stride=stride)
        x = usual_convolution(x)
        x = rearrange(x, "(stride b) c t -> b c (t stride)", stride=stride)
        return x

    def convolve_strided_2d(x, h_stride, w_stride, usual_convolution):
        x = rearrange(x, "b c (h hs) (w ws) -> (hs ws b) c h w", hs=h_stride, ws=w_stride)
        x = usual_convolution(x)
        x = rearrange(x, "(hs ws b) c h w -> b c (h hs) (w ws)", hs=h_stride, ws=w_stride)
        return x

    def unet_like_1d(x, usual_convolution):
        # u-net-like steps for increasing / reducing dimensionality
        x = rearrange(x, "b c t1 t2 -> b c (t1 t2)")  # collapse to a single time axis
        y = rearrange(x, "b c (t dt) -> b (dt c) t", dt=2)
        y = usual_convolution(y)
        x = x + rearrange(y, "b (dt c) t -> b c (t dt)", dt=2)
        return x

    def convolve_mock(x):
        # identity stand-in for a convolution; works on every backend
        return x

    recipes = [
        example_transpose,
        example_flatten,
        example_depth_to_space,
        example_space_to_depth,
        example_simple_transposition,
        example_parse_shape,
        example_split_groups,
        example_max_pooling,
        example_squeeze_unsqueeze,
        example_stack,
        example_concatenate,
        lambda x: shufflenet(x, convolve=convolve_mock, c1=4, c2=5),
        lambda x: convolve_strided_1d(x, stride=2, usual_convolution=convolve_mock),
        lambda x: convolve_strided_2d(x, h_stride=2, w_stride=2, usual_convolution=convolve_mock),
        lambda x: unet_like_1d(x, usual_convolution=convolve_mock),
    ]

    for backend in imp_op_backends:
        print("testing source_examples for ", backend.framework_name)
        for recipe in recipes:
            x = numpy.arange(10 * 20 * 30 * 40).reshape([10, 20, 30, 40])
            expected = recipe(x)
            actual = backend.to_numpy(recipe(backend.from_numpy(x)))
            assert numpy.array_equal(expected, actual)

            # same recipes, but over strided (non-contiguous) views
            x = numpy.arange(10 * 2 * 20 * 3 * 30 * 1 * 40).reshape([10 * 2, 20 * 3, 30 * 1, 40 * 1])
            # torch and oneflow don't support negative steps
            last_step = 1 if backend.framework_name in ("torch", "oneflow") else -1
            indexing_expression = numpy.index_exp[::2, ::3, ::1, ::last_step]
            expected = recipe(x[indexing_expression])
            actual = backend.to_numpy(recipe(backend.from_numpy(x)[indexing_expression]))
            assert numpy.array_equal(expected, actual)
151
+
152
+
153
def tensor_train_example_numpy():
    """Tensor-train contraction computed three equivalent ways (numpy only).

    Reference: https://arxiv.org/pdf/1509.06569.pdf, equation (5). Kept here
    as part of the example collection; checks that einsum, flat matrix
    products, and an axis-preserving variant all agree.
    """
    x = numpy.ones([3, 4, 5, 6])
    rank = 4
    # numpy.einsum with this ellipsis pattern needs numpy >= 1.15.
    # Compare version components numerically: the previous plain string
    # comparison mis-orders versions (e.g. "1.9.0" < "1.15.0" is False).
    if tuple(int(part) for part in numpy.__version__.split(".")[:2]) < (1, 15):
        # numpy.einsum fails here, skip test
        return
    # creating appropriate Gs (train carriages); edge carriages have rank 1
    Gs = [numpy.ones([d, d, rank, rank]) for d in x.shape]
    Gs[0] = Gs[0][:, :, :1, :]
    Gs[-1] = Gs[-1][:, :, :, :1]

    # einsum way
    y = x.reshape((1,) + x.shape)
    for G in Gs:
        # taking partial results left-to-right
        # y = numpy.einsum('i j alpha beta, alpha i ... -> beta ... j', G, y)
        y = numpy.einsum("i j a b, a i ... -> b ... j", G, y)
    y1 = y.reshape(-1)

    # alternative way: flatten, then chain matrix products
    y = x.reshape(-1)
    for G in Gs:
        i, j, alpha, beta = G.shape
        y = rearrange(y, "(i rest alpha) -> rest (alpha i)", alpha=alpha, i=i)
        y = y @ rearrange(G, "i j alpha beta -> (alpha i) (j beta)")
        y = rearrange(y, "rest (beta j) -> (beta rest j)", beta=beta, j=j)
    y2 = y
    assert numpy.allclose(y1, y2)

    # yet another way, keeping the ellipsis axes around
    y = x
    for G in Gs:
        i, j, alpha, beta = G.shape
        y = rearrange(y, "i ... (j alpha) -> ... j (alpha i)", alpha=alpha, i=i)
        y = y @ rearrange(G, "i j alpha beta -> (alpha i) (j beta)")
    y3 = y.reshape(-1)
    assert numpy.allclose(y1, y3)
192
+
193
+
194
def test_pytorch_yolo_fragment():
    """Check that an einops rewrite of a YOLOv3 head matches the original torch code."""
    if not is_backend_tested("torch"):
        pytest.skip()

    import torch

    def old_way(input, num_classes, num_anchors, anchors, stride_h, stride_w):
        # near-verbatim port of
        # https://github.com/BobLiu20/YOLOv3_PyTorch/blob/c6b483743598b5f64d520d81e7e5f47ba936d4c9/nets/yolo_loss.py#L28-L44
        bs = input.size(0)
        in_h = input.size(2)
        in_w = input.size(3)
        scaled_anchors = [(a_w / stride_w, a_h / stride_h) for a_w, a_h in anchors]

        prediction = input.view(bs, num_anchors, 5 + num_classes, in_h, in_w).permute(0, 1, 3, 4, 2).contiguous()
        # split raw outputs
        x = torch.sigmoid(prediction[..., 0])  # center x
        y = torch.sigmoid(prediction[..., 1])  # center y
        w = prediction[..., 2]  # width
        h = prediction[..., 3]  # height
        conf = torch.sigmoid(prediction[..., 4])  # objectness confidence
        pred_cls = torch.sigmoid(prediction[..., 5:])  # class scores

        # https://github.com/BobLiu20/YOLOv3_PyTorch/blob/c6b483743598b5f64d520d81e7e5f47ba936d4c9/nets/yolo_loss.py#L70-L92
        FloatTensor = torch.cuda.FloatTensor if x.is_cuda else torch.FloatTensor
        LongTensor = torch.cuda.LongTensor if x.is_cuda else torch.LongTensor
        # per-cell grid offsets
        grid_x = (
            torch.linspace(0, in_w - 1, in_w)
            .repeat(in_w, 1)
            .repeat(bs * num_anchors, 1, 1)
            .view(x.shape)
            .type(FloatTensor)
        )
        grid_y = (
            torch.linspace(0, in_h - 1, in_h)
            .repeat(in_h, 1)
            .t()
            .repeat(bs * num_anchors, 1, 1)
            .view(y.shape)
            .type(FloatTensor)
        )
        # anchor w, h broadcast to prediction shape
        anchor_w = FloatTensor(scaled_anchors).index_select(1, LongTensor([0]))
        anchor_h = FloatTensor(scaled_anchors).index_select(1, LongTensor([1]))
        anchor_w = anchor_w.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(w.shape)
        anchor_h = anchor_h.repeat(bs, 1).repeat(1, 1, in_h * in_w).view(h.shape)
        # add offset and scale with anchors
        pred_boxes = FloatTensor(prediction[..., :4].shape)
        pred_boxes[..., 0] = x.data + grid_x
        pred_boxes[..., 1] = y.data + grid_y
        pred_boxes[..., 2] = torch.exp(w.data) * anchor_w
        pred_boxes[..., 3] = torch.exp(h.data) * anchor_h
        # assemble results
        _scale = torch.Tensor([stride_w, stride_h] * 2).type(FloatTensor)
        output = torch.cat(
            (pred_boxes.view(bs, -1, 4) * _scale, conf.view(bs, -1, 1), pred_cls.view(bs, -1, num_classes)), -1
        )
        return output

    def new_way(input, num_classes, num_anchors, anchors, stride_h, stride_w):
        # same computation expressed through einops; prediction channel goes first
        raw_predictions = rearrange(input, " b (anchor prediction) h w -> prediction b anchor h w", anchor=num_anchors)

        anchors = torch.FloatTensor(anchors).to(input.device)
        anchor_sizes = rearrange(anchors, "anchor dim -> dim () anchor () ()")

        _, _, _, in_h, in_w = raw_predictions.shape
        grid_h = rearrange(torch.arange(in_h).float(), "h -> () () h ()").to(input.device)
        grid_w = rearrange(torch.arange(in_w).float(), "w -> () () () w").to(input.device)

        predicted_bboxes = torch.zeros_like(raw_predictions)
        predicted_bboxes[0] = (raw_predictions[0].sigmoid() + grid_h) * stride_h  # center y
        predicted_bboxes[1] = (raw_predictions[1].sigmoid() + grid_w) * stride_w  # center x
        predicted_bboxes[2:4] = (raw_predictions[2:4].exp()) * anchor_sizes  # bbox width and height
        predicted_bboxes[4] = raw_predictions[4].sigmoid()  # confidence
        predicted_bboxes[5:] = raw_predictions[5:].sigmoid()  # class predictions
        # only to match results of original code, not needed
        return rearrange(predicted_bboxes, "prediction b anchor h w -> b anchor h w prediction")

    stride_h = 4
    stride_w = 4
    batch_size = 5
    num_classes = 12
    anchors = [[50, 100], [100, 50], [75, 75]]
    num_anchors = len(anchors)

    input = torch.randn([batch_size, num_anchors * (5 + num_classes), 1, 1])
    shared_kwargs = dict(
        num_anchors=num_anchors,
        num_classes=num_classes,
        stride_h=stride_h,
        stride_w=stride_w,
        anchors=anchors,
    )
    result1 = old_way(input=input, **shared_kwargs)
    result2 = new_way(input=input, **shared_kwargs)
    result1 = result1.reshape(result2.shape)
    assert torch.allclose(result1, result2)
tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_layers.py ADDED
@@ -0,0 +1,469 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pickle
2
+ from collections import namedtuple
3
+
4
+ import numpy
5
+ import pytest
6
+
7
+ from einops import rearrange, reduce, EinopsError
8
+ from einops.tests import collect_test_backends, is_backend_tested, FLOAT_REDUCTIONS as REDUCTIONS
9
+
10
+ __author__ = "Alex Rogozhnikov"
11
+
12
# A single test scenario: pattern + axis lengths + one valid input shape
# + a list of shapes that the layer must reject.
testcase = namedtuple("testcase", ["pattern", "axes_lengths", "input_shape", "wrong_shapes"])

rearrangement_patterns = [
    # flatten everything but the batch axis
    testcase(
        "b c h w -> b (c h w)",
        dict(c=20),
        (10, 20, 30, 40),
        [(), (10,), (10, 10, 10), (10, 21, 30, 40), [1, 20, 1, 1, 1]],
    ),
    # space-to-depth-style regrouping
    testcase(
        "b c (h1 h2) (w1 w2) -> b (c h2 w2) h1 w1",
        dict(h2=2, w2=2),
        (10, 20, 30, 40),
        [(), (1, 1, 1, 1), (1, 10, 3), ()],
    ),
    # move the last axis to the front, ellipsis in between
    testcase(
        "b ... c -> c b ...",
        dict(b=10),
        (10, 20, 30),
        [(), (10,), (5, 10)],
    ),
]
34
+
35
+
36
def test_rearrange_imperative():
    """Rearrange layers on imperative backends: wrong-shape rejection, pickling, gradients."""
    for backend in collect_test_backends(symbolic=False, layers=True):
        print("Test layer for ", backend.framework_name)

        for pattern, axes_lengths, input_shape, wrong_shapes in rearrangement_patterns:
            x = numpy.arange(numpy.prod(input_shape), dtype="float32").reshape(input_shape)
            expected = rearrange(x, pattern, **axes_lengths)
            layer = backend.layers().Rearrange(pattern, **axes_lengths)

            # each wrong shape must be rejected (any exception type counts)
            for shape in wrong_shapes:
                rejected = False
                try:
                    layer(backend.from_numpy(numpy.zeros(shape, dtype="float32")))
                except BaseException:
                    rejected = True
                assert rejected, "Failure expected"

            # pickle round-trip must keep the layer functional and equivalent
            restored = pickle.loads(pickle.dumps(layer))
            out1 = backend.to_numpy(layer(backend.from_numpy(x)))
            out2 = backend.to_numpy(restored(backend.from_numpy(x)))
            assert numpy.allclose(expected, out1)
            assert numpy.allclose(out1, out2)

            # gradient of sum(rearranged) w.r.t. the input is 1 everywhere
            just_sum = backend.layers().Reduce("...->", reduction="sum")

            variable = backend.from_numpy(x)
            result = just_sum(layer(variable))

            result.backward()
            assert numpy.allclose(backend.to_numpy(variable.grad), 1)
66
+
67
+
68
def test_rearrange_symbolic():
    """Rearrange layers on symbolic backends, with both static and unknown shapes."""
    for backend in collect_test_backends(symbolic=True, layers=True):
        print("Test layer for ", backend.framework_name)

        for pattern, axes_lengths, input_shape, wrong_shapes in rearrangement_patterns:
            x = numpy.arange(numpy.prod(input_shape), dtype="float32").reshape(input_shape)
            expected = rearrange(x, pattern, **axes_lengths)
            layer = backend.layers().Rearrange(pattern, **axes_lengths)

            # try a fully-specified static shape and a fully-unknown one
            for shape in (input_shape, [None] * len(input_shape)):
                symbol = backend.create_symbol(shape)
                eval_inputs = [(symbol, x)]

                result_symbol1 = layer(symbol)
                out1 = backend.eval_symbol(result_symbol1, eval_inputs)
                assert numpy.allclose(expected, out1)

                # pickle round-trip must build an equivalent symbolic graph
                restored = pickle.loads(pickle.dumps(layer))
                out2 = backend.eval_symbol(restored(symbol), eval_inputs)
                assert numpy.allclose(out1, out2)

                # reducing the rearranged symbol over everything equals numpy's sum
                just_sum = backend.layers().Reduce("...->", reduction="sum")
                summed = backend.eval_symbol(just_sum(result_symbol1), eval_inputs)
                assert numpy.allclose(summed, numpy.sum(x))
99
+
100
+
101
# Reductions are tested on all rearrangement scenarios plus a few reduction-only ones.
reduction_patterns = [
    *rearrangement_patterns,
    testcase("b c h w -> b ()", dict(b=10), (10, 20, 30, 40), [(10,), (10, 20, 30)]),
    testcase("b c (h1 h2) (w1 w2) -> b c h1 w1", dict(h1=15, h2=2, w2=2), (10, 20, 30, 40), [(10, 20, 31, 40)]),
    testcase("b ... c -> b", dict(b=10), (10, 20, 30, 40), [(10,), (11, 10)]),
]
106
+
107
+
108
def test_reduce_imperative():
    """Reduce layers on imperative backends.

    For every reduction and pattern: checks agreement with einops.reduce on
    numpy, rejection of wrong input shapes, pickle round-tripping, and the
    structure of the gradient for sum / mean / max / min.
    """
    for backend in collect_test_backends(symbolic=False, layers=True):
        print("Test layer for ", backend.framework_name)
        for reduction in REDUCTIONS:
            for pattern, axes_lengths, input_shape, wrong_shapes in reduction_patterns:
                print(backend, reduction, pattern, axes_lengths, input_shape, wrong_shapes)
                # strictly positive input normalized to mean 1 keeps reductions well-behaved
                x = numpy.arange(1, 1 + numpy.prod(input_shape), dtype="float32").reshape(input_shape)
                x /= x.mean()
                result_numpy = reduce(x, pattern, reduction, **axes_lengths)
                layer = backend.layers().Reduce(pattern, reduction, **axes_lengths)
                # each wrong shape must be rejected (any exception type counts)
                for shape in wrong_shapes:
                    try:
                        layer(backend.from_numpy(numpy.zeros(shape, dtype="float32")))
                    except BaseException:
                        pass
                    else:
                        raise AssertionError("Failure expected")

                # simple pickling / unpickling
                layer2 = pickle.loads(pickle.dumps(layer))
                result1 = backend.to_numpy(layer(backend.from_numpy(x)))
                result2 = backend.to_numpy(layer2(backend.from_numpy(x)))
                assert numpy.allclose(result_numpy, result1)
                assert numpy.allclose(result1, result2)

                just_sum = backend.layers().Reduce("...->", reduction="sum")

                variable = backend.from_numpy(x)
                result = just_sum(layer(variable))

                result.backward()
                grad = backend.to_numpy(variable.grad)
                if reduction == "sum":
                    # d(sum)/dx is 1 everywhere
                    assert numpy.allclose(grad, 1)
                if reduction == "mean":
                    # d(mean)/dx is the same constant everywhere
                    assert numpy.allclose(grad, grad.min())
                if reduction in ["max", "min"]:
                    # gradient flows only through selected elements.
                    # numpy.isin replaces numpy.in1d, which is deprecated since numpy 1.25.
                    assert numpy.all(numpy.isin(grad, [0, 1]))
                    assert numpy.sum(grad) > 0.5
148
+
149
def test_reduce_symbolic():
    """Reduce layers on symbolic backends, with static and unknown shapes."""
    for backend in collect_test_backends(symbolic=True, layers=True):
        print("Test layer for ", backend.framework_name)
        for reduction in REDUCTIONS:
            for pattern, axes_lengths, input_shape, wrong_shapes in reduction_patterns:
                # strictly positive input normalized to mean 1
                x = numpy.arange(1, 1 + numpy.prod(input_shape), dtype="float32").reshape(input_shape)
                x /= x.mean()
                expected = reduce(x, pattern, reduction, **axes_lengths)
                layer = backend.layers().Reduce(pattern, reduction, **axes_lengths)

                # try a fully-specified static shape and a fully-unknown one
                for shape in (input_shape, [None] * len(input_shape)):
                    symbol = backend.create_symbol(shape)
                    eval_inputs = [(symbol, x)]

                    out1 = backend.eval_symbol(layer(symbol), eval_inputs)
                    assert numpy.allclose(expected, out1)

                    # pickle round-trip must build an equivalent symbolic graph
                    restored = pickle.loads(pickle.dumps(layer))
                    out2 = backend.eval_symbol(restored(symbol), eval_inputs)
                    assert numpy.allclose(out1, out2)
173
+
174
+
175
def create_torch_model(use_reduce=False, add_scripted_layer=False):
    """Build a small LeNet-style torch model mixing einops layers with torch ones.

    use_reduce: replace the first MaxPool2d with an equivalent Reduce layer.
    add_scripted_layer: jit-script the Rearrange layer to exercise scripted submodules.
    """
    if not is_backend_tested("torch"):
        pytest.skip()
    from torch.nn import Sequential, Conv2d, MaxPool2d, Linear, ReLU
    from einops.layers.torch import Rearrange, Reduce, EinMix
    import torch.jit

    pooling = Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2) if use_reduce else MaxPool2d(kernel_size=2)
    flatten = Rearrange("b c h w -> b (c h w)")
    if add_scripted_layer:
        flatten = torch.jit.script(flatten)

    return Sequential(
        Conv2d(3, 6, kernel_size=(5, 5)),
        pooling,
        Conv2d(6, 16, kernel_size=(5, 5)),
        Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2),
        flatten,
        Linear(16 * 5 * 5, 120),
        ReLU(),
        Linear(120, 84),
        ReLU(),
        EinMix("b c1 -> (b c2)", weight_shape="c1 c2", bias_shape="c2", c1=84, c2=84),
        EinMix("(b c2) -> b c3", weight_shape="c2 c3", bias_shape="c3", c2=84, c3=84),
        Linear(84, 10),
    )
199
+
200
+
201
def test_torch_layer():
    """State-dict transfer and jit tracing of models containing einops layers."""
    if not is_backend_tested("torch"):
        pytest.skip()
    # checked that torch is present
    import torch
    import torch.jit

    model1 = create_torch_model(use_reduce=True)
    model2 = create_torch_model(use_reduce=False)
    batch = torch.randn([10, 3, 32, 32])
    # independently initialized models should disagree...
    assert not torch.allclose(model1(batch), model2(batch))
    # ...until the weights are copied over (through a pickle round-trip)
    model2.load_state_dict(pickle.loads(pickle.dumps(model1.state_dict())))
    assert torch.allclose(model1(batch), model2(batch))

    # tracing (freezing) must preserve outputs, including away from the example input.
    # Done twice — presumably to check that repeated tracing is stable.
    for _ in range(2):
        traced = torch.jit.trace(model2, example_inputs=batch)
        torch.testing.assert_close(model1(batch), traced(batch), atol=1e-3, rtol=1e-3)
        torch.testing.assert_close(model1(batch + 1), traced(batch + 1), atol=1e-3, rtol=1e-3)
225
+
226
+
227
def test_torch_layers_scripting():
    """torch.jit.script over whole models containing einops layers."""
    if not is_backend_tested("torch"):
        pytest.skip()
    import torch

    for script_layer in [False, True]:
        eager_model = create_torch_model(use_reduce=True, add_scripted_layer=script_layer)
        scripted_model = torch.jit.script(eager_model)
        batch = torch.randn([10, 3, 32, 32])

        torch.testing.assert_close(eager_model(batch), scripted_model(batch), atol=1e-3, rtol=1e-3)
239
+
240
+
241
def test_keras_layer():
    """Keras layers: full-model save/load, json architecture round-trip, weight sharing."""
    if not is_backend_tested("tensorflow"):
        pytest.skip()
    import tensorflow as tf

    if tf.__version__ < "2.16.":
        # current implementation of layers follows the new TF interface
        pytest.skip()
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Conv2D as Conv2d, Dense as Linear, ReLU
    from einops.layers.keras import Rearrange, Reduce, EinMix, keras_custom_objects

    def create_keras_model():
        return Sequential(
            [
                Conv2d(6, kernel_size=5, input_shape=[32, 32, 3]),
                Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2),
                Conv2d(16, kernel_size=5),
                Reduce("b c (h h2) (w w2) -> b c h w", "max", h2=2, w2=2),
                Rearrange("b c h w -> b (c h w)"),
                Linear(120),
                ReLU(),
                Linear(84),
                ReLU(),
                EinMix("b c1 -> (b c2)", weight_shape="c1 c2", bias_shape="c2", c1=84, c2=84),
                EinMix("(b c2) -> b c3", weight_shape="c2 c3", bias_shape="c3", c2=84, c3=84),
                Linear(10),
            ]
        )

    model1 = create_keras_model()
    model2 = create_keras_model()

    batch = numpy.random.normal(size=[10, 32, 32, 3]).astype("float32")
    # two randomly initialized models should provide different outputs
    assert not numpy.allclose(model1.predict_on_batch(batch), model2.predict_on_batch(batch))

    # save architecture + weights in one file, reload with einops custom objects
    tmp_model_filename = "/tmp/einops_tf_model.h5"
    print("temp_path_keras1", tmp_model_filename)
    tf.keras.models.save_model(model1, tmp_model_filename)
    model3 = tf.keras.models.load_model(tmp_model_filename, custom_objects=keras_custom_objects)
    numpy.testing.assert_allclose(model1.predict_on_batch(batch), model3.predict_on_batch(batch))

    # save architecture as json and the weights separately
    weight_filename = "/tmp/einops_tf_model.weights.h5"
    model4 = tf.keras.models.model_from_json(model1.to_json(), custom_objects=keras_custom_objects)
    model1.save_weights(weight_filename)
    model4.load_weights(weight_filename)
    model2.load_weights(weight_filename)
    # a differently-initialized model loaded with the same weights must match
    numpy.testing.assert_allclose(model1.predict_on_batch(batch), model2.predict_on_batch(batch))
    # ultimate test:
    # save-load architecture, then load weights - should return the same result
    numpy.testing.assert_allclose(model1.predict_on_batch(batch), model4.predict_on_batch(batch))
299
+
300
+
301
def test_flax_layers():
    """
    One-off simple tests for Flax layers.
    Unfortunately, Flax layers have a different interface from other layers.

    Builds a tiny flax module out of einops layers, checks forward/grad
    consistency, takes one gradient step, and round-trips serialization.
    """
    if not is_backend_tested("jax"):
        pytest.skip()
    else:
        import jax
        import jax.numpy as jnp

        import flax
        from flax import linen as nn
        from einops.layers.flax import EinMix, Reduce, Rearrange

        class NN(nn.Module):
            @nn.compact
            def __call__(self, x):
                x = EinMix(
                    "b (h h2) (w w2) c -> b h w c_out", "h2 w2 c c_out", "c_out", sizes=dict(h2=2, w2=3, c=4, c_out=5)
                )(x)
                x = Rearrange("b h w c -> b (w h c)", sizes=dict(c=5))(x)
                x = Reduce("b hwc -> b", "mean", dict(hwc=2 * 3 * 5))(x)
                return x

        model = NN()
        fixed_input = jnp.ones([10, 2 * 2, 3 * 3, 4])
        params = model.init(jax.random.PRNGKey(0), fixed_input)

        def eval_at_point(params):
            return jnp.linalg.norm(model.apply(params, fixed_input))

        vandg = jax.value_and_grad(eval_at_point)
        value0 = eval_at_point(params)
        value1, grad1 = vandg(params)
        assert jnp.allclose(value0, value1)

        # one tiny gradient-descent step should not increase the value.
        # jax.tree_map was deprecated and removed (jax 0.4.26+);
        # jax.tree_util.tree_map is the stable spelling.
        params2 = jax.tree_util.tree_map(lambda x1, x2: x1 - x2 * 0.001, params, grad1)

        value2 = eval_at_point(params2)
        assert value0 >= value2, (value0, value2)

        # check serialization
        fbytes = flax.serialization.to_bytes(params)
        _loaded = flax.serialization.from_bytes(params, fbytes)
346
+
347
+
348
def test_einmix_decomposition():
    """
    Testing that einmix correctly decomposes into smaller transformations
    (optional pre-reshape, einsum, optional post-reshape) with the expected
    saved weight/bias shapes.
    """
    from einops.layers._einmix import _EinmixDebugger

    def check_core(mix, einsum_pattern, weight_shape, bias_shape):
        # shared assertions on the einsum step and parameter shapes
        assert mix.einsum_pattern == einsum_pattern
        assert mix.saved_weight_shape == weight_shape
        assert mix.saved_bias_shape == bias_shape

    # full reversal, weight over a subset of axes, no bias
    case1 = _EinmixDebugger("a b c d e -> e d c b a", weight_shape="d a b", d=2, a=3, b=5)
    assert case1.pre_reshape_pattern is None
    assert case1.post_reshape_pattern is None
    check_core(case1, "abcde,dab->edcba", [2, 3, 5], None)

    # same pattern, now with a bias over every axis
    case2 = _EinmixDebugger(
        "a b c d e -> e d c b a", weight_shape="d a b", bias_shape="a b c d e", a=1, b=2, c=3, d=4, e=5
    )
    assert case2.pre_reshape_pattern is None
    assert case2.post_reshape_pattern is None
    check_core(case2, "abcde,dab->edcba", [4, 1, 2], [5, 4, 3, 2, 1])

    # degenerate scalar weight and bias
    case3 = _EinmixDebugger("... -> ...", weight_shape="", bias_shape="")
    assert case3.pre_reshape_pattern is None
    assert case3.post_reshape_pattern is None
    check_core(case3, "...,->...", [], [])

    # ellipsis passthrough, no reshapes needed
    case4 = _EinmixDebugger("b a ... -> b c ...", weight_shape="b a c", a=1, b=2, c=3)
    assert case4.pre_reshape_pattern is None
    assert case4.post_reshape_pattern is None
    check_core(case4, "ba...,bac->bc...", [2, 1, 3], None)

    # composed input axis -> pre-reshape; composed ellipsis output -> post-reshape
    case5 = _EinmixDebugger("(b a) ... -> b c (...)", weight_shape="b a c", a=1, b=2, c=3)
    assert case5.pre_reshape_pattern == "(b a) ... -> b a ..."
    assert case5.pre_reshape_lengths == dict(a=1, b=2)
    assert case5.post_reshape_pattern == "b c ... -> b c (...)"
    check_core(case5, "ba...,bac->bc...", [2, 1, 3], None)

    # composed axes on both sides around the mixed axis
    case6 = _EinmixDebugger(
        "b ... (a c) -> b ... (a d)", weight_shape="c d", bias_shape="a d", a=1, c=3, d=4
    )
    assert case6.pre_reshape_pattern == "b ... (a c) -> b ... a c"
    assert case6.pre_reshape_lengths == dict(a=1, c=3)
    assert case6.post_reshape_pattern == "b ... a d -> b ... (a d)"
    check_core(case6, "b...ac,cd->b...ad", [3, 4], [1, 1, 4])  # (b) a d, ellipsis does not participate

    # ellipsis folded into the composed output axis
    case7 = _EinmixDebugger(
        "a ... (b c) -> a (... d b)", weight_shape="c d b", bias_shape="d b", b=2, c=3, d=4
    )
    assert case7.pre_reshape_pattern == "a ... (b c) -> a ... b c"
    assert case7.pre_reshape_lengths == dict(b=2, c=3)
    assert case7.post_reshape_pattern == "a ... d b -> a (... d b)"
    check_core(case7, "a...bc,cdb->a...db", [3, 4, 2], [1, 4, 2])  # (a) d b, ellipsis does not participate
437
+
438
def test_einmix_restrictions():
    """
    Testing different invalid einmix configurations — each must raise EinopsError.
    """
    from einops.layers._einmix import _EinmixDebugger

    # length of weight axis b is not provided
    with pytest.raises(EinopsError):
        _EinmixDebugger("a b c d e -> e d c b a", weight_shape="d a b", d=2, a=3)

    # weight axis w does not occur in the pattern
    with pytest.raises(EinopsError):
        _EinmixDebugger("a b c d e -> e d c b a", weight_shape="w a b", d=2, a=3, b=1)

    # collapsed ellipsis on the left side is not allowed
    with pytest.raises(EinopsError):
        _EinmixDebugger("(...) a -> ... a", weight_shape="a", a=1)

    # ellipsis on the right side after the bias axis is not allowed
    with pytest.raises(EinopsError):
        _EinmixDebugger("(...) a -> a ...", weight_shape="a", a=1, bias_shape="a")
tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_ops.py ADDED
@@ -0,0 +1,651 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+
3
+ import numpy
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from einops import EinopsError
8
+ from einops.einops import rearrange, reduce, repeat, _enumerate_directions
9
+ from einops.tests import collect_test_backends, is_backend_tested, FLOAT_REDUCTIONS as REDUCTIONS
10
+
11
+ imp_op_backends = collect_test_backends(symbolic=False, layers=False)
12
+ sym_op_backends = collect_test_backends(symbolic=True, layers=False)
13
+
14
# Patterns that must leave any 5D tensor completely unchanged.
identity_patterns = [
    "...->...",
    "a b c d e-> a b c d e",
    "a b c d e ...-> ... a b c d e",
    "a b c d e ...-> a ... b c d e",
    "... a b c d e -> ... a b c d e",
    "a ... e-> a ... e",
    "a ... -> a ... ",
    "a ... c d e -> a (...) c d e",
]

# Pairs of (explicit, ellipsis-based) rearrange patterns that must agree on a 5D tensor.
equivalent_rearrange_patterns = [
    ("a b c d e -> (a b) c d e", "a b ... -> (a b) ... "),
    ("a b c d e -> a b (c d) e", "... c d e -> ... (c d) e"),
    ("a b c d e -> a b c d e", "... -> ... "),
    ("a b c d e -> (a b c d e)", "... -> (...)"),
    ("a b c d e -> b (c d e) a", "a b ... -> b (...) a"),
    ("a b c d e -> b (a c d) e", "a b ... e -> b (a ...) e"),
]

# Pairs of (explicit, ellipsis-based) reduction patterns that must agree on a 5D tensor.
equivalent_reduction_patterns = [
    ("a b c d e -> ", " ... -> "),
    ("a b c d e -> (e a)", "a ... e -> (e a)"),
    ("a b c d e -> d (a e)", " a b c d e ... -> d (a e) "),
    ("a b c d e -> (a b)", " ... c d e -> (...) "),
]
40
+
41
+
42
+ def test_collapsed_ellipsis_errors_out():
43
+ x = numpy.zeros([1, 1, 1, 1, 1])
44
+ rearrange(x, "a b c d ... -> a b c ... d")
45
+ with pytest.raises(EinopsError):
46
+ rearrange(x, "a b c d (...) -> a b c ... d")
47
+
48
+ rearrange(x, "... -> (...)")
49
+ with pytest.raises(EinopsError):
50
+ rearrange(x, "(...) -> (...)")
51
+
52
+
53
def test_ellipsis_ops_numpy():
    """Identity patterns keep the array intact; equivalent pattern pairs agree.

    Also checks that ellipsis-based reduction patterns match their explicit
    counterparts for min/max/sum on a plain numpy array.
    """
    x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
    for pattern in identity_patterns:
        assert numpy.array_equal(x, rearrange(x, pattern)), pattern

    for pattern1, pattern2 in equivalent_rearrange_patterns:
        assert numpy.array_equal(rearrange(x, pattern1), rearrange(x, pattern2))

    for reduction in ["min", "max", "sum"]:
        for pattern1, pattern2 in equivalent_reduction_patterns:
            assert numpy.array_equal(reduce(x, pattern1, reduction=reduction), reduce(x, pattern2, reduction=reduction))

    # NOTE(review): the original built an `all_rearrange_patterns` list here
    # ("now just check coincidence with numpy") but never used it — dead code
    # removed; the numpy-coincidence checks live in test_ellipsis_ops_imperative.
69
+
70
+
71
def check_op_against_numpy(backend, numpy_input, pattern, axes_lengths, reduction="rearrange", is_symbolic=False):
    """
    Helper to test result of operation (rearrange or transpose) against numpy
    if reduction == 'rearrange', rearrange op is tested, otherwise reduce
    """

    def operation(x):
        if reduction == "rearrange":
            return rearrange(x, pattern, **axes_lengths)
        else:
            return reduce(x, pattern, reduction, **axes_lengths)

    numpy_result = operation(numpy_input)
    p_none_dimension = 0.5
    if is_symbolic:
        # randomly hide some dimensions to exercise partially-known symbolic shapes
        symbol_shape = [d if numpy.random.random() >= p_none_dimension else None for d in numpy_input.shape]
        symbol = backend.create_symbol(shape=symbol_shape)
        result_symbol = operation(symbol)
        backend_result = backend.eval_symbol(result_symbol, [(symbol, numpy_input)])
    else:
        backend_result = operation(backend.from_numpy(numpy_input))
        backend_result = backend.to_numpy(backend_result)

    # BUG FIX: the original computed `check_equal(numpy_result, backend_result)`
    # and discarded the boolean, so the comparison could never fail.
    assert numpy.array_equal(numpy_result, backend_result)
96
+
97
+
98
def test_ellipsis_ops_imperative():
    """Checking various patterns against numpy"""
    x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
    # rearrange patterns are loop-invariant; build the list once
    rearrange_patterns = identity_patterns + list(itertools.chain(*equivalent_rearrange_patterns))
    for is_symbolic in [True, False]:
        for backend in collect_test_backends(symbolic=is_symbolic, layers=False):
            for pattern in rearrange_patterns:
                check_op_against_numpy(
                    backend, x, pattern, axes_lengths={}, reduction="rearrange", is_symbolic=is_symbolic
                )
            for reduction in ["min", "max", "sum"]:
                for pattern in itertools.chain(*equivalent_reduction_patterns):
                    check_op_against_numpy(
                        backend, x, pattern, axes_lengths={}, reduction=reduction, is_symbolic=is_symbolic
                    )
113
+
114
+
115
def test_rearrange_array_api():
    import numpy as xp
    from einops import array_api as AA

    if xp.__version__ < "2.0.0":
        pytest.skip()

    x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
    for pattern in identity_patterns + list(itertools.chain(*equivalent_rearrange_patterns)):
        # compare the array-api implementation against the reference one
        reference = rearrange(x, pattern)
        produced = AA.rearrange(xp.from_dlpack(x), pattern)
        # `+ 0` presumably materializes a possibly-lazy/readonly result — kept from original
        assert numpy.array_equal(AA.asnumpy(produced + 0), reference)
127
+
128
+
129
def test_reduce_array_api():
    import numpy as xp
    from einops import array_api as AA

    if xp.__version__ < "2.0.0":
        pytest.skip()

    x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
    for pattern in itertools.chain(*equivalent_reduction_patterns):
        for reduction in ["min", "max", "sum"]:
            reference = reduce(x, pattern, reduction=reduction)
            produced = AA.reduce(xp.from_dlpack(x), pattern, reduction=reduction)
            # `+ 0` presumably materializes a possibly-lazy result — kept from original
            assert numpy.array_equal(AA.asnumpy(np.asarray(produced + 0)), reference)
142
+
143
+
144
def test_rearrange_consistency_numpy():
    """Rearrangements must be lossless, dtype-preserving and invertible."""
    shape = [1, 2, 3, 5, 7, 11]
    x = numpy.arange(numpy.prod(shape)).reshape(shape)
    for pattern in [
        "a b c d e f -> a b c d e f",
        "b a c d e f -> a b d e f c",
        "a b c d e f -> f e d c b a",
        "a b c d e f -> (f e) d (c b a)",
        "a b c d e f -> (f e d c b a)",
    ]:
        y = rearrange(x, pattern)
        # no element lost or invented, dtype untouched
        assert len(numpy.setdiff1d(x, y)) == 0
        assert y.dtype == x.dtype

    # grouping without reordering keeps the flat element order
    y = rearrange(x, "a b c d e f -> a (b) (c d e) f")
    assert numpy.array_equal(x.flatten(), y.flatten())

    # confusingly similar axis names still parse correctly
    y = rearrange(x, "a aa aa1 a1a1 aaaa a11 -> a aa aa1 a1a1 aaaa a11")
    assert numpy.array_equal(x, y)

    # reversing axes twice (via either side of the pattern) gives the same tensor
    forward = rearrange(x, "a b c d e f -> f e d c b a")
    backward = rearrange(x, "f e d c b a -> a b c d e f")
    assert numpy.array_equal(forward, backward)

    # a scramble followed by its inverse is the identity (minimal axis hints)
    restored = rearrange(rearrange(x, "a b c d e f -> (f d) c (e b) a"), "(f d) c (e b) a -> a b c d e f", b=2, d=5)
    assert numpy.array_equal(x, restored)

    # same round-trip with every axis length given explicitly
    sizes = dict(zip("abcdef", shape))
    scrambled = rearrange(x, "a b c d e f -> (f d) c (e b) a", **sizes)
    restored = rearrange(scrambled, "(f d) c (e b) a -> a b c d e f", **sizes)
    assert numpy.array_equal(x, restored)

    # element positions follow the permutation
    x2 = numpy.arange(2 * 3 * 4).reshape([2, 3, 4])
    moved = rearrange(x2, "a b c -> b c a")
    assert x2[1, 2, 3] == moved[2, 3, 1]
    assert x2[0, 1, 2] == moved[1, 2, 0]
180
+
181
+
182
def test_rearrange_permutations_numpy():
    # tests random permutation of axes against two independent numpy ways
    for n_axes in range(1, 10):
        data = numpy.arange(2**n_axes).reshape([2] * n_axes)
        perm = numpy.random.permutation(n_axes)
        lhs = " ".join("i" + str(axis) for axis in range(n_axes))
        rhs = " ".join("i" + str(axis) for axis in perm)
        output = rearrange(data, lhs + " -> " + rhs)
        # spot-check random elements: positions follow the permutation
        for pick in numpy.random.randint(0, 2, [10, n_axes]):
            assert data[tuple(pick)] == output[tuple(pick[perm])]

    for n_axes in range(1, 10):
        data = numpy.arange(2**n_axes).reshape([2] * n_axes)
        perm = numpy.random.permutation(n_axes)
        lhs = " ".join("i" + str(axis) for axis in range(n_axes)[::-1])
        rhs = " ".join("i" + str(axis) for axis in perm[::-1])
        output = rearrange(data, lhs + " -> " + rhs)
        assert output.shape == data.shape
        # rebuild the expected result bit by bit: each length-2 axis carries
        # one bit of the flat index; axis k of the input becomes axis perm[k]
        expected = numpy.zeros_like(data)
        for src_axis, dst_axis in enumerate(perm):
            expected |= ((data >> src_axis) & 1) << dst_axis
        assert numpy.array_equal(output, expected)
208
+
209
+
210
def test_reduction_imperatives():
    """Each reduction agrees with the equivalent numpy reduce/transpose/reshape."""
    for backend in imp_op_backends:
        print("Reduction tests for ", backend.framework_name)
        for reduction in REDUCTIONS:
            # slight redundancy for simpler order - numpy version is evaluated multiple times
            x = numpy.arange(2 * 3 * 4 * 5 * 6, dtype="int64").reshape([2, 3, 4, 5, 6])
            if reduction in ["mean", "prod"]:
                x = x / x.astype("float64").mean()
            numpy_reduce = getattr(x, reduction)
            cases = [
                ("a b c d e -> ", {}, numpy_reduce()),
                ("a ... -> ", {}, numpy_reduce()),
                ("(a1 a2) ... (e1 e2) -> ", dict(a1=1, e2=2), numpy_reduce()),
                ("a b c d e -> (e c) a", {}, numpy_reduce(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2])),
                ("a ... c d e -> (e c) a", {}, numpy_reduce(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2])),
                ("a b c d e ... -> (e c) a", {}, numpy_reduce(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2])),
                ("a b c d e -> (e c a)", {}, numpy_reduce(axis=(1, 3)).transpose(2, 1, 0).reshape([-1])),
                ("(a a2) ... -> (a2 a) ...", dict(a2=1), x),
            ]
            for pattern, axes_lengths, expected in cases:
                out = reduce(backend.from_numpy(x.copy()), pattern, reduction=reduction, **axes_lengths)
                assert numpy.allclose(backend.to_numpy(out), expected), f"Failed at {pattern}"
244
+
245
+
246
def test_reduction_symbolic():
    """Symbolic backends: reduce() with fully-known, fully-unknown and mixed shapes."""
    for backend in sym_op_backends:
        print("Reduction tests for ", backend.framework_name)
        for reduction in REDUCTIONS:
            input = numpy.arange(2 * 3 * 4 * 5 * 6, dtype="int64").reshape([2, 3, 4, 5, 6])
            input = input / input.astype("float64").mean()
            # slight redundancy for simpler order - numpy version is evaluated multiple times
            test_cases = [
                ["a b c d e -> ", {}, getattr(input, reduction)()],
                ["a ... -> ", {}, getattr(input, reduction)()],
                ["(a a2) ... (e e2) -> ", dict(a2=1, e2=1), getattr(input, reduction)()],
                [
                    "a b c d e -> (e c) a",
                    {},
                    getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2]),
                ],
                [
                    "a ... c d e -> (e c) a",
                    {},
                    getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2]),
                ],
                [
                    "a b c d e ... -> (e c) a",
                    {},
                    getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1, 2]),
                ],
                ["a b c d e -> (e c a)", {}, getattr(input, reduction)(axis=(1, 3)).transpose(2, 1, 0).reshape([-1])],
                ["(a a2) ... -> (a2 a) ...", dict(a2=1), input],
            ]
            for pattern, axes_lengths, expected_numpy_result in test_cases:
                # fully-known shape, then fully-unknown shape
                shapes = [input.shape, [None for _ in input.shape]]
                for shape in shapes:
                    sym = backend.create_symbol(shape)
                    result_sym = reduce(sym, pattern, reduction=reduction, **axes_lengths)
                    result = backend.eval_symbol(result_sym, [(sym, input)])
                    assert numpy.allclose(result, expected_numpy_result)

                # mixed case: axes named in the pattern become None (unknown) in
                # the symbolic shape and are supplied as explicit lengths instead.
                # (a pointless `if True:` wrapper around this section was removed)
                shape = []
                _axes_lengths = {**axes_lengths}
                for axis, length in zip("abcde", input.shape):
                    # filling as much as possible with Nones
                    if axis in pattern:
                        shape.append(None)
                        _axes_lengths[axis] = length
                    else:
                        shape.append(length)
                sym = backend.create_symbol(shape)
                result_sym = reduce(sym, pattern, reduction=reduction, **_axes_lengths)
                result = backend.eval_symbol(result_sym, [(sym, input)])
                assert numpy.allclose(result, expected_numpy_result)
297
+
298
+
299
def test_reduction_stress_imperatives():
    """Random shapes/permutations/partial reductions vs numpy's own reductions."""
    for backend in imp_op_backends:
        print("Stress-testing reduction for ", backend.framework_name)
        for reduction in REDUCTIONS + ("rearrange",):
            dtype, coincide = "int64", numpy.array_equal
            if reduction in ["mean", "prod"]:
                dtype, coincide = "float64", numpy.allclose
            # some frameworks cap the number of tensor dimensions
            max_dim = 11
            if "oneflow" in backend.framework_name:
                max_dim = 7
            if "paddle" in backend.framework_name:
                max_dim = 9
            for n_axes in range(max_dim):
                shape = numpy.random.randint(2, 4, size=n_axes)
                permutation = numpy.random.permutation(n_axes)
                # first `skipped` axes of the permutation are reduced away
                skipped = 0 if reduction == "rearrange" else numpy.random.randint(n_axes + 1)
                left = " ".join("x" + str(i) for i in range(n_axes))
                right = " ".join("x" + str(i) for i in permutation[skipped:])
                pattern = left + "->" + right
                x = numpy.arange(1, 1 + numpy.prod(shape), dtype=dtype).reshape(shape)
                if reduction == "prod":
                    x /= x.mean()  # to avoid overflows
                result1 = reduce(x, pattern, reduction=reduction)
                result2 = x.transpose(permutation)
                if skipped > 0:
                    result2 = getattr(result2, reduction)(axis=tuple(range(skipped)))
                assert coincide(result1, result2)
                check_op_against_numpy(backend, x, pattern, reduction=reduction, axes_lengths={}, is_symbolic=False)
329
+
330
+
331
def test_reduction_with_callable_imperatives():
    """A custom callable reduction (logsumexp) agrees across frameworks."""
    x_numpy = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6]).astype("float32")
    x_numpy /= x_numpy.max()

    def logsumexp_torch(x, tuple_of_axes):
        return x.logsumexp(tuple_of_axes)

    def logsumexp_tf(x, tuple_of_axes):
        import tensorflow as tf

        return tf.reduce_logsumexp(x, tuple_of_axes)

    def logsumexp_keras(x, tuple_of_axes):
        import tensorflow.keras.backend as k

        return k.logsumexp(x, tuple_of_axes)

    def logsumexp_numpy(x, tuple_of_axes):
        # very naive logsumexp to compare to
        minused = x.max(tuple_of_axes)
        y = numpy.exp(x - x.max(tuple_of_axes, keepdims=True))
        return numpy.log(numpy.sum(y, axis=tuple_of_axes)) + minused

    from einops._backends import TorchBackend, TensorflowBackend, TFKerasBackend, NumpyBackend

    backend2callback = {
        TorchBackend.framework_name: logsumexp_torch,
        TensorflowBackend.framework_name: logsumexp_tf,
        TFKerasBackend.framework_name: logsumexp_keras,
        NumpyBackend.framework_name: logsumexp_numpy,
    }

    for backend in imp_op_backends:
        if backend.framework_name not in backend2callback:
            continue

        backend_callback = backend2callback[backend.framework_name]
        x_backend = backend.from_numpy(x_numpy)
        # NOTE(review): pattern2 is only printed, never exercised — presumably
        # only the explicit pattern form is intended here; confirm.
        for pattern1, pattern2 in equivalent_reduction_patterns:
            print("Test reduction with callable for ", backend.framework_name, pattern1, pattern2)
            output_numpy = reduce(x_numpy, pattern1, reduction=logsumexp_numpy)
            output_backend = reduce(x_backend, pattern1, reduction=backend_callback)
            assert numpy.allclose(
                output_numpy,
                backend.to_numpy(output_backend),
            )
380
+
381
+
382
def test_enumerating_directions():
    """_enumerate_directions gives identical per-axis tensors on every backend."""
    for backend in imp_op_backends:
        print("testing directions for", backend.framework_name)
        for shape in [[], [1], [1, 1, 1], [2, 3, 5, 7]]:
            x = numpy.arange(numpy.prod(shape)).reshape(shape)
            numpy_axes = _enumerate_directions(x)
            backend_axes = _enumerate_directions(backend.from_numpy(x))
            assert len(numpy_axes) == len(backend_axes) == len(shape)
            for ax_np, ax_be in zip(numpy_axes, backend_axes):
                ax_be = backend.to_numpy(ax_be)
                assert ax_np.shape == ax_be.shape
                assert numpy.allclose(ax_np, ax_be)
394
+
395
+
396
def test_concatenations_and_stacking():
    """A list of arrays is stacked along a new leading axis by rearrange."""
    for backend in imp_op_backends:
        print("testing shapes for ", backend.framework_name)
        for n_arrays in [1, 2, 5]:
            for shape in [[], [1], [1, 1], [2, 3, 5, 7], [1] * 6]:
                np_arrays = [numpy.arange(i, i + numpy.prod(shape)).reshape(shape) for i in range(n_arrays)]
                be_arrays = [backend.from_numpy(array) for array in np_arrays]
                # identity pattern stacks the list along a fresh first axis
                stacked_ref = numpy.asarray(np_arrays)
                stacked_np = rearrange(np_arrays, "...->...")
                stacked_be = rearrange(be_arrays, "...->...")
                assert numpy.array_equal(stacked_ref, stacked_np)
                assert numpy.array_equal(stacked_np, backend.to_numpy(stacked_be))

                # the stacking axis can also be moved in the same call
                moved_np = rearrange(np_arrays, "b ... -> ... b")
                moved_be = rearrange(be_arrays, "b ... -> ... b")
                assert numpy.array_equal(moved_np, backend.to_numpy(moved_be))
413
+
414
+
415
def test_gradients_imperatives():
    # lazy - just checking reductions
    for reduction in REDUCTIONS:
        if reduction in ("any", "all"):
            continue  # non-differentiable ops
        x = numpy.arange(1, 1 + 2 * 3 * 4).reshape([2, 3, 4]).astype("float32")
        gradients = {}
        for backend in imp_op_backends:
            y0 = backend.from_numpy(x)
            if not hasattr(y0, "grad"):
                continue  # backend does not expose gradients on tensors

            # chain several reductions down to a scalar, then backprop
            y = reduce(y0, "a b c -> c a", reduction=reduction)
            y = reduce(y, "c a -> a c", reduction=reduction)
            y = reduce(y, "a (c1 c2) -> a", reduction=reduction, c1=2)
            y = reduce(y, "... -> ", reduction=reduction)
            y.backward()
            gradients[backend.framework_name] = backend.to_numpy(y0.grad)

        # every gradient-capable backend must agree with every other
        print("comparing gradients for", gradients.keys())
        for name1, grad1 in gradients.items():
            for name2, grad2 in gradients.items():
                assert numpy.allclose(grad1, grad2), [name1, name2, "provided different gradients"]
440
+
441
+
442
def test_tiling_imperatives():
    """backend.tile matches numpy.tile for each backend."""
    for backend in imp_op_backends:
        print("Tiling tests for ", backend.framework_name)
        x = numpy.arange(2 * 3 * 5, dtype="int64").reshape([2, 1, 3, 1, 5])
        for repeats in [(1, 1, 1, 1, 1), (1, 2, 1, 3, 1), (3, 1, 1, 4, 1)]:
            expected = numpy.tile(x, repeats)
            actual = backend.to_numpy(backend.tile(backend.from_numpy(x), repeats))
            assert numpy.array_equal(actual, expected)
457
+
458
+
459
def test_tiling_symbolic():
    """backend.tile matches numpy.tile for symbolic backends, known and unknown shapes."""
    for backend in sym_op_backends:
        print("Tiling tests for ", backend.framework_name)
        x = numpy.arange(2 * 3 * 5, dtype="int64").reshape([2, 1, 3, 1, 5])
        for repeats in [(1, 1, 1, 1, 1), (1, 2, 1, 3, 1), (3, 1, 1, 4, 1)]:
            expected = numpy.tile(x, repeats)

            # fully-known symbolic shape
            sym = backend.create_symbol(x.shape)
            result = backend.eval_symbol(backend.tile(sym, repeats), [[sym, x]])
            assert numpy.array_equal(result, expected)

            # fully-unknown symbolic shape
            sym = backend.create_symbol([None] * len(x.shape))
            result = backend.eval_symbol(backend.tile(sym, repeats), [[sym, x]])
            assert numpy.array_equal(result, expected)
477
+
478
+
479
repeat_test_cases = [
    # all assume that input has shape [2, 3, 5]
    ("a b c -> c a b", dict()),
    ("a b c -> (c copy a b)", dict(copy=2, a=2, b=3, c=5)),
    ("a b c -> (a copy) b c ", dict(copy=1)),
    ("a b c -> (c a) (copy1 b copy2)", dict(a=2, copy1=1, copy2=2)),
    ("a ... -> a ... copy", dict(copy=4)),
    ("... c -> ... (copy1 c copy2)", dict(copy1=1, copy2=2)),
    ("... -> ... ", dict()),
    (" ... -> copy1 ... copy2 ", dict(copy1=2, copy2=3)),
    ("a b c -> copy1 a copy2 b c () ", dict(copy1=2, copy2=1)),
]


def check_reversion(x, repeat_pattern, **sizes):
    """Checks repeat pattern by running reduction"""
    lhs, rhs = repeat_pattern.split("->")
    # reducing the repeated tensor back with min and max must reproduce the input
    inverse_pattern = rhs + "->" + lhs
    repeated = repeat(x, repeat_pattern, **sizes)
    for reduction in ("min", "max"):
        restored = reduce(repeated, inverse_pattern, reduction=reduction, **sizes)
        assert numpy.array_equal(x, restored)
502
+
503
+
504
def test_repeat_numpy():
    # check repeat vs reduce. Repeat works ok if reverse reduction with min and max work well
    x = numpy.arange(2 * 3 * 5).reshape([2, 3, 5])
    assert numpy.array_equal(x[None], repeat(x, "a b c -> copy a b c ", copy=1))
    for pattern, axis_dimensions in repeat_test_cases:
        check_reversion(x, pattern, **axis_dimensions)
511
+
512
+
513
def test_repeat_imperatives():
    """repeat() on a backend tensor matches repeat() on the numpy original."""
    x = numpy.arange(2 * 3 * 5).reshape([2, 3, 5])
    for backend in imp_op_backends:
        print("Repeat tests for ", backend.framework_name)
        for pattern, axis_dimensions in repeat_test_cases:
            expected = repeat(x, pattern, **axis_dimensions)
            actual = backend.to_numpy(repeat(backend.from_numpy(x), pattern, **axis_dimensions))
            assert numpy.array_equal(actual, expected)
524
+
525
+
526
def test_repeat_symbolic():
    """repeat() on a symbolic tensor matches repeat() on the numpy original."""
    x = numpy.arange(2 * 3 * 5).reshape([2, 3, 5])

    for backend in sym_op_backends:
        print("Repeat tests for ", backend.framework_name)
        for pattern, axis_dimensions in repeat_test_cases:
            expected = repeat(x, pattern, **axis_dimensions)
            sym = backend.create_symbol(x.shape)
            actual = backend.eval_symbol(repeat(sym, pattern, **axis_dimensions), [[sym, x]])
            assert numpy.array_equal(actual, expected)
538
+
539
+
540
def test_repeat_array_api():
    import numpy as xp
    from einops import array_api as AA

    if xp.__version__ < "2.0.0":
        pytest.skip()

    x = numpy.arange(2 * 3 * 5).reshape([2, 3, 5])
    for pattern, axis_dimensions in repeat_test_cases:
        reference = repeat(x, pattern, **axis_dimensions)
        produced = AA.repeat(xp.from_dlpack(x), pattern, **axis_dimensions)
        # `+ 0` presumably materializes a possibly-lazy/readonly result — kept from original
        assert numpy.array_equal(AA.asnumpy(produced + 0), reference)
554
+
555
+
556
test_cases_repeat_anonymous = [
    # all assume that input has shape [1, 2, 4, 6]
    ("a b c d -> c a d b", dict()),
    ("a b c d -> (c 2 d a b)", dict(a=1, c=4, d=6)),
    ("1 b c d -> (d copy 1) 3 b c ", dict(copy=3)),
    ("1 ... -> 3 ... ", dict()),
    ("() ... d -> 1 (copy1 d copy2) ... ", dict(copy1=2, copy2=3)),
    ("1 b c d -> (1 1) (1 b) 2 c 3 d (1 1)", dict()),
]


def test_anonymous_axes():
    """Repeat patterns with anonymous (numeric / unit) axes are reversible."""
    x = numpy.arange(1 * 2 * 4 * 6).reshape([1, 2, 4, 6])
    for pattern, axis_dimensions in test_cases_repeat_anonymous:
        check_reversion(x, pattern, **axis_dimensions)
571
+
572
+
573
def test_list_inputs():
    """Passing a list of arrays must behave as if the list were stacked first."""
    x = numpy.arange(2 * 3 * 4 * 5 * 6).reshape([2, 3, 4, 5, 6])
    as_list = list(x)

    assert numpy.array_equal(
        rearrange(as_list, "... -> (...)"),
        rearrange(x, "... -> (...)"),
    )
    assert numpy.array_equal(
        reduce(as_list, "a ... e -> (...)", "min"),
        reduce(x, "a ... e -> (...)", "min"),
    )
    assert numpy.array_equal(
        repeat(as_list, "... -> b (...)", b=3),
        repeat(x, "... -> b (...)", b=3),
    )
588
+
589
+
590
def test_torch_compile_with_dynamic_shape():
    if not is_backend_tested("torch"):
        pytest.skip()
    import torch

    # somewhat reasonable debug messages
    torch._dynamo.config.verbose = True

    def func1(x):
        # test contains ellipsis
        a, b, c, *other = x.shape
        x = rearrange(x, "(a a2) b c ... -> b (c a2) (a ...)", a2=2)
        # test contains passing expression as axis length
        x = reduce(x, "b ca2 A -> b A", "sum", ca2=c * 2)
        return x

    # seems can't test static and dynamic in the same test run.
    # func1_compiled_static = torch.compile(func1, dynamic=False, fullgraph=True, backend='aot_eager')
    compiled = torch.compile(func1, dynamic=True, fullgraph=True, backend="aot_eager")

    # second size changes dimensionality and every shape element
    for size in ([4, 5, 6, 3], [6, 3, 4, 2, 3]):
        x = torch.randn(size=size)
        assert torch.equal(compiled(x), func1(x))
615
+
616
+
617
def bit_count(x):
    """Return the number of set bits (popcount) of the non-negative integer *x*.

    The original summed bits 0..19 by hand, silently truncating any value with
    higher bits set; counting ones in the binary representation handles
    integers of any width while giving identical results for the 6-bit inputs
    used in these tests.
    """
    return bin(x).count("1")
619
+
620
+
621
def test_reduction_imperatives_booleans():
    """Checks that any/all reduction works in all frameworks"""
    # parity-of-popcount pattern: guarantees any != all along every axis
    x_np = numpy.asarray([(bit_count(x) % 2) == 0 for x in range(2**6)]).reshape([2] * 6)
    for backend in imp_op_backends:
        print("Reduction any/all tests for ", backend.framework_name)

        for axis in range(6):
            expected_result_any = numpy.any(x_np, axis=axis, keepdims=True)
            expected_result_all = numpy.all(x_np, axis=axis, keepdims=True)
            # sanity: the two reductions must actually differ on this input
            assert not numpy.array_equal(expected_result_any, expected_result_all)

            names = list("abcdef")
            out_names = names[:axis] + ["1"] + names[axis + 1 :]
            pattern = " ".join(names) + " -> " + " ".join(out_names)

            res_any = reduce(backend.from_numpy(x_np), pattern, reduction="any")
            res_all = reduce(backend.from_numpy(x_np), pattern, reduction="all")

            assert numpy.array_equal(expected_result_any, backend.to_numpy(res_any))
            assert numpy.array_equal(expected_result_all, backend.to_numpy(res_all))

        # reducing two leading axes at once, using ellipsis
        expected_result_any = numpy.any(x_np, axis=(0, 1), keepdims=True)
        expected_result_all = numpy.all(x_np, axis=(0, 1), keepdims=True)
        pattern = "a b ... -> 1 1 ..."
        res_any = reduce(backend.from_numpy(x_np), pattern, reduction="any")
        res_all = reduce(backend.from_numpy(x_np), pattern, reduction="all")
        assert numpy.array_equal(expected_result_any, backend.to_numpy(res_any))
        assert numpy.array_equal(expected_result_all, backend.to_numpy(res_all))
tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_other.py ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from doctest import testmod
2
+
3
+ import numpy
4
+ import pytest
5
+
6
+ import einops
7
+ import einops.layers
8
+ import einops.parsing
9
+ from einops._backends import AbstractBackend
10
+ from einops.einops import rearrange, parse_shape, _optimize_transformation
11
+ from einops.tests import collect_test_backends, is_backend_tested
12
+
13
+ __author__ = "Alex Rogozhnikov"
14
+
15
+
16
def test_doctests_examples():
    # tests docstrings, additionally
    for module in (einops.layers, einops.einops):
        testmod(module, raise_on_error=True, extraglobs=dict(np=numpy))
20
+
21
+
22
def test_backends_installed():
    """
    This test will fail if some of backends are not installed or can't be imported
    Other tests will just work and only test installed backends.
    """
    from . import parse_backends_to_test

    requested = parse_backends_to_test()
    failures = []
    for backend_type in AbstractBackend.__subclasses__():
        if backend_type.framework_name not in requested:
            continue
        try:
            backend_type()  # instantiation imports the underlying framework
        except Exception as e:
            failures.append((backend_type.framework_name, e))
    assert not failures, failures
40
+
41
+
42
def test_optimize_transformations_numpy():
    print("Testing optimizations")
    shapes = [[2] * n_dimensions for n_dimensions in range(14)]
    shapes += [[3] * n_dimensions for n_dimensions in range(6)]
    shapes += [[2, 3, 5, 7], [2, 3, 5, 7, 11, 17]]

    for shape in shapes:
        for _attempt in range(5):
            n_dimensions = len(shape)
            x = numpy.random.randint(0, 2**12, size=shape).reshape([-1])
            init_shape = shape[:]
            n_reduced = numpy.random.randint(0, n_dimensions + 1)
            reduced_axes = tuple(numpy.random.permutation(n_dimensions)[:n_reduced])
            axes_reordering = numpy.random.permutation(n_dimensions - n_reduced)
            final_shape = numpy.random.randint(0, 1024, size=333)  # just random

            combination2 = _optimize_transformation(init_shape, reduced_axes, axes_reordering, final_shape)
            init_shape2, reduced_axes2, axes_reordering2, final_shape2 = combination2

            # the optimized plan keeps the final shape and yields the same result
            assert numpy.array_equal(final_shape, final_shape2)
            result1 = x.reshape(init_shape).sum(axis=reduced_axes).transpose(axes_reordering).reshape([-1])
            result2 = x.reshape(init_shape2).sum(axis=reduced_axes2).transpose(axes_reordering2).reshape([-1])
            assert numpy.array_equal(result1, result2)

            # testing we can't optimize this formula again: optimization is idempotent
            combination3 = _optimize_transformation(*combination2)
            for a, b in zip(combination2, combination3):
                assert numpy.array_equal(a, b)
72
+
73
+
74
+ _IMPERATIVE_BACKENDS = collect_test_backends(symbolic=False, layers=False)
75
+
76
+ x_np = numpy.zeros([10, 20, 30, 40])
77
+
78
+
79
def test_parse_shape_imperative():
    for backend in _IMPERATIVE_BACKENDS:
        print("Shape parsing for ", backend.framework_name)
        parsed1 = parse_shape(x_np, "a b c d")
        parsed2 = parse_shape(backend.from_numpy(x_np), "a b c d")
        expected = dict(a=10, b=20, c=30, d=40)
        assert parsed1 == parsed2 == expected
        # and a deliberately wrong shape dict compares unequal
        wrong = dict(a=1, b=20, c=30, d=40)
        assert parsed1 != wrong != parsed2
86
+
87
+
88
def test_underscore():
    # all positions skipped with "_" -> nothing is reported
    for backend in _IMPERATIVE_BACKENDS:
        assert parse_shape(x_np, "_ _ _ _") == parse_shape(backend.from_numpy(x_np), "_ _ _ _") == dict()


def test_underscore_one():
    # a single named axis among underscores
    for backend in _IMPERATIVE_BACKENDS:
        expected = dict(hello=40)
        assert parse_shape(x_np, "_ _ _ hello") == parse_shape(backend.from_numpy(x_np), "_ _ _ hello") == expected


def test_underscore_several():
    # several named axes; digits are allowed inside axis names
    for backend in _IMPERATIVE_BACKENDS:
        expected = dict(a1=30, a1a111a=40)
        assert parse_shape(x_np, "_ _ a1 a1a111a") == parse_shape(backend.from_numpy(x_np), "_ _ a1 a1a111a") == expected
107
+
108
+
109
def test_repeating():
    # duplicated axis names in a parse_shape pattern are an error
    with pytest.raises(einops.EinopsError):
        parse_shape(x_np, "a a b b")

    for backend in _IMPERATIVE_BACKENDS:
        with pytest.raises(einops.EinopsError):
            parse_shape(backend.from_numpy(x_np), "a a b b")
116
+
117
+
118
def test_ellipsis():
    # ellipsis absorbs any number of leading/middle/trailing dimensions
    cases = [
        ([10, 20], "...", dict()),
        ([10], "... a", dict(a=10)),
        ([10, 20], "... a", dict(a=20)),
        ([10, 20, 30], "... a", dict(a=30)),
        ([10, 20, 30, 40], "... a", dict(a=40)),
        ([10], "a ...", dict(a=10)),
        ([10, 20], "a ...", dict(a=10)),
        ([10, 20, 30], "a ...", dict(a=10)),
        ([10, 20, 30, 40], "a ...", dict(a=10)),
        ([10, 20, 30, 40], " a ... b", dict(a=10, b=40)),
        ([10, 40], " a ... b", dict(a=10, b=40)),
    ]
    for backend in _IMPERATIVE_BACKENDS:
        for shape, pattern, expected in cases:
            x = numpy.ones(shape)
            assert parse_shape(x, pattern) == parse_shape(backend.from_numpy(x), pattern) == expected
137
+
138
+
139
def test_parse_with_anonymous_axes():
    # anonymous axes (literal lengths and "()") match but are not reported
    cases = [
        ([1, 2, 3, 4], "1 2 3 a", dict(a=4)),
        ([10, 1, 2], "a 1 2", dict(a=10)),
        ([10, 1, 2], "a () 2", dict(a=10)),
    ]
    for backend in _IMPERATIVE_BACKENDS:
        for shape, pattern, expected in cases:
            x = numpy.ones(shape)
            assert parse_shape(x, pattern) == parse_shape(backend.from_numpy(x), pattern) == expected
150
+
151
+
152
def test_failures():
    """parse_shape must reject patterns that cannot match the tensor's rank/sizes."""
    for backend in _IMPERATIVE_BACKENDS:
        # every pattern below must fail for a tensor of shape [1, 2, 3, 4]
        # (a duplicate "a b c ()" entry from the original list was removed)
        for shape, pattern in [
            ([1, 2, 3, 4], "a b c"),
            ([1, 2, 3, 4], "2 a b c"),
            ([1, 2, 3, 4], "a b c ()"),
            ([1, 2, 3, 4], "a b c d e"),
            ([1, 2, 3, 4], "a b c d e ..."),
        ]:
            # build the input outside the raises-block, so that only
            # parse_shape itself may trigger the expected error
            x = numpy.ones(shape)
            with pytest.raises(RuntimeError):
                parse_shape(backend.from_numpy(x), pattern)
166
+
167
+
168
# Symbolic (graph-building) backends, in both plain and layer flavors.
_SYMBOLIC_BACKENDS = [
    *collect_test_backends(symbolic=True, layers=False),
    *collect_test_backends(symbolic=True, layers=True),
]

# tensorflow.keras needs special way to compile,
# shape vars can be used only inside layers but not as outputs
_SYMBOLIC_BACKENDS = [backend for backend in _SYMBOLIC_BACKENDS if backend.framework_name != "tensorflow.keras"]
176
+
177
+
178
@pytest.mark.parametrize("backend", _SYMBOLIC_BACKENDS)
def test_parse_shape_symbolic(backend):
    """parse_shape on symbolic tensors: statically known dims come back as
    ints, unknown dims as backend symbols evaluable against a concrete feed.
    The parsed dict is then fed back into rearrange as **axis_sizes."""
    for shape in [
        [10, 20, 30, 40],  # fully static
        [10, 20, None, None],  # partially dynamic
        [None, None, None, None],  # fully dynamic
    ]:
        print(
            f"special shape parsing {backend.framework_name=} {shape=}",
        )
        input_symbol = backend.create_symbol(shape)

        shape_placeholder = parse_shape(input_symbol, "a b c d")
        # NOTE: rebinding `shape` shadows the loop variable (loop value no longer needed)
        shape = {}
        for name, symbol in shape_placeholder.items():
            # ints are already resolved; symbols need evaluation with a concrete tensor
            shape[name] = (
                symbol
                if isinstance(symbol, int)
                else backend.eval_symbol(symbol, [(input_symbol, numpy.zeros([10, 20, 30, 40]))])
            )
        print(shape)
        # parse_shape output (possibly containing symbols) is usable as **axis_sizes
        result_placeholder = rearrange(
            input_symbol, "a b (c1 c2) (d1 d2) -> (a b d1) c1 (c2 d2)", **parse_shape(input_symbol, "a b c1 _"), d2=2
        )
        result = backend.eval_symbol(result_placeholder, [(input_symbol, numpy.zeros([10, 20, 30, 40]))])
        print(result.shape)
        # a=10 b=20 c1=30 c2=1 d1=20 d2=2 -> (10*20*20, 30, 1*2)
        assert result.shape == (10 * 20 * 20, 30, 1 * 2)
        assert numpy.allclose(result, 0)
206
+
207
+
208
@pytest.mark.parametrize("backend", _SYMBOLIC_BACKENDS)
def test_parse_shape_symbolic_ellipsis(backend):
    """Ellipsis handling of parse_shape on symbolic tensors with unknown dims."""
    test_cases = [
        # (concrete shape for evaluation, symbolic shape, pattern, expected)
        ([10, 20], [None, None], "...", dict()),
        ([10], [None], "... a", dict(a=10)),
        ([10, 20], [None, None], "... a", dict(a=20)),
        ([10, 20, 30], [None, None, None], "... a", dict(a=30)),
        ([10, 20, 30, 40], [None, None, None, None], "... a", dict(a=40)),
        ([10], [None], "a ...", dict(a=10)),
        ([10, 20], [None, None], "a ...", dict(a=10)),
        ([10, 20, 30], [None, None, None], "a ...", dict(a=10)),
        ([10, 20, 30, 40], [None, None, None, None], "a ...", dict(a=10)),
        ([10, 20, 30, 40], [None, None, None, None], " a ... b", dict(a=10, b=40)),
        ([10, 40], [None, None], " a ... b ", dict(a=10, b=40)),
    ]
    for static_shape, shape, pattern, expected in test_cases:
        input_symbol = backend.create_symbol(shape)
        placeholder = parse_shape(input_symbol, pattern)
        feed = [(input_symbol, numpy.zeros(static_shape))]
        # ints are already resolved; symbols are evaluated against the feed
        resolved = {
            name: value if isinstance(value, int) else backend.eval_symbol(value, feed)
            for name, value in placeholder.items()
        }
        assert resolved == expected
232
+
233
+
234
def test_is_float_type():
    """Backend.is_float_type distinguishes float tensors from integer ones."""
    all_backends = collect_test_backends(symbolic=False, layers=False)
    all_backends += collect_test_backends(symbolic=False, layers=True)
    for backend in all_backends:
        for dtype in ["int32", "int64", "float32", "float64"]:
            expect_float = "float" in dtype
            tensor = backend.from_numpy(numpy.zeros([3, 4, 5], dtype=dtype))
            assert backend.is_float_type(tensor) == expect_float, (dtype, backend, tensor.dtype)
243
+
244
+
245
def test_torch_compile():
    """
    Test ensures that allow_ops_in_compiled_graph allows compiling in a single graph
    Additionally we ensure that after compilation cache works properly
    (by changing shapes and patterns)
    We additionally check that pack/unpack still can be handled
    despite variable number of inputs/outputs
    """
    if not is_backend_tested("torch"):
        pytest.skip()
    import torch
    from torch import nn
    from einops import repeat, reduce, pack, unpack, einsum
    from einops._torch_specific import allow_ops_in_compiled_graph

    # registers einops ops so torch.compile can trace them without graph breaks
    allow_ops_in_compiled_graph()

    class TorchModuleWithOperations(nn.Module):
        def __init__(self) -> None:
            super().__init__()

        def forward(self, x_abc, suffix=""):
            a, b, c = x_abc.shape

            # renames axes a/c/d by appending `suffix`, forcing a fresh pattern
            # string (and thus a cache miss) for each distinct suffix
            def suf(pattern):
                parts = pattern.split()
                return " ".join([p if p[-1] not in "acd" else p + suffix for p in parts])

            # patterns look a bit strange because names a, c, d will be modified on every run
            # by suf function
            x_abcd = repeat(x_abc, suf("a b c -> a b c 4"))
            x_abc = reduce(x_abcd, suf("a b c d -> a b c"), "min")
            # number of packed tensors varies with suffix length on purpose
            x_abdc, ps = pack([x_abc] * (2 + len(suffix)), suf("a b * c"))
            x_array = unpack(rearrange(x_abdc, suf("a b d c -> (a b ) 1 c d")), ps, "ab one1 c *")
            x1 = x_array[0] + len(x_array)
            x1 = rearrange(x1, suf("(a b ) 1 c -> a b c"), b=b)
            addition = einsum(x_abc, x_abcd, suf("a b c , a b c d -> d"))[0]
            return x1 + addition

    original = TorchModuleWithOperations()
    # fullgraph=True fails loudly on any graph break; aot_eager avoids a real codegen backend
    compiled = torch.compile(original, fullgraph=True, backend="aot_eager")
    # vary both input shapes and patterns to exercise the compilation cache
    for size in [10, 20, 40]:
        x = torch.rand([size, size + 1, size + 2])
        for suffix in ["", "suf1", "other_suffix"]:
            result1 = compiled(x, suffix)
            result2 = original(x, suffix)
            assert torch.allclose(result1, result2)
tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_packing.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dataclasses
2
+ import typing
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from einops import EinopsError, asnumpy, pack, unpack
8
+ from einops.tests import collect_test_backends
9
+
10
+
11
def pack_unpack(xs, pattern):
    """Round-trip helper: pack `xs`, unpack the packed tensor, compare to originals.

    Bug fix: the original called ``unpack(xs, ...)`` — unpacking the input
    *list* instead of ``x``, the packed tensor returned by ``pack`` — so the
    round trip never actually exercised the packed result.
    """
    x, ps = pack(xs, pattern)
    unpacked = unpack(x, ps, pattern)
    assert len(unpacked) == len(xs)
    for a, b in zip(unpacked, xs):
        assert np.allclose(asnumpy(a), asnumpy(b))
17
+
18
+
19
def unpack_and_pack(x, ps, pattern: str):
    """Unpack `x` into pieces per `ps`, re-pack them, and verify the round
    trip reproduces `x`. Returns the unpacked pieces for further checks."""
    pieces = unpack(x, ps, pattern)
    repacked, _ps_roundtrip = pack(pieces, pattern=pattern)

    assert np.allclose(asnumpy(repacked), asnumpy(x))
    return pieces
25
+
26
+
27
def unpack_and_pack_against_numpy(x, ps, pattern: str):
    """Run unpack+pack on the backend tensor and on its numpy copy; require
    both to fail with the same exception type, or to produce identical results."""
    capturer_backend = CaptureException()
    capturer_numpy = CaptureException()

    with capturer_backend:
        unpacked = unpack(x, ps, pattern)
        packed, ps2 = pack(unpacked, pattern=pattern)

    with capturer_numpy:
        x_np = asnumpy(x)
        unpacked_np = unpack(x_np, ps, pattern)
        packed_np, ps3 = pack(unpacked_np, pattern=pattern)

    # backend and numpy must agree on success/failure (same exception type)
    assert type(capturer_numpy.exception) == type(capturer_backend.exception)  # noqa E721
    if capturer_numpy.exception is not None:
        # both failed
        return
    else:
        # neither failed, check results are identical
        assert np.allclose(asnumpy(packed), asnumpy(x))
        assert np.allclose(asnumpy(packed_np), asnumpy(x))
        assert len(unpacked) == len(unpacked_np)
        for a, b in zip(unpacked, unpacked_np):
            assert np.allclose(asnumpy(a), b)
51
+
52
+
53
class CaptureException:
    """Context manager that suppresses any exception raised in its body and
    stores it on ``self.exception`` (``None`` when the body succeeded).

    Improvement: ``__enter__`` now returns ``self`` so the manager can also
    be used as ``with CaptureException() as cap:``; existing callers that
    ignore the return value are unaffected.
    """

    def __enter__(self):
        self.exception = None
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.exception = exc_val
        # returning True suppresses the exception so the caller can inspect it
        return True
60
+
61
+
62
def test_numpy_trivial(H=13, W=17):
    """pack on numpy arrays must match the equivalent stack/concatenate calls,
    and invalid axis names in the pattern must raise EinopsError."""

    def rand(*shape):
        return np.random.random(shape)

    # exact comparison: dtype, shape and all values must match
    def check(a, b):
        assert a.dtype == b.dtype
        assert a.shape == b.shape
        assert np.all(a == b)

    r, g, b = rand(3, H, W)
    embeddings = rand(H, W, 32)

    # packing same-shape arrays along a new axis is equivalent to np.stack
    check(
        np.stack([r, g, b], axis=2),
        pack([r, g, b], "h w *")[0],
    )
    check(
        np.stack([r, g, b], axis=1),
        pack([r, g, b], "h * w")[0],
    )
    check(
        np.stack([r, g, b], axis=0),
        pack([r, g, b], "* h w")[0],
    )

    # packing along an existing axis is equivalent to np.concatenate
    check(
        np.concatenate([r, g, b], axis=1),
        pack([r, g, b], "h *")[0],
    )
    check(
        np.concatenate([r, g, b], axis=0),
        pack([r, g, b], "* w")[0],
    )

    # mixed ranks: 2d maps get a trailing unit axis before concatenation
    i = np.index_exp[:, :, None]
    check(
        np.concatenate([r[i], g[i], b[i], embeddings], axis=2),
        pack([r, g, b, embeddings], "h w *")[0],
    )

    with pytest.raises(EinopsError):
        pack([r, g, b, embeddings], "h w nonexisting_axis *")

    pack([r, g, b], "some_name_for_H some_name_for_w1 *")

    with pytest.raises(EinopsError):
        pack([r, g, b, embeddings], "h _w *")  # no leading underscore
    with pytest.raises(EinopsError):
        pack([r, g, b, embeddings], "h_ w *")  # no trailing underscore
    with pytest.raises(EinopsError):
        pack([r, g, b, embeddings], "1h_ w *")
    with pytest.raises(EinopsError):
        pack([r, g, b, embeddings], "1 w *")
    with pytest.raises(EinopsError):
        pack([r, g, b, embeddings], "h h *")
    # capital and non-capital are different
    pack([r, g, b, embeddings], "h H *")
119
+
120
+
121
@dataclasses.dataclass
class UnpackTestCase:
    """One unpack scenario: a tensor shape and the pack pattern applied to it."""

    shape: typing.Tuple[int, ...]
    pattern: str

    def dim(self) -> int:
        """Index of the packed ('*') axis within the pattern."""
        words = self.pattern.split()
        return words.index("*")

    def selfcheck(self) -> None:
        """Sanity check: the test data keeps the packed axis at length 5."""
        packed_axis = self.dim()
        assert self.shape[packed_axis] == 5
131
+
132
+
133
# Shared unpack scenarios covering every position of the packed '*' axis.
cases = [
    # NB: in all cases unpacked axis is of length 5.
    # that's actively used in tests below
    UnpackTestCase((5,), "*"),
    UnpackTestCase((5, 7), "* seven"),
    UnpackTestCase((7, 5), "seven *"),
    UnpackTestCase((5, 3, 4), "* three four"),
    UnpackTestCase((4, 5, 3), "four * three"),
    UnpackTestCase((3, 4, 5), "three four *"),
]
143
+
144
+
145
def test_pack_unpack_with_numpy():
    """unpack with numpy inputs: valid splits (with and without -1) round-trip,
    invalid splits (wrong totals, non-divisible nesting) raise."""
    case: UnpackTestCase

    for case in cases:
        shape = case.shape
        pattern = case.pattern

        x = np.random.random(shape)
        # all correct, no minus 1
        unpack_and_pack(x, [[2], [1], [2]], pattern)
        # no -1, asking for wrong shapes
        with pytest.raises(BaseException):
            unpack_and_pack(x, [[2], [1], [2]], pattern + " non_existent_axis")
        with pytest.raises(BaseException):
            unpack_and_pack(x, [[2], [1], [1]], pattern)
        with pytest.raises(BaseException):
            unpack_and_pack(x, [[4], [1], [1]], pattern)
        # all correct, with -1
        unpack_and_pack(x, [[2], [1], [-1]], pattern)
        unpack_and_pack(x, [[2], [-1], [2]], pattern)
        unpack_and_pack(x, [[-1], [1], [2]], pattern)
        # -1 may legitimately resolve to zero (2 + 3 consumes all 5 elements)
        _, _, last = unpack_and_pack(x, [[2], [3], [-1]], pattern)
        assert last.shape[case.dim()] == 0
        # asking for more elements than available
        with pytest.raises(BaseException):
            unpack(x, [[2], [4], [-1]], pattern)
        # this one does not raise, because indexing x[2:1] just returns zero elements
        # with pytest.raises(BaseException):
        #     unpack(x, [[2], [-1], [4]], pattern)
        with pytest.raises(BaseException):
            unpack(x, [[-1], [1], [5]], pattern)

        # all correct, -1 nested
        rs = unpack_and_pack(x, [[1, 2], [1, 1], [-1, 1]], pattern)
        assert all(len(r.shape) == len(x.shape) + 1 for r in rs)
        rs = unpack_and_pack(x, [[1, 2], [1, -1], [1, 1]], pattern)
        assert all(len(r.shape) == len(x.shape) + 1 for r in rs)
        rs = unpack_and_pack(x, [[2, -1], [1, 2], [1, 1]], pattern)
        assert all(len(r.shape) == len(x.shape) + 1 for r in rs)

        # asking for more elements, -1 nested
        with pytest.raises(BaseException):
            unpack(x, [[-1, 2], [1], [5]], pattern)
        with pytest.raises(BaseException):
            unpack(x, [[2, 2], [2], [5, -1]], pattern)

        # asking for non-divisible number of elements
        with pytest.raises(BaseException):
            unpack(x, [[2, 1], [1], [3, -1]], pattern)
        with pytest.raises(BaseException):
            unpack(x, [[2, 1], [3, -1], [1]], pattern)
        with pytest.raises(BaseException):
            unpack(x, [[3, -1], [2, 1], [1]], pattern)

        # -1 takes zero
        unpack_and_pack(x, [[0], [5], [-1]], pattern)
        unpack_and_pack(x, [[0], [-1], [5]], pattern)
        unpack_and_pack(x, [[-1], [5], [0]], pattern)

        # -1 takes zero, -1
        unpack_and_pack(x, [[2, -1], [1, 5]], pattern)
206
+
207
+
208
def test_pack_unpack_against_numpy():
    """For every imperative backend, unpack must either agree with the numpy
    reference result or fail with the same exception type."""
    for backend in collect_test_backends(symbolic=False, layers=False):
        print(f"test packing against numpy for {backend.framework_name}")
        check_zero_len = True

        for case in cases:
            # locally rebind the helper to the backend-vs-numpy comparison variant
            unpack_and_pack = unpack_and_pack_against_numpy
            shape = case.shape
            pattern = case.pattern

            x = np.random.random(shape)
            x = backend.from_numpy(x)
            # all correct, no minus 1
            unpack_and_pack(x, [[2], [1], [2]], pattern)
            # no -1, asking for wrong shapes
            with pytest.raises(BaseException):
                unpack(x, [[2], [1], [1]], pattern)

            with pytest.raises(BaseException):
                unpack(x, [[4], [1], [1]], pattern)
            # all correct, with -1
            unpack_and_pack(x, [[2], [1], [-1]], pattern)
            unpack_and_pack(x, [[2], [-1], [2]], pattern)
            unpack_and_pack(x, [[-1], [1], [2]], pattern)

            # asking for more elements than available
            with pytest.raises(BaseException):
                unpack(x, [[2], [4], [-1]], pattern)
            # this one does not raise, because indexing x[2:1] just returns zero elements
            # with pytest.raises(BaseException):
            #     unpack(x, [[2], [-1], [4]], pattern)
            with pytest.raises(BaseException):
                unpack(x, [[-1], [1], [5]], pattern)

            # all correct, -1 nested
            unpack_and_pack(x, [[1, 2], [1, 1], [-1, 1]], pattern)
            unpack_and_pack(x, [[1, 2], [1, -1], [1, 1]], pattern)
            unpack_and_pack(x, [[2, -1], [1, 2], [1, 1]], pattern)

            # asking for more elements, -1 nested
            with pytest.raises(BaseException):
                unpack(x, [[-1, 2], [1], [5]], pattern)
            with pytest.raises(BaseException):
                unpack(x, [[2, 2], [2], [5, -1]], pattern)

            # asking for non-divisible number of elements
            with pytest.raises(BaseException):
                unpack(x, [[2, 1], [1], [3, -1]], pattern)
            with pytest.raises(BaseException):
                unpack(x, [[2, 1], [3, -1], [1]], pattern)
            with pytest.raises(BaseException):
                unpack(x, [[3, -1], [2, 1], [1]], pattern)

            if check_zero_len:
                # -1 takes zero
                unpack_and_pack(x, [[2], [3], [-1]], pattern)
                unpack_and_pack(x, [[0], [5], [-1]], pattern)
                unpack_and_pack(x, [[0], [-1], [5]], pattern)
                unpack_and_pack(x, [[-1], [5], [0]], pattern)

                # -1 takes zero, -1
                unpack_and_pack(x, [[2, -1], [1, 5]], pattern)
270
+
271
+
272
def test_pack_unpack_array_api():
    """pack/unpack through einops.array_api agree with the plain-numpy path.

    Bug fix: the original guard compared version *strings* lexicographically
    (``xp.__version__ < "2.0.0"``), which misorders e.g. "10.0.0" < "2.0.0".
    Compare the parsed major version instead.
    """
    from einops import array_api as AA
    import numpy as xp

    # array-api support requires numpy >= 2
    if int(xp.__version__.split(".")[0]) < 2:
        pytest.skip()

    for case in cases:
        shape = case.shape
        pattern = case.pattern
        x_np = np.random.random(shape)
        x_xp = xp.from_dlpack(x_np)

        # valid splits: array-api results must match numpy results
        for ps in [
            [[2], [1], [2]],
            [[1], [1], [-1]],
            [[1], [1], [-1, 3]],
            [[2, 1], [1, 1, 1], [-1]],
        ]:
            x_np_split = unpack(x_np, ps, pattern)
            x_xp_split = AA.unpack(x_xp, ps, pattern)
            for a, b in zip(x_np_split, x_xp_split):
                assert np.allclose(a, AA.asnumpy(b + 0))

            x_agg_np, ps1 = pack(x_np_split, pattern)
            x_agg_xp, ps2 = AA.pack(x_xp_split, pattern)
            assert ps1 == ps2
            assert np.allclose(x_agg_np, AA.asnumpy(x_agg_xp))

        # invalid splits (wrong totals / too many groups) must raise
        for ps in [
            [[2, 3]],
            [[1], [5]],
            [[1], [5], [-1]],
            [[1], [2, 3]],
            [[1], [5], [-1, 2]],
        ]:
            with pytest.raises(BaseException):
                unpack(x_np, ps, pattern)
tool_server/.venv/lib/python3.12/site-packages/einops/tests/test_parsing.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from einops import EinopsError
4
+ from einops.parsing import ParsedExpression, AnonymousAxis, _ellipsis
5
+
6
+ __author__ = "Alex Rogozhnikov"
7
+
8
+
9
class AnonymousAxisPlaceholder:
    """Stand-in that compares equal to an AnonymousAxis of the same value.

    AnonymousAxis objects are deliberately unequal to each other, so tests
    compare parsed compositions against these placeholders instead.
    """

    def __init__(self, value: int):
        self.value = value
        if not isinstance(self.value, int):
            raise AssertionError("placeholder value must be an int")

    def __eq__(self, other):
        if not isinstance(other, AnonymousAxis):
            return False
        return self.value == other.value
16
+
17
+
18
def test_anonymous_axes():
    """AnonymousAxis instances are pairwise distinct; placeholders bridge equality."""
    a, b = AnonymousAxis("2"), AnonymousAxis("2")
    # even same-sized anonymous axes never compare equal to each other
    assert a != b
    c, d = AnonymousAxisPlaceholder(2), AnonymousAxisPlaceholder(3)
    # ...but both match a placeholder carrying the same size
    assert a == c
    assert b == c
    assert a != d
    assert b != d
    # placeholders also work inside container comparisons
    assert [a, 2, b] == [c, 2, c]
25
+
26
+
27
def test_elementary_axis_name():
    """check_axis_name accepts ordinary identifiers and rejects empty names,
    leading digits, edge underscores, and ellipsis forms."""
    valid_names = [
        "a",
        "b",
        "h",
        "dx",
        "h1",
        "zz",
        "i9123",
        "somelongname",
        "Alex",
        "camelCase",
        "u_n_d_e_r_score",
        "unreasonablyLongAxisName",
    ]
    invalid_names = ["", "2b", "12", "_startWithUnderscore", "endWithUnderscore_", "_", "...", _ellipsis]

    for name in valid_names:
        assert ParsedExpression.check_axis_name(name)
    for name in invalid_names:
        assert not ParsedExpression.check_axis_name(name)
46
+
47
+
48
def test_invalid_expressions():
    """ParsedExpression rejects malformed patterns; well-formed ones parse fine."""
    # double ellipsis should raise an error
    ParsedExpression("... a b c d")
    for bad_pattern in [
        "... a b c d ...",
        "... a b c (d ...)",
        "(... a) b c (d ...)",
    ]:
        with pytest.raises(EinopsError):
            ParsedExpression(bad_pattern)

    # double/missing/enclosed parenthesis
    ParsedExpression("(a) b c (d ...)")
    for bad_pattern in [
        "(a)) b c (d ...)",
        "(a b c (d ...)",
        "(a) (()) b c (d ...)",
        "(a) ((b c) (d ...))",
    ]:
        with pytest.raises(EinopsError):
            ParsedExpression(bad_pattern)

    # invalid identifiers
    ParsedExpression("camelCase under_scored cApiTaLs ß ...")
    for bad_pattern in ["1a", "_pre", "...pre", "pre..."]:
        with pytest.raises(EinopsError):
            ParsedExpression(bad_pattern)
79
+
80
+
81
def test_parse_expression():
    """ParsedExpression exposes identifiers, composition, anonymous-axis and
    ellipsis flags for a variety of patterns."""
    # plain named axes, one per group
    parsed = ParsedExpression("a1 b1 c1 d1")
    assert parsed.identifiers == {"a1", "b1", "c1", "d1"}
    assert parsed.composition == [["a1"], ["b1"], ["c1"], ["d1"]]
    assert not parsed.has_non_unitary_anonymous_axes
    assert not parsed.has_ellipsis

    # empty groups contribute no identifiers
    parsed = ParsedExpression("() () () ()")
    assert parsed.identifiers == set()
    assert parsed.composition == [[], [], [], []]
    assert not parsed.has_non_unitary_anonymous_axes
    assert not parsed.has_ellipsis

    # literal 1s behave exactly like empty groups
    parsed = ParsedExpression("1 1 1 ()")
    assert parsed.identifiers == set()
    assert parsed.composition == [[], [], [], []]
    assert not parsed.has_non_unitary_anonymous_axes
    assert not parsed.has_ellipsis

    aap = AnonymousAxisPlaceholder

    # numeric literals > 1 become AnonymousAxis objects
    parsed = ParsedExpression("5 (3 4)")
    assert len(parsed.identifiers) == 3 and {i.value for i in parsed.identifiers} == {3, 4, 5}
    assert parsed.composition == [[aap(5)], [aap(3), aap(4)]]
    assert parsed.has_non_unitary_anonymous_axes
    assert not parsed.has_ellipsis

    # unit axes are dropped even inside groups
    parsed = ParsedExpression("5 1 (1 4) 1")
    assert len(parsed.identifiers) == 2 and {i.value for i in parsed.identifiers} == {4, 5}
    assert parsed.composition == [[aap(5)], [], [aap(4)], []]

    # ellipsis outside parentheses is its own composition element
    parsed = ParsedExpression("name1 ... a1 12 (name2 14)")
    assert len(parsed.identifiers) == 6
    assert parsed.identifiers.difference({"name1", _ellipsis, "a1", "name2"}).__len__() == 2
    assert parsed.composition == [["name1"], _ellipsis, ["a1"], [aap(12)], ["name2", aap(14)]]
    assert parsed.has_non_unitary_anonymous_axes
    assert parsed.has_ellipsis
    assert not parsed.has_ellipsis_parenthesized

    # ellipsis inside parentheses is flagged separately
    parsed = ParsedExpression("(name1 ... a1 12) name2 14")
    assert len(parsed.identifiers) == 6
    assert parsed.identifiers.difference({"name1", _ellipsis, "a1", "name2"}).__len__() == 2
    assert parsed.composition == [["name1", _ellipsis, "a1", aap(12)], ["name2"], [aap(14)]]
    assert parsed.has_non_unitary_anonymous_axes
    assert parsed.has_ellipsis
    assert parsed.has_ellipsis_parenthesized