ZTWHHH committed on
Commit
5e39937
·
verified ·
1 Parent(s): 8e05000

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. evalkit_cambrian/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz +3 -0
  3. evalkit_cambrian/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy +3 -0
  4. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  5. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc +0 -0
  6. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/batchnorm.cpython-310.pyc +0 -0
  7. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/container.cpython-310.pyc +0 -0
  8. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/distance.cpython-310.pyc +0 -0
  9. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/flatten.cpython-310.pyc +0 -0
  10. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/fold.cpython-310.pyc +0 -0
  11. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/loss.cpython-310.pyc +0 -0
  12. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/utils.cpython-310.pyc +0 -0
  13. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py +9 -0
  14. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_expanded_weights.py +52 -0
  15. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_utils.py +240 -0
  16. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/embedding_expanded_weights.py +54 -0
  17. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py +59 -0
  18. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_per_sample_grad.py +102 -0
  19. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/fusion.py +138 -0
  20. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py +571 -0
  21. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/parametrize.py +758 -0
  22. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/prune.py +1379 -0
  23. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/rnn.py +517 -0
  24. evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/stateless.py +263 -0
  25. infer_4_47_1/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc +3 -0
  26. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
  27. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc +0 -0
  28. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc +0 -0
  29. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc +0 -0
  30. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc +0 -0
  31. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc +0 -0
  32. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc +0 -0
  33. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc +0 -0
  34. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc +0 -0
  35. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc +0 -0
  36. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc +0 -0
  37. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc +0 -0
  38. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc +0 -0
  39. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc +0 -0
  40. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc +0 -0
  41. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so +3 -0
  42. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc +0 -0
  43. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc +0 -0
  44. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/iterative.cpython-310.pyc +0 -0
  45. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc +0 -0
  46. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc +0 -0
  47. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc +0 -0
  48. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc +0 -0
  49. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/lsqr.py +589 -0
  50. infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -916,3 +916,6 @@ infer_4_47_1/lib/python3.10/site-packages/numpy/core/_multiarray_tests.cpython-3
916
  infer_4_47_1/lib/python3.10/site-packages/scipy/optimize/_highspy/_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
917
  infer_4_47_1/lib/python3.10/site-packages/scipy/optimize/_highspy/libhighs.a filter=lfs diff=lfs merge=lfs -text
918
  infer_4_47_1/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_basic.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
916
  infer_4_47_1/lib/python3.10/site-packages/scipy/optimize/_highspy/_core.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
917
  infer_4_47_1/lib/python3.10/site-packages/scipy/optimize/_highspy/libhighs.a filter=lfs diff=lfs merge=lfs -text
918
  infer_4_47_1/lib/python3.10/site-packages/scipy/special/tests/__pycache__/test_basic.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
919
+ infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
920
+ infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_dpropack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
921
+ infer_4_47_1/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
evalkit_cambrian/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_pickle_py27_np17.pkl.xz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efb146d450c6d061d06affb56f17384e7f64cbab9b516fcc6c4d3f8869b3e707
3
+ size 712
evalkit_cambrian/lib/python3.10/site-packages/joblib/test/data/joblib_0.9.2_pickle_py35_np19.pkl_02.npy ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c1cf36cb781fbcc21b953bb0a0b45df092da0eae0e765882e5963ccd70105b1
3
+ size 120
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.07 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/activation.cpython-310.pyc ADDED
Binary file (54.3 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/batchnorm.cpython-310.pyc ADDED
Binary file (31.8 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/container.cpython-310.pyc ADDED
Binary file (34.2 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/distance.cpython-310.pyc ADDED
Binary file (4.07 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/flatten.cpython-310.pyc ADDED
Binary file (5.89 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/fold.cpython-310.pyc ADDED
Binary file (13.1 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/loss.cpython-310.pyc ADDED
Binary file (92.7 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/modules/__pycache__/utils.cpython-310.pyc ADDED
Binary file (2.72 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from .conv_expanded_weights import ConvPerSampleGrad
2
+ from .embedding_expanded_weights import EmbeddingPerSampleGrad
3
+ from .group_norm_expanded_weights import GroupNormPerSampleGrad
4
+ from .instance_norm_expanded_weights import InstanceNormPerSampleGrad
5
+ from .layer_norm_expanded_weights import LayerNormPerSampleGrad
6
+ from .linear_expanded_weights import LinearPerSampleGrad
7
+ from .expanded_weights_impl import ExpandedWeight
8
+
9
+ __all__ = ['ExpandedWeight']
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_expanded_weights.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+
4
+ from .conv_utils import conv_backward, conv_args_and_kwargs, conv_picker, conv_input_for_string_padding
5
+ from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads
6
+ from .expanded_weights_utils import forward_helper
7
+
8
@implements_per_sample_grads(F.conv1d)
@implements_per_sample_grads(F.conv2d)
@implements_per_sample_grads(F.conv3d)
class ConvPerSampleGrad(torch.autograd.Function):
    """Autograd Function computing per-sample gradients for F.conv{1,2,3}d
    when weights are wrapped as ExpandedWeight.

    forward runs the regular convolution; backward populates ``grad_sample``
    on the expanded weight/bias via ``conv_backward``.
    """

    @staticmethod
    def forward(ctx, kwarg_names, conv_fn, *expanded_args_and_kwargs):
        expanded_args, expanded_kwargs = conv_args_and_kwargs(kwarg_names, expanded_args_and_kwargs)
        orig_input = expanded_args[0]
        was_same_padding = expanded_kwargs['padding'] == "same"

        if isinstance(expanded_kwargs['padding'], str):
            # if padding is a string, we'll do the necessary padding (slowly) using F.pad
            kernel_size = expanded_args[1].shape[2:]
            padding, dilation = expanded_kwargs['padding'], expanded_kwargs['dilation']
            input = conv_input_for_string_padding(conv_fn, padding, expanded_args[0], dilation, kernel_size)
            expanded_args = (input, expanded_args[1])
            # since we've already done the padding, don't need any more
            expanded_kwargs['padding'] = 0

        output = forward_helper(conv_fn, expanded_args, expanded_kwargs)
        input, weight = expanded_args
        batched_dim_size = conv_picker(conv_fn, 3, 4, 5)
        if input.dim() != batched_dim_size:
            # BUG FIX: the two f-string fragments previously concatenated as
            # "...with anunbatched input..." — a space was missing.
            raise RuntimeError(f"Expanded Weights only support convolution with batched input, got {conv_fn} with an "
                               f"unbatched input of dim {input.dim()}, expected input of dim {batched_dim_size}")

        # stash everything conv_backward needs
        ctx.conv_fn = conv_fn

        ctx.batch_size = orig_input.shape[0]
        ctx.input_required_grad = orig_input.requires_grad
        ctx.orig_input_shape = orig_input.shape
        ctx.was_same_padding = was_same_padding
        ctx.stride, ctx.padding = expanded_kwargs['stride'], expanded_kwargs['padding']
        ctx.dilation, ctx.groups = expanded_kwargs['dilation'], expanded_kwargs['groups']

        # only keep heavy tensors around when per-sample grads will be needed
        if isinstance(weight, ExpandedWeight):
            ctx.input = input
            ctx.weight = weight
            ctx.bias = expanded_kwargs['bias']

        return output

    @staticmethod
    def backward(ctx, grad_output):
        return conv_backward(ctx.conv_fn, ctx, grad_output)
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/conv_utils.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+
4
+ import numpy as np
5
+ from typing import List, Optional
6
+
7
+ from .expanded_weights_utils import \
8
+ set_grad_sample_if_exists, unpack_expanded_weight_or_tensor
9
+
10
+ THRESHOLD = 32
11
+
12
+
13
def conv_picker(func, conv1dOpt, conv2dOpt, conv3dOpt):
    """Return the option matching the dimensionality of the conv function.

    ``func`` must be one of F.conv1d / F.conv2d / F.conv3d; anything else
    trips the assertion, as in the original chained-if form.
    """
    options = {F.conv1d: conv1dOpt, F.conv2d: conv2dOpt, F.conv3d: conv3dOpt}
    assert func in options
    return options[func]
21
+
22
+
23
def conv_args_and_kwargs(kwarg_names, expanded_args_and_kwargs):
    """Split a flattened arg tuple back into positional args and a kwargs dict.

    The trailing ``len(kwarg_names)`` entries are keyword values in the same
    order as ``kwarg_names``; everything before them is positional.
    """
    num_positional = len(expanded_args_and_kwargs) - len(kwarg_names)
    positional = expanded_args_and_kwargs[:num_positional]
    keyword_values = expanded_args_and_kwargs[num_positional:]
    return conv_normalizer(*positional, **dict(zip(kwarg_names, keyword_values)))
29
+
30
+
31
def conv_normalizer(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Normalize conv arguments to ``(input, weight)`` plus a kwargs dict with defaults filled in."""
    keyword_args = dict(bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
    return (input, weight), keyword_args
33
+
34
+
35
def conv_input_for_string_padding(func, padding_style, input, dilation, kernel_size):
    """Pre-pad ``input`` with F.pad when a string padding mode is requested.

    "valid" means no padding, so the input is returned untouched; "same"
    (the only other accepted style) is converted to explicit int padding.
    """
    if padding_style == "valid":
        return input
    explicit = int_padding_for_string_padding(func, padding_style, dilation, kernel_size)
    return F.pad(input, explicit)
41
+
42
+
43
def int_padding_for_string_padding(func, padding_style, dilation, kernel_size):
    """Translate string padding ("same"/"valid") into explicit ints for F.pad."""
    def dilation_for(i):
        # dilation may be a scalar or a per-dimension tuple
        return dilation[i] if isinstance(dilation, tuple) else dilation

    if padding_style == "valid":
        return conv_picker(func, 2, 4, 6) * (0,)
    if padding_style == "same":
        padding: List[int] = []
        # F.pad needs the padding in reverse order from what conv expects
        for i in range(conv_picker(func, 0, 1, 2), -1, -1):
            padding += conv_padding_for_same(dilation_for(i), kernel_size[i])
        return padding
    raise RuntimeError(f"got padding type of {padding_style}, only accept 'same' or 'valid'")
57
+
58
+
59
def conv_padding_for_same(dilation, kernel_size):
    """(left, right) padding reproducing conv's "same" padding for one dimension.

    When the total padding is odd, the extra unit goes on the right.
    """
    total = dilation * (kernel_size - 1)
    left = total // 2
    return left, total - left
64
+
65
+
66
def conv_backward(func, ctx, grad_output):
    """Backward pass for expanded-weight convolution.

    Returns the gradient tuple expected by ConvPerSampleGrad.backward (one
    slot per forward argument) and, as a side effect, stores per-sample
    gradients on ``ctx.weight`` / ``ctx.bias`` via grad_sample.
    """

    def weight_grad_sample(weight):
        # small, single-group batches use the grouped-conv formulation;
        # everything else goes through im2col/unfold
        if batch_size < THRESHOLD and groups == 1:
            return conv_group_weight_grad_sample(ctx.input, grad_output, weight_shape, stride, padding, dilation, batch_size, func)
        return conv_unfold_weight_grad_sample(ctx.input, grad_output, weight_shape, kernel_size,
                                              stride, padding, dilation, groups, func)

    def expand(param):
        # broadcast a scalar conv parameter to one entry per spatial dim
        if isinstance(param, int):
            return conv_picker(func, (param,), (param, param), (param, param, param))
        return param

    def calc_total_padding(func, was_same, padding, dilation, kernel_size):
        if was_same:
            all_padding = int_padding_for_string_padding(func, "same", dilation, kernel_size)
            # F.pad needs the padding in reverse order from what conv expects
            return tuple(all_padding[i] + all_padding[i - 1] for i in range(len(all_padding) - 1, -1, -2))
        return tuple(2 * pad for pad in padding)

    weight_shape = ctx.weight.shape
    stride, padding = expand(ctx.stride), expand(ctx.padding)
    dilation, groups = expand(ctx.dilation), ctx.groups

    kernel_size = [weight_shape[i] for i in range(2, conv_picker(func, 3, 4, 5))]

    batch_size = ctx.batch_size
    # slots for kwarg names and the op reference, in forward-argument order
    results: List[Optional[torch.Tensor]] = [None, None]

    # "same" padding may give uneven padding on either side so we need to
    # separate the "padding" attr and total padding
    total_padding = calc_total_padding(func, ctx.was_same_padding, padding, dilation, kernel_size)

    if ctx.input_required_grad:
        output_padding = []
        spatial_dims = conv_picker(func, 1, 2, 3)
        for i in range(spatial_dims):
            input_dim = ctx.orig_input_shape[2 + i]
            output_padding.append((total_padding[i] + input_dim - (kernel_size[i] * dilation[i] - dilation[i] + 1)) % stride[i])
        weight_ = unpack_expanded_weight_or_tensor(ctx.weight)
        transpose_func = conv_picker(func, F.conv_transpose1d, F.conv_transpose2d, F.conv_transpose3d)
        out = transpose_func(grad_output, weight_, None, stride, padding, tuple(output_padding), groups, dilation)

        if ctx.was_same_padding:
            # trim the uneven "same" padding back off the reconstructed input grad
            for i in range(len(total_padding)):
                out = torch.narrow(out, 2 + i, total_padding[i] // 2, ctx.orig_input_shape[2 + i])

        results.append(out)
    else:
        results.append(None)
    # weight and bias don't compute batched gradients; no other arguments are differentiable
    results = results + [None] * 6

    # set grad_sample field for weight and bias with per sample gradients
    set_grad_sample_if_exists(ctx.weight, weight_grad_sample)
    set_grad_sample_if_exists(ctx.bias, lambda _: grad_output.reshape(*grad_output.shape[:2], -1).sum(dim=2))
    return tuple(results)
129
+
130
+
131
def conv_unfold_weight_grad_sample(input, grad_output, weight_shape, kernel_size, stride, padding, dilation, groups, func):
    """Per-sample weight gradients via im2col (unfold) plus a batched einsum."""
    batch = input.shape[0]
    in_channels = input.shape[1]

    # pick an unfold matching the conv dimensionality; 1d is emulated with a 2d unfold
    unfold_func = conv_picker(
        func,
        lambda: F.unfold(input.unsqueeze(-2),
                         kernel_size=(1, kernel_size[0]),
                         dilation=(1, dilation[0]),
                         padding=(0, padding[0]),
                         stride=(1, stride[0])),
        lambda: F.unfold(input, kernel_size, dilation=dilation, padding=padding, stride=stride),
        lambda: unfold3d(input, kernel_size, padding, stride, dilation),
    )

    input = unfold_func()
    grad_output = grad_output.reshape(batch, -1, input.shape[-1])

    # n=batch_sz; o=num_out_channels; p=(num_in_channels/groups)*kernel_sz
    grad_w = torch.einsum("noq,npq->nop", grad_output, input)
    # rearrange into per-group blocks and keep only the block diagonal
    grad_w = grad_w.view(
        batch,
        groups,
        -1,
        groups,
        int(in_channels / groups),
        np.prod(kernel_size),
    )
    grad_w = torch.einsum("ngrg...->ngr...", grad_w).contiguous()
    return grad_w.view([batch] + list(weight_shape))
164
+
165
+
166
def conv_group_weight_grad_sample(input, grad_output, weight_shape, stride, padding, dilation, batch_size, func):
    """Per-sample weight gradients expressed as one grouped convolution over the batch."""
    in_channels = input.shape[1]
    out_channels = grad_output.shape[1]

    # fold the batch into channel dims so a single grouped conv yields all samples
    input_ = input.transpose(0, 1)
    grad_output_ = grad_output.view(grad_output.shape[0] * grad_output.shape[1], 1, *grad_output.shape[2:])

    # NOTE: stride and dilation are deliberately swapped here — that is how
    # the weight-gradient cross-correlation maps onto a forward conv call
    weight_grad_sample = func(input_, grad_output_, None, stride=dilation, padding=padding, dilation=stride, groups=batch_size)
    total_dims = conv_picker(func, 3, 4, 5)
    for dim in range(2, total_dims):
        # conv may overshoot the kernel extent; crop to the true weight shape
        weight_grad_sample = weight_grad_sample.narrow(dim, 0, weight_shape[dim])
    weight_grad_sample = weight_grad_sample.view(in_channels, batch_size, out_channels, *weight_grad_sample.shape[2:])
    return weight_grad_sample.movedim(0, 2)
180
+
181
+
182
def unfold3d(
    tensor,
    kernel_size,
    padding,
    stride,
    dilation,
):
    r"""
    Extract sliding local blocks from a batched input tensor.

    :class:`torch.nn.Unfold` only supports 4D inputs (batched image-like tensors).
    This method implements the same action for 5D inputs
    Args:
        tensor: An input tensor of shape ``(B, C, D, H, W)``.
        kernel_size: the size of the sliding blocks
        padding: implicit zero padding to be added on both sides of input
        stride: the stride of the sliding blocks in the input spatial dimensions
        dilation: the spacing between the kernel points.
    Returns:
        A tensor of shape ``(B, C * np.prod(kernel_size), L)``, where L - output spatial dimensions.
        See :class:`torch.nn.Unfold` for more details
    Raises:
        ValueError: if the input is not 5-dimensional.
        NotImplementedError: if ``dilation`` is anything other than ``(1, 1, 1)``.
    Example:
        >>> # xdoctest: +SKIP
        >>> B, C, D, H, W = 3, 4, 5, 6, 7
        >>> tensor = torch.arange(1, B * C * D * H * W + 1.).view(B, C, D, H, W)
        >>> unfold3d(tensor, kernel_size=2, padding=0, stride=1).shape
        torch.Size([3, 32, 120])
    """
    if len(tensor.shape) != 5:
        # BUG FIX: the f-string previously read "Got{tensor.shape}" (missing space)
        raise ValueError(
            f"Input tensor must be of the shape [B, C, D, H, W]. Got {tensor.shape}"
        )

    if dilation != (1, 1, 1):
        raise NotImplementedError(f"dilation={dilation} not supported.")

    batch_size, channels, _, _, _ = tensor.shape

    # Input shape: (B, C, D, H, W)
    tensor = F.pad(
        tensor, (padding[2], padding[2], padding[1], padding[1], padding[0], padding[0])
    )
    # Output shape: (B, C, D+2*padding[2], H+2*padding[1], W+2*padding[0])

    tensor = tensor.unfold(dimension=2, size=kernel_size[0], step=stride[0])
    tensor = tensor.unfold(dimension=3, size=kernel_size[1], step=stride[1])
    tensor = tensor.unfold(dimension=4, size=kernel_size[2], step=stride[2])
    # Output shape: (B, C, D_out, H_out, W_out, kernel_size[0], kernel_size[1], kernel_size[2])
    # For D_out, H_out, W_out definitions see :class:`torch.nn.Unfold`

    tensor = tensor.permute(0, 2, 3, 4, 1, 5, 6, 7)
    # Output shape: (B, D_out, H_out, W_out, C, kernel_size[0], kernel_size[1], kernel_size[2])

    tensor = tensor.reshape(batch_size, -1, channels * np.prod(kernel_size)).transpose(
        1, 2
    )
    # Output shape: (B, C * kernel_size[0] * kernel_size[1] * kernel_size[2], D_out * H_out * W_out)

    return tensor
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/embedding_expanded_weights.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn.functional as F
3
+ from .expanded_weights_impl import implements_per_sample_grads
4
+ from .expanded_weights_utils import standard_kwargs, forward_helper, set_grad_sample_if_exists
5
+
6
+ from typing import List, Optional
7
+
8
@implements_per_sample_grads(F.embedding)
class EmbeddingPerSampleGrad(torch.autograd.Function):
    """Autograd Function computing per-sample gradients for F.embedding
    when the weight is wrapped as ExpandedWeight."""

    @staticmethod
    def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs):
        expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs)
        if len(expanded_args[0].shape) == 1:
            raise RuntimeError(f"Expanded Weights needs an input with a batch size, got a 1D tensor, {expanded_args[0]}")
        output = forward_helper(F.embedding, expanded_args, expanded_kwargs)
        # stash everything backward needs
        ctx.input, ctx.weight = expanded_args
        ctx.padding_idx, ctx.scale_grad_by_freq = expanded_kwargs['padding_idx'], expanded_kwargs['scale_grad_by_freq']
        ctx.sparse = expanded_kwargs['sparse']
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, weight = ctx.input, ctx.weight
        padding_idx, scale_grad_by_freq, sparse = ctx.padding_idx, ctx.scale_grad_by_freq, ctx.sparse

        def weight_per_sample_grad(weight):
            # scatter each position's grad into its embedding row, per sample
            batch_size = input.shape[0]
            embedding_dim = weight.shape[1]
            index = (
                input.unsqueeze(-1)
                .expand(*input.shape, embedding_dim)
                .reshape(batch_size, -1, embedding_dim)
            )
            grad_sample = torch.zeros(
                batch_size, *weight.shape, device=weight.device, dtype=grad_output.dtype
            )
            return grad_sample.scatter_add_(1, index, grad_output.reshape(batch_size, -1, embedding_dim))

        # slots for kwarg names and the op reference, in forward-argument order
        results: List[Optional[torch.Tensor]] = [None, None]

        if input.requires_grad:
            bw_fn = torch.ops.aten.embedding_backward
            results.append(bw_fn(grad_output, input, weight.shape[0], padding_idx, scale_grad_by_freq, sparse))
        else:
            results.append(None)

        # weight doesn't compute batched gradients; no other arguments are differentiable (2 not saved from forward)
        results = results + [None] * 6

        # set grad_sample field for weight with per sample gradients
        set_grad_sample_if_exists(weight, weight_per_sample_grad)
        return tuple(results)
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import torch
3
+ import torch.nn.functional as F
4
+ from .expanded_weights_impl import ExpandedWeight, implements_per_sample_grads
5
+ from .expanded_weights_utils import forward_helper, set_grad_sample_if_exists, \
6
+ standard_kwargs, sum_over_all_but_batch_and_last_n, unpack_expanded_weight_or_tensor
7
+ from typing import List, Optional
8
+
9
@implements_per_sample_grads(F.layer_norm)
class LayerNormPerSampleGrad(torch.autograd.Function):
    """Autograd Function computing per-sample gradients for F.layer_norm
    when weight/bias are wrapped as ExpandedWeight."""

    @staticmethod
    def forward(ctx, kwarg_names, _, *expanded_args_and_kwargs):
        expanded_args, expanded_kwargs = standard_kwargs(kwarg_names, expanded_args_and_kwargs)
        input = expanded_args[0]
        normalized_shape = expanded_args[1]
        if len(input.shape) <= len(normalized_shape):
            # BUG FIX: the two message fragments previously concatenated as
            # "gradientcomputations" — a space was missing.
            raise RuntimeError("Expanded Weights: Layer norm should not normalize over batch dimension for per sample gradient "
                               f"computations but got that normalized shape, {normalized_shape}, matched input shape.")
        output, mean, rstd = forward_helper(torch.native_layer_norm, expanded_args, expanded_kwargs)
        ctx.args = expanded_args

        # only stash weight/bias when backward will actually need them
        if input.requires_grad or isinstance(expanded_kwargs['weight'], ExpandedWeight):
            ctx.weight = expanded_kwargs['weight']
        if input.requires_grad or isinstance(expanded_kwargs['bias'], ExpandedWeight):
            ctx.bias = expanded_kwargs['bias']
        ctx.eps = expanded_kwargs['eps']
        ctx.mean, ctx.rstd = mean, rstd
        return output

    @staticmethod
    def backward(ctx, grad_output):

        def weight_per_sample_grad(weight):
            # per-sample weight grad: normalized input times upstream grad,
            # summed over all dims except batch and the normalized tail
            return sum_over_all_but_batch_and_last_n(F.layer_norm(input, normalized_shape, eps=ctx.eps) * grad_output, weight.dim())

        input, normalized_shape = ctx.args
        mean, rstd = ctx.mean, ctx.rstd

        # slots for kwarg names and the op reference, in forward-argument order
        results: List[Optional[torch.Tensor]] = [None, None]
        if input.requires_grad:
            weight_ = unpack_expanded_weight_or_tensor(ctx.weight)
            bias_ = unpack_expanded_weight_or_tensor(ctx.bias)
            results.append(torch.ops.aten.native_layer_norm_backward(
                grad_output, input, normalized_shape, mean, rstd, weight_, bias_, (True, False, False))[0])
        else:
            results.append(None)

        # weight and bias don't compute batched gradients; no other arguments are differentiable
        results = results + [None] * 4

        # set grad_sample field for weight and bias with per sample gradients
        if hasattr(ctx, "weight"):
            set_grad_sample_if_exists(ctx.weight, weight_per_sample_grad)
        if hasattr(ctx, "bias"):
            set_grad_sample_if_exists(ctx.bias, lambda bias: sum_over_all_but_batch_and_last_n(grad_output, bias.dim()))
        return tuple(results)
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/_per_sample_grad.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+
3
+ import torch
4
+ from torch.nn.utils._expanded_weights.expanded_weights_impl import ExpandedWeight
5
+
6
+ from torch.utils import _pytree as pytree
7
+
8
+
9
+ # dependency on `functional_call` means that this can't be exposed in utils
10
+ # without creating circular dependency
11
+ def call_for_per_sample_grads(module, *, batch_size=None, loss_reduction="sum", batch_first=True):
12
+ r"""
13
+ Return a forward function for a module, populating grad_sample with per sample gradients on backward invocation.
14
+
15
+ Args:
16
+ module: The ``nn.Module`` to get per sample gradients with respect to. All trainable
17
+ parameters will compute per sample gradients, located in a ``grad_sample``
18
+ field when ``backward`` is invoked
19
+ batch_size: The batch size of the input. If None is passed, all tensor arguments in args and kwargs must have
20
+ the same batch size, which is the size of the first dimension. Otherwise, it must be passed manually.
21
+ Default: None
22
+ loss_reduction: Indicates if the loss reduction (for aggregating the gradients) is a sum or a mean operation. If
23
+ "mean", per sample gradients will be scaled by the batch size to offset the crossbatch interaction from
24
+ running mean across a batch. Must be "mean" or "sum". Default: "sum"
25
+ batch_first: Indicates if the batch dimension is the first dimension. If True, the batch dimension is the first
26
+ dimension. If False, it's the second dimension. Default: True.
27
+
28
+ Examples::
29
+ >>> # xdoctest: +SKIP
30
+ >>> model = nn.Linear(4, 3)
31
+ >>> batched_input = torch.randn(5, 4) # batch size of 5
32
+ >>> res = call_for_per_sample_grads(model)(batched_input).sum()
33
+ >>> res.backward()
34
+ >>> assert model.weight.shape == (3, 4)
35
+ >>> assert model.weight.grad_sample.shape == (5, 3, 4)
36
+ >>> assert model.weight.grad is None
37
+ >>> assert model.bias.shape == (3,)
38
+ >>> assert model.bias.grad_sample.shape == (5, 3)
39
+ >>> assert model.bias.grad is None
40
+
41
+ An example using "mean" loss reduction. The grad_sample fields will be scaled by batch_size from what they would be
42
+ if we ran the same code with loss_reduction="sum". This is because the mean at the end will scale all
43
+ grad_outputs by 1 / batch_size from cross batch interaction.
44
+ >>> model = nn.Linear(4, 3)
45
+ >>> batched_input = torch.randn(5, 4) # batch size of 5
46
+ >>> res = call_for_per_sample_grads(model, 5, loss_reduction="mean")(batched_input).mean()
47
+ >>> res.backward()
48
+
49
+ Note::
50
+ Does not work with any `nn.RNN`, including `nn.GRU` or `nn.LSTM`. Please use custom
51
+ rewrites that wrap an `nn.Linear` module. See Opacus for an example
52
+ """
53
+
54
+ def maybe_build_expanded_weight(og_tensor, batch_size):
55
+ if og_tensor.requires_grad:
56
+ return ExpandedWeight(og_tensor, batch_size, loss_reduction)
57
+ else:
58
+ return og_tensor
59
+
60
+ def compute_batch_size(*args, **kwargs):
61
+ args_and_kwargs = pytree.arg_tree_leaves(*args, **kwargs)
62
+ batch_size = None
63
+ for arg in args_and_kwargs:
64
+ if not isinstance(arg, torch.Tensor):
65
+ continue
66
+
67
+ arg_batch_size = arg.shape[0] if batch_first else arg.shape[1]
68
+ if batch_size is not None and batch_size != arg_batch_size:
69
+ raise RuntimeError("When computing batch size, found at least one input with batch size "
70
+ f"{batch_size} and one with batch size {arg_batch_size}. Please specify it "
71
+ "explicitly using the batch size kwarg in call_for_per_sample_grads")
72
+ batch_size = arg_batch_size
73
+ if batch_size is None:
74
+ raise RuntimeError("Unable to find a tensor in the passed args and kwargs. They may not be pytree-able "
75
+ "and so ExpandedWeights cannot compute the batch size from the inputs. Please specify "
76
+ "it explicitly")
77
+ return batch_size
78
+
79
+ if loss_reduction not in ["sum", "mean"]:
80
+ raise RuntimeError(f"Expected loss_reduction argument to be sum or mean, got {loss_reduction}")
81
+
82
+ if not isinstance(module, torch.nn.Module):
83
+ raise RuntimeError(f"Module passed must be nn.Module, got {type(module).__name__}")
84
+ if not (batch_size is None or isinstance(batch_size, int)):
85
+ raise RuntimeError(f"Batch size passed must be None or an integer, got {type(batch_size).__name__}")
86
+ if batch_size is not None and batch_size < 1:
87
+ raise RuntimeError(f"Batch size must be positive, got {batch_size}")
88
+ for weight in module.parameters():
89
+ if hasattr(weight, "grad_sample") and weight.grad_sample is not None: # type: ignore[attr-defined]
90
+ raise RuntimeError("Current Expanded Weights accumulates the gradients, which will be incorrect for multiple "
91
+ f"calls without clearing gradients. Please clear out the grad_sample parameter of {weight} or "
92
+ "post an issue to pytorch/pytorch to prioritize correct behavior")
93
+
94
+ @functools.wraps(module.forward)
95
+ def wrapper(*args, **kwargs):
96
+ wrapper_batch_size = batch_size
97
+ if wrapper_batch_size is None:
98
+ wrapper_batch_size = compute_batch_size(*args, **kwargs)
99
+
100
+ params = {name: maybe_build_expanded_weight(value, wrapper_batch_size) for (name, value) in module.named_parameters()}
101
+ return torch.func.functional_call(module, params, args, kwargs)
102
+ return wrapper
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/fusion.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import copy
4
+ from typing import Optional, Tuple, TypeVar
5
+
6
+ import torch
7
+
8
+ __all__ = ['fuse_conv_bn_eval', 'fuse_conv_bn_weights', 'fuse_linear_bn_eval', 'fuse_linear_bn_weights']
9
+
10
+ ConvT = TypeVar("ConvT", bound="torch.nn.modules.conv._ConvNd")
11
+ LinearT = TypeVar("LinearT", bound="torch.nn.Linear")
12
+
13
def fuse_conv_bn_eval(conv: ConvT, bn: torch.nn.modules.batchnorm._BatchNorm, transpose: bool = False) -> ConvT:
    r"""Fold a BatchNorm module into a convolution, returning a new fused convolution.

    Args:
        conv (torch.nn.modules.conv._ConvNd): A convolutional module.
        bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module.
        transpose (bool, optional): If True, treat the weight as that of a
            transposed convolution (channel axis 1). Defaults to False.

    Returns:
        torch.nn.modules.conv._ConvNd: A deep copy of ``conv`` with the BatchNorm
        statistics folded into its weight and bias.

    .. note::
        Both ``conv`` and ``bn`` must be in eval mode, and ``bn`` must have its
        running buffers computed.
    """
    assert not (conv.training or bn.training), "Fusion only for eval!"

    new_conv = copy.deepcopy(conv)
    assert bn.running_mean is not None and bn.running_var is not None
    new_conv.weight, new_conv.bias = fuse_conv_bn_weights(
        new_conv.weight,
        new_conv.bias,
        bn.running_mean,
        bn.running_var,
        bn.eps,
        bn.weight,
        bn.bias,
        transpose,
    )
    return new_conv
36
+
37
def fuse_conv_bn_weights(
    conv_w: torch.Tensor,
    conv_b: Optional[torch.Tensor],
    bn_rm: torch.Tensor,
    bn_rv: torch.Tensor,
    bn_eps: float,
    bn_w: Optional[torch.Tensor],
    bn_b: Optional[torch.Tensor],
    transpose: bool = False
) -> Tuple[torch.nn.Parameter, torch.nn.Parameter]:
    r"""Fold BatchNorm statistics and affine parameters into convolution parameters.

    Args:
        conv_w (torch.Tensor): Convolutional weight.
        conv_b (Optional[torch.Tensor]): Convolutional bias, or ``None``.
        bn_rm (torch.Tensor): BatchNorm running mean.
        bn_rv (torch.Tensor): BatchNorm running variance.
        bn_eps (float): BatchNorm epsilon.
        bn_w (Optional[torch.Tensor]): BatchNorm weight (gamma), or ``None``.
        bn_b (Optional[torch.Tensor]): BatchNorm bias (beta), or ``None``.
        transpose (bool, optional): If True, the channel axis of the weight is 1
            (transposed convolution). Defaults to False.

    Returns:
        Tuple[torch.nn.Parameter, torch.nn.Parameter]: Fused convolutional weight and bias.
    """
    # Remember the original dtypes so the fused parameters match the inputs
    # even if intermediate arithmetic promotes them.
    weight_dtype = conv_w.dtype
    bias_dtype = conv_b.dtype if conv_b is not None else weight_dtype

    # Missing pieces behave as identity BatchNorm / zero bias.
    bias = torch.zeros_like(bn_rm) if conv_b is None else conv_b
    gamma = torch.ones_like(bn_rm) if bn_w is None else bn_w
    beta = torch.zeros_like(bn_rm) if bn_b is None else bn_b

    inv_std = torch.rsqrt(bn_rv + bn_eps)

    # The per-channel scale broadcasts along the channel axis of the weight:
    # axis 0 for ordinary convolutions, axis 1 for transposed ones.
    bcast_shape = ([1, -1] if transpose else [-1, 1]) + [1] * (conv_w.dim() - 2)

    fused_w = (conv_w * (gamma * inv_std).reshape(bcast_shape)).to(dtype=weight_dtype)
    fused_b = ((bias - bn_rm) * inv_std * gamma + beta).to(dtype=bias_dtype)

    return (
        torch.nn.Parameter(fused_w, conv_w.requires_grad),
        torch.nn.Parameter(fused_b, bias.requires_grad),
    )
83
+
84
def fuse_linear_bn_eval(linear: LinearT, bn: torch.nn.modules.batchnorm._BatchNorm) -> LinearT:
    r"""Fold a BatchNorm module into a linear layer, returning a new fused linear module.

    Args:
        linear (torch.nn.Linear): A Linear module.
        bn (torch.nn.modules.batchnorm._BatchNorm): A BatchNorm module.

    Returns:
        torch.nn.Linear: A deep copy of ``linear`` with the BatchNorm statistics
        folded into its weight and bias.

    .. note::
        Both ``linear`` and ``bn`` must be in eval mode, and ``bn`` must have its
        running buffers computed.
    """
    assert not (linear.training or bn.training), "Fusion only for eval!"

    new_linear = copy.deepcopy(linear)
    assert bn.running_mean is not None and bn.running_var is not None
    new_linear.weight, new_linear.bias = fuse_linear_bn_weights(
        new_linear.weight,
        new_linear.bias,
        bn.running_mean,
        bn.running_var,
        bn.eps,
        bn.weight,
        bn.bias,
    )
    return new_linear
106
+
107
def fuse_linear_bn_weights(
    linear_w: torch.Tensor,
    linear_b: Optional[torch.Tensor],
    bn_rm: torch.Tensor,
    bn_rv: torch.Tensor,
    bn_eps: float,
    bn_w: torch.Tensor,
    bn_b: torch.Tensor,
) -> Tuple[torch.nn.Parameter, torch.nn.Parameter]:
    r"""Fuse linear module parameters and BatchNorm module parameters into new linear module parameters.

    Args:
        linear_w (torch.Tensor): Linear weight.
        linear_b (Optional[torch.Tensor]): Linear bias, or ``None`` if the linear layer has none.
        bn_rm (torch.Tensor): BatchNorm running mean.
        bn_rv (torch.Tensor): BatchNorm running variance.
        bn_eps (float): BatchNorm epsilon.
        bn_w (torch.Tensor): BatchNorm weight (gamma).
        bn_b (torch.Tensor): BatchNorm bias (beta).

    Returns:
        Tuple[torch.nn.Parameter, torch.nn.Parameter]: Fused linear weight and bias.

    .. note::
        Unlike :func:`fuse_conv_bn_weights`, this function does not explicitly cast
        the fused tensors back to the input dtypes; the result dtype follows normal
        type promotion of the arithmetic below.
    """
    # A missing bias behaves like a zero bias of the same shape as the running stats.
    if linear_b is None:
        linear_b = torch.zeros_like(bn_rm)
    # Per-output-channel scale applied by BatchNorm: gamma / sqrt(var + eps).
    bn_scale = bn_w * torch.rsqrt(bn_rv + bn_eps)

    # Fold the scale into the weight rows and the mean/shift into the bias.
    fused_w = linear_w * bn_scale.unsqueeze(-1)
    fused_b = (linear_b - bn_rm) * bn_scale + bn_b

    return torch.nn.Parameter(fused_w, linear_w.requires_grad), torch.nn.Parameter(fused_b, linear_b.requires_grad)
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/parametrizations.py ADDED
@@ -0,0 +1,571 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum, auto
2
+
3
+ import torch
4
+ from torch import Tensor
5
+ from ..utils import parametrize
6
+ from ..modules import Module
7
+ from .. import functional as F
8
+
9
+ from typing import Optional
10
+
11
+ __all__ = ['orthogonal', 'spectral_norm', 'weight_norm']
12
+
13
+
14
+ def _is_orthogonal(Q, eps=None):
15
+ n, k = Q.size(-2), Q.size(-1)
16
+ Id = torch.eye(k, dtype=Q.dtype, device=Q.device)
17
+ # A reasonable eps, but not too large
18
+ eps = 10. * n * torch.finfo(Q.dtype).eps
19
+ return torch.allclose(Q.mH @ Q, Id, atol=eps)
20
+
21
+
22
+ def _make_orthogonal(A):
23
+ """Assume that A is a tall matrix.
24
+
25
+ Compute the Q factor s.t. A = QR (A may be complex) and diag(R) is real and non-negative.
26
+ """
27
+ X, tau = torch.geqrf(A)
28
+ Q = torch.linalg.householder_product(X, tau)
29
+ # The diagonal of X is the diagonal of R (which is always real) so we normalise by its signs
30
+ Q *= X.diagonal(dim1=-2, dim2=-1).sgn().unsqueeze(-2)
31
+ return Q
32
+
33
+
34
class _OrthMaps(Enum):
    # The three parametrization maps that `orthogonal` can use to turn an
    # unconstrained tensor into an orthogonal / unitary matrix
    # (see `_Orthogonal.forward` for how each is applied).
    matrix_exp = auto()
    cayley = auto()
    householder = auto()
38
+
39
+
40
class _Orthogonal(Module):
    """Parametrization producing an orthogonal (or unitary) matrix from an unconstrained tensor.

    Registered via :func:`orthogonal`. ``forward`` maps the unconstrained tensor ``X``
    to an orthogonal matrix using the selected map (matrix exponential, Cayley, or
    Householder product); ``right_inverse`` computes an initial unconstrained value
    from an existing weight.
    """

    # Buffer holding the trivialization base B (only present when
    # use_trivialization=True in __init__).
    base: Tensor

    def __init__(self,
                 weight,
                 orthogonal_map: _OrthMaps,
                 *,
                 use_trivialization=True) -> None:
        super().__init__()

        # Note [Householder complex]
        # For complex tensors, it is not possible to compute the tensor `tau` necessary for
        # linalg.householder_product from the reflectors.
        # To see this, note that the reflectors have a shape like:
        # 0 0 0
        # * 0 0
        # * * 0
        # which, for complex matrices, give n(n-1) (real) parameters. Now, you need n^2 parameters
        # to parametrize the unitary matrices. Saving tau on its own does not work either, because
        # not every combination of `(A, tau)` gives a unitary matrix, meaning that if we optimise
        # them as independent tensors we would not maintain the constraint
        # An equivalent reasoning holds for rectangular matrices
        if weight.is_complex() and orthogonal_map == _OrthMaps.householder:
            raise ValueError("The householder parametrization does not support complex tensors.")

        self.shape = weight.shape
        self.orthogonal_map = orthogonal_map
        if use_trivialization:
            # The buffer starts as None and is populated on the first call to
            # right_inverse (i.e. the first assignment to the parametrized weight).
            self.register_buffer("base", None)

    def forward(self, X: torch.Tensor) -> torch.Tensor:
        """Map the unconstrained tensor ``X`` to an orthogonal matrix of the same shape."""
        n, k = X.size(-2), X.size(-1)
        transposed = n < k
        if transposed:
            X = X.mT
            n, k = k, n
        # Here n > k and X is a tall matrix
        if self.orthogonal_map == _OrthMaps.matrix_exp or self.orthogonal_map == _OrthMaps.cayley:
            # We just need n x k - k(k-1)/2 parameters
            X = X.tril()
            if n != k:
                # Embed into a square matrix
                X = torch.cat([X, X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
            A = X - X.mH
            # A is skew-symmetric (or skew-hermitian)
            if self.orthogonal_map == _OrthMaps.matrix_exp:
                Q = torch.matrix_exp(A)
            elif self.orthogonal_map == _OrthMaps.cayley:
                # Computes the Cayley retraction (I+A/2)(I-A/2)^{-1}
                Id = torch.eye(n, dtype=A.dtype, device=A.device)
                Q = torch.linalg.solve(torch.add(Id, A, alpha=-0.5), torch.add(Id, A, alpha=0.5))
            # Q is now orthogonal (or unitary) of size (..., n, n)
            if n != k:
                Q = Q[..., :k]
            # Q is now the size of the X (albeit perhaps transposed)
        else:
            # X is real here, as we do not support householder with complex numbers
            A = X.tril(diagonal=-1)
            tau = 2. / (1. + (A * A).sum(dim=-2))
            Q = torch.linalg.householder_product(A, tau)
            # The diagonal of X is 1's and -1's
            # We do not want to differentiate through this or update the diagonal of X hence the casting
            Q = Q * X.diagonal(dim1=-2, dim2=-1).int().unsqueeze(-2)

        if hasattr(self, "base"):
            # Dynamic trivialization: compose with the stored base matrix.
            Q = self.base @ Q
        if transposed:
            Q = Q.mT
        return Q

    @torch.autograd.no_grad()
    def right_inverse(self, Q: torch.Tensor) -> torch.Tensor:
        """Compute an unconstrained tensor whose ``forward`` image is (approximately) ``Q``.

        Also (re)initializes the trivialization base when it is in use.
        """
        if Q.shape != self.shape:
            raise ValueError(f"Expected a matrix or batch of matrices of shape {self.shape}. "
                             f"Got a tensor of shape {Q.shape}.")

        Q_init = Q
        n, k = Q.size(-2), Q.size(-1)
        transpose = n < k
        if transpose:
            Q = Q.mT
            n, k = k, n

        # We always make sure to always copy Q in every path
        if not hasattr(self, "base"):
            # Note [right_inverse expm cayley]
            # If we do not have use_trivialization=True, we just implement the inverse of the forward
            # map for the Householder. To see why, think that for the Cayley map,
            # we would need to find the matrix X \in R^{n x k} such that:
            # Y = torch.cat([X.tril(), X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
            # A = Y - Y.mH
            # cayley(A)[:, :k]
            # gives the original tensor. It is not clear how to do this.
            # Perhaps via some algebraic manipulation involving the QR like that of
            # Corollary 2.2 in Edelman, Arias and Smith?
            if self.orthogonal_map == _OrthMaps.cayley or self.orthogonal_map == _OrthMaps.matrix_exp:
                raise NotImplementedError("It is not possible to assign to the matrix exponential "
                                          "or the Cayley parametrizations when use_trivialization=False.")

            # If parametrization == _OrthMaps.householder, make Q orthogonal via the QR decomposition.
            # Here Q is always real because we do not support householder and complex matrices.
            # See note [Householder complex]
            A, tau = torch.geqrf(Q)
            # We want to have a decomposition X = QR with diag(R) > 0, as otherwise we could
            # decompose an orthogonal matrix Q as Q = (-Q)@(-Id), which is a valid QR decomposition
            # The diagonal of Q is the diagonal of R from the qr decomposition
            A.diagonal(dim1=-2, dim2=-1).sign_()
            # Equality with zero is ok because LAPACK returns exactly zero when it does not want
            # to use a particular reflection
            A.diagonal(dim1=-2, dim2=-1)[tau == 0.] *= -1
            return A.mT if transpose else A
        else:
            if n == k:
                # We check whether Q is orthogonal
                if not _is_orthogonal(Q):
                    Q = _make_orthogonal(Q)
                else:  # Is orthogonal
                    Q = Q.clone()
            else:
                # Complete Q into a full n x n orthogonal matrix
                N = torch.randn(*(Q.size()[:-2] + (n, n - k)), dtype=Q.dtype, device=Q.device)
                Q = torch.cat([Q, N], dim=-1)
                Q = _make_orthogonal(Q)
            self.base = Q

            # It is necessary to return the -Id, as we use the diagonal for the
            # Householder parametrization. Using -Id makes:
            # householder(torch.zeros(m,n)) == torch.eye(m,n)
            # Poor man's version of eye_like
            neg_Id = torch.zeros_like(Q_init)
            neg_Id.diagonal(dim1=-2, dim2=-1).fill_(-1.)
            return neg_Id
172
+
173
+
174
def orthogonal(module: Module,
               name: str = 'weight',
               orthogonal_map: Optional[str] = None,
               *,
               use_trivialization: bool = True) -> Module:
    r"""Apply an orthogonal or unitary parametrization to a matrix or a batch of matrices.

    Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, the parametrized
    matrix :math:`Q \in \mathbb{K}^{m \times n}` is **orthogonal** as

    .. math::

        \begin{align*}
            Q^{\text{H}}Q &= \mathrm{I}_n \mathrlap{\qquad \text{if }m \geq n}\\
            QQ^{\text{H}} &= \mathrm{I}_m \mathrlap{\qquad \text{if }m < n}
        \end{align*}

    where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex
    and the transpose when :math:`Q` is real-valued, and
    :math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
    In plain words, :math:`Q` will have orthonormal columns whenever :math:`m \geq n`
    and orthonormal rows otherwise.

    If the tensor has more than two dimensions, we consider it as a batch of matrices of shape `(..., m, n)`.

    The matrix :math:`Q` may be parametrized via three different ``orthogonal_map`` in terms of the original tensor:

    - ``"matrix_exp"``/``"cayley"``:
      the :func:`~torch.matrix_exp` :math:`Q = \exp(A)` and the `Cayley map`_
      :math:`Q = (\mathrm{I}_n + A/2)(\mathrm{I}_n - A/2)^{-1}` are applied to a skew-symmetric
      :math:`A` to give an orthogonal matrix.
    - ``"householder"``: computes a product of Householder reflectors
      (:func:`~torch.linalg.householder_product`).

    ``"matrix_exp"``/``"cayley"`` often make the parametrized weight converge faster than
    ``"householder"``, but they are slower to compute for very thin or very wide matrices.

    If ``use_trivialization=True`` (default), the parametrization implements the "Dynamic Trivialization Framework",
    where an extra matrix :math:`B \in \mathbb{K}^{n \times n}` is stored under
    ``module.parametrizations.weight[0].base``. This helps the
    convergence of the parametrized layer at the expense of some extra memory use.
    See `Trivializations for Gradient-Based Optimization on Manifolds`_ .

    Initial value of :math:`Q`:
    If the original tensor is not parametrized and ``use_trivialization=True`` (default), the initial value
    of :math:`Q` is that of the original tensor if it is orthogonal (or unitary in the complex case)
    and it is orthogonalized via the QR decomposition otherwise (see :func:`torch.linalg.qr`).
    Same happens when it is not parametrized and ``orthogonal_map="householder"`` even when ``use_trivialization=False``.
    Otherwise, the initial value is the result of the composition of all the registered
    parametrizations applied to the original tensor.

    .. note::
        This function is implemented using the parametrization functionality
        in :func:`~torch.nn.utils.parametrize.register_parametrization`.


    .. _`Cayley map`: https://en.wikipedia.org/wiki/Cayley_transform#Matrix_map
    .. _`Trivializations for Gradient-Based Optimization on Manifolds`: https://arxiv.org/abs/1909.09501

    Args:
        module (nn.Module): module on which to register the parametrization.
        name (str, optional): name of the tensor to make orthogonal. Default: ``"weight"``.
        orthogonal_map (str, optional): One of the following: ``"matrix_exp"``, ``"cayley"``, ``"householder"``.
            Default: ``"matrix_exp"`` if the matrix is square or complex, ``"householder"`` otherwise.
        use_trivialization (bool, optional): whether to use the dynamic trivialization framework.
            Default: ``True``.

    Returns:
        The original module with an orthogonal parametrization registered to the specified
        weight

    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
        >>> orth_linear = orthogonal(nn.Linear(20, 40))
        >>> orth_linear
        ParametrizedLinear(
        in_features=20, out_features=40, bias=True
        (parametrizations): ModuleDict(
            (weight): ParametrizationList(
            (0): _Orthogonal()
            )
        )
        )
        >>> # xdoctest: +IGNORE_WANT
        >>> Q = orth_linear.weight
        >>> torch.dist(Q.T @ Q, torch.eye(20))
        tensor(4.9332e-07)
    """
    weight = getattr(module, name, None)
    if not isinstance(weight, Tensor):
        raise ValueError(
            f"Module '{module}' has no parameter or buffer with name '{name}'"
        )

    # We could implement this for 1-dim tensors as the maps on the sphere
    # but I believe it'd bite more people than it'd help
    if weight.ndim < 2:
        raise ValueError("Expected a matrix or batch of matrices. "
                         f"Got a tensor of {weight.ndim} dimensions.")

    if orthogonal_map is None:
        # Square and complex matrices default to the matrix exponential;
        # rectangular real ones to the Householder product.
        is_square = weight.size(-2) == weight.size(-1)
        orthogonal_map = "matrix_exp" if is_square or weight.is_complex() else "householder"

    orth_enum = getattr(_OrthMaps, orthogonal_map, None)
    if orth_enum is None:
        raise ValueError('orthogonal_map has to be one of "matrix_exp", "cayley", "householder". '
                         f'Got: {orthogonal_map}')

    parametrization = _Orthogonal(weight,
                                  orth_enum,
                                  use_trivialization=use_trivialization)
    parametrize.register_parametrization(module, name, parametrization, unsafe=True)
    return module
287
+
288
+
289
+ class _WeightNorm(Module):
290
+ def __init__(
291
+ self,
292
+ dim: Optional[int] = 0,
293
+ ) -> None:
294
+ super().__init__()
295
+ if dim is None:
296
+ dim = -1
297
+ self.dim = dim
298
+
299
+ def forward(self, weight_g, weight_v):
300
+ return torch._weight_norm(weight_v, weight_g, self.dim)
301
+
302
+ def right_inverse(self, weight):
303
+ weight_g = torch.norm_except_dim(weight, 2, self.dim)
304
+ weight_v = weight
305
+
306
+ return weight_g, weight_v
307
+
308
+
309
def weight_norm(module: Module, name: str = 'weight', dim: int = 0):
    r"""Apply weight normalization to a parameter in the given module.

    .. math::
         \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|}

    Weight normalization is a reparameterization that decouples the magnitude
    of a weight tensor from its direction. This replaces the parameter specified
    by :attr:`name` with two parameters: one specifying the magnitude
    and one specifying the direction.

    By default, with ``dim=0``, the norm is computed independently per output
    channel/plane. To compute a norm over the entire weight tensor, use
    ``dim=None``.

    See https://arxiv.org/abs/1602.07868

    Args:
        module (Module): containing module
        name (str, optional): name of weight parameter
        dim (int, optional): dimension over which to compute the norm

    Returns:
        The original module with the weight norm hook

    Example::

        >>> m = weight_norm(nn.Linear(20, 40), name='weight')
        >>> m
        ParametrizedLinear(
          in_features=20, out_features=40, bias=True
          (parametrizations): ModuleDict(
            (weight): ParametrizationList(
              (0): _WeightNorm()
            )
          )
        )
        >>> m.parametrizations.weight.original0.size()
        torch.Size([40, 1])
        >>> m.parametrizations.weight.original1.size()
        torch.Size([40, 20])

    """
    parametrize.register_parametrization(module, name, _WeightNorm(dim), unsafe=True)

    def _weight_norm_compat_hook(state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
        # Migrate checkpoints saved with the pre-parametrization weight_norm,
        # which stored `<name>_g` / `<name>_v`, onto the parametrization's
        # `original0` / `original1` entries.
        old_g_key = f"{prefix}{name}_g"
        old_v_key = f"{prefix}{name}_v"
        if old_g_key in state_dict and old_v_key in state_dict:
            state_dict[f"{prefix}parametrizations.{name}.original0"] = state_dict.pop(old_g_key)
            state_dict[f"{prefix}parametrizations.{name}.original1"] = state_dict.pop(old_v_key)

    module._register_load_state_dict_pre_hook(_weight_norm_compat_hook)
    return module
365
+
366
+
367
class _SpectralNorm(Module):
    """Parametrization dividing a weight by an estimate of its largest singular value.

    The estimate is maintained by power iteration on buffers ``_u`` / ``_v``,
    which are updated in-place whenever ``forward`` runs in training mode.
    """

    def __init__(
        self,
        weight: torch.Tensor,
        n_power_iterations: int = 1,
        dim: int = 0,
        eps: float = 1e-12
    ) -> None:
        super().__init__()
        ndim = weight.ndim
        if dim >= ndim or dim < -ndim:
            raise IndexError("Dimension out of range (expected to be in range of "
                             f"[-{ndim}, {ndim - 1}] but got {dim})")

        if n_power_iterations <= 0:
            raise ValueError('Expected n_power_iterations to be positive, but '
                             f'got n_power_iterations={n_power_iterations}')
        # Normalize a negative dim to its positive equivalent.
        self.dim = dim if dim >= 0 else dim + ndim
        self.eps = eps
        if ndim > 1:
            # For ndim == 1 we do not need to approximate anything (see _SpectralNorm.forward)
            self.n_power_iterations = n_power_iterations
            weight_mat = self._reshape_weight_to_matrix(weight)
            h, w = weight_mat.size()

            # Random initial left/right singular-vector estimates.
            u = weight_mat.new_empty(h).normal_(0, 1)
            v = weight_mat.new_empty(w).normal_(0, 1)
            self.register_buffer('_u', F.normalize(u, dim=0, eps=self.eps))
            self.register_buffer('_v', F.normalize(v, dim=0, eps=self.eps))

            # Start with u, v initialized to some reasonable values by performing a number
            # of iterations of the power method
            self._power_method(weight_mat, 15)

    def _reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor:
        """Flatten ``weight`` to 2D with ``self.dim`` as the leading (row) dimension."""
        # Precondition
        assert weight.ndim > 1

        if self.dim != 0:
            # permute dim to front
            weight = weight.permute(self.dim, *(d for d in range(weight.dim()) if d != self.dim))

        return weight.flatten(1)

    @torch.autograd.no_grad()
    def _power_method(self, weight_mat: torch.Tensor, n_power_iterations: int) -> None:
        """Run ``n_power_iterations`` of the power method, updating ``_u``/``_v`` in place."""
        # See original note at torch/nn/utils/spectral_norm.py
        # NB: If `do_power_iteration` is set, the `u` and `v` vectors are
        #     updated in power iteration **in-place**. This is very important
        #     because in `DataParallel` forward, the vectors (being buffers) are
        #     broadcast from the parallelized module to each module replica,
        #     which is a new module object created on the fly. And each replica
        #     runs its own spectral norm power iteration. So simply assigning
        #     the updated vectors to the module this function runs on will cause
        #     the update to be lost forever. And the next time the parallelized
        #     module is replicated, the same randomly initialized vectors are
        #     broadcast and used!
        #
        #     Therefore, to make the change propagate back, we rely on two
        #     important behaviors (also enforced via tests):
        #     1. `DataParallel` doesn't clone storage if the broadcast tensor
        #        is already on correct device; and it makes sure that the
        #        parallelized module is already on `device[0]`.
        #     2. If the out tensor in `out=` kwarg has correct shape, it will
        #        just fill in the values.
        #     Therefore, since the same power iteration is performed on all
        #     devices, simply updating the tensors in-place will make sure that
        #     the module replica on `device[0]` will update the _u vector on the
        #     parallelized module (by shared storage).
        #
        #    However, after we update `u` and `v` in-place, we need to **clone**
        #    them before using them to normalize the weight. This is to support
        #    backproping through two forward passes, e.g., the common pattern in
        #    GAN training: loss = D(real) - D(fake). Otherwise, engine will
        #    complain that variables needed to do backward for the first forward
        #    (i.e., the `u` and `v` vectors) are changed in the second forward.

        # Precondition
        assert weight_mat.ndim > 1

        for _ in range(n_power_iterations):
            # Spectral norm of weight equals to `u^T W v`, where `u` and `v`
            # are the first left and right singular vectors.
            # This power iteration produces approximations of `u` and `v`.
            self._u = F.normalize(torch.mv(weight_mat, self._v),  # type: ignore[has-type]
                                  dim=0, eps=self.eps, out=self._u)  # type: ignore[has-type]
            self._v = F.normalize(torch.mv(weight_mat.t(), self._u),
                                  dim=0, eps=self.eps, out=self._v)  # type: ignore[has-type]

    def forward(self, weight: torch.Tensor) -> torch.Tensor:
        """Return ``weight`` divided by the current spectral-norm estimate."""
        if weight.ndim == 1:
            # Faster and more exact path, no need to approximate anything
            return F.normalize(weight, dim=0, eps=self.eps)
        else:
            weight_mat = self._reshape_weight_to_matrix(weight)
            if self.training:
                self._power_method(weight_mat, self.n_power_iterations)
            # See above on why we need to clone
            u = self._u.clone(memory_format=torch.contiguous_format)
            v = self._v.clone(memory_format=torch.contiguous_format)
            # The proper way of computing this should be through F.bilinear, but
            # it seems to have some efficiency issues:
            # https://github.com/pytorch/pytorch/issues/58093
            sigma = torch.dot(u, torch.mv(weight_mat, v))
            return weight / sigma

    def right_inverse(self, value: torch.Tensor) -> torch.Tensor:
        """Pass ``value`` through unchanged as the initial unconstrained tensor."""
        # we may want to assert here that the passed value already
        # satisfies constraints
        return value
477
+
478
+
479
def spectral_norm(module: Module,
                  name: str = 'weight',
                  n_power_iterations: int = 1,
                  eps: float = 1e-12,
                  dim: Optional[int] = None) -> Module:
    r"""Apply spectral normalization to a parameter in the given module.

    .. math::
        \mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})},
        \sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}

    When applied on a vector, it simplifies to

    .. math::
        \mathbf{x}_{SN} = \dfrac{\mathbf{x}}{\|\mathbf{x}\|_2}

    Spectral normalization stabilizes the training of discriminators (critics)
    in Generative Adversarial Networks (GANs) by reducing the Lipschitz constant
    of the model. :math:`\sigma` is approximated performing one iteration of the
    `power method`_ every time the weight is accessed. If the dimension of the
    weight tensor is greater than 2, it is reshaped to 2D in power iteration
    method to get spectral norm.


    See `Spectral Normalization for Generative Adversarial Networks`_ .

    .. _`power method`: https://en.wikipedia.org/wiki/Power_iteration
    .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957

    .. note::
        This function is implemented using the parametrization functionality
        in :func:`~torch.nn.utils.parametrize.register_parametrization`. It is a
        reimplementation of :func:`torch.nn.utils.spectral_norm`.

    .. note::
        When this constraint is registered, the singular vectors associated to the largest
        singular value are estimated rather than sampled at random. These are then updated
        performing :attr:`n_power_iterations` of the `power method`_ whenever the tensor
        is accessed with the module on `training` mode.

    .. note::
        If the `_SpectralNorm` module, i.e., `module.parametrization.weight[idx]`,
        is in training mode on removal, it will perform another power iteration.
        If you'd like to avoid this iteration, set the module to eval mode
        before its removal.

    Args:
        module (nn.Module): containing module
        name (str, optional): name of weight parameter. Default: ``"weight"``.
        n_power_iterations (int, optional): number of power iterations to
            calculate spectral norm. Default: ``1``.
        eps (float, optional): epsilon for numerical stability in
            calculating norms. Default: ``1e-12``.
        dim (int, optional): dimension corresponding to number of outputs.
            Default: ``0``, except for modules that are instances of
            ConvTranspose{1,2,3}d, when it is ``1``

    Returns:
        The original module with a new parametrization registered to the specified
        weight

    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> snm = spectral_norm(nn.Linear(20, 40))
        >>> snm
        ParametrizedLinear(
          in_features=20, out_features=40, bias=True
          (parametrizations): ModuleDict(
            (weight): ParametrizationList(
              (0): _SpectralNorm()
            )
          )
        )
        >>> torch.linalg.matrix_norm(snm.weight, 2)
        tensor(1.0081, grad_fn=<AmaxBackward0>)
    """
    weight = getattr(module, name, None)
    if not isinstance(weight, Tensor):
        raise ValueError(
            f"Module '{module}' has no parameter or buffer with name '{name}'"
        )

    if dim is None:
        # Transposed convolutions keep their output channels in dim 1;
        # every other module defaults to dim 0.
        transposed_convs = (torch.nn.ConvTranspose1d,
                            torch.nn.ConvTranspose2d,
                            torch.nn.ConvTranspose3d)
        dim = 1 if isinstance(module, transposed_convs) else 0

    parametrize.register_parametrization(
        module, name, _SpectralNorm(weight, n_power_iterations, dim, eps))
    return module
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/parametrize.py ADDED
@@ -0,0 +1,758 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch.nn.modules.container import ModuleList, ModuleDict, Module
3
+ from torch.nn.parameter import Parameter
4
+ from torch import Tensor
5
+
6
+ import collections
7
+ import copyreg
8
+ from copy import deepcopy
9
+ from contextlib import contextmanager
10
+ from typing import Union, Optional, Dict, Tuple, Sequence
11
+
12
__all__ = ['cached', 'ParametrizationList', 'register_parametrization', 'is_parametrized', 'remove_parametrizations',
           'type_before_parametrizations', 'transfer_parametrizations_and_params']

# Nesting depth of active `cached()` context managers; caching is on while nonzero.
_cache_enabled = 0
# Maps (id(module), tensor_name) -> computed parametrized tensor while caching is active.
_cache: Dict[Tuple[int, str], Optional[Tensor]] = {}
17
+
18
+
19
@contextmanager
def cached():
    r"""Enable the caching system for parametrizations registered with :func:`register_parametrization`.

    While the context is active, the value of each parametrized tensor is computed
    and cached the first time it is accessed, and the cached value is reused on
    subsequent accesses. All cached values are discarded when the outermost
    context exits.

    This is useful when a parametrized tensor is used more than once in the
    forward pass, e.g. the recurrent kernel of an RNN or shared weights:

    .. code-block:: python

        import torch.nn.utils.parametrize as P
        ...
        with P.cached():
            output = model(inputs)

    One may also wrap only the part of the module that reuses the parametrized
    tensor, such as the time loop of an RNN:

    .. code-block:: python

        with P.cached():
            for x in xs:
                out_rnn = self.rnn_cell(x, out_rnn)
    """
    global _cache, _cache_enabled
    _cache_enabled += 1
    try:
        yield
    finally:
        _cache_enabled -= 1
        # Only the outermost `cached()` clears the cache, so nesting is safe.
        if _cache_enabled == 0:
            _cache = {}
59
+
60
+
61
+ def _register_parameter_or_buffer(module, name, X):
62
+ if isinstance(X, Parameter):
63
+ module.register_parameter(name, X)
64
+ else:
65
+ module.register_buffer(name, X)
66
+
67
+
68
class ParametrizationList(ModuleList):
    r"""A sequential container that holds and manages the original parameters or buffers of a parametrized :class:`torch.nn.Module`.

    It is the type of ``module.parametrizations[tensor_name]`` when ``module[tensor_name]``
    has been parametrized with :func:`register_parametrization`.

    If the first registered parametrization has a ``right_inverse`` that returns one tensor or
    does not have a ``right_inverse`` (in which case we assume that ``right_inverse`` is the identity),
    it will hold the tensor under the name ``original``.
    If it has a ``right_inverse`` that returns more than one tensor, these will be registered as
    ``original0``, ``original1``, ...

    .. warning::
        This class is used internally by :func:`register_parametrization`. It is documented
        here for completeness. It shall not be instantiated by the user.

    Args:
        modules (sequence): sequence of modules representing the parametrizations
        original (Parameter or Tensor): parameter or buffer that is parametrized
        unsafe (bool): a boolean flag that denotes whether the parametrization
            may change the dtype and shape of the tensor. Default: `False`
            Warning: the parametrization is not checked for consistency upon registration.
            Enable this flag at your own risk.
    """

    original: Tensor
    unsafe: bool

    def __init__(
        self, modules: Sequence[Module], original: Union[Tensor, Parameter], unsafe: bool = False
    ) -> None:
        # We require this because we need to treat differently the first parametrization
        # This should never throw, unless this class is used from the outside
        if len(modules) == 0:
            raise ValueError("ParametrizationList requires one or more modules.")

        super().__init__(modules)
        self.unsafe = unsafe

        # In plain words:
        # module.weight must keep its dtype and shape.
        # Furthermore, if there is no right_inverse or the right_inverse returns a tensor,
        # this should be of the same dtype as the original tensor
        #
        # We check that the following invariants hold:
        #    X = module.weight
        #    Y = param.right_inverse(X)
        #    assert isinstance(Y, Tensor) or
        #           (isinstance(Y, collections.abc.Sequence) and all(isinstance(t, Tensor) for t in Y))
        #    Z = param(Y) if isinstance(Y, Tensor) else param(*Y)
        #    # Consistency checks
        #    assert X.dtype == Z.dtype and X.shape == Z.shape
        #    # If it has one input, this allows to be able to use set_ to be able to
        #    # move data to/from the original tensor without changing its id (which is what the
        #    # optimizer uses to track parameters)
        #    if isinstance(Y, Tensor)
        #        assert X.dtype == Y.dtype
        # Below we use original = X, new = Y

        original_shape = original.shape
        original_dtype = original.dtype

        # Compute new: pull the unparametrized value back through every
        # right_inverse, from the last registered parametrization to the first.
        with torch.no_grad():
            new = original
            for module in reversed(self):  # type: ignore[call-overload]
                if hasattr(module, "right_inverse"):
                    try:
                        new = module.right_inverse(new)
                    except NotImplementedError:
                        pass
                # else, or if it throws, we assume that right_inverse is the identity

        if not isinstance(new, Tensor) and not isinstance(new, collections.abc.Sequence):
            raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors (list, tuple...). "
                             f"Got {type(new).__name__}")

        # Set the number of original tensors
        # is_tensor: True when the first parametrization takes exactly one input tensor.
        self.is_tensor = isinstance(new, Tensor)
        self.ntensors = 1 if self.is_tensor else len(new)

        # Register the tensor(s)
        if self.is_tensor:
            if original.dtype != new.dtype:
                raise ValueError(
                    "When `right_inverse` outputs one tensor, it may not change the dtype.\n"
                    f"original.dtype: {original.dtype}\n"
                    f"right_inverse(original).dtype: {new.dtype}"
                )
            # Set the original to original so that the user does not need to re-register the parameter
            # manually in the optimiser
            with torch.no_grad():
                original.set_(new)  # type: ignore[call-overload]
            _register_parameter_or_buffer(self, "original", original)
        else:
            for i, originali in enumerate(new):
                if not isinstance(originali, Tensor):
                    raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors "
                                     "(list, tuple...). "
                                     f"Got element {i} of the sequence with type {type(originali).__name__}.")

                # If the original tensor was a Parameter that required grad, we expect the user to
                # add the new parameters to the optimizer after registering the parametrization
                # (this is documented)
                if isinstance(original, Parameter):
                    originali = Parameter(originali)
                originali.requires_grad_(original.requires_grad)
                _register_parameter_or_buffer(self, f"original{i}", originali)

        if not self.unsafe:
            # Consistency checks:
            # Since f : A -> B, right_inverse : B -> A, Z and original should live in B
            # Z = forward(right_inverse(original))
            Z = self()
            if not isinstance(Z, Tensor):
                raise ValueError(
                    f"A parametrization must return a tensor. Got {type(Z).__name__}."
                )
            if Z.dtype != original_dtype:
                raise ValueError(
                    "Registering a parametrization may not change the dtype of the tensor, unless `unsafe` flag is enabled.\n"
                    f"unparametrized dtype: {original_dtype}\n"
                    f"parametrized dtype: {Z.dtype}"
                )
            if Z.shape != original_shape:
                raise ValueError(
                    "Registering a parametrization may not change the shape of the tensor, unless `unsafe` flag is enabled.\n"
                    f"unparametrized shape: {original_shape}\n"
                    f"parametrized shape: {Z.shape}"
                )

    def right_inverse(self, value: Tensor) -> None:
        r"""Call the ``right_inverse`` methods of the parametrizations in the inverse registration order.

        Then, it stores the result in ``self.original`` if ``right_inverse`` outputs one tensor
        or in ``self.original0``, ``self.original1``, ... if it outputs several.

        Args:
            value (Tensor): Value to which initialize the module
        """
        # All the exceptions in this function should almost never throw.
        # They could throw if, for example, right_inverse function returns a different
        # dtype when given a different input, which should most likely be caused by a
        # bug in the user's code

        with torch.no_grad():
            # See https://github.com/pytorch/pytorch/issues/53103
            for module in reversed(self):  # type: ignore[call-overload]
                if hasattr(module, "right_inverse"):
                    value = module.right_inverse(value)
                else:
                    raise RuntimeError(f"parametrization {type(module).__name__} does not implement "
                                       "right_inverse.")
            if self.is_tensor:
                # These exceptions should only throw when a right_inverse function does not
                # return the same dtype for every input, which should most likely be caused by a bug
                if not isinstance(value, Tensor):
                    raise ValueError(
                        f"`right_inverse` should return a tensor. Got {type(value).__name__}"
                    )
                if value.dtype != self.original.dtype:
                    raise ValueError(
                        f"The tensor returned by `right_inverse` has dtype {value.dtype} "
                        f"while `original` has dtype {self.original.dtype}"
                    )
                # We know that the result is going to have the same dtype
                self.original.set_(value)  # type: ignore[call-overload]
            else:
                if not isinstance(value, collections.abc.Sequence):
                    raise ValueError(
                        "'right_inverse' must return a sequence of tensors. "
                        f"Got {type(value).__name__}."
                    )
                if len(value) != self.ntensors:
                    raise ValueError(
                        "'right_inverse' must return a sequence of tensors of length "
                        f"{self.ntensors}. Got a sequence of length {len(value)}."
                    )
                for i, tensor in enumerate(value):
                    original_i = getattr(self, f"original{i}")
                    if not isinstance(tensor, Tensor):
                        raise ValueError(
                            f"`right_inverse` must return a sequence of tensors. "
                            f"Got element {i} of type {type(tensor).__name__}"
                        )
                    if original_i.dtype != tensor.dtype:
                        raise ValueError(
                            f"Tensor {i} returned by `right_inverse` has dtype {tensor.dtype} "
                            f"while `original{i}` has dtype {original_i.dtype}"
                        )
                    original_i.set_(tensor)

    def forward(self) -> Tensor:
        r"""Apply every parametrization in order to the stored original tensor(s) and return the result."""
        if torch.jit.is_scripting():
            raise RuntimeError('Parametrization is not working with scripting.')
        # Unpack the originals for the first parametrization
        if self.is_tensor:
            x = self[0](self.original)
        else:
            originals = (getattr(self, f"original{i}") for i in range(self.ntensors))
            x = self[0](*originals)
        # It's not possible to call self[1:] here, so we have to be a bit more cryptic
        # Also we want to skip all non-integer keys
        curr_idx = 1
        while hasattr(self, str(curr_idx)):
            x = self[curr_idx](x)
            curr_idx += 1
        return x
276
+
277
+
278
def _inject_new_class(module: Module) -> None:
    r"""Set up a module to be parametrized.

    This works by substituting the class of the module by a class
    that extends it to be able to inject a property

    Args:
        module (nn.Module): module into which to inject the property
    """
    cls = module.__class__

    def default_deepcopy(self, memo):
        # Just emulate a standard deepcopy procedure when __deepcopy__ doesn't exist in the current class.
        obj = memo.get(id(self), None)
        if obj is not None:
            return obj
        replica = self.__new__(self.__class__)
        memo[id(self)] = replica
        replica.__dict__ = deepcopy(self.__dict__, memo)
        # Also save all slots if they exist.
        slots_to_save = copyreg._slotnames(self.__class__)  # type: ignore[attr-defined]
        for slot in slots_to_save:
            if hasattr(self, slot):
                setattr(replica, slot, deepcopy(getattr(self, slot), memo))
        return replica

    def getstate(self):
        # Pickling the dynamically created subclass is not supported, so fail loudly.
        raise RuntimeError(
            "Serialization of parametrized modules is only "
            "supported through state_dict(). See:\n"
            "https://pytorch.org/tutorials/beginner/saving_loading_models.html"
            "#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training"
        )

    dct = {"__getstate__": getstate}
    # We don't allow serialization of parametrized modules but should still allow deepcopying.
    # Default 'deepcopy' function invokes __deepcopy__ method instead of __getstate__ when it exists.
    if not hasattr(cls, "__deepcopy__"):
        dct["__deepcopy__"] = default_deepcopy  # type: ignore[assignment]

    # Create a one-off subclass of the module's class; the property injected later
    # by _inject_property lives on this subclass, not on the shared original class.
    param_cls = type(
        f"Parametrized{cls.__name__}",
        (cls,),
        dct,
    )

    module.__class__ = param_cls
325
+
326
+
327
def _inject_property(module: Module, tensor_name: str) -> None:
    r"""Injects a property into module[tensor_name].

    It assumes that the class in the module has already been modified from its
    original one using _inject_new_class and that the tensor under :attr:`tensor_name`
    has already been moved out

    Args:
        module (nn.Module): module into which to inject the property
        tensor_name (str): name of the name of the property to create
    """
    # We check the precondition.
    # This should never fire if register_parametrization is correctly implemented
    assert not hasattr(module, tensor_name)

    @torch.jit.unused
    def get_cached_parametrization(parametrization) -> Tensor:
        # Compute-once lookup keyed by (module identity, tensor name); only used
        # while a `cached()` context is active.
        global _cache
        key = (id(module), tensor_name)
        tensor = _cache.get(key)
        if tensor is None:
            tensor = parametrization()
            _cache[key] = tensor
        return tensor

    def get_parametrized(self) -> Tensor:
        if torch.jit.is_scripting():
            raise RuntimeError('Parametrization is not working with scripting.')
        parametrization = self.parametrizations[tensor_name]
        if _cache_enabled:
            if torch.jit.is_scripting():
                # Scripting
                raise RuntimeError('Caching is not implemented for scripting. '
                                   'Either disable caching or avoid scripting.')
            elif torch._C._get_tracing_state() is not None:
                # Tracing
                raise RuntimeError('Cannot trace a model while caching parametrizations.')
            else:
                return get_cached_parametrization(parametrization)
        else:
            # If caching is not active, this function just evaluates the parametrization
            return parametrization()

    def set_original(self, value: Tensor) -> None:
        # Assigning to module.<tensor_name> re-initializes the original tensor(s)
        # via the chain of right_inverse methods.
        if torch.jit.is_scripting():
            raise RuntimeError('Parametrization is not working with scripting.')
        self.parametrizations[tensor_name].right_inverse(value)

    setattr(module.__class__, tensor_name, property(get_parametrized, set_original))
376
+
377
def register_parametrization(
    module: Module, tensor_name: str, parametrization: Module, *, unsafe: bool = False,
) -> Module:
    r"""Register a parametrization to a tensor in a module.

    Assume that ``tensor_name="weight"`` for simplicity. When accessing ``module.weight``,
    the module will return the parametrized version ``parametrization(module.weight)``.
    If the original tensor requires a gradient, the backward pass will differentiate
    through :attr:`parametrization`, and the optimizer will update the tensor accordingly.

    The first time that a module registers a parametrization, this function will add an attribute
    ``parametrizations`` to the module of type :class:`~ParametrizationList`.

    The list of parametrizations on the tensor ``weight`` will be accessible under
    ``module.parametrizations.weight``.

    The original tensor will be accessible under
    ``module.parametrizations.weight.original``.

    Parametrizations may be concatenated by registering several parametrizations
    on the same attribute.

    The training mode of a registered parametrization is updated on registration
    to match the training mode of the host module

    Parametrized parameters and buffers have an inbuilt caching system that can be activated
    using the context manager :func:`cached`.

    A :attr:`parametrization` may optionally implement a method with signature

    .. code-block:: python

        def right_inverse(self, X: Tensor) -> Union[Tensor, Sequence[Tensor]]

    This method is called on the unparametrized tensor when the first parametrization
    is registered to compute the initial value of the original tensor.
    If this method is not implemented, the original tensor will be just the unparametrized tensor.

    If all the parametrizations registered on a tensor implement `right_inverse` it is possible
    to initialize a parametrized tensor by assigning to it, as shown in the example below.

    It is possible for the first parametrization to depend on several inputs.
    This may be implemented returning a tuple of tensors from ``right_inverse``
    (see the example implementation of a ``RankOne`` parametrization below).

    In this case, the unconstrained tensors are also located under ``module.parametrizations.weight``
    with names ``original0``, ``original1``,...

    .. note::

        If unsafe=False (default) both the forward and right_inverse methods will be called
        once to perform a number of consistency checks.
        If unsafe=True, then right_inverse will be called if the tensor is not parametrized,
        and nothing will be called otherwise.

    .. note::

        In most situations, ``right_inverse`` will be a function such that
        ``forward(right_inverse(X)) == X`` (see
        `right inverse <https://en.wikipedia.org/wiki/Inverse_function#Right_inverses>`_).
        Sometimes, when the parametrization is not surjective, it may be reasonable
        to relax this.

    .. warning::

        If a parametrization depends on several inputs, :func:`~register_parametrization`
        will register a number of new parameters. If such parametrization is registered
        after the optimizer is created, these new parameters will need to be added manually
        to the optimizer. See :meth:`torch.Optimizer.add_param_group`.

    Args:
        module (nn.Module): module on which to register the parametrization
        tensor_name (str): name of the parameter or buffer on which to register
            the parametrization
        parametrization (nn.Module): the parametrization to register
    Keyword args:
        unsafe (bool): a boolean flag that denotes whether the parametrization
            may change the dtype and shape of the tensor. Default: `False`
            Warning: the parametrization is not checked for consistency upon registration.
            Enable this flag at your own risk.

    Returns:
        Module: module

    Raises:
        ValueError: if the module does not have a parameter or a buffer named :attr:`tensor_name`

    Examples:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
        >>> import torch
        >>> import torch.nn as nn
        >>> import torch.nn.utils.parametrize as P
        >>>
        >>> class Symmetric(nn.Module):
        >>>     def forward(self, X):
        >>>         return X.triu() + X.triu(1).T  # Return a symmetric matrix
        >>>
        >>>     def right_inverse(self, A):
        >>>         return A.triu()
        >>>
        >>> m = nn.Linear(5, 5)
        >>> P.register_parametrization(m, "weight", Symmetric())
        >>> print(torch.allclose(m.weight, m.weight.T))  # m.weight is now symmetric
        True
        >>> A = torch.rand(5, 5)
        >>> A = A + A.T  # A is now symmetric
        >>> m.weight = A  # Initialize the weight to be the symmetric matrix A
        >>> print(torch.allclose(m.weight, A))
        True

        >>> class RankOne(nn.Module):
        >>>     def forward(self, x, y):
        >>>         # Form a rank 1 matrix multiplying two vectors
        >>>         return x.unsqueeze(-1) @ y.unsqueeze(-2)
        >>>
        >>>     def right_inverse(self, Z):
        >>>         # Project Z onto the rank 1 matrices
        >>>         U, S, Vh = torch.linalg.svd(Z, full_matrices=False)
        >>>         # Return rescaled singular vectors
        >>>         s0_sqrt = S[0].sqrt().unsqueeze(-1)
        >>>         return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt
        >>>
        >>> linear_rank_one = P.register_parametrization(nn.Linear(4, 4), "weight", RankOne())
        >>> print(torch.linalg.matrix_rank(linear_rank_one.weight).item())
        1

    """
    parametrization.train(module.training)
    if is_parametrized(module, tensor_name):
        # Correctness checks.
        # If A is the space of tensors with shape and dtype equal to module.weight
        # we check that parametrization.forward and parametrization.right_inverse are
        # functions from A to A
        if not unsafe:
            Y = getattr(module, tensor_name)
            X = parametrization(Y)
            if not isinstance(X, Tensor):
                raise ValueError(
                    f"A parametrization must return a tensor. Got {type(X).__name__}."
                )
            if X.dtype != Y.dtype:
                raise ValueError(
                    "Registering a parametrization may not change the dtype of the tensor, unless the `unsafe` flag is enabled.\n"
                    f"module.{tensor_name}.dtype: {Y.dtype}\n"
                    f"parametrization(module.{tensor_name}).dtype: {X.dtype}"
                )
            if X.shape != Y.shape:
                raise ValueError(
                    "Registering a parametrization may not change the shape of the tensor, unless the `unsafe` flag is enabled.\n"
                    f"module.{tensor_name}.shape: {Y.shape}\n"
                    f"parametrization(module.{tensor_name}).shape: {X.shape}"
                )
            if hasattr(parametrization, "right_inverse"):
                try:
                    Z = parametrization.right_inverse(X)  # type: ignore[operator]
                except NotImplementedError:
                    pass
                else:
                    if not isinstance(Z, Tensor):
                        raise ValueError(
                            f"parametrization.right_inverse must return a tensor. Got: {type(Z).__name__}"
                        )
                    if Z.dtype != Y.dtype:
                        raise ValueError(
                            "The tensor returned by parametrization.right_inverse must have the same dtype "
                            f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n"
                            f"module.{tensor_name}.dtype: {Y.dtype}\n"
                            f"returned dtype: {Z.dtype}"
                        )
                    if Z.shape != Y.shape:
                        raise ValueError(
                            "The tensor returned by parametrization.right_inverse must have the same shape "
                            f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n"
                            f"module.{tensor_name}.shape: {Y.shape}\n"
                            f"returned shape: {Z.shape}"
                        )
            # else right_inverse is assumed to be the identity

        # add the new parametrization to the parametrization list
        assert isinstance(module.parametrizations, ModuleDict)  # Make mypy happy
        module.parametrizations[tensor_name].append(parametrization)
        # If unsafe was True in previous parametrization, keep it enabled
        module.parametrizations[tensor_name].unsafe |= unsafe  # type: ignore[index, union-attr]
    elif tensor_name in module._buffers or tensor_name in module._parameters:
        # Set the parametrization mechanism
        # Fetch the original buffer or parameter
        original = getattr(module, tensor_name)
        # We create this early to check for possible errors
        parametrizations = ParametrizationList([parametrization], original, unsafe=unsafe)
        # Delete the previous parameter or buffer
        delattr(module, tensor_name)
        # If this is the first parametrization registered on the module,
        # we prepare the module to inject the property
        if not is_parametrized(module):
            # Change the class
            _inject_new_class(module)
            # Inject a ``ModuleDict`` into the instance under module.parametrizations
            module.parametrizations = ModuleDict()
        # Add a property into the class
        _inject_property(module, tensor_name)
        # Add a ParametrizationList
        assert isinstance(module.parametrizations, ModuleDict)  # Make mypy happy
        module.parametrizations[tensor_name] = parametrizations
    else:
        raise ValueError(
            f"Module '{module}' does not have a parameter, a buffer, or a "
            f"parametrized element with name '{tensor_name}'"
        )
    return module
583
+
584
+
585
def is_parametrized(module: Module, tensor_name: Optional[str] = None) -> bool:
    r"""Determine if a module has a parametrization.

    Args:
        module (nn.Module): module to query
        tensor_name (str, optional): name of the parameter in the module
            Default: ``None``
    Returns:
        ``True`` if :attr:`module` has a parametrization for the parameter named
        :attr:`tensor_name`, or any parametrization at all when :attr:`tensor_name`
        is ``None``; otherwise ``False``
    """
    registry = getattr(module, "parametrizations", None)
    # A missing attribute (None) also fails the isinstance check.
    if not isinstance(registry, ModuleDict):
        return False
    # Without a name, any parametrized tensor counts.
    return bool(len(registry)) if tensor_name is None else tensor_name in registry
605
+
606
def remove_parametrizations(
    module: Module, tensor_name: str, leave_parametrized: bool = True
) -> Module:
    r"""Remove the parametrizations on a tensor in a module.

    - If ``leave_parametrized=True``, ``module[tensor_name]`` will be set to
      its current output. In this case, the parametrization shall not change the ``dtype``
      of the tensor.
    - If ``leave_parametrized=False``, ``module[tensor_name]`` will be set to
      the unparametrised tensor in ``module.parametrizations[tensor_name].original``.
      This is only possible when the parametrization depends on just one tensor.

    Args:
        module (nn.Module): module from which remove the parametrization
        tensor_name (str): name of the parametrization to be removed
        leave_parametrized (bool, optional): leave the attribute :attr:`tensor_name` parametrized.
            Default: ``True``

    Returns:
        Module: module

    Raises:
        ValueError: if ``module[tensor_name]`` is not parametrized
        ValueError: if ``leave_parametrized=False`` and the parametrization depends on several tensors
    """
    if not is_parametrized(module, tensor_name):
        raise ValueError(f"Module {module} does not have a parametrization on {tensor_name}")

    # Fetch the original tensor
    assert isinstance(module.parametrizations, ModuleDict)  # Make mypy happy
    parametrizations = module.parametrizations[tensor_name]
    if parametrizations.is_tensor:
        original = parametrizations.original
        if leave_parametrized:
            with torch.no_grad():
                t = getattr(module, tensor_name)
            # We know they have the same dtype because we have checked this when registering the
            # parametrizations. As such, we can use set_
            # We do this so that the parameter does not to change the id()
            # This way the user does not need to update the optimizer
            with torch.no_grad():
                if type(original) is torch.Tensor:
                    original.set_(t)
                else:
                    try:
                        original.set_(t)
                    except RuntimeError as e:
                        # TODO: Fix this for tensor subclasses that are parameters:
                        # RuntimeError: set_storage is not allowed on a Tensor created from .data or .detach().
                        raise RuntimeError("Calling remove_parametrizations() with leave_parametrized=True "
                                           "for a parameter that is an instance of a tensor subclass requires "
                                           "set_() to be implemented correctly for the tensor subclass. Either "
                                           "set leave_parametrized=False or provide a working implementation for "
                                           "set_() in the tensor subclass.") from e
    else:
        if leave_parametrized:
            # We cannot use no_grad because we need to know whether one or more
            # original tensors required grad
            t = getattr(module, tensor_name)
            # We'll have to trust the user to add it to the optimizer
            original = Parameter(t) if t.requires_grad else t
        else:
            raise ValueError("Cannot leave unparametrized (`leave_parametrized=False`) a tensor "
                             "that is parametrized in terms of a sequence of tensors.")

    # Delete the property that manages the parametrization
    delattr(module.__class__, tensor_name)
    # Delete the ParametrizationList
    del module.parametrizations[tensor_name]

    # Restore the parameter / buffer into the main class
    _register_parameter_or_buffer(module, tensor_name, original)

    # Roll back the parametrized class if no other buffer or parameter
    # is currently parametrized in this class
    if not is_parametrized(module):
        delattr(module, "parametrizations")
        # Restore class: the parametrized class was created by _inject_new_class
        # as a subclass whose first base is the original class.
        orig_cls = module.__class__.__bases__[0]
        module.__class__ = orig_cls
    return module
687
+
688
def type_before_parametrizations(module: Module) -> type:
    r"""Return the module type before parametrizations were applied and if not, then it returns the module type.

    Args:
        module (nn.Module): module to get type of
    """
    # A parametrized module's class is a dynamically created subclass of its
    # original class, so the original class is its first base.
    return module.__class__.__bases__[0] if is_parametrized(module) else type(module)
698
+
699
def transfer_parametrizations_and_params(
    from_module: Module, to_module: Module, tensor_name: Optional[str] = None
) -> Module:
    r"""Transfer parametrizations and the parameters they parametrize from :attr:`from_module` to :attr:`to_module`.

    If :attr:`tensor_name` is specified, only transfers the specified parameter, otherwise
    transfers all parametrized parameters. If those parameters do not exist in to_module, it will create them.
    Does nothing if from_module is not parametrized.

    Args:
        from_module (nn.Module): module to transfer from
        to_module (nn.Module): module to transfer to
        tensor_name (str, optional): parameter to transfer

    Returns:
        Module: to_module
    """
    if is_parametrized(from_module):
        assert isinstance(from_module.parametrizations, ModuleDict)  # for mypy

        # get list of all params or the single param to transfer
        parameters_to_transfer: Union[list, ModuleDict] = (
            from_module.parametrizations if tensor_name is None else [tensor_name]
        )

        assert hasattr(parameters_to_transfer, "__iter__")  # for mypy
        for parameter_name in parameters_to_transfer:

            # initialize the to-be-transferred param in to_module if it doesn't exist already
            if not hasattr(to_module, parameter_name):
                setattr(
                    to_module,
                    parameter_name,
                    Parameter(getattr(from_module, parameter_name)),
                )

            # apply the params's parametrizations to to_module
            for param_func in from_module.parametrizations[parameter_name]:
                register_parametrization(to_module, parameter_name, param_func)
            assert isinstance(to_module.parametrizations, ModuleDict)  # for mypy

            # make values match, original values can be stored in either original or
            # original0, original1..., need to check both cases
            if hasattr(from_module.parametrizations[parameter_name], "original"):
                to_module.parametrizations[parameter_name].original = \
                    from_module.parametrizations[parameter_name].original
            else:
                num = 0
                orig_num = "original" + str(num)
                # loop through each original# until all values have been set
                while hasattr(from_module.parametrizations[parameter_name], orig_num):
                    setattr(
                        to_module.parametrizations[parameter_name],
                        orig_num,
                        getattr(from_module.parametrizations[parameter_name], orig_num),
                    )
                    num = num + 1
                    orig_num = "original" + str(num)

    return to_module
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/prune.py ADDED
@@ -0,0 +1,1379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""Pruning methods."""
2
+ import numbers
3
+ from abc import ABC, abstractmethod
4
+ from collections.abc import Iterable
5
+ from typing import Tuple
6
+
7
+ import torch
8
+
9
+
10
class BasePruningMethod(ABC):
    r"""Abstract base class for creation of new pruning techniques.

    Provides a skeleton for customization requiring the overriding of methods
    such as :meth:`compute_mask` and :meth:`apply`.
    """

    # Name of the parameter this method instance prunes; assigned in apply().
    _tensor_name: str

    def __call__(self, module, inputs):
        r"""Multiply the mask into original tensor and store the result.

        Multiplies the mask (stored in ``module[name + '_mask']``)
        into the original tensor (stored in ``module[name + '_orig']``)
        and stores the result into ``module[name]`` by using :meth:`apply_mask`.

        Args:
            module (nn.Module): module containing the tensor to prune
            inputs: not used.
        """
        # Registered as a forward pre-hook: recompute the pruned tensor from
        # the (possibly retrained) original weights before every forward().
        setattr(module, self._tensor_name, self.apply_mask(module))

    @abstractmethod
    def compute_mask(self, t, default_mask):
        r"""Compute and returns a mask for the input tensor ``t``.

        Starting from a base ``default_mask`` (which should be a mask of ones
        if the tensor has not been pruned yet), generate a random mask to
        apply on top of the ``default_mask`` according to the specific pruning
        method recipe.

        Args:
            t (torch.Tensor): tensor representing the importance scores of the
                parameter to prune.
            default_mask (torch.Tensor): Base mask from previous pruning
                iterations, that need to be respected after the new mask is
                applied. Same dims as ``t``.

        Returns:
            mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
        """
        pass

    def apply_mask(self, module):
        r"""Simply handles the multiplication between the parameter being pruned and the generated mask.

        Fetches the mask and the original tensor from the module
        and returns the pruned version of the tensor.

        Args:
            module (nn.Module): module containing the tensor to prune

        Returns:
            pruned_tensor (torch.Tensor): pruned version of the input tensor
        """
        # to carry out the multiplication, the mask needs to have been computed,
        # so the pruning method must know what tensor it's operating on
        assert self._tensor_name is not None, f"Module {module} has to be pruned"  # this gets set in apply()
        mask = getattr(module, self._tensor_name + "_mask")
        orig = getattr(module, self._tensor_name + "_orig")
        # Cast the (possibly bool/uint8) mask to the weight dtype before the
        # elementwise product so the result keeps the original dtype.
        pruned_tensor = mask.to(dtype=orig.dtype) * orig
        return pruned_tensor

    @classmethod
    def apply(cls, module, name, *args, importance_scores=None, **kwargs):
        r"""Add pruning on the fly and reparametrization of a tensor.

        Adds the forward pre-hook that enables pruning on the fly and
        the reparametrization of a tensor in terms of the original tensor
        and the pruning mask.

        Args:
            module (nn.Module): module containing the tensor to prune
            name (str): parameter name within ``module`` on which pruning
                will act.
            args: arguments passed on to a subclass of
                :class:`BasePruningMethod`
            importance_scores (torch.Tensor): tensor of importance scores (of
                same shape as module parameter) used to compute mask for pruning.
                The values in this tensor indicate the importance of the
                corresponding elements in the parameter being pruned.
                If unspecified or None, the parameter will be used in its place.
            kwargs: keyword arguments passed on to a subclass of a
                :class:`BasePruningMethod`
        """

        def _get_composite_method(cls, module, name, *args, **kwargs):
            # Check if a pruning method has already been applied to
            # `module[name]`. If so, store that in `old_method`.
            old_method = None
            found = 0
            # there should technically be only 1 hook with hook.name == name
            # assert this using `found`
            hooks_to_remove = []
            for k, hook in module._forward_pre_hooks.items():
                # if it exists, take existing thing, remove hook, then
                # go through normal thing
                if isinstance(hook, BasePruningMethod) and hook._tensor_name == name:
                    old_method = hook
                    hooks_to_remove.append(k)
                    found += 1
            assert (
                found <= 1
            ), f"Avoid adding multiple pruning hooks to the\
                same tensor {name} of module {module}. Use a PruningContainer."

            # Remove the old hook(s); the (possibly combined) method is
            # re-registered as a fresh hook by the caller below.
            for k in hooks_to_remove:
                del module._forward_pre_hooks[k]

            # Apply the new pruning method, either from scratch or on top of
            # the previous one.
            method = cls(*args, **kwargs)  # new pruning
            # Have the pruning method remember what tensor it's been applied to
            method._tensor_name = name

            # combine `methods` with `old_method`, if `old_method` exists
            if old_method is not None:  # meaning that there was a hook
                # if the hook is already a pruning container, just add the
                # new pruning method to the container
                if isinstance(old_method, PruningContainer):
                    old_method.add_pruning_method(method)
                    method = old_method  # rename old_method --> method

                # if the hook is simply a single pruning method, create a
                # container, add the old pruning method and the new one
                elif isinstance(old_method, BasePruningMethod):
                    container = PruningContainer(old_method)
                    # Have the pruning method remember the name of its tensor
                    # setattr(container, '_tensor_name', name)
                    container.add_pruning_method(method)
                    method = container  # rename container --> method
            return method

        method = _get_composite_method(cls, module, name, *args, **kwargs)
        # at this point we have no forward_pre_hooks but we could have an
        # active reparametrization of the tensor if another pruning method
        # had been applied (in which case `method` would be a PruningContainer
        # and not a simple pruning method).

        # Pruning is to be applied to the module's tensor named `name`,
        # starting from the state it is found in prior to this iteration of
        # pruning. The pruning mask is calculated based on importances scores.

        orig = getattr(module, name)
        if importance_scores is not None:
            assert (
                importance_scores.shape == orig.shape
            ), f"importance_scores should have the same shape as parameter {name} of {module}"
        else:
            importance_scores = orig

        # If this is the first time pruning is applied, take care of moving
        # the original tensor to a new parameter called name + '_orig' and
        # and deleting the original parameter
        if not isinstance(method, PruningContainer):
            # copy `module[name]` to `module[name + '_orig']`
            module.register_parameter(name + "_orig", orig)
            # temporarily delete `module[name]`
            del module._parameters[name]
            default_mask = torch.ones_like(orig)  # temp
        # If this is not the first time pruning is applied, all of the above
        # has been done before in a previous pruning iteration, so we're good
        # to go
        else:
            default_mask = (
                getattr(module, name + "_mask")
                .detach()
                .clone(memory_format=torch.contiguous_format)
            )

        # Use try/except because if anything goes wrong with the mask
        # computation etc., you'd want to roll back.
        try:
            # get the final mask, computed according to the specific method
            mask = method.compute_mask(importance_scores, default_mask=default_mask)
            # reparameterize by saving mask to `module[name + '_mask']`...
            module.register_buffer(name + "_mask", mask)
            # ... and the new pruned tensor to `module[name]`
            setattr(module, name, method.apply_mask(module))
            # associate the pruning method to the module via a hook to
            # compute the function before every forward() (compile by run)
            module.register_forward_pre_hook(method)

        except Exception as e:
            # Roll back only first-time pruning: restore `name` as a plain
            # parameter and drop the `_orig` copy registered above.
            if not isinstance(method, PruningContainer):
                orig = getattr(module, name + "_orig")
                module.register_parameter(name, orig)
                del module._parameters[name + "_orig"]
            raise e

        return method

    def prune(self, t, default_mask=None, importance_scores=None):
        r"""Compute and returns a pruned version of input tensor ``t``.

        According to the pruning rule specified in :meth:`compute_mask`.

        Args:
            t (torch.Tensor): tensor to prune (of same dimensions as
                ``default_mask``).
            importance_scores (torch.Tensor): tensor of importance scores (of
                same shape as ``t``) used to compute mask for pruning ``t``.
                The values in this tensor indicate the importance of the
                corresponding elements in the ``t`` that is being pruned.
                If unspecified or None, the tensor ``t`` will be used in its place.
            default_mask (torch.Tensor, optional): mask from previous pruning
                iteration, if any. To be considered when determining what
                portion of the tensor that pruning should act on. If None,
                default to a mask of ones.

        Returns:
            pruned version of tensor ``t``.
        """
        if importance_scores is not None:
            assert (
                importance_scores.shape == t.shape
            ), "importance_scores should have the same shape as tensor t"
        else:
            importance_scores = t
        default_mask = default_mask if default_mask is not None else torch.ones_like(t)
        return t * self.compute_mask(importance_scores, default_mask=default_mask)

    def remove(self, module):
        r"""Remove the pruning reparameterization from a module.

        The pruned parameter named ``name`` remains permanently pruned,
        and the parameter named ``name+'_orig'`` is removed from the parameter list.
        Similarly, the buffer named ``name+'_mask'`` is removed from the buffers.

        Note:
            Pruning itself is NOT undone or reversed!
        """
        # before removing pruning from a tensor, it has to have been applied
        assert (
            self._tensor_name is not None
        ), f"Module {module} has to be pruned before pruning can be removed"  # this gets set in apply()

        # to update module[name] to latest trained weights
        weight = self.apply_mask(module)  # masked weights

        # delete and reset: the `_orig` parameter takes back the plain name,
        # permanently carrying the masked values.
        if hasattr(module, self._tensor_name):
            delattr(module, self._tensor_name)
        orig = module._parameters[self._tensor_name + "_orig"]
        orig.data = weight.data
        del module._parameters[self._tensor_name + "_orig"]
        del module._buffers[self._tensor_name + "_mask"]
        setattr(module, self._tensor_name, orig)
258
+
259
+
260
class PruningContainer(BasePruningMethod):
    """Container holding a sequence of pruning methods for iterative pruning.

    Keeps track of the order in which pruning methods are applied and handles
    combining successive pruning calls.

    Accepts as argument an instance of a BasePruningMethod or an iterable of
    them.
    """

    def __init__(self, *args):
        self._pruning_methods: Tuple[BasePruningMethod, ...] = tuple()
        # NOTE(review): `args` is always a tuple here, and tuples are
        # Iterable, so this first branch appears unreachable — confirm.
        if not isinstance(args, Iterable):  # only 1 item
            self._tensor_name = args._tensor_name
            self.add_pruning_method(args)
        elif len(args) == 1:  # only 1 item in a tuple
            self._tensor_name = args[0]._tensor_name
            self.add_pruning_method(args[0])
        else:  # manual construction from list or other iterable (or no args)
            for method in args:
                self.add_pruning_method(method)

    def add_pruning_method(self, method):
        r"""Add a child pruning ``method`` to the container.

        Args:
            method (subclass of BasePruningMethod): child pruning method
                to be added to the container.
        """
        # check that we're adding a pruning method to the container
        if not isinstance(method, BasePruningMethod) and method is not None:
            raise TypeError(
                f"{type(method)} is not a BasePruningMethod subclass"
            )
        elif method is not None and self._tensor_name != method._tensor_name:
            raise ValueError(
                "Can only add pruning methods acting on "
                f"the parameter named '{self._tensor_name}' to PruningContainer {self}."
                + f" Found '{method._tensor_name}'"
            )
        # if all checks passed, add to _pruning_methods tuple
        # NOTE(review): a `None` method passes both checks above and is
        # appended to the tuple — presumably intentional upstream; verify.
        self._pruning_methods += (method,)  # type: ignore[operator]

    def __len__(self):
        return len(self._pruning_methods)

    def __iter__(self):
        return iter(self._pruning_methods)

    def __getitem__(self, idx):
        return self._pruning_methods[idx]

    def compute_mask(self, t, default_mask):
        r"""Apply the latest ``method`` by computing the new partial masks and returning its combination with the ``default_mask``.

        The new partial mask should be computed on the entries or channels
        that were not zeroed out by the ``default_mask``.
        Which portions of the tensor ``t`` the new mask will be calculated from
        depends on the ``PRUNING_TYPE`` (handled by the type handler):

        * for 'unstructured', the mask will be computed from the raveled
          list of nonmasked entries;

        * for 'structured', the mask will be computed from the nonmasked
          channels in the tensor;

        * for 'global', the mask will be computed across all entries.

        Args:
            t (torch.Tensor): tensor representing the parameter to prune
                (of same dimensions as ``default_mask``).
            default_mask (torch.Tensor): mask from previous pruning iteration.

        Returns:
            mask (torch.Tensor): new mask that combines the effects
            of the ``default_mask`` and the new mask from the current
            pruning ``method`` (of same dimensions as ``default_mask`` and
            ``t``).
        """

        def _combine_masks(method, t, mask):
            r"""Combine the masks from all pruning methods and returns a new mask.

            Args:
                method (a BasePruningMethod subclass): pruning method
                    currently being applied.
                t (torch.Tensor): tensor representing the parameter to prune
                    (of same dimensions as mask).
                mask (torch.Tensor): mask from previous pruning iteration

            Returns:
                new_mask (torch.Tensor): new mask that combines the effects
                    of the old mask and the new mask from the current
                    pruning method (of same dimensions as mask and t).
            """
            new_mask = mask  # start off from existing mask
            new_mask = new_mask.to(dtype=t.dtype)

            # compute a slice of t onto which the new pruning method will operate
            if method.PRUNING_TYPE == "unstructured":
                # prune entries of t where the mask is 1
                slc = mask == 1

            # for struct pruning, exclude channels that have already been
            # entirely pruned
            elif method.PRUNING_TYPE == "structured":
                if not hasattr(method, "dim"):
                    raise AttributeError(
                        "Pruning methods of PRUNING_TYPE "
                        '"structured" need to have the attribute `dim` defined.'
                    )

                # find the channels to keep by removing the ones that have been
                # zeroed out already (i.e. where sum(entries) == 0)
                n_dims = t.dim()  # "is this a 2D tensor? 3D? ..."
                dim = method.dim
                # convert negative indexing
                if dim < 0:
                    dim = n_dims + dim
                # if dim is still negative after subtracting it from n_dims
                if dim < 0:
                    raise IndexError(
                        f"Index is out of bounds for tensor with dimensions {n_dims}"
                    )
                # find channels along dim = dim that aren't already tots 0ed out
                keep_channel = mask.sum(dim=[d for d in range(n_dims) if d != dim]) != 0
                # create slice to identify what to prune
                slc = [slice(None)] * n_dims
                slc[dim] = keep_channel

            elif method.PRUNING_TYPE == "global":
                # global pruning operates on the whole tensor: a no-op slice
                n_dims = len(t.shape)  # "is this a 2D tensor? 3D? ..."
                slc = [slice(None)] * n_dims

            else:
                raise ValueError(
                    f"Unrecognized PRUNING_TYPE {method.PRUNING_TYPE}"
                )

            # compute the new mask on the unpruned slice of the tensor t
            partial_mask = method.compute_mask(t[slc], default_mask=mask[slc])
            new_mask[slc] = partial_mask.to(dtype=new_mask.dtype)

            return new_mask

        # Only the most recently added method produces new pruning; the
        # effect of earlier methods is already baked into `default_mask`.
        method = self._pruning_methods[-1]
        mask = _combine_masks(method, t, default_mask)
        return mask
408
+
409
+
410
class Identity(BasePruningMethod):
    r"""No-op pruning method.

    Prunes nothing, but installs the full pruning parametrization: a mask of
    ones, the ``name + '_orig'`` parameter, and the forward pre-hook, so the
    module is wired exactly like a pruned one.
    """

    PRUNING_TYPE = "unstructured"

    def compute_mask(self, t, default_mask):
        # Identity pruning: whatever mask already exists is returned untouched.
        return default_mask

    @classmethod
    def apply(cls, module, name):
        r"""Add pruning on the fly and reparametrization of a tensor.

        Adds the forward pre-hook that enables pruning on the fly and
        the reparametrization of a tensor in terms of the original tensor
        and the pruning mask.

        Args:
            module (nn.Module): module containing the tensor to prune
            name (str): parameter name within ``module`` on which pruning
                will act.
        """
        return super().apply(module, name)
433
+
434
+
435
class RandomUnstructured(BasePruningMethod):
    r"""Prune (currently unpruned) units in a tensor at random.

    Args:
        amount (int or float): quantity of parameters to prune.
            If ``float``, should be between 0.0 and 1.0 and represent the
            fraction of parameters to prune. If ``int``, it represents the
            absolute number of parameters to prune.
    """

    PRUNING_TYPE = "unstructured"

    def __init__(self, amount):
        # Reject out-of-range / wrongly-typed amounts up front.
        _validate_pruning_amount_init(amount)
        self.amount = amount

    def compute_mask(self, t, default_mask):
        n_total = t.nelement()
        # Turn `amount` (fraction or absolute) into an absolute unit count.
        n_prune = _compute_nparams_toprune(self.amount, n_total)
        # Error out if asked to prune more units than the tensor holds.
        _validate_pruning_amount(n_prune, n_total)

        mask = default_mask.clone(memory_format=torch.contiguous_format)

        if n_prune != 0:  # topk with k=0 is not supported
            # Assign each entry a uniform random score and zero out the
            # n_prune entries with the highest scores.
            scores = torch.rand_like(t)
            chosen = torch.topk(scores.view(-1), k=n_prune)
            mask.view(-1)[chosen.indices] = 0

        return mask

    @classmethod
    def apply(cls, module, name, amount):
        r"""Add pruning on the fly and reparametrization of a tensor.

        Adds the forward pre-hook that enables pruning on the fly and
        the reparametrization of a tensor in terms of the original tensor
        and the pruning mask.

        Args:
            module (nn.Module): module containing the tensor to prune
            name (str): parameter name within ``module`` on which pruning
                will act.
            amount (int or float): quantity of parameters to prune.
                If ``float``, should be between 0.0 and 1.0 and represent the
                fraction of parameters to prune. If ``int``, it represents the
                absolute number of parameters to prune.
        """
        return super().apply(module, name, amount=amount)
492
+
493
+
494
class L1Unstructured(BasePruningMethod):
    r"""Prune (currently unpruned) units in a tensor by zeroing out the ones with the lowest L1-norm.

    Args:
        amount (int or float): quantity of parameters to prune.
            If ``float``, should be between 0.0 and 1.0 and represent the
            fraction of parameters to prune. If ``int``, it represents the
            absolute number of parameters to prune.
    """

    PRUNING_TYPE = "unstructured"

    def __init__(self, amount):
        # Reject out-of-range / wrongly-typed amounts up front.
        _validate_pruning_amount_init(amount)
        self.amount = amount

    def compute_mask(self, t, default_mask):
        n_total = t.nelement()
        # Turn `amount` (fraction or absolute) into an absolute unit count.
        n_prune = _compute_nparams_toprune(self.amount, n_total)
        # Error out if asked to prune more units than the tensor holds.
        _validate_pruning_amount(n_prune, n_total)

        mask = default_mask.clone(memory_format=torch.contiguous_format)

        if n_prune != 0:  # topk with k=0 is not supported
            # largest=False selects the bottom-k: the entries whose absolute
            # value (L1 importance) is smallest are the ones zeroed out.
            flat_magnitudes = torch.abs(t).view(-1)
            bottom = torch.topk(flat_magnitudes, k=n_prune, largest=False)
            mask.view(-1)[bottom.indices] = 0

        return mask

    @classmethod
    def apply(cls, module, name, amount, importance_scores=None):
        r"""Add pruning on the fly and reparametrization of a tensor.

        Adds the forward pre-hook that enables pruning on the fly and
        the reparametrization of a tensor in terms of the original tensor
        and the pruning mask.

        Args:
            module (nn.Module): module containing the tensor to prune
            name (str): parameter name within ``module`` on which pruning
                will act.
            amount (int or float): quantity of parameters to prune.
                If ``float``, should be between 0.0 and 1.0 and represent the
                fraction of parameters to prune. If ``int``, it represents the
                absolute number of parameters to prune.
            importance_scores (torch.Tensor): tensor of importance scores (of same
                shape as module parameter) used to compute mask for pruning.
                The values in this tensor indicate the importance of the corresponding
                elements in the parameter being pruned.
                If unspecified or None, the module parameter will be used in its place.
        """
        return super().apply(
            module, name, amount=amount, importance_scores=importance_scores
        )
558
+
559
+
560
class RandomStructured(BasePruningMethod):
    r"""Prune entire (currently unpruned) channels in a tensor at random.

    Args:
        amount (int or float): quantity of parameters to prune.
            If ``float``, should be between 0.0 and 1.0 and represent the
            fraction of parameters to prune. If ``int``, it represents the
            absolute number of parameters to prune.
        dim (int, optional): index of the dim along which we define
            channels to prune. Default: -1.
    """

    PRUNING_TYPE = "structured"

    def __init__(self, amount, dim=-1):
        # Check range of validity of amount
        _validate_pruning_amount_init(amount)
        self.amount = amount
        self.dim = dim

    def compute_mask(self, t, default_mask):
        r"""Compute and returns a mask for the input tensor ``t``.

        Starting from a base ``default_mask`` (which should be a mask of ones
        if the tensor has not been pruned yet), generate a random mask to
        apply on top of the ``default_mask`` by randomly zeroing out channels
        along the specified dim of the tensor.

        Args:
            t (torch.Tensor): tensor representing the parameter to prune
            default_mask (torch.Tensor): Base mask from previous pruning
                iterations, that need to be respected after the new mask is
                applied. Same dims as ``t``.

        Returns:
            mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``

        Raises:
            IndexError: if ``self.dim >= len(t.shape)``
        """
        # Check that tensor has structure (i.e. more than 1 dimension) such
        # that the concept of "channels" makes sense
        _validate_structured_pruning(t)

        # Check that self.dim is a valid dim to index t, else raise IndexError
        _validate_pruning_dim(t, self.dim)

        # Check that the amount of channels to prune is not > than the number of
        # channels in t along the dim to prune
        tensor_size = t.shape[self.dim]
        # Compute number of units to prune: amount if int,
        # else amount * tensor_size
        nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
        # This should raise an error if the number of units to prune is larger
        # than the number of units in the tensor
        _validate_pruning_amount(nparams_toprune, tensor_size)

        # Compute binary mask by initializing it to all 0s and then filling in
        # 1s wherever topk.indices indicates, along self.dim.
        # mask has the same shape as tensor t
        def make_mask(t, dim, nchannels, nchannels_toprune):
            # generate a random number in [0, 1] to associate to each channel
            prob = torch.rand(nchannels)
            # generate mask for each channel by 0ing out the channels that
            # got assigned the k = nchannels_toprune lowest values in prob
            threshold = torch.kthvalue(prob, k=nchannels_toprune).values
            # NOTE(review): strict `>` prunes the kth value itself; any exact
            # ties at the threshold would also be pruned, though with
            # continuous uniform draws ties are vanishingly unlikely.
            channel_mask = prob > threshold

            mask = torch.zeros_like(t)
            slc = [slice(None)] * len(t.shape)
            slc[dim] = channel_mask
            mask[slc] = 1
            return mask

        if nparams_toprune == 0:  # k=0 not supported by torch.kthvalue
            mask = default_mask
        else:
            # apply the new structured mask on top of prior (potentially
            # unstructured) mask
            mask = make_mask(t, self.dim, tensor_size, nparams_toprune)
            mask *= default_mask.to(dtype=mask.dtype)
        return mask

    @classmethod
    def apply(cls, module, name, amount, dim=-1):
        r"""Add pruning on the fly and reparametrization of a tensor.

        Adds the forward pre-hook that enables pruning on the fly and
        the reparametrization of a tensor in terms of the original tensor
        and the pruning mask.

        Args:
            module (nn.Module): module containing the tensor to prune
            name (str): parameter name within ``module`` on which pruning
                will act.
            amount (int or float): quantity of parameters to prune.
                If ``float``, should be between 0.0 and 1.0 and represent the
                fraction of parameters to prune. If ``int``, it represents the
                absolute number of parameters to prune.
            dim (int, optional): index of the dim along which we define
                channels to prune. Default: -1.
        """
        return super().apply(module, name, amount=amount, dim=dim)
663
+
664
+
665
+ class LnStructured(BasePruningMethod):
666
+ r"""Prune entire (currently unpruned) channels in a tensor based on their L\ ``n``-norm.
667
+
668
+ Args:
669
+ amount (int or float): quantity of channels to prune.
670
+ If ``float``, should be between 0.0 and 1.0 and represent the
671
+ fraction of parameters to prune. If ``int``, it represents the
672
+ absolute number of parameters to prune.
673
+ n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
674
+ entries for argument ``p`` in :func:`torch.norm`.
675
+ dim (int, optional): index of the dim along which we define
676
+ channels to prune. Default: -1.
677
+ """
678
+
679
+ PRUNING_TYPE = "structured"
680
+
681
    def __init__(self, amount, n, dim=-1):
        # Check range of validity of amount (fraction in [0, 1] or
        # non-negative absolute count).
        _validate_pruning_amount_init(amount)
        self.amount = amount
        # `n` is the norm order forwarded to the norm computation
        # (any valid `p` for torch.norm); `dim` selects the channel axis.
        self.n = n
        self.dim = dim
687
+
688
    def compute_mask(self, t, default_mask):
        r"""Compute and returns a mask for the input tensor ``t``.

        Starting from a base ``default_mask`` (which should be a mask of ones
        if the tensor has not been pruned yet), generate a mask to apply on
        top of the ``default_mask`` by zeroing out the channels along the
        specified dim with the lowest L\ ``n``-norm.

        Args:
            t (torch.Tensor): tensor representing the parameter to prune
            default_mask (torch.Tensor): Base mask from previous pruning
                iterations, that need to be respected after the new mask is
                applied. Same dims as ``t``.

        Returns:
            mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``

        Raises:
            IndexError: if ``self.dim >= len(t.shape)``
        """
        # Check that tensor has structure (i.e. more than 1 dimension) such
        # that the concept of "channels" makes sense
        _validate_structured_pruning(t)
        # Check that self.dim is a valid dim to index t, else raise IndexError
        _validate_pruning_dim(t, self.dim)

        # Check that the amount of channels to prune is not > than the number of
        # channels in t along the dim to prune
        tensor_size = t.shape[self.dim]
        # Compute number of units to prune: amount if int,
        # else amount * tensor_size
        nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
        nparams_tokeep = tensor_size - nparams_toprune
        # This should raise an error if the number of units to prune is larger
        # than the number of units in the tensor
        _validate_pruning_amount(nparams_toprune, tensor_size)

        # Structured pruning prunes entire channels so we need to know the
        # L_n norm along each channel to then find the topk based on this
        # metric
        norm = _compute_norm(t, self.n, self.dim)
        # largest=True --> top k; largest=False --> bottom k
        # Keep the largest k channels along dim=self.dim
        topk = torch.topk(norm, k=nparams_tokeep, largest=True)
        # topk will have .indices and .values

        # Compute binary mask by initializing it to all 0s and then filling in
        # 1s wherever topk.indices indicates, along self.dim.
        # mask has the same shape as tensor t
        def make_mask(t, dim, indices):
            # init mask to 0
            mask = torch.zeros_like(t)
            # e.g.: slc = [None, None, None], if len(t.shape) = 3
            slc = [slice(None)] * len(t.shape)
            # replace a None at position=dim with indices
            # e.g.: slc = [None, None, [0, 2, 3]] if dim=2 & indices=[0,2,3]
            slc[dim] = indices
            # use slc to slice mask and replace all its entries with 1s
            # e.g.: mask[:, :, [0, 2, 3]] = 1
            mask[slc] = 1
            return mask

        if nparams_toprune == 0:  # k=0 not supported by torch.kthvalue
            mask = default_mask
        else:
            # Channels kept by the norm criterion get 1s; everything else 0.
            mask = make_mask(t, self.dim, topk.indices)
            # Respect prior pruning: a previously-zeroed entry stays zero.
            mask *= default_mask.to(dtype=mask.dtype)

        return mask
757
+
758
+ @classmethod
759
+ def apply(cls, module, name, amount, n, dim, importance_scores=None):
760
+ r"""Add pruning on the fly and reparametrization of a tensor.
761
+
762
+ Adds the forward pre-hook that enables pruning on the fly and
763
+ the reparametrization of a tensor in terms of the original tensor
764
+ and the pruning mask.
765
+
766
+ Args:
767
+ module (nn.Module): module containing the tensor to prune
768
+ name (str): parameter name within ``module`` on which pruning
769
+ will act.
770
+ amount (int or float): quantity of parameters to prune.
771
+ If ``float``, should be between 0.0 and 1.0 and represent the
772
+ fraction of parameters to prune. If ``int``, it represents the
773
+ absolute number of parameters to prune.
774
+ n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
775
+ entries for argument ``p`` in :func:`torch.norm`.
776
+ dim (int): index of the dim along which we define channels to
777
+ prune.
778
+ importance_scores (torch.Tensor): tensor of importance scores (of same
779
+ shape as module parameter) used to compute mask for pruning.
780
+ The values in this tensor indicate the importance of the corresponding
781
+ elements in the parameter being pruned.
782
+ If unspecified or None, the module parameter will be used in its place.
783
+ """
784
+ return super().apply(
785
+ module,
786
+ name,
787
+ amount=amount,
788
+ n=n,
789
+ dim=dim,
790
+ importance_scores=importance_scores,
791
+ )
792
+
793
+
794
class CustomFromMask(BasePruningMethod):
    """Prune a tensor using a user-provided, pre-computed mask."""

    PRUNING_TYPE = "global"

    def __init__(self, mask):
        # The mask is applied as-is in compute_mask; no validation here.
        self.mask = mask

    def compute_mask(self, t, default_mask):
        # The stored mask must line up element-for-element with the mask
        # accumulated by previous pruning iterations.
        assert default_mask.shape == self.mask.shape
        return default_mask * self.mask.to(dtype=default_mask.dtype)

    @classmethod
    def apply(cls, module, name, mask):
        r"""Add pruning on the fly and reparametrization of a tensor.

        Adds the forward pre-hook that enables pruning on the fly and
        the reparametrization of a tensor in terms of the original tensor
        and the pruning mask.

        Args:
            module (nn.Module): module containing the tensor to prune
            name (str): parameter name within ``module`` on which pruning
                will act.
            mask: pre-computed mask to apply to the parameter.
        """
        return super().apply(module, name, mask=mask)
820
+
821
+
822
def identity(module, name):
    r"""Apply pruning reparametrization without pruning any units.

    Applies pruning reparametrization to the tensor corresponding to the
    parameter called ``name`` in ``module`` without actually pruning any
    units: the generated mask is all ones. Modifies the module in place
    (and also returns it) by:

    1) adding a named buffer called ``name+'_mask'`` corresponding to the
       binary mask applied to the parameter ``name`` by the pruning method.
    2) replacing the parameter ``name`` by its pruned version, while the
       original (unpruned) parameter is stored in a new parameter named
       ``name+'_orig'``.

    Args:
        module (nn.Module): module containing the tensor to prune.
        name (str): parameter name within ``module`` on which pruning
            will act.

    Returns:
        module (nn.Module): modified (i.e. pruned) version of the input module

    Examples:
        >>> # xdoctest: +SKIP
        >>> m = prune.identity(nn.Linear(2, 3), 'bias')
        >>> print(m.bias_mask)
        tensor([1., 1., 1.])
    """
    # Identity is a no-op pruning method; applying it just sets up the
    # <name>_orig / <name>_mask reparametrization.
    Identity.apply(module, name)
    return module
855
+
856
+
857
def random_unstructured(module, name, amount):
    r"""Prune tensor by removing random (currently unpruned) units.

    Prunes tensor corresponding to parameter called ``name`` in ``module``
    by removing the specified ``amount`` of (currently unpruned) units
    selected at random.
    Modifies module in place (and also return the modified module) by:

    1) adding a named buffer called ``name+'_mask'`` corresponding to the
       binary mask applied to the parameter ``name`` by the pruning method.
    2) replacing the parameter ``name`` by its pruned version, while the
       original (unpruned) parameter is stored in a new parameter named
       ``name+'_orig'``.

    Args:
        module (nn.Module): module containing the tensor to prune
        name (str): parameter name within ``module`` on which pruning
            will act.
        amount (int or float): quantity of parameters to prune.
            If ``float``, should be between 0.0 and 1.0 and represent the
            fraction of parameters to prune. If ``int``, it represents the
            absolute number of parameters to prune.

    Returns:
        module (nn.Module): modified (i.e. pruned) version of the input module

    Examples:
        >>> # xdoctest: +SKIP
        >>> m = prune.random_unstructured(nn.Linear(2, 3), 'weight', amount=1)
        >>> torch.sum(m.weight_mask == 0)
        tensor(1)

    """
    # Pass `amount` by keyword for consistency with l1_unstructured.
    RandomUnstructured.apply(module, name, amount=amount)
    return module
892
+
893
+
894
def l1_unstructured(module, name, amount, importance_scores=None):
    r"""Prune tensor by removing units with the lowest L1-norm.

    Prunes tensor corresponding to parameter called ``name`` in ``module``
    by removing the specified `amount` of (currently unpruned) units with the
    lowest L1-norm.
    Modifies module in place (and also returns it) by:

    1) adding a named buffer called ``name+'_mask'`` corresponding to the
       binary mask applied to the parameter ``name`` by the pruning method.
    2) replacing the parameter ``name`` by its pruned version, while the
       original (unpruned) parameter is stored in a new parameter named
       ``name+'_orig'``.

    Args:
        module (nn.Module): module containing the tensor to prune
        name (str): parameter name within ``module`` on which pruning
            will act.
        amount (int or float): quantity of parameters to prune.
            If ``float``, should be between 0.0 and 1.0 and represent the
            fraction of parameters to prune. If ``int``, it represents the
            absolute number of parameters to prune.
        importance_scores (torch.Tensor): tensor of importance scores (of same
            shape as module parameter) used to compute mask for pruning.
            The values in this tensor indicate the importance of the corresponding
            elements in the parameter being pruned.
            If unspecified or None, the module parameter will be used in its place.

    Returns:
        module (nn.Module): modified (i.e. pruned) version of the input module

    Examples:
        >>> # xdoctest: +SKIP
        >>> m = prune.l1_unstructured(nn.Linear(2, 3), 'weight', amount=0.2)
        >>> m.state_dict().keys()
        odict_keys(['bias', 'weight_orig', 'weight_mask'])
    """
    L1Unstructured.apply(module, name, amount=amount, importance_scores=importance_scores)
    return module
936
+
937
+
938
def random_structured(module, name, amount, dim):
    r"""Prune tensor by removing random channels along the specified dimension.

    Prunes tensor corresponding to parameter called ``name`` in ``module``
    by removing the specified ``amount`` of (currently unpruned) channels
    along the specified ``dim`` selected at random.
    Modifies module in place (and also return the modified module)
    by:

    1) adding a named buffer called ``name+'_mask'`` corresponding to the
       binary mask applied to the parameter ``name`` by the pruning method.
    2) replacing the parameter ``name`` by its pruned version, while the
       original (unpruned) parameter is stored in a new parameter named
       ``name+'_orig'``.

    Args:
        module (nn.Module): module containing the tensor to prune
        name (str): parameter name within ``module`` on which pruning
            will act.
        amount (int or float): quantity of parameters to prune.
            If ``float``, should be between 0.0 and 1.0 and represent the
            fraction of parameters to prune. If ``int``, it represents the
            absolute number of parameters to prune.
        dim (int): index of the dim along which we define channels to prune.

    Returns:
        module (nn.Module): modified (i.e. pruned) version of the input module

    Examples:
        >>> # xdoctest: +SKIP
        >>> m = prune.random_structured(
        ...     nn.Linear(5, 3), 'weight', amount=3, dim=1
        ... )
        >>> columns_pruned = int(sum(torch.sum(m.weight, dim=0) == 0))
        >>> print(columns_pruned)
        3
    """
    # Pass arguments by keyword for consistency with l1_unstructured.
    RandomStructured.apply(module, name, amount=amount, dim=dim)
    return module
977
+
978
+
979
def ln_structured(module, name, amount, n, dim, importance_scores=None):
    r"""Prune tensor by removing channels with the lowest L\ ``n``-norm along the specified dimension.

    Prunes tensor corresponding to parameter called ``name`` in ``module``
    by removing the specified ``amount`` of (currently unpruned) channels
    along the specified ``dim`` with the lowest L\ ``n``-norm.
    Modifies module in place (and also return the modified module)
    by:

    1) adding a named buffer called ``name+'_mask'`` corresponding to the
       binary mask applied to the parameter ``name`` by the pruning method.
    2) replacing the parameter ``name`` by its pruned version, while the
       original (unpruned) parameter is stored in a new parameter named
       ``name+'_orig'``.

    Args:
        module (nn.Module): module containing the tensor to prune
        name (str): parameter name within ``module`` on which pruning
            will act.
        amount (int or float): quantity of parameters to prune.
            If ``float``, should be between 0.0 and 1.0 and represent the
            fraction of parameters to prune. If ``int``, it represents the
            absolute number of parameters to prune.
        n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
            entries for argument ``p`` in :func:`torch.norm`.
        dim (int): index of the dim along which we define channels to prune.
        importance_scores (torch.Tensor): tensor of importance scores (of same
            shape as module parameter) used to compute mask for pruning.
            The values in this tensor indicate the importance of the corresponding
            elements in the parameter being pruned.
            If unspecified or None, the module parameter will be used in its place.

    Returns:
        module (nn.Module): modified (i.e. pruned) version of the input module

    Examples:
        >>> from torch.nn.utils import prune
        >>> m = prune.ln_structured(
        ...     nn.Conv2d(5, 3, 2), 'weight', amount=0.3, dim=1, n=float('-inf')
        ... )
    """
    # Pass arguments by keyword for consistency with l1_unstructured.
    LnStructured.apply(
        module, name, amount=amount, n=n, dim=dim, importance_scores=importance_scores
    )
    return module
1024
+
1025
+
1026
def global_unstructured(parameters, pruning_method, importance_scores=None, **kwargs):
    r"""
    Globally prunes tensors corresponding to all parameters in ``parameters`` by applying the specified ``pruning_method``.

    Modifies modules in place by:

    1) adding a named buffer called ``name+'_mask'`` corresponding to the
       binary mask applied to the parameter ``name`` by the pruning method.
    2) replacing the parameter ``name`` by its pruned version, while the
       original (unpruned) parameter is stored in a new parameter named
       ``name+'_orig'``.

    Args:
        parameters (Iterable of (module, name) tuples): parameters of
            the model to prune in a global fashion, i.e. by aggregating all
            weights prior to deciding which ones to prune. module must be of
            type :class:`nn.Module`, and name must be a string.
        pruning_method (function): a valid pruning function from this module,
            or a custom one implemented by the user that satisfies the
            implementation guidelines and has ``PRUNING_TYPE='unstructured'``.
        importance_scores (dict): a dictionary mapping (module, name) tuples to
            the corresponding parameter's importance scores tensor. The tensor
            should be the same shape as the parameter, and is used for computing
            mask for pruning.
            If unspecified or None, the parameter will be used in place of its
            importance scores.
        kwargs: other keyword arguments such as:
            amount (int or float): quantity of parameters to prune across the
            specified parameters.
            If ``float``, should be between 0.0 and 1.0 and represent the
            fraction of parameters to prune. If ``int``, it represents the
            absolute number of parameters to prune.

    Raises:
        TypeError: if ``PRUNING_TYPE != 'unstructured'``

    Note:
        Since global structured pruning doesn't make much sense unless the
        norm is normalized by the size of the parameter, we now limit the
        scope of global pruning to unstructured methods.

    Examples:
        >>> from torch.nn.utils import prune
        >>> from collections import OrderedDict
        >>> net = nn.Sequential(OrderedDict([
        ...     ('first', nn.Linear(10, 4)),
        ...     ('second', nn.Linear(4, 1)),
        ... ]))
        >>> parameters_to_prune = (
        ...     (net.first, 'weight'),
        ...     (net.second, 'weight'),
        ... )
        >>> prune.global_unstructured(
        ...     parameters_to_prune,
        ...     pruning_method=prune.L1Unstructured,
        ...     amount=10,
        ... )
        >>> print(sum(torch.nn.utils.parameters_to_vector(net.buffers()) == 0))
        tensor(10)

    """
    # ensure parameters is a list or generator of tuples
    # NOTE(review): `parameters` is iterated three times below (two list
    # comprehensions plus the final loop), so a one-shot generator would be
    # exhausted after the first pass — callers should pass a list/tuple.
    if not isinstance(parameters, Iterable):
        raise TypeError("global_unstructured(): parameters is not an Iterable")

    importance_scores = importance_scores if importance_scores is not None else {}
    if not isinstance(importance_scores, dict):
        raise TypeError("global_unstructured(): importance_scores must be of type dict")

    # flatten importance scores to consider them all at once in global pruning
    relevant_importance_scores = torch.nn.utils.parameters_to_vector(
        [
            importance_scores.get((module, name), getattr(module, name))
            for (module, name) in parameters
        ]
    )
    # similarly, flatten the masks (if they exist), or use a flattened vector
    # of 1s of the same dimensions as t
    default_mask = torch.nn.utils.parameters_to_vector(
        [
            getattr(module, name + "_mask", torch.ones_like(getattr(module, name)))
            for (module, name) in parameters
        ]
    )

    # use the canonical pruning methods to compute the new mask, even if the
    # parameter is now a flattened out version of `parameters`
    container = PruningContainer()
    container._tensor_name = "temp"  # to make it match that of `method`
    method = pruning_method(**kwargs)
    method._tensor_name = "temp"  # to make it match that of `container`
    if method.PRUNING_TYPE != "unstructured":
        raise TypeError(
            'Only "unstructured" PRUNING_TYPE supported for '
            f"the `pruning_method`. Found method {pruning_method} of type {method.PRUNING_TYPE}"
        )

    container.add_pruning_method(method)

    # use the `compute_mask` method from `PruningContainer` to combine the
    # mask computed by the new method with the pre-existing mask
    final_mask = container.compute_mask(relevant_importance_scores, default_mask)

    # Pointer for slicing the mask to match the shape of each parameter
    pointer = 0
    for module, name in parameters:

        param = getattr(module, name)
        # The length of the parameter
        num_param = param.numel()
        # Slice the mask, reshape it
        param_mask = final_mask[pointer : pointer + num_param].view_as(param)
        # Assign the correct pre-computed mask to each parameter and add it
        # to the forward_pre_hooks like any other pruning method
        custom_from_mask(module, name, mask=param_mask)

        # Increment the pointer to continue slicing the final_mask
        pointer += num_param
1144
+
1145
+
1146
def custom_from_mask(module, name, mask):
    r"""Prune tensor corresponding to parameter called ``name`` in ``module`` by applying the pre-computed mask in ``mask``.

    Modifies module in place (and also return the modified module) by:

    1) adding a named buffer called ``name+'_mask'`` corresponding to the
       binary mask applied to the parameter ``name`` by the pruning method.
    2) replacing the parameter ``name`` by its pruned version, while the
       original (unpruned) parameter is stored in a new parameter named
       ``name+'_orig'``.

    Args:
        module (nn.Module): module containing the tensor to prune
        name (str): parameter name within ``module`` on which pruning
            will act.
        mask (Tensor): binary mask to be applied to the parameter.

    Returns:
        module (nn.Module): modified (i.e. pruned) version of the input module

    Examples:
        >>> from torch.nn.utils import prune
        >>> m = prune.custom_from_mask(
        ...     nn.Linear(5, 3), name='bias', mask=torch.tensor([0, 1, 0])
        ... )
        >>> print(m.bias_mask)
        tensor([0., 1., 0.])

    """
    # Pass `mask` by keyword for consistency with the other wrappers and
    # with CustomFromMask.apply's own signature.
    CustomFromMask.apply(module, name, mask=mask)
    return module
1177
+
1178
+
1179
def remove(module, name):
    r"""Remove the pruning reparameterization from a module and the pruning method from the forward hook.

    After this call the pruned parameter named ``name`` remains permanently
    pruned; the parameter ``name+'_orig'`` is removed from the parameter
    list and the buffer ``name+'_mask'`` is removed from the buffers.

    Note:
        Pruning itself is NOT undone or reversed!

    Args:
        module (nn.Module): module containing the tensor to prune
        name (str): parameter name within ``module`` on which pruning
            will act.

    Raises:
        ValueError: if ``name`` has not been pruned on this module.

    Examples:
        >>> m = random_unstructured(nn.Linear(5, 7), name='weight', amount=0.2)
        >>> m = remove(m, name='weight')
    """
    for hook_key, hook in module._forward_pre_hooks.items():
        if not (isinstance(hook, BasePruningMethod) and hook._tensor_name == name):
            continue
        # Found the pruning hook for this parameter: make the pruning
        # permanent, then deregister the hook and return immediately
        # (so the dict is never mutated while still being iterated).
        hook.remove(module)
        del module._forward_pre_hooks[hook_key]
        return module

    raise ValueError(
        f"Parameter '{name}' of module {module} has to be pruned before pruning can be removed"
    )
1207
+
1208
+
1209
def is_pruned(module):
    r"""Check if a module is pruned by looking for pruning pre-hooks.

    A module counts as pruned when any of its submodules (itself included,
    as reported by ``named_modules``) carries a forward pre-hook that is an
    instance of :class:`BasePruningMethod`.

    Args:
        module (nn.Module): object that is either pruned or unpruned

    Returns:
        binary answer to whether ``module`` is pruned.

    Examples:
        >>> from torch.nn.utils import prune
        >>> m = nn.Linear(5, 7)
        >>> print(prune.is_pruned(m))
        False
        >>> prune.random_unstructured(m, name='weight', amount=0.2)
        >>> print(prune.is_pruned(m))
        True
    """
    return any(
        isinstance(hook, BasePruningMethod)
        for _, submodule in module.named_modules()
        for hook in submodule._forward_pre_hooks.values()
    )
1236
+
1237
+
1238
+ def _validate_pruning_amount_init(amount):
1239
+ r"""Validate helper to check the range of amount at init.
1240
+
1241
+ Args:
1242
+ amount (int or float): quantity of parameters to prune.
1243
+ If float, should be between 0.0 and 1.0 and represent the
1244
+ fraction of parameters to prune. If int, it represents the
1245
+ absolute number of parameters to prune.
1246
+
1247
+ Raises:
1248
+ ValueError: if amount is a float not in [0, 1], or if it's a negative
1249
+ integer.
1250
+ TypeError: if amount is neither a float nor an integer.
1251
+
1252
+ Note:
1253
+ This does not take into account the number of parameters in the
1254
+ tensor to be pruned, which is known only at prune.
1255
+ """
1256
+ if not isinstance(amount, numbers.Real):
1257
+ raise TypeError(
1258
+ f"Invalid type for amount: {amount}. Must be int or float."
1259
+ )
1260
+
1261
+ if (isinstance(amount, numbers.Integral) and amount < 0) or (
1262
+ not isinstance(amount, numbers.Integral) # so it's a float
1263
+ and (float(amount) > 1.0 or float(amount) < 0.0)
1264
+ ):
1265
+ raise ValueError(
1266
+ f"amount={amount} should either be a float in the range [0, 1] or a non-negative integer"
1267
+ )
1268
+
1269
+
1270
+ def _validate_pruning_amount(amount, tensor_size):
1271
+ r"""Validate that the pruning amount is meaningful wrt to the size of the data.
1272
+
1273
+ Validation helper to check that the amount of parameters to prune
1274
+ is meaningful wrt to the size of the data (`tensor_size`).
1275
+
1276
+ Args:
1277
+ amount (int or float): quantity of parameters to prune.
1278
+ If float, should be between 0.0 and 1.0 and represent the
1279
+ fraction of parameters to prune. If int, it represents the
1280
+ absolute number of parameters to prune.
1281
+ tensor_size (int): absolute number of parameters in the tensor
1282
+ to prune.
1283
+ """
1284
+ # TODO: consider removing this check and allowing users to specify
1285
+ # a number of units to prune that is greater than the number of units
1286
+ # left to prune. In this case, the tensor will just be fully pruned.
1287
+
1288
+ if isinstance(amount, numbers.Integral) and amount > tensor_size:
1289
+ raise ValueError(
1290
+ f"amount={amount} should be smaller than the number of parameters to prune={tensor_size}"
1291
+ )
1292
+
1293
+
1294
+ def _validate_structured_pruning(t):
1295
+ r"""Validate that the tensor to be pruned is at least 2-Dimensional.
1296
+
1297
+ Validation helper to check that the tensor to be pruned is multi-
1298
+ dimensional, such that the concept of "channels" is well-defined.
1299
+
1300
+ Args:
1301
+ t (torch.Tensor): tensor representing the parameter to prune
1302
+
1303
+ Raises:
1304
+ ValueError: if the tensor `t` is not at least 2D.
1305
+ """
1306
+ shape = t.shape
1307
+ if len(shape) <= 1:
1308
+ raise ValueError(
1309
+ "Structured pruning can only be applied to "
1310
+ "multidimensional tensors. Found tensor of shape "
1311
+ f"{shape} with {len(shape)} dims"
1312
+ )
1313
+
1314
+
1315
+ def _compute_nparams_toprune(amount, tensor_size):
1316
+ r"""Convert the pruning amount from a percentage to absolute value.
1317
+
1318
+ Since amount can be expressed either in absolute value or as a
1319
+ percentage of the number of units/channels in a tensor, this utility
1320
+ function converts the percentage to absolute value to standardize
1321
+ the handling of pruning.
1322
+
1323
+ Args:
1324
+ amount (int or float): quantity of parameters to prune.
1325
+ If float, should be between 0.0 and 1.0 and represent the
1326
+ fraction of parameters to prune. If int, it represents the
1327
+ absolute number of parameters to prune.
1328
+ tensor_size (int): absolute number of parameters in the tensor
1329
+ to prune.
1330
+
1331
+ Returns:
1332
+ int: the number of units to prune in the tensor
1333
+ """
1334
+ # incorrect type already checked in _validate_pruning_amount_init
1335
+ if isinstance(amount, numbers.Integral):
1336
+ return amount
1337
+ else:
1338
+ return round(amount * tensor_size)
1339
+
1340
+
1341
+ def _validate_pruning_dim(t, dim):
1342
+ r"""Validate that the pruning dimension is within the bounds of the tensor dimension.
1343
+
1344
+ Args:
1345
+ t (torch.Tensor): tensor representing the parameter to prune
1346
+ dim (int): index of the dim along which we define channels to prune
1347
+ """
1348
+ if dim >= t.dim():
1349
+ raise IndexError(f"Invalid index {dim} for tensor of size {t.shape}")
1350
+
1351
+
1352
+ def _compute_norm(t, n, dim):
1353
+ r"""Compute the L_n-norm of a tensor along all dimensions except for the specified dimension.
1354
+
1355
+ The L_n-norm will be computed across all entries in tensor `t` along all dimension
1356
+ except for the one identified by dim.
1357
+ Example: if `t` is of shape, say, 3x2x4 and dim=2 (the last dim),
1358
+ then norm will have Size [4], and each entry will represent the
1359
+ `L_n`-norm computed using the 3x2=6 entries for each of the 4 channels.
1360
+
1361
+ Args:
1362
+ t (torch.Tensor): tensor representing the parameter to prune
1363
+ n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
1364
+ entries for argument p in torch.norm
1365
+ dim (int): dim identifying the channels to prune
1366
+
1367
+ Returns:
1368
+ norm (torch.Tensor): L_n norm computed across all dimensions except
1369
+ for `dim`. By construction, `norm.shape = t.shape[-1]`.
1370
+ """
1371
+ # dims = all axes, except for the one identified by `dim`
1372
+ dims = list(range(t.dim()))
1373
+ # convert negative indexing
1374
+ if dim < 0:
1375
+ dim = dims[dim]
1376
+ dims.remove(dim)
1377
+
1378
+ norm = torch.norm(t, p=n, dim=dims)
1379
+ return norm
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/rnn.py ADDED
@@ -0,0 +1,517 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from typing import Iterable, List, NamedTuple, Tuple, Union
3
+
4
+ import torch
5
+ from torch import Tensor
6
+ from ... import _VF
7
+ from ..._jit_internal import Optional
8
+
9
+
10
+ __all__ = ['PackedSequence', 'invert_permutation', 'pack_padded_sequence', 'pad_packed_sequence', 'pad_sequence',
11
+ 'unpad_sequence', 'pack_sequence', 'unpack_sequence']
12
+
13
+
14
class PackedSequence_(NamedTuple):
    # Raw field container backing PackedSequence. Kept as a plain NamedTuple
    # because, per the note further down in this file, TorchScript does not
    # support constructors on named tuples; PackedSequence adds the methods.
    data: torch.Tensor
    batch_sizes: torch.Tensor
    sorted_indices: Optional[torch.Tensor]
    unsorted_indices: Optional[torch.Tensor]
19
+
20
+
21
def bind(optional, fn):
    # Maybe/Option-style map: apply ``fn`` only when ``optional`` is not None.
    return None if optional is None else fn(optional)
25
+
26
+
27
class PackedSequence(PackedSequence_):
    r"""Holds the data and list of :attr:`batch_sizes` of a packed sequence.

    All RNN modules accept packed sequences as inputs.

    Note:
        Instances of this class should never be created manually. They are meant
        to be instantiated by functions like :func:`pack_padded_sequence`.

        Batch sizes represent the number of elements at each sequence step in
        the batch, not the varying sequence lengths passed to
        :func:`pack_padded_sequence`.  For instance, given data ``abc`` and ``x``
        the :class:`PackedSequence` would contain data ``axbc`` with
        ``batch_sizes=[2,1,1]``.

    Attributes:
        data (Tensor): Tensor containing packed sequence
        batch_sizes (Tensor): Tensor of integers holding
            information about the batch size at each sequence step
        sorted_indices (Tensor, optional): Tensor of integers holding how this
            :class:`PackedSequence` is constructed from sequences.
        unsorted_indices (Tensor, optional): Tensor of integers holding how this
            to recover the original sequences with correct order.

    .. note::
        :attr:`data` can be on arbitrary device and of arbitrary dtype.
        :attr:`sorted_indices` and :attr:`unsorted_indices` must be ``torch.int64``
        tensors on the same device as :attr:`data`.

        However, :attr:`batch_sizes` should always be a CPU ``torch.int64`` tensor.

        This invariant is maintained throughout :class:`PackedSequence` class,
        and all functions that construct a `:class:PackedSequence` in PyTorch
        (i.e., they only pass in tensors conforming to this constraint).

    """

    def __new__(cls, data, batch_sizes=None, sorted_indices=None, unsorted_indices=None):
        # Construction is delegated to a module-level helper because
        # TorchScript does not support constructors on named tuples.
        return super().__new__(
            cls,
            *_packed_sequence_init_args(data, batch_sizes, sorted_indices,
                                        unsorted_indices))

    # NOTE [ device and dtype of a PackedSequence ]
    #
    # See the note above in doc string (starting with ":attr:`data` can be on
    # arbitrary device...").
    def pin_memory(self):
        """Return a copy with :attr:`data` and index tensors in pinned memory."""
        # Why not convert `batch_sizes`?
        # See NOTE [ device and dtype of a PackedSequence ]
        return type(self)(self.data.pin_memory(), self.batch_sizes,
                          bind(self.sorted_indices, lambda t: t.pin_memory()),
                          bind(self.unsorted_indices, lambda t: t.pin_memory()))

    def cuda(self, *args, **kwargs):
        """Move :attr:`data` (and index tensors) to a CUDA device."""
        # Tests to see if 'cuda' should be added to kwargs: probe with an
        # empty tensor so the user's args/kwargs are interpreted exactly as
        # Tensor.to would interpret them.
        ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
        if ex.is_cuda:
            return self.to(*args, **kwargs)
        return self.to(*args, device='cuda', **kwargs)

    def cpu(self, *args, **kwargs):
        """Move :attr:`data` (and index tensors) to CPU memory."""
        # Same empty-tensor probe as in cuda(): only inject device='cpu'
        # when the user's arguments do not already target the CPU.
        ex = torch.tensor((), dtype=self.data.dtype, device=self.data.device).to(*args, **kwargs)
        if ex.device.type == 'cpu':
            return self.to(*args, **kwargs)
        return self.to(*args, device='cpu', **kwargs)

    # dtype-conversion shorthands; each delegates to :meth:`to`.
    def double(self):
        return self.to(dtype=torch.double)

    def float(self):
        return self.to(dtype=torch.float)

    def half(self):
        return self.to(dtype=torch.half)

    def long(self):
        return self.to(dtype=torch.long)

    def int(self):
        return self.to(dtype=torch.int)

    def short(self):
        return self.to(dtype=torch.short)

    def char(self):
        return self.to(dtype=torch.int8)

    def byte(self):
        return self.to(dtype=torch.uint8)

    def to(self, *args, **kwargs):
        r"""Perform dtype and/or device conversion on `self.data`.

        It has similar signature as :meth:`torch.Tensor.to`, except optional
        arguments like `non_blocking` and `copy` should be passed as kwargs,
        not args, or they will not apply to the index tensors.

        .. note::

            If the ``self.data`` Tensor already has the correct :class:`torch.dtype`
            and :class:`torch.device`, then ``self`` is returned.
            Otherwise, returns a copy with the desired configuration.
        """
        # Why not convert `batch_sizes`?
        # See NOTE [ device and dtype of a PackedSequence ]
        data = self.data.to(*args, **kwargs)
        if data is self.data:
            # Tensor.to returned the same object, so no conversion was
            # needed for the index tensors either.
            return self
        else:
            # Does not forward device or dtype arg/kwargs, device is set from data.device
            kwargs = dict(filter(lambda t: t[0] != 'device' and t[0] != 'dtype', kwargs.items()))
            sorted_indices = bind(self.sorted_indices, lambda t: t.to(data.device, **kwargs))
            unsorted_indices = bind(self.unsorted_indices, lambda t: t.to(data.device, **kwargs))
            return type(self)(data, self.batch_sizes, sorted_indices, unsorted_indices)

    @property
    def is_cuda(self):
        r"""Return true if `self.data` is stored on a GPU."""
        return self.data.is_cuda

    def is_pinned(self):
        r"""Return true if `self.data` is stored in pinned memory."""
        return self.data.is_pinned()
152
+
153
+
154
# TorchScript doesn't support constructors on named tuples, so we use this
# helper method to construct PackedSequence.
def _packed_sequence_init_args(
    data: Tensor,
    batch_sizes: Optional[Tensor] = None,
    sorted_indices: Optional[Tensor] = None,
    unsorted_indices: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]:
    """Normalize and validate PackedSequence constructor arguments.

    Returns the ``(data, batch_sizes, sorted_indices, unsorted_indices)``
    tuple used to build a :class:`PackedSequence`.
    """
    # NB: if unsorted_indices is provided, it should be the inverse permutation
    # to sorted_indices. Don't assert it here because the PackedSequence ctor
    # should only be used internally.
    if unsorted_indices is None:
        unsorted_indices = invert_permutation(sorted_indices)

    if batch_sizes is None:
        # Legacy form: `PackedSequence((data, batch_sizes), *, sorted_indices)`
        assert isinstance(data, (list, tuple)) and len(data) == 2
        return data[0], data[1], sorted_indices, unsorted_indices

    # Standard form: `PackedSequence(data, batch_sizes, sorted_indices)`
    # TODO: Re-enable this check (.type isn't supported in TorchScript)
    if batch_sizes.device.type != 'cpu':
        raise ValueError(
            "batch_sizes should always be on CPU. "
            "Instances of PackedSequence should never be created manually. "
            "They should be instantiated by functions like pack_sequence "
            "and pack_padded_sequences in nn.utils.rnn. "
            "https://pytorch.org/docs/stable/nn.html#torch.nn.utils.rnn.pack_sequence")
    return data, batch_sizes, sorted_indices, unsorted_indices
185
+
186
+
187
def _packed_sequence_init(
    data: Tensor,
    batch_sizes: Optional[Tensor] = None,
    sorted_indices: Optional[Tensor] = None,
    unsorted_indices: Optional[Tensor] = None,
) -> PackedSequence:
    """Build a :class:`PackedSequence` after normalizing the arguments."""
    normalized = _packed_sequence_init_args(
        data, batch_sizes, sorted_indices, unsorted_indices
    )
    return PackedSequence(*normalized)
196
+
197
+
198
def invert_permutation(permutation: Optional[Tensor]) -> Optional[Tensor]:
    """Return the inverse of a 1-D index permutation (or ``None`` for ``None``).

    ``out[permutation[i]] = i`` for every position ``i``, so applying the
    result after ``permutation`` restores the original order.
    """
    if permutation is None:
        return None
    inverse = torch.empty_like(
        permutation, memory_format=torch.legacy_contiguous_format
    )
    positions = torch.arange(0, permutation.numel(), device=permutation.device)
    inverse.scatter_(0, permutation, positions)
    return inverse
205
+
206
+
207
def pack_padded_sequence(
    input: Tensor,
    lengths: Tensor,
    batch_first: bool = False,
    enforce_sorted: bool = True,
) -> PackedSequence:
    r"""Packs a Tensor containing padded sequences of variable length.

    :attr:`input` can be of size ``T x B x *`` where `T` is the length of the
    longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and
    ``*`` is any number of dimensions (including 0). If ``batch_first`` is
    ``True``, ``B x T x *`` :attr:`input` is expected.

    For unsorted sequences, use `enforce_sorted = False`. If
    :attr:`enforce_sorted` is ``True``, the sequences should be sorted by
    length in a decreasing order. `enforce_sorted = True` is only necessary
    for ONNX export.

    Args:
        input (Tensor): padded batch of variable length sequences.
        lengths (Tensor or list(int)): list of sequence lengths of each batch
            element (must be on the CPU if provided as a tensor).
        batch_first (bool, optional): if ``True``, the input is expected in
            ``B x T x *`` format.
        enforce_sorted (bool, optional): if ``True``, the input is expected to
            contain sequences sorted by length in a decreasing order. If
            ``False``, the input will get sorted unconditionally. Default: ``True``.

    Returns:
        a :class:`PackedSequence` object
    """
    # Normalize lengths to a CPU int64 tensor; warn under tracing because the
    # tracer would bake a Python list of lengths in as constants.
    if isinstance(lengths, torch.Tensor):
        lengths = lengths.to(dtype=torch.int64)
    else:
        if torch._C._get_tracing_state():
            warnings.warn('pack_padded_sequence has been called with a Python list of '
                          'sequence lengths. The tracer cannot track the data flow of Python '
                          'values, and it will treat them as constants, likely rendering '
                          'the trace incorrect for any other combination of lengths.',
                          stacklevel=2)
        lengths = torch.as_tensor(lengths, dtype=torch.int64, device='cpu')

    sorted_indices = None
    if not enforce_sorted:
        # Sort batch elements by decreasing length and remember the
        # permutation so the original order can be restored on unpack.
        lengths, sorted_indices = torch.sort(lengths, descending=True)
        sorted_indices = sorted_indices.to(input.device)
        batch_dim = 0 if batch_first else 1
        input = input.index_select(batch_dim, sorted_indices)

    data, batch_sizes = _VF._pack_padded_sequence(input, lengths, batch_first)
    return _packed_sequence_init(data, batch_sizes, sorted_indices, None)
266
+
267
+
268
def pad_packed_sequence(
    sequence: PackedSequence,
    batch_first: bool = False,
    padding_value: float = 0.0,
    total_length: Optional[int] = None,
) -> Tuple[Tensor, Tensor]:
    r"""Pad a packed batch of variable length sequences.

    It is an inverse operation to :func:`pack_padded_sequence`. The returned
    Tensor's data will be of size ``T x B x *``, where `T` is the length of
    the longest sequence and `B` is the batch size. If ``batch_first`` is
    True, the data will be transposed into ``B x T x *`` format.

    Example:
        >>> from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
        >>> seq = torch.tensor([[1, 2, 0], [3, 0, 0], [4, 5, 6]])
        >>> lens = [2, 1, 3]
        >>> packed = pack_padded_sequence(seq, lens, batch_first=True, enforce_sorted=False)
        >>> seq_unpacked, lens_unpacked = pad_packed_sequence(packed, batch_first=True)
        >>> seq_unpacked
        tensor([[1, 2, 0],
                [3, 0, 0],
                [4, 5, 6]])
        >>> lens_unpacked
        tensor([2, 1, 3])

    .. note::
        :attr:`total_length` is useful to implement the
        ``pack sequence -> recurrent network -> unpack sequence`` pattern in a
        :class:`~torch.nn.Module` wrapped in :class:`~torch.nn.DataParallel`.

    Args:
        sequence (PackedSequence): batch to pad
        batch_first (bool, optional): if ``True``, the output will be in
            ``B x T x *`` format.
        padding_value (float, optional): values for padded elements.
        total_length (int, optional): if not ``None``, the output will be padded to
            have length :attr:`total_length`. This method will throw :class:`ValueError`
            if :attr:`total_length` is less than the max sequence length in
            :attr:`sequence`.

    Returns:
        Tuple of Tensor containing the padded sequence, and a Tensor
        containing the list of lengths of each sequence in the batch.
        Batch elements will be re-ordered as they were ordered originally when
        the batch was passed to ``pack_padded_sequence`` or ``pack_sequence``.
    """
    max_seq_length = sequence.batch_sizes.size(0)
    if total_length is not None:
        if total_length < max_seq_length:
            raise ValueError("Expected total_length to be at least the length "
                             "of the longest sequence in input, but got "
                             f"total_length={total_length} and max sequence length being {max_seq_length}"
                             )
        max_seq_length = total_length

    padded_output, lengths = _VF._pad_packed_sequence(
        sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length)

    unsorted_indices = sequence.unsorted_indices
    if unsorted_indices is None:
        return padded_output, lengths
    # Restore the caller's original batch order; lengths live on CPU, hence
    # the explicit .cpu() on the index tensor.
    batch_dim = 0 if batch_first else 1
    return (padded_output.index_select(batch_dim, unsorted_indices),
            lengths[unsorted_indices.cpu()])
340
+
341
# NOTE: .pyi stub allows Iterable[Tensor], but for JIT-compatibility we need to
# be more restrictive here.
def pad_sequence(
    sequences: Union[Tensor, List[Tensor]],
    batch_first: bool = False,
    padding_value: float = 0.0,
) -> Tensor:
    r"""Pad a list of variable length Tensors with ``padding_value``.

    ``pad_sequence`` stacks a list of Tensors along a new dimension, and pads
    them to equal length. For example, if the input is a list of sequences
    with size ``L x *`` and ``batch_first`` is False, the output is of size
    ``T x B x *``, where `B` is the batch size and `T` the longest length.

    Example:
        >>> from torch.nn.utils.rnn import pad_sequence
        >>> a = torch.ones(25, 300)
        >>> b = torch.ones(22, 300)
        >>> c = torch.ones(15, 300)
        >>> pad_sequence([a, b, c]).size()
        torch.Size([25, 3, 300])

    Note:
        This function assumes trailing dimensions and type of all the Tensors
        in sequences are same.

    Args:
        sequences (list[Tensor]): list of variable length sequences.
        batch_first (bool, optional): output will be in ``B x T x *`` if True, or in
            ``T x B x *`` otherwise. Default: False.
        padding_value (float, optional): value for padded elements. Default: 0.

    Returns:
        Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``.
        Tensor of size ``B x T x *`` otherwise
    """
    if torch.jit.is_tracing() or torch.jit.is_scripting():
        # For JIT, we only support Union[Tensor, Tuple[Tensor]]
        if isinstance(sequences, torch.Tensor):
            sequences = sequences.unbind(0)
    else:
        # JIT doesn't support `Iterable`, so only validate it in eager mode.
        if not isinstance(sequences, Iterable):
            raise RuntimeError(
                'pad_sequence: Expected iterable for input sequences, but got arg of type: '
                f'{type(sequences)}')
        # In JIT context, materializing the iterable this way would raise
        # "cannot statically infer the expected size of a list in this context".
        sequences = tuple(sequences)

    # Trailing dimensions and dtype are assumed identical across sequences;
    # the C++ kernel reads them from sequences[0].
    return torch._C._nn.pad_sequence(sequences, batch_first, padding_value)
400
+
401
+
402
def unpad_sequence(
    padded_sequences: Tensor,
    lengths: Tensor,
    batch_first: bool = False,
) -> List[Tensor]:
    r"""Unpad padded Tensor into a list of variable length Tensors.

    ``unpad_sequence`` unstacks padded Tensor into a list of variable length
    Tensors. The input tensor is left unmodified.

    Example:
        >>> from torch.nn.utils.rnn import pad_sequence, unpad_sequence
        >>> a = torch.ones(25, 300)
        >>> b = torch.ones(22, 300)
        >>> c = torch.ones(15, 300)
        >>> sequences = [a, b, c]
        >>> padded_sequences = pad_sequence(sequences)
        >>> lengths = torch.as_tensor([v.size(0) for v in sequences])
        >>> unpadded_sequences = unpad_sequence(padded_sequences, lengths)
        >>> torch.allclose(sequences[0], unpadded_sequences[0])
        True
        >>> torch.allclose(sequences[1], unpadded_sequences[1])
        True
        >>> torch.allclose(sequences[2], unpadded_sequences[2])
        True

    Args:
        padded_sequences (Tensor): padded sequences.
        lengths (Tensor): length of original (unpadded) sequences.
        batch_first (bool, optional): whether batch dimension first or not. Default: False.

    Returns:
        a list of :class:`Tensor` objects
    """
    if not batch_first:
        # BUGFIX: use the out-of-place transpose — the previous in-place
        # ``transpose_(0, 1)`` silently mutated the caller's tensor as a
        # side effect. ``transpose`` returns a view, so no data is copied.
        padded_sequences = padded_sequences.transpose(0, 1)

    max_length = padded_sequences.shape[1]
    # Per-position index used to build a boolean mask "position < length".
    idx = torch.arange(max_length, device=lengths.device)

    unpadded_sequences = []
    for seq, length in zip(padded_sequences, lengths):
        mask = idx < length
        unpadded_sequences.append(seq[mask])

    return unpadded_sequences
449
+
450
+
451
def pack_sequence(sequences: List[Tensor], enforce_sorted: bool = True) -> PackedSequence:
    r"""Packs a list of variable length Tensors.

    Equivalent to calling ``pad_sequence`` followed by
    ``pack_padded_sequence``. ``sequences`` should be a list of Tensors of
    size ``L x *``, where `L` is the length of a sequence and `*` is any
    number of trailing dimensions, including zero.

    For unsorted sequences, use `enforce_sorted = False`. If
    ``enforce_sorted`` is ``True``, the sequences should be sorted in the
    order of decreasing length. ``enforce_sorted = True`` is only necessary
    for ONNX export.

    Example:
        >>> from torch.nn.utils.rnn import pack_sequence
        >>> a = torch.tensor([1, 2, 3])
        >>> b = torch.tensor([4, 5])
        >>> c = torch.tensor([6])
        >>> pack_sequence([a, b, c])
        PackedSequence(data=tensor([1, 4, 6, 2, 5, 3]), batch_sizes=tensor([3, 2, 1]), sorted_indices=None, unsorted_indices=None)

    Args:
        sequences (list[Tensor]): A list of sequences of decreasing length.
        enforce_sorted (bool, optional): if ``True``, checks that the input
            contains sequences sorted by length in a decreasing order. If
            ``False``, this condition is not checked. Default: ``True``.

    Returns:
        a :class:`PackedSequence` object
    """
    lengths = torch.as_tensor([seq.size(0) for seq in sequences])
    padded = pad_sequence(sequences)
    return pack_padded_sequence(padded, lengths, enforce_sorted=enforce_sorted)
485
+
486
+
487
def unpack_sequence(packed_sequences: PackedSequence) -> List[Tensor]:
    r"""Unpack PackedSequence into a list of variable length Tensors.

    Inverse of :func:`pack_sequence`: pads the packed batch back out, then
    strips the padding from each batch element.

    Example:
        >>> from torch.nn.utils.rnn import pack_sequence, unpack_sequence
        >>> a = torch.tensor([1, 2, 3])
        >>> b = torch.tensor([4, 5])
        >>> c = torch.tensor([6])
        >>> packed_sequences = pack_sequence([a, b, c])
        >>> unpack_sequence(packed_sequences)
        [tensor([1, 2, 3]), tensor([4, 5]), tensor([6])]

    Args:
        packed_sequences (PackedSequence): A PackedSequence object.

    Returns:
        a list of :class:`Tensor` objects
    """
    padded, lengths = pad_packed_sequence(packed_sequences, batch_first=True)
    return unpad_sequence(padded, lengths, batch_first=True)
evalkit_cambrian/lib/python3.10/site-packages/torch/nn/utils/stateless.py ADDED
@@ -0,0 +1,263 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import contextlib
2
+ import warnings
3
+ from collections import defaultdict
4
+ from typing import Any, Dict, Iterator, Optional, Set, Tuple, Union
5
+
6
+ import torch
7
+ from torch import Tensor
8
+ from torch.nn.utils._named_member_accessor import NamedMemberAccessor
9
+
10
+ __all__ = ["functional_call"]
11
+
12
+
13
+ def _untie_named_tensors_map(
14
+ module: "torch.nn.Module",
15
+ parameters_and_buffers: Dict[str, Tensor],
16
+ ) -> Dict[str, Tensor]:
17
+ """
18
+ Unties all tied tensors in the module to parameters_and_buffers.
19
+
20
+ This function returns a new untied_parameters_and_buffers dictionary and leave the original
21
+ untied_parameters_and_buffers dictionary unchanged. It adds new (missing) keys for tied tensors
22
+ in the module to untied_parameters_and_buffers. The value of the new key is the user-given value
23
+ in the original parameters_and_buffers dictionary.
24
+
25
+ If there are more than one user-given values for the same tied tensor, it will raise an error.
26
+
27
+ For example, if the module has two tied weights self.foo and self.tied_foo and the user passes
28
+ {'foo': foo_value, ...}, this will return {'foo': foo_value, 'tied_foo': foo_value, ...}. If the
29
+ user passes {'foo': foo_value, 'tied_foo': tied_foo_value, ...}, it will raise an error. If the
30
+ user passes {'foo': foo_value, 'tied_foo': foo_value, ...}, it will not raise an error.
31
+
32
+ Args:
33
+ module (torch.nn.Module): the module to determine which tensors are tied.
34
+ parameters_and_buffers (Dict[str, Tensor]): a map of {name: tensor} for reparamaterizing the module.
35
+
36
+ Returns:
37
+ A new untied version of the parameters_and_buffers dictionary.
38
+
39
+ Raises:
40
+ ValueError: if there are more than one user-given values for the same tied tensor.
41
+ """
42
+ # A map of {name: tensor} for all tensors (including tied ones) in the module.
43
+ all_named_tensors: Dict[str, Tensor] = {}
44
+ all_named_tensors.update(module.named_parameters(remove_duplicate=False))
45
+ all_named_tensors.update(module.named_buffers(remove_duplicate=False))
46
+
47
+ # A map of {tensor: set(all_tied_names)} for all tensor names in the module.
48
+ tensor_to_tied_names_map: Dict[Tensor, Set[str]] = defaultdict(set)
49
+ for name, tensor in all_named_tensors.items():
50
+ tensor_to_tied_names_map[tensor].add(name)
51
+
52
+ # A map of {tied_name: set(all_tied_names)} for all tensor names in the module.
53
+ # If a name is not tied, it will not be in this map.
54
+ tied_names_map: Dict[str, Set[str]] = {}
55
+ for tied_names in tensor_to_tied_names_map.values():
56
+ if len(tied_names) > 1:
57
+ for tied_name in tied_names:
58
+ tied_names_map[tied_name] = tied_names
59
+
60
+ # Make sure the user didn't pass multiple values for the same tied tensor.
61
+ given_names = set(parameters_and_buffers.keys())
62
+ given_names_for_tied_tensors = given_names.intersection(tied_names_map.keys())
63
+ for given_name in given_names_for_tied_tensors:
64
+ tied_names = tied_names_map[given_name]
65
+ if (
66
+ # Detect if there are multiple keys present for the same tied tensor.
67
+ len(tied_names.intersection(given_names_for_tied_tensors)) > 1
68
+ # Only raise an error if the user passed multiple values for the same tied tensor.
69
+ # If all given values are the same, don't raise.
70
+ and len({parameters_and_buffers[tied_name] for tied_name in tied_names})
71
+ != 1
72
+ ):
73
+ raise ValueError(
74
+ f"functional_call got multiple values for keys {sorted(tied_names)}, "
75
+ f"which are tied. Consider using tie_weights=False"
76
+ )
77
+
78
+ # Untie the given named tensor map
79
+ # Make a copy for not modifying the original dict
80
+ untied_parameters_and_buffers = parameters_and_buffers.copy()
81
+ for given_name in given_names_for_tied_tensors:
82
+ for tied_name in tied_names_map[given_name]:
83
+ untied_parameters_and_buffers[tied_name] = parameters_and_buffers[
84
+ given_name
85
+ ]
86
+ return untied_parameters_and_buffers
87
+
88
+
89
+ @contextlib.contextmanager
90
+ def _reparametrize_module(
91
+ module: "torch.nn.Module",
92
+ parameters_and_buffers: Dict[str, Tensor],
93
+ *,
94
+ tie_weights: bool = False,
95
+ strict: bool = False,
96
+ ) -> Iterator[None]:
97
+ if tie_weights:
98
+ untied_parameters_and_buffers = _untie_named_tensors_map(
99
+ module, parameters_and_buffers
100
+ )
101
+ else:
102
+ untied_parameters_and_buffers = parameters_and_buffers
103
+
104
+ accessor = NamedMemberAccessor(module)
105
+ if strict:
106
+ missing_keys, unexpected_keys = accessor.check_keys(
107
+ untied_parameters_and_buffers
108
+ )
109
+ error_msgs = []
110
+ if len(unexpected_keys) > 0:
111
+ error_msgs.append(
112
+ f"Unexpected key(s): {', '.join(map(repr, unexpected_keys))}."
113
+ )
114
+ if len(missing_keys) > 0:
115
+ error_msgs.append(f"Missing key(s): {', '.join(map(repr, missing_keys))}.")
116
+ if len(error_msgs) > 0:
117
+ raise RuntimeError(
118
+ "Error(s) in reparametrizing for {}:\n\t{}".format(
119
+ module._get_name(), "\n\t".join(error_msgs)
120
+ )
121
+ )
122
+
123
+ orig_parameters_and_buffers: Dict[str, Tensor] = {}
124
+ try:
125
+ orig_parameters_and_buffers, _ = accessor.swap_tensors_dict(
126
+ untied_parameters_and_buffers, allow_missing=True
127
+ )
128
+ yield
129
+ finally:
130
+ new_parameters_and_buffers, _ = accessor.swap_tensors_dict(
131
+ orig_parameters_and_buffers, allow_missing=True
132
+ )
133
+ # Sometimes the module is not completely stateless and has some in-place modifications on
134
+ # the _parameters and _buffers dictionaries.
135
+ # Write the changed parameters and buffers back to the original dict.
136
+ parameters_and_buffers.update(
137
+ {
138
+ k: new_parameters_and_buffers[k]
139
+ for k in parameters_and_buffers
140
+ if k in new_parameters_and_buffers
141
+ }
142
+ )
143
+
144
+
145
def functional_call(
    module: "torch.nn.Module",
    parameters_and_buffers: Dict[str, Tensor],
    args: Union[Any, Tuple],
    kwargs: Optional[Dict[str, Any]] = None,
    *,
    tie_weights: bool = True,
    strict: bool = False,
):
    r"""Perform a functional call on the module by replacing the module parameters and buffers with the provided ones.

    .. warning::

        This API is deprecated as of PyTorch 2.0 and will be removed in a future
        version of PyTorch. Please use :func:`torch.func.functional_call` instead,
        which is a drop-in replacement for this API.

    .. note:: If the module has active parametrizations, passing a value in the
        :attr:`parameters_and_buffers` argument with the name set to the regular
        parameter name will completely disable the parametrization. To apply the
        parametrization function to the passed value, set the key as
        ``{submodule_name}.parametrizations.{parameter_name}.original``.

    .. note:: If the module performs in-place operations on parameters/buffers,
        these will be reflected in the `parameters_and_buffers` input.

    .. note:: If the module has tied weights, whether or not functional_call
        respects the tying is determined by the tie_weights flag.

    Args:
        module (torch.nn.Module): the module to call
        parameters_and_buffers (dict of str and Tensor): the parameters that will be used in
            the module call.
        args (Any or tuple): arguments to be passed to the module call. If not a tuple, considered a single argument.
        kwargs (dict): keyword arguments to be passed to the module call
        tie_weights (bool, optional): If True, then parameters and buffers tied in the original model will be treated as
            tied in the reparamaterized version. Therefore, if True and different values are passed for the tied
            parameters and buffers, it will error. If False, it will not respect the originally tied parameters and
            buffers unless the values passed for both weights are the same. Default: True.
        strict (bool, optional): If True, then the parameters and buffers passed in must match the parameters and
            buffers in the original module. Therefore, if True and there are any missing or unexpected keys, it will
            error. Default: False.

    Returns:
        Any: the result of calling ``module``.
    """
    # Emit the deprecation notice, then delegate to the shared implementation
    # used by torch.func.functional_call.
    warnings.warn(
        "This API is deprecated as of PyTorch 2.0 and will be removed in a future "
        "version of PyTorch. Please use torch.func.functional_call instead "
        "which is a drop-in replacement for this API."
    )
    return _functional_call(
        module,
        parameters_and_buffers,
        args,
        kwargs,
        tie_weights=tie_weights,
        strict=strict,
    )
227
+
228
+
229
def _functional_call(
    module: "torch.nn.Module",
    parameters_and_buffers: Dict[str, Tensor],
    args: Union[Any, Tuple],
    kwargs: Optional[Dict[str, Any]] = None,
    *,
    tie_weights: bool = True,
    strict: bool = False,
):
    """Reparametrize ``module`` with the given tensors and invoke it.

    Rejects JIT-compiled modules and nn.DataParallel, which are incompatible
    with the stateless API.
    """
    # TODO allow kwargs such as unsafe and others for parametrization
    jit_module_types = (
        torch.jit.RecursiveScriptModule,
        torch.jit.ScriptModule,
        torch.jit.ScriptFunction,
    )
    if (
        torch.jit.is_tracing()
        or torch.jit.is_scripting()
        or isinstance(module, jit_module_types)
    ):
        raise RuntimeError("The stateless API can't be used with Jitted modules")
    if isinstance(module, torch.nn.DataParallel):
        raise RuntimeError(
            "The stateless API can't be used with nn.DataParallel module"
        )

    if kwargs is None:
        kwargs = {}
    # A non-tuple args value is treated as a single positional argument.
    if not isinstance(args, tuple):
        args = (args,)

    with _reparametrize_module(
        module, parameters_and_buffers, tie_weights=tie_weights, strict=strict
    ):
        return module(*args, **kwargs)
infer_4_47_1/lib/python3.10/site-packages/scipy/optimize/__pycache__/_optimize.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f95107e442f582b8a1bf505d92670c76051137cfd3da7f289dfdb76f9549c23
3
+ size 115312
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.26 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc ADDED
Binary file (23 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc ADDED
Binary file (31.1 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc ADDED
Binary file (25.5 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc ADDED
Binary file (5.19 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc ADDED
Binary file (33.8 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc ADDED
Binary file (8.98 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc ADDED
Binary file (696 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc ADDED
Binary file (665 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc ADDED
Binary file (639 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc ADDED
Binary file (683 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc ADDED
Binary file (638 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (643 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc ADDED
Binary file (15.2 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2092ee978f2586e89d43a9e7a35e25aab648a980f754d2519808e8243c62b155
3
+ size 877161
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc ADDED
Binary file (26.4 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/iterative.cpython-310.pyc ADDED
Binary file (26.6 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc ADDED
Binary file (6.96 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc ADDED
Binary file (17 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc ADDED
Binary file (7.41 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc ADDED
Binary file (5.28 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/lsqr.py ADDED
@@ -0,0 +1,589 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Sparse Equations and Least Squares.
2
+
3
+ The original Fortran code was written by C. C. Paige and M. A. Saunders as
4
+ described in
5
+
6
+ C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
7
+ equations and sparse least squares, TOMS 8(1), 43--71 (1982).
8
+
9
+ C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
10
+ equations and least-squares problems, TOMS 8(2), 195--209 (1982).
11
+
12
+ It is licensed under the following BSD license:
13
+
14
+ Copyright (c) 2006, Systems Optimization Laboratory
15
+ All rights reserved.
16
+
17
+ Redistribution and use in source and binary forms, with or without
18
+ modification, are permitted provided that the following conditions are
19
+ met:
20
+
21
+ * Redistributions of source code must retain the above copyright
22
+ notice, this list of conditions and the following disclaimer.
23
+
24
+ * Redistributions in binary form must reproduce the above
25
+ copyright notice, this list of conditions and the following
26
+ disclaimer in the documentation and/or other materials provided
27
+ with the distribution.
28
+
29
+ * Neither the name of Stanford University nor the names of its
30
+ contributors may be used to endorse or promote products derived
31
+ from this software without specific prior written permission.
32
+
33
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
34
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
35
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
36
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
37
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
39
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
40
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
41
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
42
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
43
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44
+
45
+ The Fortran code was translated to Python for use in CVXOPT by Jeffery
46
+ Kline with contributions by Mridul Aanjaneya and Bob Myhill.
47
+
48
+ Adapted for SciPy by Stefan van der Walt.
49
+
50
+ """
51
+
52
+ __all__ = ['lsqr']
53
+
54
+ import numpy as np
55
+ from math import sqrt
56
+ from scipy.sparse.linalg._interface import aslinearoperator
57
+ from scipy.sparse._sputils import convert_pydata_sparse_to_scipy
58
+
59
+ eps = np.finfo(np.float64).eps
60
+
61
+
62
+ def _sym_ortho(a, b):
63
+ """
64
+ Stable implementation of Givens rotation.
65
+
66
+ Notes
67
+ -----
68
+ The routine 'SymOrtho' was added for numerical stability. This is
69
+ recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
70
+ ``1/eps`` in some important places (see, for example text following
71
+ "Compute the next plane rotation Qk" in minres.py).
72
+
73
+ References
74
+ ----------
75
+ .. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
76
+ and Least-Squares Problems", Dissertation,
77
+ http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
78
+
79
+ """
80
+ if b == 0:
81
+ return np.sign(a), 0, abs(a)
82
+ elif a == 0:
83
+ return 0, np.sign(b), abs(b)
84
+ elif abs(b) > abs(a):
85
+ tau = a / b
86
+ s = np.sign(b) / sqrt(1 + tau * tau)
87
+ c = s * tau
88
+ r = b / s
89
+ else:
90
+ tau = b / a
91
+ c = np.sign(a) / sqrt(1+tau*tau)
92
+ s = c * tau
93
+ r = a / c
94
+ return c, s, r
95
+
96
+
97
def lsqr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
         iter_lim=None, show=False, calc_var=False, x0=None):
    """Find the least-squares solution to a large, sparse, linear system
    of equations.

    The function solves ``Ax = b``  or  ``min ||Ax - b||^2`` or
    ``min ||Ax - b||^2 + d^2 ||x - x0||^2``.

    The matrix A may be square or rectangular (over-determined or
    under-determined), and may have any rank.

    ::

      1. Unsymmetric equations --    solve  Ax = b

      2. Linear least squares  --    solve  Ax = b
                                     in the least-squares sense

      3. Damped least squares  --    solve  (   A    )*x = (    b    )
                                            ( damp*I )     ( damp*x0 )
                                     in the least-squares sense

    Parameters
    ----------
    A : {sparse array, ndarray, LinearOperator}
        Representation of an m-by-n matrix.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^T x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : array_like, shape (m,)
        Right-hand side vector ``b``.
    damp : float
        Damping coefficient. Default is 0.
    atol, btol : float, optional
        Stopping tolerances. `lsqr` continues iterations until a
        certain backward error estimate is smaller than some quantity
        depending on atol and btol.  Let ``r = b - Ax`` be the
        residual vector for the current approximate solution ``x``.
        If ``Ax = b`` seems to be consistent, `lsqr` terminates
        when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
        Otherwise, `lsqr` terminates when ``norm(A^H r) <=
        atol * norm(A) * norm(r)``.  If both tolerances are 1.0e-6 (default),
        the final ``norm(r)`` should be accurate to about 6
        digits. (The final ``x`` will usually have fewer correct digits,
        depending on ``cond(A)`` and the size of LAMBDA.)  If `atol`
        or `btol` is None, a default value of 1.0e-6 will be used.
        Ideally, they should be estimates of the relative error in the
        entries of ``A`` and ``b`` respectively.  For example, if the entries
        of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
        the algorithm from doing unnecessary work beyond the
        uncertainty of the input data.
    conlim : float, optional
        Another stopping tolerance.  lsqr terminates if an estimate of
        ``cond(A)`` exceeds `conlim`.  For compatible systems ``Ax =
        b``, `conlim` could be as large as 1.0e+12 (say).  For
        least-squares problems, conlim should be less than 1.0e+8.
        Maximum precision can be obtained by setting ``atol = btol =
        conlim = zero``, but the number of iterations may then be
        excessive. Default is 1e8.
    iter_lim : int, optional
        Explicit limitation on number of iterations (for safety).
    show : bool, optional
        Display an iteration log. Default is False.
    calc_var : bool, optional
        Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.
    x0 : array_like, shape (n,), optional
        Initial guess of x, if None zeros are used. Default is None.

        .. versionadded:: 1.0.0

    Returns
    -------
    x : ndarray of float
        The final solution.
    istop : int
        Gives the reason for termination.
        1 means x is an approximate solution to Ax = b.
        2 means x approximately solves the least-squares problem.
    itn : int
        Iteration number upon termination.
    r1norm : float
        ``norm(r)``, where ``r = b - Ax``.
    r2norm : float
        ``sqrt( norm(r)^2 + damp^2 * norm(x - x0)^2 )``.  Equal to `r1norm`
        if ``damp == 0``.
    anorm : float
        Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
    acond : float
        Estimate of ``cond(Abar)``.
    arnorm : float
        Estimate of ``norm(A'@r - damp^2*(x - x0))``.
    xnorm : float
        ``norm(x)``
    var : ndarray of float
        If ``calc_var`` is True, estimates all diagonals of
        ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
        damp^2*I)^{-1}``.  This is well defined if A has full column
        rank or ``damp > 0``.  (Not sure what var means if ``rank(A)
        < n`` and ``damp = 0.``)

    Notes
    -----
    LSQR uses an iterative method to approximate the solution.  The
    number of iterations required to reach a certain accuracy depends
    strongly on the scaling of the problem.  Poor scaling of the rows
    or columns of A should therefore be avoided where possible.

    For example, in problem 1 the solution is unaltered by
    row-scaling.  If a row of A is very small or large compared to
    the other rows of A, the corresponding row of ( A  b ) should be
    scaled up or down.

    In problems 1 and 2, the solution x is easily recovered
    following column-scaling.  Unless better information is known,
    the nonzero columns of A should be scaled so that they all have
    the same Euclidean norm (e.g., 1.0).

    In problem 3, there is no freedom to re-scale if damp is
    nonzero.  However, the value of damp should be assigned only
    after attention has been paid to the scaling of A.

    The parameter damp is intended to help regularize
    ill-conditioned systems, by preventing the true solution from
    being very large.  Another aid to regularization is provided by
    the parameter acond, which may be used to terminate iterations
    before the computed solution becomes very large.

    If some initial estimate ``x0`` is known and if ``damp == 0``,
    one could proceed as follows:

      1. Compute a residual vector ``r0 = b - A@x0``.
      2. Use LSQR to solve the system  ``A@dx = r0``.
      3. Add the correction dx to obtain a final solution ``x = x0 + dx``.

    This requires that ``x0`` be available before and after the call
    to LSQR.  To judge the benefits, suppose LSQR takes k1 iterations
    to solve A@x = b and k2 iterations to solve A@dx = r0.
    If x0 is "good", norm(r0) will be smaller than norm(b).
    If the same stopping tolerances atol and btol are used for each
    system, k1 and k2 will be similar, but the final solution x0 + dx
    should be more accurate.  The only way to reduce the total work
    is to use a larger stopping tolerance for the second system.
    If some value btol is suitable for A@x = b, the larger value
    btol*norm(b)/norm(r0)  should be suitable for  A@dx = r0.

    Preconditioning is another way to reduce the number of iterations.
    If it is possible to solve a related system ``M@x = b``
    efficiently, where M approximates A in some helpful way (e.g. M -
    A has low rank or its elements are small relative to those of A),
    LSQR may converge more rapidly on the system ``A@M(inverse)@z =
    b``, after which x can be recovered by solving M@x = z.

    If A is symmetric, LSQR should not be used!

    Alternatives are the symmetric conjugate-gradient method (cg)
    and/or SYMMLQ.  SYMMLQ is an implementation of symmetric cg that
    applies to any symmetric A and will converge more rapidly than
    LSQR.  If A is positive definite, there are other implementations
    of symmetric cg that require slightly less work per iteration than
    SYMMLQ (but will take the same number of iterations).

    References
    ----------
    .. [1] C. C. Paige and M. A. Saunders (1982a).
           "LSQR: An algorithm for sparse linear equations and
           sparse least squares", ACM TOMS 8(1), 43-71.
    .. [2] C. C. Paige and M. A. Saunders (1982b).
           "Algorithm 583.  LSQR: Sparse linear equations and least
           squares problems", ACM TOMS 8(2), 195-209.
    .. [3] M. A. Saunders (1995).  "Solution of sparse rectangular
           systems using LSQR and CRAIG", BIT 35, 588-604.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_array
    >>> from scipy.sparse.linalg import lsqr
    >>> A = csc_array([[1., 0.], [1., 1.], [0., 1.]], dtype=float)

    The first example has the trivial solution ``[0, 0]``

    >>> b = np.array([0., 0., 0.], dtype=float)
    >>> x, istop, itn, normr = lsqr(A, b)[:4]
    >>> istop
    0
    >>> x
    array([ 0.,  0.])

    The stopping code ``istop=0`` returned indicates that a vector of zeros was
    found as a solution. The returned solution `x` indeed contains
    ``[0., 0.]``. The next example has a non-trivial solution:

    >>> b = np.array([1., 0., -1.], dtype=float)
    >>> x, istop, itn, r1norm = lsqr(A, b)[:4]
    >>> istop
    1
    >>> x
    array([ 1., -1.])
    >>> itn
    1
    >>> r1norm
    4.440892098500627e-16

    As indicated by ``istop=1``, `lsqr` found a solution obeying the tolerance
    limits. The given solution ``[1., -1.]`` obviously solves the equation. The
    remaining return values include information about the number of iterations
    (`itn=1`) and the remaining difference of left and right side of the solved
    equation.
    The final example demonstrates the behavior in the case where there is no
    solution for the equation:

    >>> b = np.array([1., 0.01, -1.], dtype=float)
    >>> x, istop, itn, r1norm = lsqr(A, b)[:4]
    >>> istop
    2
    >>> x
    array([ 1.00333333, -0.99666667])
    >>> A.dot(x)-b
    array([ 0.00333333, -0.00333333,  0.00333333])
    >>> r1norm
    0.005773502691896255

    `istop` indicates that the system is inconsistent and thus `x` is rather an
    approximate solution to the corresponding least-squares problem. `r1norm`
    contains the norm of the minimal residual that was found.
    """
    # Normalize the operator: accept pydata/sparse objects, dense arrays,
    # scipy sparse matrices, or anything aslinearoperator understands.
    A = convert_pydata_sparse_to_scipy(A)
    A = aslinearoperator(A)
    b = np.atleast_1d(b)
    if b.ndim > 1:
        # Collapse an (m, 1) right-hand side to 1-D; the algorithm works
        # on vectors only.
        b = b.squeeze()

    m, n = A.shape
    if iter_lim is None:
        iter_lim = 2 * n
    # Running estimate of diag((A'A + damp^2 I)^-1); only accumulated when
    # calc_var is True, but always returned.
    var = np.zeros(n)

    # Human-readable stop reasons, indexed by the final istop value.
    msg = ('The exact solution is  x = 0                              ',
           'Ax - b is small enough, given atol, btol                  ',
           'The least-squares solution is good enough, given atol     ',
           'The estimate of cond(Abar) has exceeded conlim            ',
           'Ax - b is small enough for this machine                   ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine         ',
           'The iteration limit has been reached                      ')

    if show:
        print(' ')
        print('LSQR            Least-squares solution of  Ax = b')
        str1 = f'The matrix A has {m} rows and {n} columns'
        str2 = f'damp = {damp:20.14e}   calc_var = {calc_var:8g}'
        str3 = f'atol = {atol:8.2e}                 conlim = {conlim:8.2e}'
        str4 = f'btol = {btol:8.2e}               iter_lim = {iter_lim:8g}'
        print(str1)
        print(str2)
        print(str3)
        print(str4)

    itn = 0
    istop = 0
    # ctol is the condition-number test threshold; conlim <= 0 disables it.
    ctol = 0
    if conlim > 0:
        ctol = 1/conlim
    anorm = 0
    acond = 0
    dampsq = damp**2
    ddnorm = 0
    res2 = 0
    xnorm = 0
    xxnorm = 0
    z = 0
    cs2 = -1
    sn2 = 0

    # Set up the first vectors u and v for the bidiagonalization.
    # These satisfy  beta*u = b - A@x,  alfa*v = A'@u.
    u = b
    bnorm = np.linalg.norm(b)

    if x0 is None:
        x = np.zeros(n)
        # u is exactly b here, so beta = ||b|| without recomputing a norm.
        beta = bnorm.copy()
    else:
        x = np.asarray(x0)
        u = u - A.matvec(x)
        beta = np.linalg.norm(u)

    if beta > 0:
        u = (1/beta) * u
        v = A.rmatvec(u)
        alfa = np.linalg.norm(v)
    else:
        # b - A@x0 is exactly zero: x0 already solves the system.
        v = x.copy()
        alfa = 0

    if alfa > 0:
        v = (1/alfa) * v
    w = v.copy()

    rhobar = alfa
    phibar = beta
    rnorm = beta
    r1norm = rnorm
    r2norm = rnorm

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    arnorm = alfa * beta
    if arnorm == 0:
        if show:
            print(msg[0])
        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

    head1 = '   Itn      x[0]       r1norm     r2norm '
    head2 = ' Compatible    LS      Norm A   Cond A'

    if show:
        print(' ')
        print(head1, head2)
        test1 = 1
        test2 = alfa / beta
        str1 = f'{itn:6g} {x[0]:12.5e}'
        str2 = f' {r1norm:10.3e} {r2norm:10.3e}'
        str3 = f'  {test1:8.1e} {test2:8.1e}'
        print(str1, str2, str3)

    # Main iteration loop.
    while itn < iter_lim:
        itn = itn + 1
        # Perform the next step of the bidiagonalization to obtain the
        # next  beta, u, alfa, v. These satisfy the relations
        #     beta*u  =  a@v   -  alfa*u,
        #     alfa*v  =  A'@u  -  beta*v.
        u = A.matvec(v) - alfa * u
        beta = np.linalg.norm(u)

        if beta > 0:
            u = (1/beta) * u
            # anorm accumulates the Frobenius-norm estimate of [[A]; [damp*I]].
            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + dampsq)
            v = A.rmatvec(u) - beta * v
            alfa = np.linalg.norm(v)
            if alfa > 0:
                v = (1 / alfa) * v

        # Use a plane rotation to eliminate the damping parameter.
        # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
        if damp > 0:
            rhobar1 = sqrt(rhobar**2 + dampsq)
            cs1 = rhobar / rhobar1
            sn1 = damp / rhobar1
            psi = sn1 * phibar
            phibar = cs1 * phibar
        else:
            # cs1 = 1 and sn1 = 0
            rhobar1 = rhobar
            psi = 0.

        # Use a plane rotation to eliminate the subdiagonal element (beta)
        # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
        cs, sn, rho = _sym_ortho(rhobar1, beta)

        theta = sn * alfa
        rhobar = -cs * alfa
        phi = cs * phibar
        phibar = sn * phibar
        tau = sn * phi

        # Update x and w.
        t1 = phi / rho
        t2 = -theta / rho
        dk = (1 / rho) * w

        x = x + t1 * w
        w = v + t2 * w
        ddnorm = ddnorm + np.linalg.norm(dk)**2

        if calc_var:
            var = var + dk**2

        # Use a plane rotation on the right to eliminate the
        # super-diagonal element (theta) of the upper-bidiagonal matrix.
        # Then use the result to estimate norm(x).
        delta = sn2 * rho
        gambar = -cs2 * rho
        rhs = phi - delta * z
        zbar = rhs / gambar
        xnorm = sqrt(xxnorm + zbar**2)
        gamma = sqrt(gambar**2 + theta**2)
        cs2 = gambar / gamma
        sn2 = theta / gamma
        z = rhs / gamma
        xxnorm = xxnorm + z**2

        # Test for convergence.
        # First, estimate the condition of the matrix  Abar,
        # and the norms of  rbar  and  Abar'rbar.
        acond = anorm * sqrt(ddnorm)
        res1 = phibar**2
        res2 = res2 + psi**2
        rnorm = sqrt(res1 + res2)
        arnorm = alfa * abs(tau)

        # Distinguish between
        #    r1norm = ||b - Ax|| and
        #    r2norm = rnorm in current code
        #           = sqrt(r1norm^2 + damp^2*||x - x0||^2).
        # Estimate r1norm from
        #    r1norm = sqrt(r2norm^2 - damp^2*||x - x0||^2).
        # Although there is cancellation, it might be accurate enough.
        if damp > 0:
            r1sq = rnorm**2 - dampsq * xxnorm
            r1norm = sqrt(abs(r1sq))
            # A negative sign flags that cancellation made r1sq negative.
            if r1sq < 0:
                r1norm = -r1norm
        else:
            r1norm = rnorm
        r2norm = rnorm

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.
        test1 = rnorm / bnorm
        test2 = arnorm / (anorm * rnorm + eps)
        test3 = 1 / (acond + eps)
        t1 = test1 / (1 + anorm * xnorm / bnorm)
        rtol = btol + atol * anorm * xnorm / bnorm

        # The following tests guard against extremely small values of
        # atol, btol  or  ctol.  (The user may have set any or all of
        # the parameters  atol, btol, conlim  to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps,  btol = eps,  conlim = 1/eps.
        # NOTE: later assignments deliberately overwrite earlier ones, so the
        # user-tolerance tests (istop 1-3) take precedence over the
        # machine-precision tests (istop 4-7) when several fire at once.
        if itn >= iter_lim:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        if show:
            # See if it is time to print something.
            prnt = False
            if n <= 40:
                prnt = True
            if itn <= 10:
                prnt = True
            if itn >= iter_lim-10:
                prnt = True
            # if itn%10 == 0: prnt = True
            if test3 <= 2*ctol:
                prnt = True
            if test2 <= 10*atol:
                prnt = True
            if test1 <= 10*rtol:
                prnt = True
            if istop != 0:
                prnt = True

            if prnt:
                str1 = f'{itn:6g} {x[0]:12.5e}'
                str2 = f' {r1norm:10.3e} {r2norm:10.3e}'
                str3 = f'  {test1:8.1e} {test2:8.1e}'
                str4 = f' {anorm:8.1e} {acond:8.1e}'
                print(str1, str2, str3, str4)

        if istop != 0:
            break

    # End of iteration loop.
    # Print the stopping condition.
    if show:
        print(' ')
        print('LSQR finished')
        print(msg[istop])
        print(' ')
        str1 = f'istop ={istop:8g}   r1norm ={r1norm:8.1e}'
        str2 = f'anorm ={anorm:8.1e}   arnorm ={arnorm:8.1e}'
        str3 = f'itn   ={itn:8g}   r2norm ={r2norm:8.1e}'
        str4 = f'acond ={acond:8.1e}   xnorm  ={xnorm:8.1e}'
        print(str1 + '   ' + str2)
        print(str3 + '   ' + str4)
        print(' ')

    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
infer_4_47_1/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (191 Bytes). View file