diff --git a/janus/lib/python3.10/site-packages/distutils-precedence.pth b/janus/lib/python3.10/site-packages/distutils-precedence.pth new file mode 100644 index 0000000000000000000000000000000000000000..c659194195f07bd6f19b5522515551309af14a3d --- /dev/null +++ b/janus/lib/python3.10/site-packages/distutils-precedence.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2638ce9e2500e572a5e0de7faed6661eb569d1b696fcba07b0dd223da5f5d224 +size 151 diff --git a/janus/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4de12ea2cda37f4e66304bda613acb189472f7b2 Binary files /dev/null and b/janus/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc b/janus/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..041fcb94772018ada33d242e68fb0bd1320ca849 Binary files /dev/null and b/janus/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc b/janus/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf10960b6317be83cd24ede6332ae91d54a0ba0e Binary files /dev/null and b/janus/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc b/janus/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32ef396da177be1686993a4eceb03ca57e7cd394 
Binary files /dev/null and b/janus/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/einops/experimental/indexing.py b/janus/lib/python3.10/site-packages/einops/experimental/indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..8297d3e37e3e65fabfbe24c1d9cb1fd9e32c80bd --- /dev/null +++ b/janus/lib/python3.10/site-packages/einops/experimental/indexing.py @@ -0,0 +1,398 @@ +""" + +Indexing one array with the other(s). + +Concept for discussion. + +Notation targets hard cases, not simple ones, like indexing of 1d-array with another 1d-array +(notation supports that, but you can't simplify arr[ind], and there is no reason to) + +Examples + +1. query for every token in sequence a token in the image. Images and sequences are paired + einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, [h_indices_bt, w_indices_bt]) + + this is equivalent, so you can pass indexers idependently or together + einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, np.asarray([h_indices_bt, w_indices_bt])) + + after some thinking I decided that having first axis for indexing variable is not too restrictive, + but should simplify mapping of such cases. + For this reason [...] part should always go first in indexer. + + This makes the largest difference with einindex https://github.com/malmaud/einindex, + which has almost identical grammar, but puts special dimension last, while we put it first. + This trick allows naturally decomposing multiindex into individual dimensions or visa versa. + + +2. query for every token in the video the most suitable word in a (matching) sentence + einindex('b t h w <- seq b, [seq] t b h w', arr_tbc, [t_indices_bhw]) + + note, that only one indexer is used, but still it has to be enclosed in the list. + That's a price for being generic. Alternatively leading singleton dimension can be added. + + +3. 
(not supported now, future planning) + for every timeframe in a video, find the token with the highest norm (across h and w), and compose a new stack of them + indices_2bt = argmax(x_bthwc.norm(dim=-1), 'b t h w -> [h, w] b t') + selected_embeddings_btc = einindex('b t c <- b t h w c, [h, w] b t', x_bthwc, indices_2bt) + + while currently question is around 'how do we index', + it is important to pre-align that with a question 'what are natural ways to get indices'. + Most common are min/max. less common options: topk (works here), random sampling. + + + +Some important properties of this notation: +- support for multiple indexers, including using a single tensor to keep multiple indexers +- 'batch' indexing, when some axes of indexer and array should be matched +- universal (one-indexing-to-rule-them-all) +- extensible for (named) ellipses, including variadic number of indexers +- extensible for einops-style compositions and decompositions +- extensible for outer indexing when indexers are not aligned + +Current implementation based on python array api and uses loops, +because no appropriate indexing available in the standard. + +""" + +from typing import List, Union, TypeVar, Tuple + +from einops import EinopsError + +T = TypeVar("T") + + +class CompositionDecomposition: + def __init__( + self, + decomposed_shape: List[str], + composed_shape: List[List[str]], + ): + flat_shape = [] + for x in composed_shape: + flat_shape.extend(x) + + self.compose_transposition: Tuple[int, ...] = tuple([decomposed_shape.index(x) for x in flat_shape]) + self.decompose_transposition: Tuple[int, ...] 
= tuple([flat_shape.index(x) for x in decomposed_shape]) + self.composed_shape = composed_shape + self.decomposed_shape = decomposed_shape + + def decompose(self, x, known_axes_lengths: dict[str, int]): + xp = x.__array_namespace__() + shape = x.shape + + flat_shape = [] + + for i, axis_group in enumerate(self.composed_shape): + unknown_axis_name = None + known_sizes_prod = 1 + for axis_name in axis_group: + if axis_name in known_axes_lengths: + known_sizes_prod *= known_axes_lengths[axis_name] + else: + if unknown_axis_name is None: + unknown_axis_name = axis_name + else: + raise EinopsError("Can't infer the size") + + if unknown_axis_name is None: + assert shape[i] == known_sizes_prod + else: + known_axes_lengths[unknown_axis_name] = shape[i] // known_sizes_prod + + for axis in axis_group: + flat_shape.append(known_axes_lengths[axis]) + + x = xp.reshape(x, flat_shape) + return xp.permute_dims(x, self.decompose_transposition) + + def compose(self, x, known_axes_lengths: dict[str, int]): + xp = x.__array_namespace__() + + for axis_len, axis_name in zip(x.shape, self.decomposed_shape): + if axis_name in known_axes_lengths: + assert known_axes_lengths[axis_name] == axis_len + else: + known_axes_lengths[axis_name] = axis_len + + x = xp.permute_dims(x, self.compose_transposition) + new_shape = [] + for axis_group in self.composed_shape: + composed_axis_size = 1 + for axis_name in axis_group: + composed_axis_size *= known_axes_lengths[axis_name] + new_shape.append(composed_axis_size) + + return xp.reshape(x, tuple(new_shape)) + + +def arange_at_position(xp, n_axes, axis, axis_len, device=None): + x = xp.arange(axis_len, dtype=xp.int64, device=device) + shape = [1] * n_axes + shape[axis] = axis_len + x = xp.reshape(x, shape) + return x + + +class IndexingFormula: + def __init__(self, pattern: str): + """ + :param pattern: example 'b t c <- b hsel wsel c, [hsel, wsel] b t' + """ + self.pattern = pattern + left, right = pattern.split("<-") + arg_split = right.index(",") + 
arr_pattern, ind_pattern = right[:arg_split], right[arg_split + 1 :] + ind_pattern = ind_pattern.strip() + # print( + # arr_pattern, '\n', + # ind_pattern, + # ) + assert ind_pattern.startswith("["), "composition axis should go first in indexer (second argument) [h w] i j k" + composition_start = ind_pattern.index("[") + composition_end = ind_pattern.index("]") + composition = ind_pattern[composition_start + 1 : composition_end] + ind_other_axes = ind_pattern[composition_end + 1 :] + + self.result_axes_names = left.split() + self.array_axes_names = arr_pattern.split() + self.indexing_axes_names = [x.strip() for x in composition.split(",")] + self.indexer_other_axes_names = ind_other_axes.split() + + for group_name, group in [ + ("result", self.result_axes_names), + ("array", self.array_axes_names), + ("indexer", self.indexing_axes_names + self.indexer_other_axes_names), + ]: + if len(set(group)) != len(group): + # need more verbosity, which axis, raise + raise EinopsError(f"{group_name} pattern ({group}) contains a duplicated axis") + + axis_groups = [ + self.result_axes_names, + self.array_axes_names, + self.indexing_axes_names, + self.indexer_other_axes_names, + ] + + all_axes = set() + for group in axis_groups: + all_axes.update(group) + + self.indexer_axes = [] + self.batch_axes = [] + self.result_and_index_axes = [] + self.result_and_array_axes = [] + + for axis in all_axes: + presence = tuple(axis in g for g in axis_groups) + # want match-case here. 
sweet dreams + if presence == (False, True, True, False): + self.indexer_axes.append(axis) + elif presence[2]: + raise EinopsError(f"Wrong usage of indexer variable {axis}") + elif presence == (True, True, False, True): + self.batch_axes.append(axis) + elif presence == (True, False, False, True): + self.result_and_index_axes.append(axis) + elif presence == (True, True, False, False): + self.result_and_array_axes.append(axis) + else: + # TODO better categorization of wrong usage patterns + raise EinopsError(f"{axis} is used incorrectly in {pattern}") + + assert set(self.indexer_axes) == set(self.indexing_axes_names) + # order of these variables matters, since we can't lose mapping here + self.indexer_axes = self.indexing_axes_names + + self.array_composition = CompositionDecomposition( + decomposed_shape=self.array_axes_names, + composed_shape=[self.batch_axes + self.indexer_axes, self.result_and_array_axes], + ) + + self.index_composition = CompositionDecomposition( + decomposed_shape=self.indexer_other_axes_names, + # single axis after composition + composed_shape=[self.batch_axes + self.result_and_index_axes], + ) + + self.result_composition = CompositionDecomposition( + decomposed_shape=self.result_axes_names, + composed_shape=[self.batch_axes + self.result_and_index_axes, self.result_and_array_axes], + ) + + def apply_to_array_api(self, arr: T, ind: Union[T, List[T]]): + known_axes_sizes: dict[str, int] = {} + xp = arr.__array_namespace__() + + if not isinstance(ind, list): + ind = [ind[i, ...] for i in range(ind.shape[0])] + + for indexer in ind: + assert len(indexer.shape) == len(self.indexer_other_axes_names) + + # step 1. transpose, reshapes of arr; learn its dimensions + arr_2d = self.array_composition.compose(arr, known_axes_sizes) + + # step 2. 
compute shifts and create an actual indexing array + shift = 1 + full_index = xp.zeros([1] * len(ind[0].shape), dtype=xp.int64, device=arr.device) + + # original order: [*batch-like axes, *indexing_axes,] + # now we need to traverse them in the opposite direction + + for axis_name, indexer in list(zip(self.indexing_axes_names, ind))[::-1]: + full_index = full_index + shift * (indexer % known_axes_sizes[axis_name]) + shift *= known_axes_sizes[axis_name] + + for axis_name in self.batch_axes[::-1]: + axis_id = self.indexer_other_axes_names.index(axis_name) + full_index = ( + full_index + + arange_at_position( + xp, + len(self.indexer_other_axes_names), + axis=axis_id, + axis_len=known_axes_sizes[axis_name], + device=arr.device, + ) + * shift + ) + shift *= known_axes_sizes[axis_name] + + assert shift == arr_2d.shape[0] + + # step 3. Flatten index + full_index = self.index_composition.compose(full_index, known_axes_sizes) + + # step 4. indexing + # python array api lacks any integer indexing, so... I use loops. + # did you know that there is conceptual programming ... just like art? + # result_2d = arr_2d[full_index] + result_2d = xp.stack([arr_2d[full_index[i], :] for i in range(full_index.shape[0])]) + + # step 5. doing resulting + result = self.result_composition.decompose(result_2d, known_axes_sizes) + return result + + +def einindex(pattern: str, arr: T, /, ind: Union[T, List[T]]): + """ + Demonstrates how einindex should work. + Supports data-api compliant arrays. 
+ """ + formula = IndexingFormula(pattern) + return formula.apply_to_array_api(arr, ind) + + +def test_composition_and_decomposition(): + import numpy.array_api as np + + x = np.arange(2 * 3 * 5 * 7) + x = np.reshape(x, (2, 3, 5, 7)) + comp = CompositionDecomposition( + decomposed_shape=["a", "b", "c", "d"], + composed_shape=[["a", "b"], ["c", "d"]], + ) + assert comp.compose(x, known_axes_lengths={}).shape == (2 * 3, 5 * 7) + + y = CompositionDecomposition( + decomposed_shape=["a", "b", "c", "d"], + composed_shape=[["a", "b"], [], ["c", "d"]], + ).compose(x, {}) + assert y.shape == (2 * 3, 1, 5 * 7) + assert np.all(np.reshape(x, (-1,)) == np.reshape(y, (-1,))) + + comp = CompositionDecomposition( + decomposed_shape=["a", "b", "e", "c", "d"], + composed_shape=[["e", "c"], ["b"], ["a", "d"]], + ) + x = np.arange(2 * 3 * 5 * 7 * 3) + x = np.reshape(x, (2, 3, 5, 7, 3)) + + axes = {} + y = comp.compose(x, axes) + x2 = comp.decompose(y, axes) + assert np.all(x == x2) + + +def test_simple_indexing(): + import numpy.array_api as np + + # simple 2d test + arr = np.reshape(np.arange(5 * 7), (5, 7)) + ind = np.arange(7) % 5 + x = einindex("j <- i j, [i] j", arr, [ind]) + for j, i in enumerate(ind): + assert arr[i, j] == x[j] + + y = einindex("j <- j i, [i] j", np.permute_dims(arr, (1, 0)), [ind]) + for j, i in enumerate(ind): + assert arr[i, j] == y[j] + + +def test_multidimensional_indexing(): + import numpy.array_api as np + + embedding_bhwc = ( + +arange_at_position(np, 4, 0, 2) * 1000 + + arange_at_position(np, 4, 1, 3) * 100 + + arange_at_position(np, 4, 2, 5) * 10 + + arange_at_position(np, 4, 3, 7) * 1 + ) + + hindices_bt = np.reshape(np.arange(6), (2, 3)) % 3 + windices_bt = np.reshape(np.arange(6), (2, 3)) % 5 + + # imagine that you have pairs of image <> sentence + # your goal is to get most suitable token from image for every token in sentence + # thus for every token in sentence you compute best k and v + + result = einindex("c t b <- b h w c, [h, w] b t", 
embedding_bhwc, [hindices_bt, windices_bt]) + # example of using a single array for indexing multiple axes + hw_indices_bt = np.stack([hindices_bt, windices_bt]) + result2 = einindex("c t b <- b h w c, [h, w] b t", embedding_bhwc, hw_indices_bt) + assert np.all(result == result2) + + # check vs manual element computation + result_manual = result * 0 + for b in range(2): + for t in range(3): + for c in range(7): + h = hindices_bt[b, t] + w = windices_bt[b, t] + result_manual[c, t, b] = embedding_bhwc[b, h, w, c] + + assert np.all(result == result_manual) + + +def test_reverse_indexing(): + import numpy.array_api as np + + C, T, B = 2, 3, 5 + # G = GPU, batch-like varaible + G = 4 + H = 7 + W = 9 + + arr_gtbc = ( + +arange_at_position(np, 4, 0, G) * 1000 + + arange_at_position(np, 4, 1, T) * 100 + + arange_at_position(np, 4, 2, B) * 10 + + arange_at_position(np, 4, 3, C) * 1 + ) + + t_indices_gbhw = np.reshape(np.arange(G * B * H * W), (G, B, H, W)) % T + + result = einindex("g b c h w <- g t b c, [t] g b h w", arr_gtbc, [t_indices_gbhw]) + + result_manual = result * 0 + for g in range(G): + for b in range(B): + for c in range(C): + for h in range(H): + for w in range(W): + t = t_indices_gbhw[g, b, h, w] + result_manual[g, b, c, h, w] = arr_gtbc[g, t, b, c] + + assert np.all(result == result_manual) diff --git a/janus/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc b/janus/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d65f670ad98055d1d3224ad0a491ea8288e05daa Binary files /dev/null and b/janus/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc b/janus/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..d1950a1c271a09970bb7d796f5cdb196e0b7b716 Binary files /dev/null and b/janus/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc b/janus/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..456f40d24b90204cba8b24efef063390eaa9eeb8 Binary files /dev/null and b/janus/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc b/janus/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bbd811530a20eda03d3226f6fec51968903d7b58 Binary files /dev/null and b/janus/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/einops/layers/oneflow.py b/janus/lib/python3.10/site-packages/einops/layers/oneflow.py new file mode 100644 index 0000000000000000000000000000000000000000..c3486626814d275441284f1aca3274157b302ff8 --- /dev/null +++ b/janus/lib/python3.10/site-packages/einops/layers/oneflow.py @@ -0,0 +1,54 @@ +from typing import Optional, Dict, cast + +import oneflow as flow + +from . 
import RearrangeMixin, ReduceMixin +from ._einmix import _EinmixMixin + +__author__ = "Tianhe Ren & Depeng Liang" + + +class Rearrange(RearrangeMixin, flow.nn.Module): + def forward(self, input): + return self._apply_recipe(input) + + +class Reduce(ReduceMixin, flow.nn.Module): + def forward(self, input): + return self._apply_recipe(input) + + +class EinMix(_EinmixMixin, flow.nn.Module): + def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound): + self.weight = flow.nn.Parameter( + flow.zeros(weight_shape).uniform_(-weight_bound, weight_bound), requires_grad=True + ) + if bias_shape is not None: + self.bias = flow.nn.Parameter(flow.zeros(bias_shape).uniform_(-bias_bound, bias_bound), requires_grad=True) + else: + self.bias = None + + def _create_rearrange_layers( + self, + pre_reshape_pattern: Optional[str], + pre_reshape_lengths: Optional[Dict], + post_reshape_pattern: Optional[str], + post_reshape_lengths: Optional[Dict], + ): + self.pre_rearrange = None + if pre_reshape_pattern is not None: + self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths)) + + self.post_rearrange = None + if post_reshape_pattern is not None: + self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths)) + + def forward(self, input): + if self.pre_rearrange is not None: + input = self.pre_rearrange(input) + result = flow.einsum(self.einsum_pattern, input, self.weight) + if self.bias is not None: + result += self.bias + if self.post_rearrange is not None: + result = self.post_rearrange(result) + return result diff --git a/janus/lib/python3.10/site-packages/einops/parsing.py b/janus/lib/python3.10/site-packages/einops/parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..a33fe49e765c582389ca87d3fad59e5e21b4e101 --- /dev/null +++ b/janus/lib/python3.10/site-packages/einops/parsing.py @@ -0,0 +1,152 @@ +from einops import EinopsError +import keyword +import warnings +from typing 
import List, Optional, Set, Tuple, Union + +_ellipsis: str = "…" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated + + +class AnonymousAxis(object): + """Important thing: all instances of this class are not equal to each other""" + + def __init__(self, value: str): + self.value = int(value) + if self.value <= 1: + if self.value == 1: + raise EinopsError("No need to create anonymous axis of length 1. Report this as an issue") + else: + raise EinopsError("Anonymous axis should have positive length, not {}".format(self.value)) + + def __repr__(self): + return "{}-axis".format(str(self.value)) + + +class ParsedExpression: + """ + non-mutable structure that contains information about one side of expression (e.g. 'b c (h w)') + and keeps some information important for downstream + """ + + def __init__(self, expression: str, *, allow_underscore: bool = False, allow_duplicates: bool = False): + self.has_ellipsis: bool = False + self.has_ellipsis_parenthesized: Optional[bool] = None + self.identifiers: Set[str] = set() + # that's axes like 2, 3, 4 or 5. Axes with size 1 are exceptional and replaced with empty composition + self.has_non_unitary_anonymous_axes: bool = False + # composition keeps structure of composite axes, see how different corner cases are handled in tests + self.composition: List[Union[List[str], str]] = [] + if "." in expression: + if "..." 
not in expression: + raise EinopsError("Expression may contain dots only inside ellipsis (...)") + if str.count(expression, "...") != 1 or str.count(expression, ".") != 3: + raise EinopsError( + "Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor " + ) + expression = expression.replace("...", _ellipsis) + self.has_ellipsis = True + + bracket_group: Optional[List[str]] = None + + def add_axis_name(x): + if x in self.identifiers: + if not (allow_underscore and x == "_") and not allow_duplicates: + raise EinopsError('Indexing expression contains duplicate dimension "{}"'.format(x)) + if x == _ellipsis: + self.identifiers.add(_ellipsis) + if bracket_group is None: + self.composition.append(_ellipsis) + self.has_ellipsis_parenthesized = False + else: + bracket_group.append(_ellipsis) + self.has_ellipsis_parenthesized = True + else: + is_number = str.isdecimal(x) + if is_number and int(x) == 1: + # handling the case of anonymous axis of length 1 + if bracket_group is None: + self.composition.append([]) + else: + pass # no need to think about 1s inside parenthesis + return + is_axis_name, reason = self.check_axis_name_return_reason(x, allow_underscore=allow_underscore) + if not (is_number or is_axis_name): + raise EinopsError("Invalid axis identifier: {}\n{}".format(x, reason)) + if is_number: + x = AnonymousAxis(x) + self.identifiers.add(x) + if is_number: + self.has_non_unitary_anonymous_axes = True + if bracket_group is None: + self.composition.append([x]) + else: + bracket_group.append(x) + + current_identifier = None + for char in expression: + if char in "() ": + if current_identifier is not None: + add_axis_name(current_identifier) + current_identifier = None + if char == "(": + if bracket_group is not None: + raise EinopsError("Axis composition is one-level (brackets inside brackets not allowed)") + bracket_group = [] + elif char == ")": + if bracket_group is None: + raise EinopsError("Brackets are not balanced") + 
self.composition.append(bracket_group) + bracket_group = None + elif str.isalnum(char) or char in ["_", _ellipsis]: + if current_identifier is None: + current_identifier = char + else: + current_identifier += char + else: + raise EinopsError("Unknown character '{}'".format(char)) + + if bracket_group is not None: + raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression)) + if current_identifier is not None: + add_axis_name(current_identifier) + + def flat_axes_order(self) -> List: + result = [] + for composed_axis in self.composition: + assert isinstance(composed_axis, list), "does not work with ellipsis" + for axis in composed_axis: + result.append(axis) + return result + + def has_composed_axes(self) -> bool: + # this will ignore 1 inside brackets + for axes in self.composition: + if isinstance(axes, list) and len(axes) > 1: + return True + return False + + @staticmethod + def check_axis_name_return_reason(name: str, allow_underscore: bool = False) -> Tuple[bool, str]: + if not str.isidentifier(name): + return False, "not a valid python identifier" + elif name[0] == "_" or name[-1] == "_": + if name == "_" and allow_underscore: + return True, "" + return False, "axis name should should not start or end with underscore" + else: + if keyword.iskeyword(name): + warnings.warn("It is discouraged to use axes names that are keywords: {}".format(name), RuntimeWarning) + if name in ["axis"]: + warnings.warn( + "It is discouraged to use 'axis' as an axis name " "and will raise an error in future", + FutureWarning, + ) + return True, "" + + @staticmethod + def check_axis_name(name: str) -> bool: + """ + Valid axes names are python identifiers except keywords, + and additionally should not start or end with underscore + """ + is_valid, _reason = ParsedExpression.check_axis_name_return_reason(name) + return is_valid diff --git a/janus/lib/python3.10/site-packages/timm/layers/__init__.py b/janus/lib/python3.10/site-packages/timm/layers/__init__.py new 
file mode 100644 index 0000000000000000000000000000000000000000..c71ff30c82855eccc85a6591695723c71870c0a8 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/__init__.py @@ -0,0 +1,62 @@ +from .activations import * +from .adaptive_avgmax_pool import \ + adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d +from .attention2d import MultiQueryAttention2d, Attention2d, MultiQueryAttentionV2 +from .attention_pool import AttentionPoolLatent +from .attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding +from .blur_pool import BlurPool2d, create_aa +from .classifier import create_classifier, ClassifierHead, NormMlpClassifierHead, ClNormMlpClassifierHead +from .cond_conv2d import CondConv2d, get_condconv_initializer +from .config import is_exportable, is_scriptable, is_no_jit, use_fused_attn, \ + set_exportable, set_scriptable, set_no_jit, set_layer_config, set_fused_attn, \ + set_reentrant_ckpt, use_reentrant_ckpt +from .conv2d_same import Conv2dSame, conv2d_same +from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct +from .create_act import create_act_layer, get_act_layer, get_act_fn +from .create_attn import get_attn, create_attn +from .create_conv2d import create_conv2d +from .create_norm import get_norm_layer, create_norm_layer +from .create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer +from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path +from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn +from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\ + EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a +from .fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm +from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d +from .format import Format, get_channel_dim, get_spatial_dim, nchw_to, nhwc_to +from .gather_excite 
import GatherExcite +from .global_context import GlobalContext +from .grid import ndgrid, meshgrid +from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple +from .hybrid_embed import HybridEmbed, HybridEmbedWithSize +from .inplace_abn import InplaceAbn +from .layer_scale import LayerScale, LayerScale2d +from .linear import Linear +from .mixed_conv2d import MixedConv2d +from .mlp import Mlp, GluMlp, GatedMlp, SwiGLU, SwiGLUPacked, ConvMlp, GlobalResponseNormMlp +from .non_local_attn import NonLocalAttn, BatNonLocalAttn +from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm, RmsNorm2d, SimpleNorm, SimpleNorm2d +from .norm_act import BatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d,\ + SyncBatchNormAct, convert_sync_batchnorm, FrozenBatchNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d +from .padding import get_padding, get_same_padding, pad_same +from .patch_dropout import PatchDropout +from .patch_embed import PatchEmbed, PatchEmbedWithSize, resample_patch_embed +from .pool2d_same import AvgPool2dSame, create_pool2d +from .pos_embed import resample_abs_pos_embed, resample_abs_pos_embed_nhwc +from .pos_embed_rel import RelPosMlp, RelPosBias, RelPosBiasTf, gen_relative_position_index, gen_relative_log_coords, \ + resize_rel_pos_bias_table, resize_rel_pos_bias_table_simple, resize_rel_pos_bias_table_levit +from .pos_embed_sincos import pixel_freq_bands, freq_bands, build_sincos2d_pos_embed, build_fourier_pos_embed, \ + build_rotary_pos_embed, apply_rot_embed, apply_rot_embed_cat, apply_rot_embed_list, apply_keep_indices_nlc, \ + FourierEmbed, RotaryEmbedding, RotaryEmbeddingCat +from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite +from .selective_kernel import SelectiveKernel +from .separable_conv import SeparableConv2d, SeparableConvNormAct +from .space_to_depth import SpaceToDepth, DepthToSpace +from .split_attn import SplitAttn +from 
.split_batchnorm import SplitBatchNorm2d, convert_splitbn_model +from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame +from .test_time_pool import TestTimePoolHead, apply_test_time_pool +from .trace_utils import _assert, _float_to_int +from .typing import LayerType, PadType +from .weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_, \ + init_weight_jax, init_weight_vit diff --git a/janus/lib/python3.10/site-packages/timm/layers/attention2d.py b/janus/lib/python3.10/site-packages/timm/layers/attention2d.py new file mode 100644 index 0000000000000000000000000000000000000000..6a542828bcfa3657cfab31440719b8d893763892 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/attention2d.py @@ -0,0 +1,351 @@ +from typing import List, Optional, Type, Union + +import torch +from torch import nn as nn +from torch.nn import functional as F + +from .config import use_fused_attn +from .create_conv2d import create_conv2d +from .helpers import to_2tuple +from .pool2d_same import create_pool2d + + +class MultiQueryAttentionV2(nn.Module): + """Multi Query Attention. + + Fast Transformer Decoding: One Write-Head is All You Need + https://arxiv.org/pdf/1911.02150.pdf + + This is an acceletor optimized version - removing multiple unnecessary + tensor transpose by re-arranging indices according to the following rules: 1) + contracted indices are at the end, 2) other indices have the same order in the + input and output tensores. + + Compared to V1, this gives 3x speed up. 
+ """ + + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + num_heads: int = 8, + key_dim: int = 64, + value_dim: int = 64, + attn_drop: float = 0., + proj_drop: float = 0., + ): + """Initializer.""" + super().__init__() + dim_out = dim_out or dim + self.num_heads = num_heads + self.key_dim = key_dim + self.value_dim = value_dim + self.scale = key_dim ** -0.5 + + self.query_proj = nn.Parameter(torch.randn([self.num_heads, self.key_dim, dim])) + self.key_proj = nn.Parameter(torch.randn([dim, self.key_dim])) + self.value_proj = nn.Parameter(torch.randn([dim, self.value_dim])) + self.attn_drop = nn.Dropout(attn_drop) + self.out_proj = nn.Parameter(torch.randn([dim_out, self.num_heads, self.value_dim])) + self.proj_drop = nn.Dropout(proj_drop) + + def _reshape_input(self, t): + """Reshapes a tensor to three dimensions, keeping the first and last.""" + s = t.shape + # Propagate the shape statically where possible. + #num = t.shape[1:-1].numel() + #return t.reshape(s[0], num, s[-1]) + return t.reshape(s[0], s[1], -1).transpose(1, 2) + + def forward(self, x, m: Optional[torch.Tensor] = None): + """Run layer computation.""" + b, _, h, w = x.shape + m = m if m is not None else x + + reshaped_x = self._reshape_input(x) + reshaped_m = self._reshape_input(m) + + q = torch.einsum('bnd,hkd->bnhk', reshaped_x, self.query_proj) + k = torch.einsum('bmd,dk->bmk', reshaped_m, self.key_proj) + + attn = torch.einsum('bnhk,bmk->bnhm', q, k) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + v = torch.einsum('bmd,dv->bmv', reshaped_m, self.value_proj) + o = torch.einsum('bnhm,bmv->bnhv', attn, v) + result = torch.einsum('bnhv,dhv->bdn', o, self.out_proj) + result = self.proj_drop(result) + return result.reshape(b, -1, h, w) + + +class MultiQueryAttention2d(nn.Module): + """Multi Query Attention with spatial downsampling. + + 3 parameters are introduced for the spatial downsampling: + 1. kv_stride: downsampling factor on Key and Values only. 
+ 2. query_strides: horizontal & vertical strides on Query only. + + This is an optimized version. + 1. Projections in Attention is explicit written out as 1x1 Conv2D. + 2. Additional reshapes are introduced to bring a up to 3x speed up. + """ + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + num_heads: int = 8, + key_dim: Optional[int] = None, + value_dim: Optional[int] = None, + query_strides: int = 1, + kv_stride: int = 1, + dw_kernel_size: int = 3, + dilation: int = 1, + padding: Union[str, int, List[int]] = '', + attn_drop: float = 0., + proj_drop: float = 0., + norm_layer: Type[nn.Module] = nn.BatchNorm2d, + use_bias: bool = False, + ): + """Initializer. + + Args: + num_heads: Number of attention heads. + key_dim: Size of the attention key dimension. + value_dim: Size of the attention value dimension. + query_strides: Vertical stride size for query only. + kv_stride: Key and value stride size. + dw_kernel_size: Spatial dimension of the depthwise kernel. 
+ """ + super().__init__() + dim_out = dim_out or dim + self.num_heads = num_heads + self.key_dim = key_dim or dim // num_heads + self.value_dim = value_dim or dim // num_heads + self.query_strides = to_2tuple(query_strides) + self.kv_stride = kv_stride + self.has_query_strides = any([s > 1 for s in self.query_strides]) + self.scale = self.key_dim ** -0.5 + self.fused_attn = use_fused_attn() + self.drop = attn_drop + + self.query = nn.Sequential() + if self.has_query_strides: + # FIXME dilation + if padding == 'same': + self.query.add_module('down_pool', create_pool2d( + 'avg', + kernel_size=self.query_strides, + padding='same', + )) + else: + # no pad if not 'same' as kern=stride=even + self.query.add_module('down_pool', nn.AvgPool2d(kernel_size=query_strides)) + self.query.add_module('norm', norm_layer(dim)) + self.query.add_module('proj', create_conv2d( + dim, + self.num_heads * self.key_dim, + kernel_size=1, + bias=use_bias, + )) + + self.key = nn.Sequential() + if kv_stride > 1: + self.key.add_module('down_conv', create_conv2d( + dim, + dim, + kernel_size=dw_kernel_size, + stride=kv_stride, + dilation=dilation, + padding=padding, + depthwise=True, + )) + self.key.add_module('norm', norm_layer(dim)) + self.key.add_module('proj', create_conv2d( + dim, + self.key_dim, + kernel_size=1, + padding=padding, + bias=use_bias, + )) + + self.value = nn.Sequential() + if kv_stride > 1: + self.value.add_module('down_conv', create_conv2d( + dim, + dim, + kernel_size=dw_kernel_size, + stride=kv_stride, + dilation=dilation, + padding=padding, + depthwise=True, + )) + self.value.add_module('norm', norm_layer(dim)) + self.value.add_module('proj', create_conv2d( + dim, + self.value_dim, + kernel_size=1, + bias=use_bias, + )) + + self.attn_drop = nn.Dropout(attn_drop) + + self.output = nn.Sequential() + if self.has_query_strides: + self.output.add_module('upsample', nn.Upsample(scale_factor=self.query_strides, mode='bilinear', align_corners=False)) + 
self.output.add_module('proj', create_conv2d( + self.value_dim * self.num_heads, + dim_out, + kernel_size=1, + bias=use_bias, + )) + self.output.add_module('drop', nn.Dropout(proj_drop)) + + self.einsum = False + + def init_weights(self): + # using xavier appeared to improve stability for mobilenetv4 hybrid w/ this layer + nn.init.xavier_uniform_(self.query.proj.weight) + nn.init.xavier_uniform_(self.key.proj.weight) + nn.init.xavier_uniform_(self.value.proj.weight) + if self.kv_stride > 1: + nn.init.xavier_uniform_(self.key.down_conv.weight) + nn.init.xavier_uniform_(self.value.down_conv.weight) + nn.init.xavier_uniform_(self.output.proj.weight) + + def _reshape_input(self, t: torch.Tensor): + """Reshapes a tensor to three dimensions, keeping the batch and channels.""" + s = t.shape + t = t.reshape(s[0], s[1], -1).transpose(1, 2) + if self.einsum: + return t + else: + return t.unsqueeze(1).contiguous() + + def _reshape_projected_query(self, t: torch.Tensor, num_heads: int, key_dim: int): + """Reshapes projected query: [b, n, n, h x k] -> [b, n x n, h, k].""" + s = t.shape + t = t.reshape(s[0], num_heads, key_dim, -1) + if self.einsum: + return t.permute(0, 3, 1, 2).contiguous() + else: + return t.transpose(-1, -2).contiguous() + + def _reshape_output(self, t: torch.Tensor, num_heads: int, h_px: int, w_px: int): + """Reshape output:[b, n x n x h, k] -> [b, n, n, hk].""" + s = t.shape + feat_dim = s[-1] * num_heads + if not self.einsum: + t = t.transpose(1, 2) + return t.reshape(s[0], h_px, w_px, feat_dim).permute(0, 3, 1, 2).contiguous() + + def forward(self, x, attn_mask: Optional[torch.Tensor] = None): + """Run layer computation.""" + B, C, H, W = s = x.shape + + q = self.query(x) + # desired q shape: [b, h, k, n x n] - [b, l, h, k] + q = self._reshape_projected_query(q, self.num_heads, self.key_dim) + + k = self.key(x) + # output shape of k: [b, k, p], p = m x m + k = self._reshape_input(k) + + v = self.value(x) + # output shape of v: [ b, p, k], p = m x m + v = 
self._reshape_input(v) + + # desired q shape: [b, n x n, h, k] + # desired k shape: [b, m x m, k] + # desired logits shape: [b, n x n, h, m x m] + if self.einsum: + attn = torch.einsum('blhk,bpk->blhp', q, k) * self.scale + if attn_mask is not None: + # NOTE: assumes mask is float and in correct shape + attn = attn + attn_mask + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + o = torch.einsum('blhp,bpk->blhk', attn, v) + else: + if self.fused_attn: + o = F.scaled_dot_product_attention( + q, k, v, + attn_mask=attn_mask, + dropout_p=self.attn_drop.p if self.training else 0. + ) + else: + q = q * self.scale + attn = q @ k.transpose(-1, -2) + if attn_mask is not None: + # NOTE: assumes mask is float and in correct shape + attn = attn + attn_mask + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + o = attn @ v + + # reshape o into [b, hk, n, n,] + o = self._reshape_output(o, self.num_heads, H // self.query_strides[0], W // self.query_strides[1]) + x = self.output(o) + return x + + +class Attention2d(nn.Module): + fused_attn: torch.jit.Final[bool] + + """ multi-head attention for 2D NCHW tensors""" + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + num_heads: int = 32, + bias: bool = True, + expand_first: bool = False, + head_first: bool = False, + attn_drop: float = 0., + proj_drop: float = 0. 
+ ): + super().__init__() + dim_out = dim_out or dim + dim_attn = dim_out if expand_first else dim + self.num_heads = num_heads + self.dim_head = dim_attn // num_heads + self.head_first = head_first + self.fused_attn = use_fused_attn() + + self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, attn_mask: Optional[torch.Tensor] = None): + B, C, H, W = x.shape + + if self.head_first: + q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2) + else: + q, k, v = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1) + + if self.fused_attn: + x = torch.nn.functional.scaled_dot_product_attention( + q.transpose(-1, -2).contiguous(), + k.transpose(-1, -2).contiguous(), + v.transpose(-1, -2).contiguous(), + attn_mask=attn_mask, + dropout_p=self.attn_drop.p if self.training else 0., + ).transpose(-1, -2).reshape(B, -1, H, W) + else: + q = q.transpose(-1, -2) + v = v.transpose(-1, -2) + attn = q @ k * q.size(-1) ** -0.5 + if attn_mask is not None: + # NOTE: assumes mask is float and in correct shape + attn = attn + attn_mask + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + x = (attn @ v).transpose(-1, -2).reshape(B, -1, H, W) + + x = self.proj(x) + x = self.proj_drop(x) + return x diff --git a/janus/lib/python3.10/site-packages/timm/layers/attention_pool.py b/janus/lib/python3.10/site-packages/timm/layers/attention_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..da5585b363a4254ddc53f495eb92417ed6ed9f71 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/attention_pool.py @@ -0,0 +1,105 @@ +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .config import use_fused_attn +from .mlp import Mlp +from .weight_init import trunc_normal_tf_ + + +class 
AttentionPoolLatent(nn.Module): + """ Attention pooling w/ latent query + """ + fused_attn: torch.jit.Final[bool] + + def __init__( + self, + in_features: int, + out_features: int = None, + embed_dim: int = None, + num_heads: int = 8, + feat_size: Optional[int] = None, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + qk_norm: bool = False, + latent_len: int = 1, + latent_dim: int = None, + pos_embed: str = '', + pool_type: str = 'token', + norm_layer: Optional[nn.Module] = None, + drop: float = 0.0, + ): + super().__init__() + embed_dim = embed_dim or in_features + out_features = out_features or in_features + assert embed_dim % num_heads == 0 + self.num_heads = num_heads + self.head_dim = embed_dim // num_heads + self.feat_size = feat_size + self.scale = self.head_dim ** -0.5 + self.pool = pool_type + self.fused_attn = use_fused_attn() + + if pos_embed == 'abs': + assert feat_size is not None + self.pos_embed = nn.Parameter(torch.zeros(feat_size, in_features)) + else: + self.pos_embed = None + + self.latent_dim = latent_dim or embed_dim + self.latent_len = latent_len + self.latent = nn.Parameter(torch.zeros(1, self.latent_len, embed_dim)) + + self.q = nn.Linear(embed_dim, embed_dim, bias=qkv_bias) + self.kv = nn.Linear(embed_dim, embed_dim * 2, bias=qkv_bias) + self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.proj = nn.Linear(embed_dim, embed_dim) + self.proj_drop = nn.Dropout(drop) + + self.norm = norm_layer(out_features) if norm_layer is not None else nn.Identity() + self.mlp = Mlp(embed_dim, int(embed_dim * mlp_ratio)) + + self.init_weights() + + def init_weights(self): + if self.pos_embed is not None: + trunc_normal_tf_(self.pos_embed, std=self.pos_embed.shape[1] ** -0.5) + trunc_normal_tf_(self.latent, std=self.latent_dim ** -0.5) + + def forward(self, x): + B, N, C = x.shape + + if self.pos_embed is not None: + # FIXME interpolate + x = x + 
self.pos_embed.unsqueeze(0).to(x.dtype) + + q_latent = self.latent.expand(B, -1, -1) + q = self.q(q_latent).reshape(B, self.latent_len, self.num_heads, self.head_dim).transpose(1, 2) + + kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + k, v = kv.unbind(0) + + q, k = self.q_norm(q), self.k_norm(k) + + if self.fused_attn: + x = F.scaled_dot_product_attention(q, k, v) + else: + q = q * self.scale + attn = q @ k.transpose(-2, -1) + attn = attn.softmax(dim=-1) + x = attn @ v + x = x.transpose(1, 2).reshape(B, self.latent_len, C) + x = self.proj(x) + x = self.proj_drop(x) + + x = x + self.mlp(self.norm(x)) + + # optional pool if latent seq_len > 1 and pooled output is desired + if self.pool == 'token': + x = x[:, 0] + elif self.pool == 'avg': + x = x.mean(1) + return x \ No newline at end of file diff --git a/janus/lib/python3.10/site-packages/timm/layers/bottleneck_attn.py b/janus/lib/python3.10/site-packages/timm/layers/bottleneck_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..c3db464e5ab4f2d3478293034e90a0939dadb628 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/bottleneck_attn.py @@ -0,0 +1,157 @@ +""" Bottleneck Self Attention (Bottleneck Transformers) + +Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + +@misc{2101.11605, +Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, +Title = {Bottleneck Transformers for Visual Recognition}, +Year = {2021}, +} + +Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + +This impl is a WIP but given that it is based on the ref gist likely not too far off. 
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .helpers import to_2tuple, make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, heads, height, width, dim) + rel_k: (2 * width - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, 2 * W -1) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, W - 1]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) + x = x_pad[:, :W, W - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + """ + def __init__(self, feat_size, dim_head, scale): + super().__init__() + self.height, self.width = to_2tuple(feat_size) + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) + self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(B, self.height, self.width, -1) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. 
+ q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, HW, HW) + return rel_logits + + +class BottleneckAttn(nn.Module): + """ Bottleneck Attention + Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. + * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + stride (int): output stride of the module, avg pool used if stride == 2 (default: 1). + num_heads (int): parallel attention heads (default: 4) + dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. 
(default: 1.0) + qkv_bias (bool): add bias to q, k, and v projections + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, + qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): + super().__init__() + assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + + self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) + + # NOTE I'm only supporting relative pos embedding for now + self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H == self.pos_embed.height, '') + _assert(W == self.pos_embed.width, '') + + x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W + + # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v + # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted. 
+ q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) + k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k + v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) + + if self.scale_pos_embed: + attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W + else: + attn = (q @ k) * self.scale + self.pos_embed(q) + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W + out = self.pool(out) + return out diff --git a/janus/lib/python3.10/site-packages/timm/layers/classifier.py b/janus/lib/python3.10/site-packages/timm/layers/classifier.py new file mode 100644 index 0000000000000000000000000000000000000000..5e425fe6c80783396f336d1bbe38a6370a7662f4 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/classifier.py @@ -0,0 +1,283 @@ +""" Classifier head and layer factory + +Hacked together by / Copyright 2020 Ross Wightman +""" +from collections import OrderedDict +from functools import partial +from typing import Optional, Union, Callable + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from .adaptive_avgmax_pool import SelectAdaptivePool2d +from .create_act import get_act_layer +from .create_norm import get_norm_layer + + +def _create_pool( + num_features: int, + num_classes: int, + pool_type: str = 'avg', + use_conv: bool = False, + input_fmt: Optional[str] = None, +): + flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling + if not pool_type: + flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling) + global_pool = SelectAdaptivePool2d( + pool_type=pool_type, + flatten=flatten_in_pool, + input_fmt=input_fmt, + ) + num_pooled_features = num_features * global_pool.feat_mult() + return global_pool, num_pooled_features + + +def 
_create_fc(num_features, num_classes, use_conv=False): + if num_classes <= 0: + fc = nn.Identity() # pass-through (no classifier) + elif use_conv: + fc = nn.Conv2d(num_features, num_classes, 1, bias=True) + else: + fc = nn.Linear(num_features, num_classes, bias=True) + return fc + + +def create_classifier( + num_features: int, + num_classes: int, + pool_type: str = 'avg', + use_conv: bool = False, + input_fmt: str = 'NCHW', + drop_rate: Optional[float] = None, +): + global_pool, num_pooled_features = _create_pool( + num_features, + num_classes, + pool_type, + use_conv=use_conv, + input_fmt=input_fmt, + ) + fc = _create_fc( + num_pooled_features, + num_classes, + use_conv=use_conv, + ) + if drop_rate is not None: + dropout = nn.Dropout(drop_rate) + return global_pool, dropout, fc + return global_pool, fc + + +class ClassifierHead(nn.Module): + """Classifier head w/ configurable global pooling and dropout.""" + + def __init__( + self, + in_features: int, + num_classes: int, + pool_type: str = 'avg', + drop_rate: float = 0., + use_conv: bool = False, + input_fmt: str = 'NCHW', + ): + """ + Args: + in_features: The number of input features. + num_classes: The number of classes for the final classifier layer (output). + pool_type: Global pooling type, pooling disabled if empty string (''). + drop_rate: Pre-classifier dropout rate. 
+ """ + super(ClassifierHead, self).__init__() + self.in_features = in_features + self.use_conv = use_conv + self.input_fmt = input_fmt + + global_pool, fc = create_classifier( + in_features, + num_classes, + pool_type, + use_conv=use_conv, + input_fmt=input_fmt, + ) + self.global_pool = global_pool + self.drop = nn.Dropout(drop_rate) + self.fc = fc + self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity() + + def reset(self, num_classes: int, pool_type: Optional[str] = None): + if pool_type is not None and pool_type != self.global_pool.pool_type: + self.global_pool, self.fc = create_classifier( + self.in_features, + num_classes, + pool_type=pool_type, + use_conv=self.use_conv, + input_fmt=self.input_fmt, + ) + self.flatten = nn.Flatten(1) if self.use_conv and pool_type else nn.Identity() + else: + num_pooled_features = self.in_features * self.global_pool.feat_mult() + self.fc = _create_fc( + num_pooled_features, + num_classes, + use_conv=self.use_conv, + ) + + def forward(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.drop(x) + if pre_logits: + return self.flatten(x) + x = self.fc(x) + return self.flatten(x) + + +class NormMlpClassifierHead(nn.Module): + """ A Pool -> Norm -> Mlp Classifier Head for '2D' NCHW tensors + """ + def __init__( + self, + in_features: int, + num_classes: int, + hidden_size: Optional[int] = None, + pool_type: str = 'avg', + drop_rate: float = 0., + norm_layer: Union[str, Callable] = 'layernorm2d', + act_layer: Union[str, Callable] = 'tanh', + ): + """ + Args: + in_features: The number of input features. + num_classes: The number of classes for the final classifier layer (output). + hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None. + pool_type: Global pooling type, pooling disabled if empty string (''). + drop_rate: Pre-classifier dropout rate. + norm_layer: Normalization layer type. + act_layer: MLP activation layer type (only used if hidden_size is not None). 
+ """ + super().__init__() + self.in_features = in_features + self.hidden_size = hidden_size + self.num_features = in_features + self.use_conv = not pool_type + norm_layer = get_norm_layer(norm_layer) + act_layer = get_act_layer(act_layer) + linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear + + self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) + self.norm = norm_layer(in_features) + self.flatten = nn.Flatten(1) if pool_type else nn.Identity() + if hidden_size: + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', linear_layer(in_features, hidden_size)), + ('act', act_layer()), + ])) + self.num_features = hidden_size + else: + self.pre_logits = nn.Identity() + self.drop = nn.Dropout(drop_rate) + self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def reset(self, num_classes: int, pool_type: Optional[str] = None): + if pool_type is not None: + self.global_pool = SelectAdaptivePool2d(pool_type=pool_type) + self.flatten = nn.Flatten(1) if pool_type else nn.Identity() + self.use_conv = self.global_pool.is_identity() + linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear + if self.hidden_size: + if ((isinstance(self.pre_logits.fc, nn.Conv2d) and not self.use_conv) or + (isinstance(self.pre_logits.fc, nn.Linear) and self.use_conv)): + with torch.no_grad(): + new_fc = linear_layer(self.in_features, self.hidden_size) + new_fc.weight.copy_(self.pre_logits.fc.weight.reshape(new_fc.weight.shape)) + new_fc.bias.copy_(self.pre_logits.fc.bias) + self.pre_logits.fc = new_fc + self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward(self, x, pre_logits: bool = False): + x = self.global_pool(x) + x = self.norm(x) + x = self.flatten(x) + x = self.pre_logits(x) + x = self.drop(x) + if pre_logits: + return x + x = self.fc(x) + return x + + +class ClNormMlpClassifierHead(nn.Module): + """ A Pool -> Norm -> Mlp 
Classifier Head for n-D NxxC tensors + """ + def __init__( + self, + in_features: int, + num_classes: int, + hidden_size: Optional[int] = None, + pool_type: str = 'avg', + drop_rate: float = 0., + norm_layer: Union[str, Callable] = 'layernorm', + act_layer: Union[str, Callable] = 'gelu', + input_fmt: str = 'NHWC', + ): + """ + Args: + in_features: The number of input features. + num_classes: The number of classes for the final classifier layer (output). + hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None. + pool_type: Global pooling type, pooling disabled if empty string (''). + drop_rate: Pre-classifier dropout rate. + norm_layer: Normalization layer type. + act_layer: MLP activation layer type (only used if hidden_size is not None). + """ + super().__init__() + self.in_features = in_features + self.hidden_size = hidden_size + self.num_features = in_features + assert pool_type in ('', 'avg', 'max', 'avgmax') + self.pool_type = pool_type + assert input_fmt in ('NHWC', 'NLC') + self.pool_dim = 1 if input_fmt == 'NLC' else (1, 2) + norm_layer = get_norm_layer(norm_layer) + act_layer = get_act_layer(act_layer) + + self.norm = norm_layer(in_features) + if hidden_size: + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', nn.Linear(in_features, hidden_size)), + ('act', act_layer()), + ])) + self.num_features = hidden_size + else: + self.pre_logits = nn.Identity() + self.drop = nn.Dropout(drop_rate) + self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False): + if pool_type is not None: + self.pool_type = pool_type + if reset_other: + self.pre_logits = nn.Identity() + self.norm = nn.Identity() + self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def _global_pool(self, x): + if self.pool_type: + if self.pool_type == 'avg': + x = x.mean(dim=self.pool_dim) + elif self.pool_type 
== 'max': + x = x.amax(dim=self.pool_dim) + elif self.pool_type == 'avgmax': + x = 0.5 * (x.amax(dim=self.pool_dim) + x.mean(dim=self.pool_dim)) + return x + + def forward(self, x, pre_logits: bool = False): + x = self._global_pool(x) + x = self.norm(x) + x = self.pre_logits(x) + x = self.drop(x) + if pre_logits: + return x + x = self.fc(x) + return x diff --git a/janus/lib/python3.10/site-packages/timm/layers/conv2d_same.py b/janus/lib/python3.10/site-packages/timm/layers/conv2d_same.py new file mode 100644 index 0000000000000000000000000000000000000000..7ac85b793891ce3d793671abfecfa0a4db5b389c --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/conv2d_same.py @@ -0,0 +1,110 @@ +""" Conv2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Tuple, Optional + +from .config import is_exportable, is_scriptable +from .padding import pad_same, pad_same_arg, get_padding_value + + +_USE_EXPORT_CONV = False + + +def conv2d_same( + x, + weight: torch.Tensor, + bias: Optional[torch.Tensor] = None, + stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), + dilation: Tuple[int, int] = (1, 1), + groups: int = 1, +): + x = pad_same(x, weight.shape[-2:], stride, dilation) + return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups) + + +class Conv2dSame(nn.Conv2d): + """ Tensorflow like 'SAME' convolution wrapper for 2D convolutions + """ + + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + ): + super(Conv2dSame, self).__init__( + in_channels, out_channels, kernel_size, + stride, 0, dilation, groups, bias, + ) + + def forward(self, x): + return conv2d_same( + x, self.weight, self.bias, + self.stride, self.padding, self.dilation, self.groups, + ) + + +class Conv2dSameExport(nn.Conv2d): + """ ONNX export friendly Tensorflow like 'SAME' convolution 
wrapper for 2D convolutions + + NOTE: This does not currently work with torch.jit.script + """ + + # pylint: disable=unused-argument + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride=1, + padding=0, + dilation=1, + groups=1, + bias=True, + ): + super(Conv2dSameExport, self).__init__( + in_channels, out_channels, kernel_size, + stride, 0, dilation, groups, bias, + ) + self.pad = None + self.pad_input_size = (0, 0) + + def forward(self, x): + input_size = x.size()[-2:] + if self.pad is None: + pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation) + self.pad = nn.ZeroPad2d(pad_arg) + self.pad_input_size = input_size + + x = self.pad(x) + return F.conv2d( + x, self.weight, self.bias, + self.stride, self.padding, self.dilation, self.groups, + ) + + +def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs): + padding = kwargs.pop('padding', '') + kwargs.setdefault('bias', False) + padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs) + if is_dynamic: + if _USE_EXPORT_CONV and is_exportable(): + # older PyTorch ver needed this to export same padding reasonably + assert not is_scriptable() # Conv2DSameExport does not work with jit + return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs) + else: + return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs) + else: + return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs) + + diff --git a/janus/lib/python3.10/site-packages/timm/layers/create_conv2d.py b/janus/lib/python3.10/site-packages/timm/layers/create_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..ac9489ce492d0f768c1ae8892163fa986bac8fd8 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/create_conv2d.py @@ -0,0 +1,36 @@ +""" Create Conv2d Factory Method + +Hacked together by / Copyright 2020 Ross Wightman +""" + +from .mixed_conv2d import MixedConv2d +from .cond_conv2d import CondConv2d +from .conv2d_same import 
create_conv2d_pad + + +def create_conv2d(in_channels, out_channels, kernel_size, **kwargs): + """ Select a 2d convolution implementation based on arguments + Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d. + + Used extensively by EfficientNet, MobileNetv3 and related networks. + """ + if isinstance(kernel_size, list): + assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently + if 'groups' in kwargs: + groups = kwargs.pop('groups') + if groups == in_channels: + kwargs['depthwise'] = True + else: + assert groups == 1 + # We're going to use only lists for defining the MixedConv2d kernel groups, + # ints, tuples, other iterables will continue to pass to normal conv and specify h, w. + m = MixedConv2d(in_channels, out_channels, kernel_size, **kwargs) + else: + depthwise = kwargs.pop('depthwise', False) + # for DW out_channels must be multiple of in_channels as must have out_channels % groups == 0 + groups = in_channels if depthwise else kwargs.pop('groups', 1) + if 'num_experts' in kwargs and kwargs['num_experts'] > 0: + m = CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + else: + m = create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs) + return m diff --git a/janus/lib/python3.10/site-packages/timm/layers/create_norm.py b/janus/lib/python3.10/site-packages/timm/layers/create_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..75262b5eca6f03f9c86c59643f14a7cfd9bba503 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/create_norm.py @@ -0,0 +1,60 @@ +""" Norm Layer Factory + +Create norm modules by string (to mirror create_act and creat_norm-act fns) + +Copyright 2022 Ross Wightman +""" +import functools +import types +from typing import Type + +import torch.nn as nn + +from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm, RmsNorm2d, SimpleNorm, SimpleNorm2d +from torchvision.ops.misc 
import FrozenBatchNorm2d + +_NORM_MAP = dict( + batchnorm=nn.BatchNorm2d, + batchnorm2d=nn.BatchNorm2d, + batchnorm1d=nn.BatchNorm1d, + groupnorm=GroupNorm, + groupnorm1=GroupNorm1, + layernorm=LayerNorm, + layernorm2d=LayerNorm2d, + rmsnorm=RmsNorm, + rmsnorm2d=RmsNorm2d, + simplenorm=SimpleNorm, + simplenorm2d=SimpleNorm2d, + frozenbatchnorm2d=FrozenBatchNorm2d, +) +_NORM_TYPES = {m for n, m in _NORM_MAP.items()} + + +def create_norm_layer(layer_name, num_features, **kwargs): + layer = get_norm_layer(layer_name) + layer_instance = layer(num_features, **kwargs) + return layer_instance + + +def get_norm_layer(norm_layer): + if norm_layer is None: + return None + assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial)) + norm_kwargs = {} + + # unbind partial fn, so args can be rebound later + if isinstance(norm_layer, functools.partial): + norm_kwargs.update(norm_layer.keywords) + norm_layer = norm_layer.func + + if isinstance(norm_layer, str): + if not norm_layer: + return None + layer_name = norm_layer.replace('_', '').lower() + norm_layer = _NORM_MAP[layer_name] + else: + norm_layer = norm_layer + + if norm_kwargs: + norm_layer = functools.partial(norm_layer, **norm_kwargs) # bind/rebind args + return norm_layer diff --git a/janus/lib/python3.10/site-packages/timm/layers/drop.py b/janus/lib/python3.10/site-packages/timm/layers/drop.py new file mode 100644 index 0000000000000000000000000000000000000000..289245f5adab275ffc38de3b01fdb23f9e491593 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/drop.py @@ -0,0 +1,182 @@ +""" DropBlock, DropPath + +PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers. 
+ +Papers: +DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890) + +Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382) + +Code: +DropBlock impl inspired by two Tensorflow impl that I liked: + - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74 + - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .grid import ndgrid + + +def drop_block_2d( + x, + drop_prob: float = 0.1, + block_size: int = 7, + gamma_scale: float = 1.0, + with_noise: bool = False, + inplace: bool = False, + batchwise: bool = False +): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. This layer has been tested on a few training + runs with success, but needs further validation and possibly optimization for lower runtime impact. + """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + # seed_drop_rate, the gamma parameter + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + # Forces the block to be inside the feature map. 
+ w_i, h_i = ndgrid(torch.arange(W, device=x.device), torch.arange(H, device=x.device)) + valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \ + ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)) + valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype) + + if batchwise: + # one mask for whole batch, quite a bit faster + uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device) + else: + uniform_noise = torch.rand_like(x) + block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype) + block_mask = -F.max_pool2d( + -block_mask, + kernel_size=clipped_block_size, # block_size, + stride=1, + padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x) + if inplace: + x.mul_(block_mask).add_(normal_noise * (1 - block_mask)) + else: + x = x * block_mask + normal_noise * (1 - block_mask) + else: + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +def drop_block_fast_2d( + x: torch.Tensor, + drop_prob: float = 0.1, + block_size: int = 7, + gamma_scale: float = 1.0, + with_noise: bool = False, + inplace: bool = False, +): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + + DropBlock with an experimental gaussian noise option. Simplied from above without concern for valid + block mask at edges. 
+ """ + B, C, H, W = x.shape + total_size = W * H + clipped_block_size = min(block_size, min(W, H)) + gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / ( + (W - block_size + 1) * (H - block_size + 1)) + + block_mask = torch.empty_like(x).bernoulli_(gamma) + block_mask = F.max_pool2d( + block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2) + + if with_noise: + normal_noise = torch.empty_like(x).normal_() + if inplace: + x.mul_(1. - block_mask).add_(normal_noise * block_mask) + else: + x = x * (1. - block_mask) + normal_noise * block_mask + else: + block_mask = 1 - block_mask + normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype) + if inplace: + x.mul_(block_mask * normalize_scale) + else: + x = x * block_mask * normalize_scale + return x + + +class DropBlock2d(nn.Module): + """ DropBlock. See https://arxiv.org/pdf/1810.12890.pdf + """ + + def __init__( + self, + drop_prob: float = 0.1, + block_size: int = 7, + gamma_scale: float = 1.0, + with_noise: bool = False, + inplace: bool = False, + batchwise: bool = False, + fast: bool = True): + super(DropBlock2d, self).__init__() + self.drop_prob = drop_prob + self.gamma_scale = gamma_scale + self.block_size = block_size + self.with_noise = with_noise + self.inplace = inplace + self.batchwise = batchwise + self.fast = fast # FIXME finish comparisons of fast vs not + + def forward(self, x): + if not self.training or not self.drop_prob: + return x + if self.fast: + return drop_block_fast_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace) + else: + return drop_block_2d( + x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise) + + +def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). 
+ + This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, + the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for + changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use + 'survival rate' as the argument. + + """ + if drop_prob == 0. or not training: + return x + keep_prob = 1 - drop_prob + shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = x.new_empty(shape).bernoulli_(keep_prob) + if keep_prob > 0.0 and scale_by_keep: + random_tensor.div_(keep_prob) + return x * random_tensor + + +class DropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + """ + def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True): + super(DropPath, self).__init__() + self.drop_prob = drop_prob + self.scale_by_keep = scale_by_keep + + def forward(self, x): + return drop_path(x, self.drop_prob, self.training, self.scale_by_keep) + + def extra_repr(self): + return f'drop_prob={round(self.drop_prob,3):0.3f}' diff --git a/janus/lib/python3.10/site-packages/timm/layers/eca.py b/janus/lib/python3.10/site-packages/timm/layers/eca.py new file mode 100644 index 0000000000000000000000000000000000000000..e29be6ac3c95bb61229cdcdd659ec89d541f1a53 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/eca.py @@ -0,0 +1,145 @@ +""" +ECA module from ECAnet + +paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks +https://arxiv.org/abs/1910.03151 + +Original ECA model borrowed from https://github.com/BangguWu/ECANet + +Modified circular ECA implementation and adaption for use in timm package +by Chris Ha https://github.com/VRandme + +Original License: + +MIT License + +Copyright (c) 2019 
BangguWu, Qilong Wang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +import math +from torch import nn +import torch.nn.functional as F + + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class EcaModule(nn.Module): + """Constructs an ECA module. + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) 
+ kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + def __init__( + self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid', + rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False): + super(EcaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + assert kernel_size % 2 == 1 + padding = (kernel_size - 1) // 2 + if use_mlp: + # NOTE 'mlp' mode is a timm experiment, not in paper + assert channels is not None + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor) + act_layer = act_layer or nn.ReLU + self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True) + self.act = create_act_layer(act_layer) + self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True) + else: + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False) + self.act = None + self.conv2 = None + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) # view for 1d conv + y = self.conv(y) + if self.conv2 is not None: + y = self.act(y) + y = self.conv2(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + +EfficientChannelAttn = EcaModule # alias + + +class CecaModule(nn.Module): + """Constructs a circular ECA module. + + ECA module where the conv uses circular padding rather than zero padding. + Unlike the spatial dimension, the channels do not have inherent ordering nor + locality. 
Although this module in essence, applies such an assumption, it is unnecessary + to limit the channels on either "edge" from being circularly adapted to each other. + This will fundamentally increase connectivity and possibly increase performance metrics + (accuracy, robustness), without significantly impacting resource metrics + (parameter size, throughput,latency, etc) + + Args: + channels: Number of channels of the input feature map for use in adaptive kernel sizes + for actual calculations according to channel. + gamma, beta: when channel is given parameters of mapping function + refer to original paper https://arxiv.org/pdf/1910.03151.pdf + (default=None. if channel size not given, use k_size given for kernel size.) + kernel_size: Adaptive selection of kernel size (default=3) + gamm: used in kernel_size calc, see above + beta: used in kernel_size calc, see above + act_layer: optional non-linearity after conv, enables conv bias, this is an experiment + gate_layer: gating non-linearity to use + """ + + def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'): + super(CecaModule, self).__init__() + if channels is not None: + t = int(abs(math.log(channels, 2) + beta) / gamma) + kernel_size = max(t if t % 2 else t + 1, 3) + has_act = act_layer is not None + assert kernel_size % 2 == 1 + + # PyTorch circular padding mode is buggy as of pytorch 1.4 + # see https://github.com/pytorch/pytorch/pull/17240 + # implement manual circular padding + self.padding = (kernel_size - 1) // 2 + self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + y = x.mean((2, 3)).view(x.shape[0], 1, -1) + # Manually implement circular padding, F.pad does not seemed to be bugged + y = F.pad(y, (self.padding, self.padding), mode='circular') + y = self.conv(y) + y = self.gate(y).view(x.shape[0], -1, 1, 1) + return x * y.expand_as(x) + + 
+CircularEfficientChannelAttn = CecaModule diff --git a/janus/lib/python3.10/site-packages/timm/layers/evo_norm.py b/janus/lib/python3.10/site-packages/timm/layers/evo_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..ea77620712c80a54d943ef0b920556cbafc1f9f6 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/evo_norm.py @@ -0,0 +1,352 @@ +""" EvoNorm in PyTorch + +Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967 +@inproceedings{NEURIPS2020, + author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc}, + booktitle = {Advances in Neural Information Processing Systems}, + editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin}, + pages = {13539--13550}, + publisher = {Curran Associates, Inc.}, + title = {Evolving Normalization-Activation Layers}, + url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf}, + volume = {33}, + year = {2020} +} + +An attempt at getting decent performing EvoNorms running in PyTorch. +While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm +in terms of memory usage and throughput on GPUs. + +I'm testing these modules on TPU w/ PyTorch XLA. Promising start but +currently working around some issues with builtin torch/tensor.var/std. Unlike +GPU, similar train speeds for EvoNormS variants and BatchNorm. 
+ +Hacked together by / Copyright 2020 Ross Wightman +""" +from typing import Sequence, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer +from .trace_utils import _assert + + +def instance_std(x, eps: float = 1e-5): + std = x.float().var(dim=(2, 3), unbiased=False, keepdim=True).add(eps).sqrt().to(x.dtype) + return std.expand(x.shape) + + +def instance_std_tpu(x, eps: float = 1e-5): + std = manual_var(x, dim=(2, 3)).add(eps).sqrt() + return std.expand(x.shape) +# instance_std = instance_std_tpu + + +def instance_rms(x, eps: float = 1e-5): + rms = x.float().square().mean(dim=(2, 3), keepdim=True).add(eps).sqrt().to(x.dtype) + return rms.expand(x.shape) + + +def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False): + xm = x.mean(dim=dim, keepdim=True) + if diff_sqm: + # difference of squared mean and mean squared, faster on TPU can be less stable + var = ((x * x).mean(dim=dim, keepdim=True) - (xm * xm)).clamp(0) + else: + var = ((x - xm) * (x - xm)).mean(dim=dim, keepdim=True) + return var + + +def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False): + B, C, H, W = x.shape + x_dtype = x.dtype + _assert(C % groups == 0, '') + if flatten: + x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues + std = x.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) + else: + x = x.reshape(B, groups, C // groups, H, W) + std = x.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype) + return std.expand(x.shape).reshape(B, C, H, W) + + +def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False): + # This is a workaround for some stability / odd behaviour of .var and .std + # running on PyTorch XLA w/ TPUs. 
These manual var impl are producing much better results + B, C, H, W = x.shape + _assert(C % groups == 0, '') + if flatten: + x = x.reshape(B, groups, -1) # FIXME simpler shape causing TPU / XLA issues + var = manual_var(x, dim=-1, diff_sqm=diff_sqm) + else: + x = x.reshape(B, groups, C // groups, H, W) + var = manual_var(x, dim=(2, 3, 4), diff_sqm=diff_sqm) + return var.add(eps).sqrt().expand(x.shape).reshape(B, C, H, W) +#group_std = group_std_tpu # FIXME TPU temporary + + +def group_rms(x, groups: int = 32, eps: float = 1e-5): + B, C, H, W = x.shape + _assert(C % groups == 0, '') + x_dtype = x.dtype + x = x.reshape(B, groups, C // groups, H, W) + rms = x.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype) + return rms.expand(x.shape).reshape(B, C, H, W) + + +class EvoNorm2dB0(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.v is not None: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.v is not None: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + # var = manual_var(x, dim=(0, 2, 3)).squeeze() + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach() * self.momentum * (n / (n - 1))) + else: + var = self.running_var + left = var.add(self.eps).sqrt_().to(x_dtype).view(v_shape).expand_as(x) + v = 
self.v.to(x_dtype).view(v_shape) + right = x * v + instance_std(x, self.eps) + x = x / left.max(right) + return x * self.weight.to(x_dtype).view(v_shape) + self.bias.to(x_dtype).view(v_shape) + + +class EvoNorm2dB1(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) + else: + var = self.running_var + var = var.to(x_dtype).view(v_shape) + left = var.add(self.eps).sqrt_() + right = (x + 1) * instance_rms(x, self.eps) + x = x / left.max(right) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dB2(nn.Module): + def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.momentum = momentum + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.register_buffer('running_var', torch.ones(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype 
+ v_shape = (1, -1, 1, 1) + if self.apply_act: + if self.training: + var = x.float().var(dim=(0, 2, 3), unbiased=False) + n = x.numel() / x.shape[1] + self.running_var.copy_( + self.running_var * (1 - self.momentum) + + var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1))) + else: + var = self.running_var + var = var.to(x_dtype).view(v_shape) + left = var.add(self.eps).sqrt_() + right = instance_rms(x, self.eps) - x + x = x / left.max(right) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS0(nn.Module): + def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_): + super().__init__() + self.apply_act = apply_act # apply activation (non-linearity) + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.v is not None: + nn.init.ones_(self.v) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.v is not None: + v = self.v.view(v_shape).to(x_dtype) + x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS0a(EvoNorm2dS0): + def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + d = group_std(x, self.groups, self.eps) + if 
self.v is not None: + v = self.v.view(v_shape).to(x_dtype) + x = x * (x * v).sigmoid() + x = x / d + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS1(nn.Module): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=None, eps=1e-5, **_): + super().__init__() + act_layer = act_layer or nn.SiLU + self.apply_act = apply_act # apply activation (non-linearity) + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer) + else: + self.act = nn.Identity() + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.pre_act_norm = False + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + x = self.act(x) / group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS1a(EvoNorm2dS1): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=None, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = self.act(x) / group_std(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS2(nn.Module): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=None, eps=1e-5, **_): + super().__init__() + act_layer = act_layer or 
nn.SiLU + self.apply_act = apply_act # apply activation (non-linearity) + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer) + else: + self.act = nn.Identity() + if group_size: + assert num_features % group_size == 0 + self.groups = num_features // group_size + else: + self.groups = groups + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + if self.apply_act: + x = self.act(x) / group_rms(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) + + +class EvoNorm2dS2a(EvoNorm2dS2): + def __init__( + self, num_features, groups=32, group_size=None, + apply_act=True, act_layer=None, eps=1e-3, **_): + super().__init__( + num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = self.act(x) / group_rms(x, self.groups, self.eps) + return x * self.weight.view(v_shape).to(x_dtype) + self.bias.view(v_shape).to(x_dtype) diff --git a/janus/lib/python3.10/site-packages/timm/layers/filter_response_norm.py b/janus/lib/python3.10/site-packages/timm/layers/filter_response_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..a66a1cd493e4cecec27419925a6a2045bb05f25f --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/filter_response_norm.py @@ -0,0 +1,68 @@ +""" Filter Response Norm in PyTorch + +Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737 + +Hacked together by / Copyright 2021 Ross Wightman +""" +import torch +import torch.nn as nn + +from .create_act 
import create_act_layer +from .trace_utils import _assert + + +def inv_instance_rms(x, eps: float = 1e-5): + rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype) + return rms.expand(x.shape) + + +class FilterResponseNormTlu2d(nn.Module): + def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_): + super(FilterResponseNormTlu2d, self).__init__() + self.apply_act = apply_act # apply activation (non-linearity) + self.rms = rms + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + if self.tau is not None: + nn.init.zeros_(self.tau) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = x * inv_instance_rms(x, self.eps) + x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) + return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x + + +class FilterResponseNormAct2d(nn.Module): + def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_): + super(FilterResponseNormAct2d, self).__init__() + if act_layer is not None and apply_act: + self.act = create_act_layer(act_layer, inplace=inplace) + else: + self.act = nn.Identity() + self.rms = rms + self.eps = eps + self.weight = nn.Parameter(torch.ones(num_features)) + self.bias = nn.Parameter(torch.zeros(num_features)) + self.reset_parameters() + + def reset_parameters(self): + nn.init.ones_(self.weight) + nn.init.zeros_(self.bias) + + def forward(self, x): + _assert(x.dim() == 4, 'expected 4D input') + x_dtype = x.dtype + v_shape = (1, -1, 1, 1) + x = x * inv_instance_rms(x, self.eps) + x = x * 
self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) + return self.act(x) diff --git a/janus/lib/python3.10/site-packages/timm/layers/format.py b/janus/lib/python3.10/site-packages/timm/layers/format.py new file mode 100644 index 0000000000000000000000000000000000000000..7eadc1af832aa593451fd32f9697b0e498fcc6de --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/format.py @@ -0,0 +1,58 @@ +from enum import Enum +from typing import Union + +import torch + + +class Format(str, Enum): + NCHW = 'NCHW' + NHWC = 'NHWC' + NCL = 'NCL' + NLC = 'NLC' + + +FormatT = Union[str, Format] + + +def get_spatial_dim(fmt: FormatT): + fmt = Format(fmt) + if fmt is Format.NLC: + dim = (1,) + elif fmt is Format.NCL: + dim = (2,) + elif fmt is Format.NHWC: + dim = (1, 2) + else: + dim = (2, 3) + return dim + + +def get_channel_dim(fmt: FormatT): + fmt = Format(fmt) + if fmt is Format.NHWC: + dim = 3 + elif fmt is Format.NLC: + dim = 2 + else: + dim = 1 + return dim + + +def nchw_to(x: torch.Tensor, fmt: Format): + if fmt == Format.NHWC: + x = x.permute(0, 2, 3, 1) + elif fmt == Format.NLC: + x = x.flatten(2).transpose(1, 2) + elif fmt == Format.NCL: + x = x.flatten(2) + return x + + +def nhwc_to(x: torch.Tensor, fmt: Format): + if fmt == Format.NCHW: + x = x.permute(0, 3, 1, 2) + elif fmt == Format.NLC: + x = x.flatten(1, 2) + elif fmt == Format.NCL: + x = x.flatten(1, 2).transpose(1, 2) + return x diff --git a/janus/lib/python3.10/site-packages/timm/layers/global_context.py b/janus/lib/python3.10/site-packages/timm/layers/global_context.py new file mode 100644 index 0000000000000000000000000000000000000000..de7fb5c15f08a5c2fe42cb7c174fff92d6b0d3bf --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/global_context.py @@ -0,0 +1,67 @@ +""" Global Context Attention Block + +Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond` + - https://arxiv.org/abs/1904.11492 + +Official code consulted as 
reference: https://github.com/xvjiarui/GCNet + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn +import torch.nn.functional as F + +from .create_act import create_act_layer, get_act_layer +from .helpers import make_divisible +from .mlp import ConvMlp +from .norm import LayerNorm2d + + +class GlobalContext(nn.Module): + + def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False, + rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'): + super(GlobalContext, self).__init__() + act_layer = get_act_layer(act_layer) + + self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None + + if rd_channels is None: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + if fuse_add: + self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_add = None + if fuse_scale: + self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d) + else: + self.mlp_scale = None + + self.gate = create_act_layer(gate_layer) + self.init_last_zero = init_last_zero + self.reset_parameters() + + def reset_parameters(self): + if self.conv_attn is not None: + nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu') + if self.mlp_add is not None: + nn.init.zeros_(self.mlp_add.fc2.weight) + + def forward(self, x): + B, C, H, W = x.shape + + if self.conv_attn is not None: + attn = self.conv_attn(x).reshape(B, 1, H * W) # (B, 1, H * W) + attn = F.softmax(attn, dim=-1).unsqueeze(3) # (B, 1, H * W, 1) + context = x.reshape(B, C, H * W).unsqueeze(1) @ attn + context = context.view(B, C, 1, 1) + else: + context = x.mean(dim=(2, 3), keepdim=True) + + if self.mlp_scale is not None: + mlp_x = self.mlp_scale(context) + x = x * self.gate(mlp_x) + if self.mlp_add is not None: + mlp_x = self.mlp_add(context) + x = x + mlp_x + + return x diff --git 
a/janus/lib/python3.10/site-packages/timm/layers/grid.py b/janus/lib/python3.10/site-packages/timm/layers/grid.py new file mode 100644 index 0000000000000000000000000000000000000000..f760d761fd8188fe9d979027c988d6ce8ec90169 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/grid.py @@ -0,0 +1,49 @@ +from typing import Tuple + +import torch + + +def ndgrid(*tensors) -> Tuple[torch.Tensor, ...]: + """generate N-D grid in dimension order. + + The ndgrid function is like meshgrid except that the order of the first two input arguments are switched. + + That is, the statement + [X1,X2,X3] = ndgrid(x1,x2,x3) + + produces the same result as + + [X2,X1,X3] = meshgrid(x2,x1,x3) + + This naming is based on MATLAB, the purpose is to avoid confusion due to torch's change to make + torch.meshgrid behaviour move from matching ndgrid ('ij') indexing to numpy meshgrid defaults of ('xy'). + + """ + try: + return torch.meshgrid(*tensors, indexing='ij') + except TypeError: + # old PyTorch < 1.10 will follow this path as it does not have indexing arg, + # the old behaviour of meshgrid was 'ij' + return torch.meshgrid(*tensors) + + +def meshgrid(*tensors) -> Tuple[torch.Tensor, ...]: + """generate N-D grid in spatial dim order. + + The meshgrid function is similar to ndgrid except that the order of the + first two input and output arguments is switched. + + That is, the statement + + [X,Y,Z] = meshgrid(x,y,z) + produces the same result as + + [Y,X,Z] = ndgrid(y,x,z) + Because of this, meshgrid is better suited to problems in two- or three-dimensional Cartesian space, + while ndgrid is better suited to multidimensional problems that aren't spatially based. + """ + + # NOTE: this will throw in PyTorch < 1.10 as meshgrid did not support indexing arg or have + # capability of generating grid in xy order before then. 
+ return torch.meshgrid(*tensors, indexing='xy') + diff --git a/janus/lib/python3.10/site-packages/timm/layers/grn.py b/janus/lib/python3.10/site-packages/timm/layers/grn.py new file mode 100644 index 0000000000000000000000000000000000000000..ae71e013fc97bbdb4bcfcc522b9a9b36920b4efa --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/grn.py @@ -0,0 +1,39 @@ +""" Global Response Normalization Module + +Based on the GRN layer presented in +`ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808 + +This implementation +* works for both NCHW and NHWC tensor layouts +* uses affine param names matching existing torch norm layers +* slightly improves eager mode performance via fused addcmul + +Hacked together by / Copyright 2023 Ross Wightman +""" + +import torch +from torch import nn as nn + + +class GlobalResponseNorm(nn.Module): + """ Global Response Normalization layer + """ + def __init__(self, dim, eps=1e-6, channels_last=True): + super().__init__() + self.eps = eps + if channels_last: + self.spatial_dim = (1, 2) + self.channel_dim = -1 + self.wb_shape = (1, 1, 1, -1) + else: + self.spatial_dim = (2, 3) + self.channel_dim = 1 + self.wb_shape = (1, -1, 1, 1) + + self.weight = nn.Parameter(torch.zeros(dim)) + self.bias = nn.Parameter(torch.zeros(dim)) + + def forward(self, x): + x_g = x.norm(p=2, dim=self.spatial_dim, keepdim=True) + x_n = x_g / (x_g.mean(dim=self.channel_dim, keepdim=True) + self.eps) + return x + torch.addcmul(self.bias.view(self.wb_shape), self.weight.view(self.wb_shape), x * x_n) diff --git a/janus/lib/python3.10/site-packages/timm/layers/halo_attn.py b/janus/lib/python3.10/site-packages/timm/layers/halo_attn.py new file mode 100644 index 0000000000000000000000000000000000000000..f2ac64f85e08a24646434fc0a995afa0fd9b9ee7 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/halo_attn.py @@ -0,0 +1,233 @@ +""" Halo Self Attention + +Paper: `Scaling Local Self-Attention 
for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + +@misc{2103.12731, +Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and + Jonathon Shlens}, +Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones}, +Year = {2021}, +} + +Status: +This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me. +The attention mechanism works but it's slow as implemented. + +Hacked together by / Copyright 2021 Ross Wightman +""" +from typing import List + +import torch +from torch import nn +import torch.nn.functional as F + +from .helpers import make_divisible +from .weight_init import trunc_normal_ +from .trace_utils import _assert + + +def rel_logits_1d(q, rel_k, permute_mask: List[int]): + """ Compute relative logits along one dimension + + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + Args: + q: (batch, height, width, dim) + rel_k: (2 * window - 1, dim) + permute_mask: permute output dim according to this + """ + B, H, W, dim = q.shape + rel_size = rel_k.shape[0] + win_size = (rel_size + 1) // 2 + + x = (q @ rel_k.transpose(-1, -2)) + x = x.reshape(-1, W, rel_size) + + # pad to shift from relative to absolute indexing + x_pad = F.pad(x, [0, 1]).flatten(1) + x_pad = F.pad(x_pad, [0, rel_size - W]) + + # reshape and slice out the padded elements + x_pad = x_pad.reshape(-1, W + 1, rel_size) + x = x_pad[:, :W, win_size - 1:] + + # reshape and tile + x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1) + return x.permute(permute_mask) + + +class PosEmbedRel(nn.Module): + """ Relative Position Embedding + As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 + Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 + + """ + 
def __init__(self, block_size, win_size, dim_head, scale): + """ + Args: + block_size (int): block size + win_size (int): neighbourhood window size + dim_head (int): attention head dim + scale (float): scale factor (for init) + """ + super().__init__() + self.block_size = block_size + self.dim_head = dim_head + self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale) + + def forward(self, q): + B, BB, HW, _ = q.shape + + # relative logits in width dimension. + q = q.reshape(-1, self.block_size, self.block_size, self.dim_head) + rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) + + # relative logits in height dimension. + q = q.transpose(1, 2) + rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) + + rel_logits = rel_logits_h + rel_logits_w + rel_logits = rel_logits.reshape(B, BB, HW, -1) + return rel_logits + + +class HaloAttn(nn.Module): + """ Halo Attention + + Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones` + - https://arxiv.org/abs/2103.12731 + + The internal dimensions of the attention module are controlled by the interaction of several arguments. 
+ * the output dimension of the module is specified by dim_out, which falls back to input dim if not set + * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim + * the query and key (qk) dimensions are determined by + * num_heads * dim_head if dim_head is not None + * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None + * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used + + Args: + dim (int): input dimension to the module + dim_out (int): output dimension of the module, same as dim if not set + feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda) + stride: output stride of the module, query downscaled if > 1 (default: 1). + num_heads: parallel attention heads (default: 8). + dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set + block_size (int): size of blocks. (default: 8) + halo_size (int): size of halo overlap. (default: 3) + qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. 
(default: 1.0) + qkv_bias (bool) : add bias to q, k, and v projections + avg_down (bool): use average pool downsample instead of strided query blocks + scale_pos_embed (bool): scale the position embedding as well as Q @ K + """ + def __init__( + self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3, + qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False): + super().__init__() + dim_out = dim_out or dim + assert dim_out % num_heads == 0 + assert stride in (1, 2) + self.num_heads = num_heads + self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads + self.dim_head_v = dim_out // self.num_heads + self.dim_out_qk = num_heads * self.dim_head_qk + self.dim_out_v = num_heads * self.dim_head_v + self.scale = self.dim_head_qk ** -0.5 + self.scale_pos_embed = scale_pos_embed + self.block_size = self.block_size_ds = block_size + self.halo_size = halo_size + self.win_size = block_size + halo_size * 2 # neighbourhood window size + self.block_stride = 1 + use_avg_pool = False + if stride > 1: + use_avg_pool = avg_down or block_size % stride != 0 + self.block_stride = 1 if use_avg_pool else stride + self.block_size_ds = self.block_size // self.block_stride + + # FIXME not clear if this stride behaviour is what the paper intended + # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving + # data in unfolded block form. I haven't wrapped my head around how that'd look. 
+ self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias) + self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias) + + self.pos_embed = PosEmbedRel( + block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale) + + self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity() + + self.reset_parameters() + + def reset_parameters(self): + std = self.q.weight.shape[1] ** -0.5 # fan-in + trunc_normal_(self.q.weight, std=std) + trunc_normal_(self.kv.weight, std=std) + trunc_normal_(self.pos_embed.height_rel, std=self.scale) + trunc_normal_(self.pos_embed.width_rel, std=self.scale) + + def forward(self, x): + B, C, H, W = x.shape + _assert(H % self.block_size == 0, '') + _assert(W % self.block_size == 0, '') + num_h_blocks = H // self.block_size + num_w_blocks = W // self.block_size + num_blocks = num_h_blocks * num_w_blocks + + q = self.q(x) + # unfold + q = q.reshape( + -1, self.dim_head_qk, + num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4) + # B, num_heads * dim_head * block_size ** 2, num_blocks + q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3) + # B * num_heads, num_blocks, block_size ** 2, dim_head + + kv = self.kv(x) + # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not + # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach. + # FIXME figure out how to switch impl between this and conv2d if XLA being used. 
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]) + kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1) + k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1) + # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v + + if self.scale_pos_embed: + attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale + else: + attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q) + # B * num_heads, num_blocks, block_size ** 2, win_size ** 2 + attn = attn.softmax(dim=-1) + + out = (attn @ v).transpose(1, 3) # B * num_heads, dim_head_v, block_size ** 2, num_blocks + # fold + out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks) + out = out.permute(0, 3, 1, 4, 2).contiguous().view( + B, self.dim_out_v, H // self.block_stride, W // self.block_stride) + # B, dim_out, H // block_stride, W // block_stride + out = self.pool(out) + return out + + +""" Three alternatives for overlapping windows. + +`.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold() + + if is_xla: + # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is + # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment. 
+ WW = self.win_size ** 2 + pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size) + kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size) + elif self.stride_tricks: + kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous() + kv = kv.as_strided(( + B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks), + stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size)) + else: + kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size) + + kv = kv.reshape( + B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3) +""" diff --git a/janus/lib/python3.10/site-packages/timm/layers/helpers.py b/janus/lib/python3.10/site-packages/timm/layers/helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..b003f48d845761fbed4230b3af4092ae48bfe6b9 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/helpers.py @@ -0,0 +1,43 @@ +""" Layer/Module Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +from itertools import repeat +import collections.abc + + +# From PyTorch internals +def _ntuple(n): + def parse(x): + if isinstance(x, collections.abc.Iterable) and not isinstance(x, str): + return tuple(x) + return tuple(repeat(x, n)) + return parse + + +to_1tuple = _ntuple(1) +to_2tuple = _ntuple(2) +to_3tuple = _ntuple(3) +to_4tuple = _ntuple(4) +to_ntuple = _ntuple + + +def make_divisible(v, divisor=8, min_value=None, round_limit=.9): + min_value = min_value or divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
+ if new_v < round_limit * v: + new_v += divisor + return new_v + + +def extend_tuple(x, n): + # pads a tuple to specified n by padding with last value + if not isinstance(x, (tuple, list)): + x = (x,) + else: + x = tuple(x) + pad_n = n - len(x) + if pad_n <= 0: + return x[:n] + return x + (x[-1],) * pad_n diff --git a/janus/lib/python3.10/site-packages/timm/layers/interpolate.py b/janus/lib/python3.10/site-packages/timm/layers/interpolate.py new file mode 100644 index 0000000000000000000000000000000000000000..adba9342ec03a9fd9ad3186f73133325892ad1d9 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/interpolate.py @@ -0,0 +1,68 @@ +""" Interpolation helpers for timm layers + +RegularGridInterpolator from https://github.com/sbarratt/torch_interpolations +Copyright Shane Barratt, Apache 2.0 license +""" +import torch +from itertools import product + + +class RegularGridInterpolator: + """ Interpolate data defined on a rectilinear grid with even or uneven spacing. + Produces similar results to scipy RegularGridInterpolator or interp2d + in 'linear' mode. 
+ + Taken from https://github.com/sbarratt/torch_interpolations + """ + + def __init__(self, points, values): + self.points = points + self.values = values + + assert isinstance(self.points, tuple) or isinstance(self.points, list) + assert isinstance(self.values, torch.Tensor) + + self.ms = list(self.values.shape) + self.n = len(self.points) + + assert len(self.ms) == self.n + + for i, p in enumerate(self.points): + assert isinstance(p, torch.Tensor) + assert p.shape[0] == self.values.shape[i] + + def __call__(self, points_to_interp): + assert self.points is not None + assert self.values is not None + + assert len(points_to_interp) == len(self.points) + K = points_to_interp[0].shape[0] + for x in points_to_interp: + assert x.shape[0] == K + + idxs = [] + dists = [] + overalls = [] + for p, x in zip(self.points, points_to_interp): + idx_right = torch.bucketize(x, p) + idx_right[idx_right >= p.shape[0]] = p.shape[0] - 1 + idx_left = (idx_right - 1).clamp(0, p.shape[0] - 1) + dist_left = x - p[idx_left] + dist_right = p[idx_right] - x + dist_left[dist_left < 0] = 0. + dist_right[dist_right < 0] = 0. + both_zero = (dist_left == 0) & (dist_right == 0) + dist_left[both_zero] = dist_right[both_zero] = 1. + + idxs.append((idx_left, idx_right)) + dists.append((dist_left, dist_right)) + overalls.append(dist_left + dist_right) + + numerator = 0. 
+ for indexer in product([0, 1], repeat=self.n): + as_s = [idx[onoff] for onoff, idx in zip(indexer, idxs)] + bs_s = [dist[1 - onoff] for onoff, dist in zip(indexer, dists)] + numerator += self.values[as_s] * \ + torch.prod(torch.stack(bs_s), dim=0) + denominator = torch.prod(torch.stack(overalls), dim=0) + return numerator / denominator diff --git a/janus/lib/python3.10/site-packages/timm/layers/layer_scale.py b/janus/lib/python3.10/site-packages/timm/layers/layer_scale.py new file mode 100644 index 0000000000000000000000000000000000000000..08566b2bd1e16697c3806b0e03ae568179291889 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/layer_scale.py @@ -0,0 +1,38 @@ +import torch +from torch import nn + + +class LayerScale(nn.Module): + """ LayerScale on tensors with channels in last-dim. + """ + def __init__( + self, + dim: int, + init_values: float = 1e-5, + inplace: bool = False, + ) -> None: + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return x.mul_(self.gamma) if self.inplace else x * self.gamma + + +class LayerScale2d(nn.Module): + """ LayerScale for tensors with torch 2D NCHW layout. 
+ """ + def __init__( + self, + dim: int, + init_values: float = 1e-5, + inplace: bool = False, + ): + super().__init__() + self.inplace = inplace + self.gamma = nn.Parameter(init_values * torch.ones(dim)) + + def forward(self, x): + gamma = self.gamma.view(1, -1, 1, 1) + return x.mul_(gamma) if self.inplace else x * gamma + diff --git a/janus/lib/python3.10/site-packages/timm/layers/mixed_conv2d.py b/janus/lib/python3.10/site-packages/timm/layers/mixed_conv2d.py new file mode 100644 index 0000000000000000000000000000000000000000..fa0ce565c0a9d348d4e68165960fa77fcf7f70d7 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/mixed_conv2d.py @@ -0,0 +1,51 @@ +""" PyTorch Mixed Convolution + +Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595) + +Hacked together by / Copyright 2020 Ross Wightman +""" + +import torch +from torch import nn as nn + +from .conv2d_same import create_conv2d_pad + + +def _split_channels(num_chan, num_groups): + split = [num_chan // num_groups for _ in range(num_groups)] + split[0] += num_chan - sum(split) + return split + + +class MixedConv2d(nn.ModuleDict): + """ Mixed Grouped Convolution + + Based on MDConv and GroupedConv in MixNet impl: + https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py + """ + def __init__(self, in_channels, out_channels, kernel_size=3, + stride=1, padding='', dilation=1, depthwise=False, **kwargs): + super(MixedConv2d, self).__init__() + + kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size] + num_groups = len(kernel_size) + in_splits = _split_channels(in_channels, num_groups) + out_splits = _split_channels(out_channels, num_groups) + self.in_channels = sum(in_splits) + self.out_channels = sum(out_splits) + for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)): + conv_groups = in_ch if depthwise else 1 + # use add_module to keep key space clean + self.add_module( + 
str(idx), + create_conv2d_pad( + in_ch, out_ch, k, stride=stride, + padding=padding, dilation=dilation, groups=conv_groups, **kwargs) + ) + self.splits = in_splits + + def forward(self, x): + x_split = torch.split(x, self.splits, 1) + x_out = [c(x_split[i]) for i, c in enumerate(self.values())] + x = torch.cat(x_out, 1) + return x diff --git a/janus/lib/python3.10/site-packages/timm/layers/padding.py b/janus/lib/python3.10/site-packages/timm/layers/padding.py new file mode 100644 index 0000000000000000000000000000000000000000..4b85d747cb1c6ed33dfdbe76d3e0d4fd1191e36d --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/padding.py @@ -0,0 +1,87 @@ +""" Padding Helpers + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +from typing import List, Tuple, Union + +import torch +import torch.nn.functional as F + +from .helpers import to_2tuple + + +# Calculate symmetric padding for a convolution +def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> Union[int, List[int]]: + if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]): + kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation) + return [get_padding(*a) for a in zip(kernel_size, stride, dilation)] + padding = ((stride - 1) + dilation * (kernel_size - 1)) // 2 + return padding + + +# Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution +def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int): + if isinstance(x, torch.Tensor): + return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0) + else: + return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0) + + +# Can SAME padding for given args be done statically? 
+def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_): + if any([isinstance(v, (tuple, list)) for v in [kernel_size, stride, dilation]]): + kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation) + return all([is_static_pad(*a) for a in zip(kernel_size, stride, dilation)]) + return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0 + + +def pad_same_arg( + input_size: List[int], + kernel_size: List[int], + stride: List[int], + dilation: List[int] = (1, 1), +) -> List[int]: + ih, iw = input_size + kh, kw = kernel_size + pad_h = get_same_padding(ih, kh, stride[0], dilation[0]) + pad_w = get_same_padding(iw, kw, stride[1], dilation[1]) + return [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2] + + +# Dynamically pad input x with 'SAME' padding for conv with specified args +def pad_same( + x, + kernel_size: List[int], + stride: List[int], + dilation: List[int] = (1, 1), + value: float = 0, +): + ih, iw = x.size()[-2:] + pad_h = get_same_padding(ih, kernel_size[0], stride[0], dilation[0]) + pad_w = get_same_padding(iw, kernel_size[1], stride[1], dilation[1]) + x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2), value=value) + return x + + +def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]: + dynamic = False + if isinstance(padding, str): + # for any string padding, the padding will be calculated for you, one of three ways + padding = padding.lower() + if padding == 'same': + # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact + if is_static_pad(kernel_size, **kwargs): + # static case, no extra overhead + padding = get_padding(kernel_size, **kwargs) + else: + # dynamic 'SAME' padding, has runtime/GPU memory overhead + padding = 0 + dynamic = True + elif padding == 'valid': + # 'VALID' padding, same as padding=0 + padding = 0 + else: + # Default to PyTorch style 'same'-ish symmetric padding + padding = 
get_padding(kernel_size, **kwargs) + return padding, dynamic diff --git a/janus/lib/python3.10/site-packages/timm/layers/patch_embed.py b/janus/lib/python3.10/site-packages/timm/layers/patch_embed.py new file mode 100644 index 0000000000000000000000000000000000000000..c739291b327e38f046107598e03ed49dee027e66 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/patch_embed.py @@ -0,0 +1,307 @@ +""" Image to Patch Embedding using Conv2d + +A convolution based approach to patchifying a 2D image w/ embedding projection. + +Based on code in: + * https://github.com/google-research/vision_transformer + * https://github.com/google-research/big_vision/tree/main/big_vision + +Hacked together by / Copyright 2020 Ross Wightman +""" +import logging +import math +from typing import Callable, List, Optional, Tuple, Union + +import torch +from torch import nn as nn +import torch.nn.functional as F + +from .format import Format, nchw_to +from .helpers import to_2tuple +from .trace_utils import _assert + +_logger = logging.getLogger(__name__) + + +class PatchEmbed(nn.Module): + """ 2D Image to Patch Embedding + """ + output_fmt: Format + dynamic_img_pad: torch.jit.Final[bool] + + def __init__( + self, + img_size: Optional[int] = 224, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + norm_layer: Optional[Callable] = None, + flatten: bool = True, + output_fmt: Optional[str] = None, + bias: bool = True, + strict_img_size: bool = True, + dynamic_img_pad: bool = False, + ): + super().__init__() + self.patch_size = to_2tuple(patch_size) + self.img_size, self.grid_size, self.num_patches = self._init_img_size(img_size) + + if output_fmt is not None: + self.flatten = False + self.output_fmt = Format(output_fmt) + else: + # flatten spatial dim and transpose to channels last, kept for bwd compat + self.flatten = flatten + self.output_fmt = Format.NCHW + self.strict_img_size = strict_img_size + self.dynamic_img_pad = dynamic_img_pad + + self.proj = 
nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) + self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() + + def _init_img_size(self, img_size: Union[int, Tuple[int, int]]): + assert self.patch_size + if img_size is None: + return None, None, None + img_size = to_2tuple(img_size) + grid_size = tuple([s // p for s, p in zip(img_size, self.patch_size)]) + num_patches = grid_size[0] * grid_size[1] + return img_size, grid_size, num_patches + + def set_input_size( + self, + img_size: Optional[Union[int, Tuple[int, int]]] = None, + patch_size: Optional[Union[int, Tuple[int, int]]] = None, + ): + new_patch_size = None + if patch_size is not None: + new_patch_size = to_2tuple(patch_size) + if new_patch_size is not None and new_patch_size != self.patch_size: + with torch.no_grad(): + new_proj = nn.Conv2d( + self.proj.in_channels, + self.proj.out_channels, + kernel_size=new_patch_size, + stride=new_patch_size, + bias=self.proj.bias is not None, + ) + new_proj.weight.copy_(resample_patch_embed(self.proj.weight, new_patch_size, verbose=True)) + if self.proj.bias is not None: + new_proj.bias.copy_(self.proj.bias) + self.proj = new_proj + self.patch_size = new_patch_size + img_size = img_size or self.img_size + if img_size != self.img_size or new_patch_size is not None: + self.img_size, self.grid_size, self.num_patches = self._init_img_size(img_size) + + def feat_ratio(self, as_scalar=True) -> Union[Tuple[int, int], int]: + if as_scalar: + return max(self.patch_size) + else: + return self.patch_size + + def dynamic_feat_size(self, img_size: Tuple[int, int]) -> Tuple[int, int]: + """ Get grid (feature) size for given image size taking account of dynamic padding. 
+ NOTE: must be torchscript compatible so using fixed tuple indexing + """ + if self.dynamic_img_pad: + return math.ceil(img_size[0] / self.patch_size[0]), math.ceil(img_size[1] / self.patch_size[1]) + else: + return img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1] + + def forward(self, x): + B, C, H, W = x.shape + if self.img_size is not None: + if self.strict_img_size: + _assert(H == self.img_size[0], f"Input height ({H}) doesn't match model ({self.img_size[0]}).") + _assert(W == self.img_size[1], f"Input width ({W}) doesn't match model ({self.img_size[1]}).") + elif not self.dynamic_img_pad: + _assert( + H % self.patch_size[0] == 0, + f"Input height ({H}) should be divisible by patch size ({self.patch_size[0]})." + ) + _assert( + W % self.patch_size[1] == 0, + f"Input width ({W}) should be divisible by patch size ({self.patch_size[1]})." + ) + if self.dynamic_img_pad: + pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0] + pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1] + x = F.pad(x, (0, pad_w, 0, pad_h)) + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # NCHW -> NLC + elif self.output_fmt != Format.NCHW: + x = nchw_to(x, self.output_fmt) + x = self.norm(x) + return x + + +class PatchEmbedWithSize(PatchEmbed): + """ 2D Image to Patch Embedding + """ + output_fmt: Format + + def __init__( + self, + img_size: Optional[int] = 224, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + norm_layer: Optional[Callable] = None, + flatten: bool = True, + output_fmt: Optional[str] = None, + bias: bool = True, + ): + super().__init__( + img_size=img_size, + patch_size=patch_size, + in_chans=in_chans, + embed_dim=embed_dim, + norm_layer=norm_layer, + flatten=flatten, + output_fmt=output_fmt, + bias=bias, + ) + + def forward(self, x) -> Tuple[torch.Tensor, List[int]]: + B, C, H, W = x.shape + if self.img_size is not None: + _assert(H % self.patch_size[0] == 0, 
f"Input image height ({H}) must be divisible by patch size ({self.patch_size[0]}).") + _assert(W % self.patch_size[1] == 0, f"Input image width ({W}) must be divisible by patch size ({self.patch_size[1]}).") + + x = self.proj(x) + feat_size = x.shape[-2:] + if self.flatten: + x = x.flatten(2).transpose(1, 2) # NCHW -> NLC + elif self.output_fmt != Format.NCHW: + x = nchw_to(x, self.output_fmt) + x = self.norm(x) + return x, feat_size + + +def resample_patch_embed( + patch_embed, + new_size: List[int], + interpolation: str = 'bicubic', + antialias: bool = True, + verbose: bool = False, +): + """Resample the weights of the patch embedding kernel to target resolution. + We resample the patch embedding kernel by approximately inverting the effect + of patch resizing. + + Code based on: + https://github.com/google-research/big_vision/blob/b00544b81f8694488d5f36295aeb7972f3755ffe/big_vision/models/proj/flexi/vit.py + + With this resizing, we can for example load a B/8 filter into a B/16 model + and, on 2x larger input image, the result will match. + + Args: + patch_embed: original parameter to be resized. + new_size (tuple(int, int): target shape (height, width)-only. + interpolation (str): interpolation for resize + antialias (bool): use anti-aliasing filter in resize + verbose (bool): log operation + Returns: + Resized patch embedding kernel. + """ + import numpy as np + try: + from torch import vmap + except ImportError: + from functorch import vmap + + assert len(patch_embed.shape) == 4, "Four dimensions expected" + assert len(new_size) == 2, "New shape should only be hw" + old_size = patch_embed.shape[-2:] + if tuple(old_size) == tuple(new_size): + return patch_embed + + if verbose: + _logger.info(f"Resize patch embedding {patch_embed.shape} to {new_size}, w/ {interpolation} interpolation.") + + def resize(x_np, _new_size): + x_tf = torch.Tensor(x_np)[None, None, ...] 
+ x_upsampled = F.interpolate( + x_tf, size=_new_size, mode=interpolation, antialias=antialias)[0, 0, ...].numpy() + return x_upsampled + + def get_resize_mat(_old_size, _new_size): + mat = [] + for i in range(np.prod(_old_size)): + basis_vec = np.zeros(_old_size) + basis_vec[np.unravel_index(i, _old_size)] = 1. + mat.append(resize(basis_vec, _new_size).reshape(-1)) + return np.stack(mat).T + + resize_mat = get_resize_mat(old_size, new_size) + resize_mat_pinv = torch.tensor(np.linalg.pinv(resize_mat.T), device=patch_embed.device) + + def resample_kernel(kernel): + resampled_kernel = resize_mat_pinv @ kernel.reshape(-1) + return resampled_kernel.reshape(new_size) + + v_resample_kernel = vmap(vmap(resample_kernel, 0, 0), 1, 1) + orig_dtype = patch_embed.dtype + patch_embed = patch_embed.float() + patch_embed = v_resample_kernel(patch_embed) + patch_embed = patch_embed.to(orig_dtype) + return patch_embed + + +# def divs(n, m=None): +# m = m or n // 2 +# if m == 1: +# return [1] +# if n % m == 0: +# return [m] + divs(n, m - 1) +# return divs(n, m - 1) +# +# +# class FlexiPatchEmbed(nn.Module): +# """ 2D Image to Patch Embedding w/ Flexible Patch sizes (FlexiViT) +# FIXME WIP +# """ +# def __init__( +# self, +# img_size=240, +# patch_size=16, +# in_chans=3, +# embed_dim=768, +# base_img_size=240, +# base_patch_size=32, +# norm_layer=None, +# flatten=True, +# bias=True, +# ): +# super().__init__() +# self.img_size = to_2tuple(img_size) +# self.patch_size = to_2tuple(patch_size) +# self.num_patches = 0 +# +# # full range for 240 = (5, 6, 8, 10, 12, 14, 15, 16, 20, 24, 30, 40, 48) +# self.seqhw = (6, 8, 10, 12, 14, 15, 16, 20, 24, 30) +# +# self.base_img_size = to_2tuple(base_img_size) +# self.base_patch_size = to_2tuple(base_patch_size) +# self.base_grid_size = tuple([i // p for i, p in zip(self.base_img_size, self.base_patch_size)]) +# self.base_num_patches = self.base_grid_size[0] * self.base_grid_size[1] +# +# self.flatten = flatten +# self.proj = nn.Conv2d(in_chans, 
embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=bias) +# self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity() +# +# def forward(self, x): +# B, C, H, W = x.shape +# +# if self.patch_size == self.base_patch_size: +# weight = self.proj.weight +# else: +# weight = resample_patch_embed(self.proj.weight, self.patch_size) +# patch_size = self.patch_size +# x = F.conv2d(x, weight, bias=self.proj.bias, stride=patch_size) +# if self.flatten: +# x = x.flatten(2).transpose(1, 2) # BCHW -> BNC +# x = self.norm(x) +# return x diff --git a/janus/lib/python3.10/site-packages/timm/layers/pool2d_same.py b/janus/lib/python3.10/site-packages/timm/layers/pool2d_same.py new file mode 100644 index 0000000000000000000000000000000000000000..4c2a1c44713e552be850865ada9623a1c3b1d836 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/pool2d_same.py @@ -0,0 +1,73 @@ +""" AvgPool2d w/ Same Padding + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import List, Tuple, Optional + +from .helpers import to_2tuple +from .padding import pad_same, get_padding_value + + +def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0), + ceil_mode: bool = False, count_include_pad: bool = True): + # FIXME how to deal with count_include_pad vs not for external padding? 
    x = pad_same(x, kernel_size, stride)
    return F.avg_pool2d(x, kernel_size, stride, (0, 0), ceil_mode, count_include_pad)


class AvgPool2dSame(nn.AvgPool2d):
    """ Tensorflow like 'SAME' wrapper for 2D average pooling

    Ignores the `padding` ctor arg (stores (0, 0)) and instead pads the input
    dynamically in forward() via pad_same().
    """
    def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
        kernel_size = to_2tuple(kernel_size)
        stride = to_2tuple(stride)
        super(AvgPool2dSame, self).__init__(kernel_size, stride, (0, 0), ceil_mode, count_include_pad)

    def forward(self, x):
        x = pad_same(x, self.kernel_size, self.stride)
        return F.avg_pool2d(
            x, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)


def max_pool2d_same(
        x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
        dilation: List[int] = (1, 1), ceil_mode: bool = False):
    # pad with -inf so padded cells never win the max
    x = pad_same(x, kernel_size, stride, value=-float('inf'))
    return F.max_pool2d(x, kernel_size, stride, (0, 0), dilation, ceil_mode)


class MaxPool2dSame(nn.MaxPool2d):
    """ Tensorflow like 'SAME' wrapper for 2D max pooling

    Ignores the `padding` ctor arg (stores (0, 0)); pads dynamically in
    forward() with -inf so padding cannot affect the max.
    """
    def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False):
        kernel_size = to_2tuple(kernel_size)
        stride = to_2tuple(stride)
        dilation = to_2tuple(dilation)
        super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode)

    def forward(self, x):
        x = pad_same(x, self.kernel_size, self.stride, value=-float('inf'))
        return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode)


def create_pool2d(pool_type, kernel_size, stride=None, **kwargs):
    """Factory for 'avg'/'max' 2D pooling layers.

    Resolves string padding via get_padding_value(); when dynamic 'SAME'
    padding is required, returns the *2dSame wrapper, otherwise the plain
    nn.AvgPool2d / nn.MaxPool2d with static padding.
    """
    stride = stride or kernel_size
    padding = kwargs.pop('padding', '')
    padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs)
    if is_dynamic:
        if pool_type == 'avg':
            return AvgPool2dSame(kernel_size, stride=stride, **kwargs)
        elif pool_type == 'max':
            return MaxPool2dSame(kernel_size, stride=stride, **kwargs)
        else:
            assert 
False, f'Unsupported pool type {pool_type}' + else: + if pool_type == 'avg': + return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + elif pool_type == 'max': + return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs) + else: + assert False, f'Unsupported pool type {pool_type}' diff --git a/janus/lib/python3.10/site-packages/timm/layers/pos_embed_sincos.py b/janus/lib/python3.10/site-packages/timm/layers/pos_embed_sincos.py new file mode 100644 index 0000000000000000000000000000000000000000..5bb31af59b65d06186b51b079cc21b53ff2d2fb9 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/pos_embed_sincos.py @@ -0,0 +1,443 @@ +""" Sin-cos, fourier, rotary position embedding modules and functions + +Hacked together by / Copyright 2022 Ross Wightman +""" +import math +from typing import List, Tuple, Optional, Union + +import torch +from torch import nn as nn + +from .grid import ndgrid +from .trace_utils import _assert + + +def pixel_freq_bands( + num_bands: int, + max_freq: float = 224., + linear_bands: bool = True, + device: Optional[torch.device] = None, +): + if linear_bands: + bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=torch.float32, device=device) + else: + bands = 2 ** torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=torch.float32, device=device) + return bands * torch.pi + + +def freq_bands( + num_bands: int, + temperature: float = 10000., + step: int = 2, + device: Optional[torch.device] = None, +) -> torch.Tensor: + exp = torch.arange(0, num_bands, step, dtype=torch.int64, device=device).to(torch.float32) / num_bands + bands = 1. 
/ (temperature ** exp)
    return bands


def build_sincos2d_pos_embed(
        feat_shape: List[int],
        dim: int = 64,
        temperature: float = 10000.,
        reverse_coord: bool = False,
        interleave_sin_cos: bool = False,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None
) -> torch.Tensor:
    """Build a fixed 2D sin-cos position embedding.

    Args:
        feat_shape: spatial (H, W) shape of the feature grid to embed.
        dim: embedding dim per position; must be divisible by 4 (sin+cos for each of 2 axes).
        temperature: frequency temperature passed to freq_bands().
        reverse_coord: stack grid order W, H instead of H, W
        interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos
        dtype: output dtype (computation is done in float32).
        device: output device.

    Returns:
        Tensor with one row per spatial position and `dim` columns.
    """
    assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding'
    pos_dim = dim // 4
    bands = freq_bands(pos_dim, temperature=temperature, step=1, device=device)

    if reverse_coord:
        feat_shape = feat_shape[::-1]  # stack W, H instead of H, W
    grid = torch.stack(ndgrid([
        torch.arange(s, device=device, dtype=torch.int64).to(torch.float32)
        for s in feat_shape
    ])).flatten(1).transpose(0, 1)
    pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0)
    # FIXME add support for unflattened spatial dim?

    stack_dim = 2 if interleave_sin_cos else 1  # stack sin, cos, sin, cos instead of sin sin cos cos
    pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1)
    return pos_emb.to(dtype=dtype)


def build_fourier_pos_embed(
        feat_shape: List[int],
        bands: Optional[torch.Tensor] = None,
        num_bands: int = 64,
        max_res: int = 224,
        temperature: float = 10000.,
        linear_bands: bool = False,
        include_grid: bool = False,
        in_pixels: bool = True,
        ref_feat_shape: Optional[List[int]] = None,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None,
) -> List[torch.Tensor]:
    """

    Args:
        feat_shape: Feature shape for embedding.
        bands: Pre-calculated frequency bands.
        num_bands: Number of frequency bands (determines output dim).
        max_res: Maximum resolution for pixel based freq.
        temperature: Temperature for non-pixel freq. 
+ linear_bands: Linear band spacing for pixel based freq. + include_grid: Include the spatial grid in output. + in_pixels: Output in pixel freq. + ref_feat_shape: Reference feature shape for resize / fine-tune. + dtype: Output dtype. + device: Output device. + + Returns: + + """ + if bands is None: + if in_pixels: + bands = pixel_freq_bands( + num_bands, + float(max_res), + linear_bands=linear_bands, + device=device, + ) + else: + bands = freq_bands( + num_bands, + temperature=temperature, + step=1, + device=device, + ) + else: + if device is None: + device = bands.device + if dtype is None: + dtype = bands.dtype + + if in_pixels: + t = [torch.linspace(-1., 1., steps=s, device=device, dtype=torch.float32) for s in feat_shape] + else: + t = [torch.arange(s, device=device, dtype=torch.int64).to(torch.float32) for s in feat_shape] + + if ref_feat_shape is not None: + # eva's scheme for resizing rope embeddings (ref shape = pretrain) + t = [x / f * r for x, f, r in zip(t, feat_shape, ref_feat_shape)] + + grid = torch.stack(ndgrid(t), dim=-1) + grid = grid.unsqueeze(-1) + pos = grid * bands + + pos_sin, pos_cos = pos.sin().to(dtype=dtype), pos.cos().to(dtype) + out = [grid, pos_sin, pos_cos] if include_grid else [pos_sin, pos_cos] + return out + + +class FourierEmbed(nn.Module): + + def __init__( + self, + max_res: int = 224, + num_bands: int = 64, + concat_grid=True, + keep_spatial=False, + ): + super().__init__() + self.max_res = max_res + self.num_bands = num_bands + self.concat_grid = concat_grid + self.keep_spatial = keep_spatial + self.register_buffer( + 'bands', + pixel_freq_bands(max_res, num_bands), + persistent=False, + ) + + def forward(self, x): + B, C = x.shape[:2] + feat_shape = x.shape[2:] + emb = build_fourier_pos_embed( + feat_shape, + self.bands, + include_grid=self.concat_grid, + dtype=x.dtype, + device=x.device, + ) + emb = torch.cat(emb, dim=-1) + emb = emb.transpose(-1, -2).flatten(len(feat_shape)) + batch_expand = (B,) + (-1,) * (x.ndim - 1) + + # 
FIXME support nD
        if self.keep_spatial:
            # keep NCHW layout, concat embedding as extra channels
            x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1)
        else:
            # flatten spatial dims -> (B, H*W, C + embed_dim)
            x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1)
            x = x.reshape(B, feat_shape.numel(), -1)

        return x


def rot(x):
    # 'rotate half' for RoPE on interleaved pairs: (x0, x1) -> (-x1, x0) along the last dim
    return torch.stack([-x[..., 1::2], x[..., ::2]], -1).reshape(x.shape)


def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb):
    """Apply rotary embedding: x * cos + rot(x) * sin.

    A 3-dim sin/cos embedding is broadcast over x's dim 1 (e.g. a heads dim).
    """
    if sin_emb.ndim == 3:
        return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x)
    return x * cos_emb + rot(x) * sin_emb


def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb):
    # convenience: apply the same rotary embedding to each tensor in a list
    if isinstance(x, torch.Tensor):
        x = [x]
    return [t * cos_emb + rot(t) * sin_emb for t in x]


def apply_rot_embed_cat(x: torch.Tensor, emb):
    # emb holds sin and cos concatenated along the last dim (see RotaryEmbeddingCat)
    sin_emb, cos_emb = emb.tensor_split(2, -1)
    if sin_emb.ndim == 3:
        return x * cos_emb.unsqueeze(1).expand_as(x) + rot(x) * sin_emb.unsqueeze(1).expand_as(x)
    return x * cos_emb + rot(x) * sin_emb


def apply_keep_indices_nlc(x, pos_embed, keep_indices):
    # select per-sample pos_embed rows (NLC layout) by token keep_indices, e.g. after masking
    pos_embed = pos_embed.unsqueeze(0).expand(x.shape[0], -1, -1)
    pos_embed = pos_embed.gather(1, keep_indices.unsqueeze(-1).expand(-1, -1, pos_embed.shape[-1]))
    return pos_embed


def build_rotary_pos_embed(
        feat_shape: List[int],
        bands: Optional[torch.Tensor] = None,
        dim: int = 64,
        max_res: int = 224,
        temperature: float = 10000.,
        linear_bands: bool = False,
        in_pixels: bool = True,
        ref_feat_shape: Optional[List[int]] = None,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None,
):
    """

    Args:
        feat_shape: Spatial shape of the target tensor for embedding.
        bands: Optional pre-generated frequency bands
        dim: Output dimension of embedding tensor.
        max_res: Maximum resolution for pixel mode.
        temperature: Temperature (inv freq) for non-pixel mode
        linear_bands: Linearly (instead of log) spaced bands for pixel mode
        in_pixels: Pixel vs language (inv freq) mode. 
+ dtype: Output dtype. + device: Output device. + + Returns: + + """ + sin_emb, cos_emb = build_fourier_pos_embed( + feat_shape, + bands=bands, + num_bands=dim // 4, + max_res=max_res, + temperature=temperature, + linear_bands=linear_bands, + in_pixels=in_pixels, + ref_feat_shape=ref_feat_shape, + device=device, + dtype=dtype, + ) + num_spatial_dim = 1 + # this would be much nicer as a .numel() call to torch.Size(), but torchscript sucks + for x in feat_shape: + num_spatial_dim *= x + sin_emb = sin_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1) + cos_emb = cos_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1) + return sin_emb, cos_emb + + +class RotaryEmbedding(nn.Module): + """ Rotary position embedding + + NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not + been well tested, and will likely change. It will be moved to its own file. + + The following impl/resources were referenced for this impl: + * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py + * https://blog.eleuther.ai/rotary-embeddings/ + """ + + def __init__( + self, + dim, + max_res=224, + temperature=10000, + in_pixels=True, + linear_bands: bool = False, + feat_shape: Optional[List[int]] = None, + ref_feat_shape: Optional[List[int]] = None, + ): + super().__init__() + self.dim = dim + self.max_res = max_res + self.temperature = temperature + self.in_pixels = in_pixels + self.feat_shape = feat_shape + self.ref_feat_shape = ref_feat_shape + + if feat_shape is None: + # only cache bands + if in_pixels: + bands = pixel_freq_bands( + dim // 4, + float(max_res), + linear_bands=linear_bands, + ) + else: + bands = freq_bands( + dim // 4, + temperature=temperature, + step=1, + ) + self.register_buffer( + 'bands', + bands, + persistent=False, + ) + self.pos_embed_sin = None + self.pos_embed_cos = None + else: + # cache full sin/cos embeddings if shape provided up front + emb_sin, emb_cos = 
build_rotary_pos_embed( + feat_shape=feat_shape, + dim=dim, + max_res=max_res, + linear_bands=linear_bands, + in_pixels=in_pixels, + ref_feat_shape=self.ref_feat_shape, + ) + self.bands = None + self.register_buffer( + 'pos_embed_sin', + emb_sin, + persistent=False, + ) + self.register_buffer( + 'pos_embed_cos', + emb_cos, + persistent=False, + ) + + def get_embed(self, shape: Optional[List[int]] = None): + if self.bands is not None: + # rebuild embeddings every call, use if target shape changes + assert shape is not None + return build_rotary_pos_embed( + shape, + self.bands, + in_pixels=self.in_pixels, + ) + else: + return self.pos_embed_sin, self.pos_embed_cos + + def forward(self, x): + # assuming channel-first tensor where spatial dim are >= 2 + sin_emb, cos_emb = self.get_embed(x.shape[2:]) + return apply_rot_embed(x, sin_emb, cos_emb) + + +class RotaryEmbeddingCat(nn.Module): + """ Rotary position embedding w/ concatenatd sin & cos + + The following impl/resources were referenced for this impl: + * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py + * https://blog.eleuther.ai/rotary-embeddings/ + """ + + def __init__( + self, + dim, + max_res=224, + temperature=10000, + in_pixels=True, + linear_bands: bool = False, + feat_shape: Optional[List[int]] = None, + ref_feat_shape: Optional[List[int]] = None, + ): + super().__init__() + self.dim = dim + self.max_res = max_res + self.temperature = temperature + self.in_pixels = in_pixels + self.feat_shape = feat_shape + self.ref_feat_shape = ref_feat_shape + + if feat_shape is None: + # only cache bands + if in_pixels: + bands = pixel_freq_bands( + dim // 4, + float(max_res), + linear_bands=linear_bands, + ) + else: + bands = freq_bands( + dim // 4, + temperature=temperature, + step=1, + ) + self.register_buffer( + 'bands', + bands, + persistent=False, + ) + self.pos_embed = None + else: + # cache full sin/cos embeddings if shape provided up front + embeds = 
build_rotary_pos_embed( + feat_shape=feat_shape, + dim=dim, + max_res=max_res, + linear_bands=linear_bands, + in_pixels=in_pixels, + ref_feat_shape=self.ref_feat_shape, + ) + self.bands = None + self.register_buffer( + 'pos_embed', + torch.cat(embeds, -1), + persistent=False, + ) + + def get_embed(self, shape: Optional[List[int]] = None): + if self.bands is not None and shape is not None: + # rebuild embeddings every call, use if target shape changes + embeds = build_rotary_pos_embed( + shape, + self.bands, + in_pixels=self.in_pixels, + ref_feat_shape=self.ref_feat_shape, + ) + return torch.cat(embeds, -1) + elif self.pos_embed is not None: + return self.pos_embed + else: + assert False, "get_embed() requires pre-computed pos_embed or valid shape w/ pre-computed bands" + + def forward(self, x): + # assuming channel-first tensor where spatial dim are >= 2 + pos_embed = self.get_embed(x.shape[2:]) + return apply_rot_embed_cat(x, pos_embed) diff --git a/janus/lib/python3.10/site-packages/timm/layers/split_batchnorm.py b/janus/lib/python3.10/site-packages/timm/layers/split_batchnorm.py new file mode 100644 index 0000000000000000000000000000000000000000..830781b335161f8d6dd74c9458070bb1fa88a918 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/split_batchnorm.py @@ -0,0 +1,75 @@ +""" Split BatchNorm + +A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through +a separate BN layer. The first split is passed through the parent BN layers with weight/bias +keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn' +namespace. 

This allows easily removing the auxiliary BN layers after training to efficiently
achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,
'Disentangled Learning via An Auxiliary BN'

Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn


class SplitBatchNorm2d(torch.nn.BatchNorm2d):
    """BatchNorm2d that splits the batch into `num_splits` equal parts in training.

    The first part runs through the parent BN (this module's own weight/bias and
    running stats); each remaining part runs through a separate BN in `aux_bn`.
    In eval mode the input passes through the parent BN unchanged.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True, num_splits=2):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)
        assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)'
        self.num_splits = num_splits
        # num_splits - 1 auxiliary BN layers; the parent BN handles split 0
        self.aux_bn = nn.ModuleList([
            nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats) for _ in range(num_splits - 1)])

    def forward(self, input: torch.Tensor):
        if self.training:  # aux BN only relevant while training
            split_size = input.shape[0] // self.num_splits
            assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits"
            split_input = input.split(split_size)
            x = [super().forward(split_input[0])]
            for i, a in enumerate(self.aux_bn):
                x.append(a(split_input[i + 1]))
            # re-assemble in original batch order
            return torch.cat(x, dim=0)
        else:
            return super().forward(input)


def convert_splitbn_model(module, num_splits=2):
    """
    Recursively traverse module and its children to replace all instances of
    ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`. 
+ Args: + module (torch.nn.Module): input module + num_splits: number of separate batchnorm layers to split input across + Example:: + >>> # model is an instance of torch.nn.Module + >>> model = timm.models.convert_splitbn_model(model, num_splits=2) + """ + mod = module + if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm): + return module + if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): + mod = SplitBatchNorm2d( + module.num_features, module.eps, module.momentum, module.affine, + module.track_running_stats, num_splits=num_splits) + mod.running_mean = module.running_mean + mod.running_var = module.running_var + mod.num_batches_tracked = module.num_batches_tracked + if module.affine: + mod.weight.data = module.weight.data.clone().detach() + mod.bias.data = module.bias.data.clone().detach() + for aux in mod.aux_bn: + aux.running_mean = module.running_mean.clone() + aux.running_var = module.running_var.clone() + aux.num_batches_tracked = module.num_batches_tracked.clone() + if module.affine: + aux.weight.data = module.weight.data.clone().detach() + aux.bias.data = module.bias.data.clone().detach() + for name, child in module.named_children(): + mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits)) + del module + return mod diff --git a/janus/lib/python3.10/site-packages/timm/layers/squeeze_excite.py b/janus/lib/python3.10/site-packages/timm/layers/squeeze_excite.py new file mode 100644 index 0000000000000000000000000000000000000000..4fe568fe8f39db79b0985874e9305e4ee143995c --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/squeeze_excite.py @@ -0,0 +1,102 @@ +""" Squeeze-and-Excitation Channel Attention + +An SE implementation originally based on PyTorch SE-Net impl. +Has since evolved with additional functionality / configuration. + +Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507 + +Also included is Effective Squeeze-Excitation (ESE). 
+Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667 + +Hacked together by / Copyright 2021 Ross Wightman +""" +from torch import nn as nn + +from .create_act import create_act_layer +from .helpers import make_divisible + + +class SEModule(nn.Module): + """ SE Module as defined in original SE-Nets with a few additions + Additions include: + * divisor can be specified to keep channels % div == 0 (default: 8) + * reduction channels can be specified directly by arg (if rd_channels is set) + * reduction channels can be specified by float rd_ratio (default: 1/16) + * global max pooling can be added to the squeeze aggregation + * customizable activation, normalization, and gate layer + """ + def __init__( + self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False, + bias=True, act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'): + super(SEModule, self).__init__() + self.add_maxpool = add_maxpool + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) 
        # squeeze/excite implemented as 1x1 convs so it works directly on NCHW maps
        self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=bias)
        self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity()
        self.act = create_act_layer(act_layer, inplace=True)
        self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=bias)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        # squeeze: global average over spatial dims (optionally mixed with global max)
        x_se = x.mean((2, 3), keepdim=True)
        if self.add_maxpool:
            # experimental codepath, may remove or change
            x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True)
        x_se = self.fc1(x_se)
        x_se = self.act(self.bn(x_se))
        x_se = self.fc2(x_se)
        # excite: per-channel gating of the input
        return x * self.gate(x_se)


SqueezeExcite = SEModule  # alias


class EffectiveSEModule(nn.Module):
    """ 'Effective Squeeze-Excitation

    Single-conv SE variant (no channel reduction, no intermediate activation).
    From `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
    """
    def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_):
        super(EffectiveSEModule, self).__init__()
        self.add_maxpool = add_maxpool
        self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        x_se = x.mean((2, 3), keepdim=True)
        if self.add_maxpool:
            # experimental codepath, may remove or change
            x_se = 0.5 * x_se + 0.5 * x.amax((2, 3), keepdim=True)
        x_se = self.fc(x_se)
        return x * self.gate(x_se)


EffectiveSqueezeExcite = EffectiveSEModule  # alias


class SqueezeExciteCl(nn.Module):
    """ SE Module as defined in original SE-Nets with a few additions
    Additions include:
        * divisor can be specified to keep channels % div == 0 (default: 8)
        * reduction channels can be specified directly by arg (if rd_channels is set)
        * reduction channels can be specified by float rd_ratio (default: 1/16)
        * global max pooling can be added to the squeeze aggregation
        * customizable activation, normalization, and gate layer
    """
    def __init__(
            self, channels, rd_ratio=1. 
/ 16, rd_channels=None, rd_divisor=8, + bias=True, act_layer=nn.ReLU, gate_layer='sigmoid'): + super().__init__() + if not rd_channels: + rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.) + self.fc1 = nn.Linear(channels, rd_channels, bias=bias) + self.act = create_act_layer(act_layer, inplace=True) + self.fc2 = nn.Linear(rd_channels, channels, bias=bias) + self.gate = create_act_layer(gate_layer) + + def forward(self, x): + x_se = x.mean((1, 2), keepdims=True) # FIXME avg dim [1:n-1], don't assume 2D NHWC + x_se = self.fc1(x_se) + x_se = self.act(x_se) + x_se = self.fc2(x_se) + return x * self.gate(x_se) \ No newline at end of file diff --git a/janus/lib/python3.10/site-packages/timm/layers/trace_utils.py b/janus/lib/python3.10/site-packages/timm/layers/trace_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..83970729e628b525d24162f5df37ee5bc253438f --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/layers/trace_utils.py @@ -0,0 +1,13 @@ +try: + from torch import _assert +except ImportError: + def _assert(condition: bool, message: str): + assert condition, message + + +def _float_to_int(x: float) -> int: + """ + Symbolic tracing helper to substitute for inbuilt `int`. 
+ Hint: Inbuilt `int` can't accept an argument of type `Proxy` + """ + return int(x) diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/__init__.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23f4c58a1834e67f1942372bdc89cde163fd6dc2 Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/__init__.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/_param_groups.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/_param_groups.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..556e97e921adb9c2fbc46442dde3bd3042f558d3 Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/_param_groups.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/adopt.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/adopt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e799566525faf5a53ecd98608335e13a450ae314 Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/adopt.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/lion.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/lion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec1b81d42131adba86da82f608e6c84c636a085c Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/lion.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a689319064280a9e3940348d609dbbc15b5be363 Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd156fbceb44596be3703002de29bd022fec823c Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fa116f6a7accae99d37959cf79a4140edae31bc Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/radam.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/radam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff3ae436d09d3dc82064562e3edae5ec7c3a466c Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/radam.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/sgdp.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/sgdp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3d29d0a05c988136218030f377f79df6b837ca7 Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/optim/__pycache__/sgdp.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/optim/__pycache__/sgdw.cpython-310.pyc 
class AdamWLegacy(Optimizer):
    r"""AdamW: Adam with decoupled weight decay.

    NOTE: deprecated in favour of torch.optim.AdamW; retained here only as a reference
    implementation.

    References:
        - Adam: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980
        - Decoupled Weight Decay Regularization: https://arxiv.org/abs/1711.05101
        - On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ

    Args:
        params: iterable of parameters to optimize or dicts defining parameter groups
        lr: learning rate
        betas: coefficients used for computing running averages of gradient and its square
        eps: term added to the denominator to improve numerical stability
        weight_decay: weight decay coefficient
        amsgrad: whether to use the AMSGrad variant of this algorithm
            from the paper `On the Convergence of Adam and Beyond`
        caution: apply caution when using AdamW
    """

    def __init__(
            self,
            params: ParamsT,
            lr: float = 1e-3,
            betas: Tuple[float, float] = (0.9, 0.999),
            eps: float = 1e-8,
            weight_decay: float = 1e-2,
            amsgrad: bool = False,
            caution: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        super().__init__(params, dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            amsgrad=amsgrad,
            caution=caution,
        ))

    def __setstate__(self, state):
        super().__setstate__(state)
        # Old checkpoints may predate these options; fill in defaults.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)
            group.setdefault('caution', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            beta1, beta2 = group['betas']
            use_amsgrad = group['amsgrad']

            for param in group['params']:
                if param.grad is None:
                    continue

                # Decoupled weight decay: shrink weights directly, independent of the
                # adaptive gradient step.
                param.data.mul_(1 - group['lr'] * group['weight_decay'])

                grad = param.grad
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[param]
                if len(state) == 0:
                    state['step'] = 0
                    # first moment (EMA of gradients)
                    state['exp_avg'] = torch.zeros_like(param)
                    # second moment (EMA of squared gradients)
                    state['exp_avg_sq'] = torch.zeros_like(param)
                    if use_amsgrad:
                        # running max of the second moment for the AMSGrad variant
                        state['max_exp_avg_sq'] = torch.zeros_like(param)

                state['step'] += 1
                t = state['step']
                bc1 = 1 - beta1 ** t
                bc2 = 1 - beta2 ** t

                m, v = state['exp_avg'], state['exp_avg_sq']
                m.mul_(beta1).add_(grad, alpha=1 - beta1)
                v.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                if use_amsgrad:
                    v_max = state['max_exp_avg_sq']
                    # keep the historical maximum of v and normalize with it
                    torch.max(v_max, v, out=v_max)
                    denom = (v_max.sqrt() / math.sqrt(bc2)).add_(group['eps'])
                else:
                    denom = (v.sqrt() / math.sqrt(bc2)).add_(group['eps'])

                effective_lr = group['lr'] / bc1

                if group['caution']:
                    # 'Cautious Optimizers' (https://arxiv.org/abs/2411.16085): zero the update
                    # where momentum and gradient disagree in sign, rescale the remainder.
                    mask = (m * grad > 0).to(grad.dtype)
                    mask.div_(mask.mean().clamp_(min=1e-3))
                    m = m * mask  # out-of-place: do not mutate optimizer state

                param.addcdiv_(m, denom, value=-effective_lr)

        return loss
class Adan(Optimizer):
    """ Implements a pytorch variant of Adan.

    Adan was proposed in Adan: Adaptive Nesterov Momentum Algorithm for Faster Optimizing Deep Models
    https://arxiv.org/abs/2208.06677

    Arguments:
        params: Iterable of parameters to optimize or dicts defining parameter groups.
        lr: Learning rate.
        betas: Coefficients used for first- and second-order moments.
        eps: Term added to the denominator to improve numerical stability.
        weight_decay: Decoupled weight decay (L2 penalty)
        no_prox: How to perform the weight decay
        caution: Enable caution from 'Cautious Optimizers'
        foreach: If True would use torch._foreach implementation. Faster but uses slightly more memory.
    """

    def __init__(self,
                 params,
                 lr: float = 1e-3,
                 betas: Tuple[float, float, float] = (0.98, 0.92, 0.99),
                 eps: float = 1e-8,
                 weight_decay: float = 0.0,
                 no_prox: bool = False,
                 caution: bool = False,
                 foreach: Optional[bool] = None,
                 ):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if not 0.0 <= betas[2] < 1.0:
            raise ValueError('Invalid beta parameter at index 2: {}'.format(betas[2]))

        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            no_prox=no_prox,
            caution=caution,
            foreach=foreach,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super(Adan, self).__setstate__(state)
        # Checkpoints saved before these options existed get the defaults.
        for group in self.param_groups:
            group.setdefault('no_prox', False)
            group.setdefault('caution', False)

    @torch.no_grad()
    def restart_opt(self):
        """Reset step counter and all moment buffers, e.g. for a training restart."""
        for group in self.param_groups:
            group['step'] = 0
            for p in group['params']:
                if p.requires_grad:
                    state = self.state[p]
                    # State initialization

                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    # Exponential moving average of gradient difference
                    state['exp_avg_diff'] = torch.zeros_like(p)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        # Probe whether the fused scalar-maximum overload needed by the foreach caution
        # path exists in this torch build. FIX: previously a bare `except:` which also
        # swallowed KeyboardInterrupt/SystemExit; narrowed to Exception.
        try:
            has_scalar_maximum = 'Scalar' in torch.ops.aten._foreach_maximum_.overloads()
        except Exception:
            has_scalar_maximum = False

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            exp_avg_diffs = []
            neg_pre_grads = []

            beta1, beta2, beta3 = group['betas']
            # assume same step across group now to simplify things
            # per parameter step can be easily supported by making it a tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1

            bias_correction1 = 1.0 - beta1 ** group['step']
            bias_correction2 = 1.0 - beta2 ** group['step']
            bias_correction3 = 1.0 - beta3 ** group['step']

            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                grads.append(p.grad)

                state = self.state[p]
                if len(state) == 0:
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    state['exp_avg_diff'] = torch.zeros_like(p)

                # neg_pre_grad holds -(previous gradient); (re)seed it on the first step
                if 'neg_pre_grad' not in state or group['step'] == 1:
                    state['neg_pre_grad'] = -p.grad.clone()

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                exp_avg_diffs.append(state['exp_avg_diff'])
                neg_pre_grads.append(state['neg_pre_grad'])

            if not params_with_grad:
                continue

            if group['foreach'] is None:
                # auto-select: foreach caution needs the scalar maximum overload
                use_foreach = not group['caution'] or has_scalar_maximum
            else:
                use_foreach = group['foreach']

            if use_foreach:
                func = _multi_tensor_adan
            else:
                func = _single_tensor_adan

            func(
                params_with_grad,
                grads,
                exp_avgs=exp_avgs,
                exp_avg_sqs=exp_avg_sqs,
                exp_avg_diffs=exp_avg_diffs,
                neg_pre_grads=neg_pre_grads,
                beta1=beta1,
                beta2=beta2,
                beta3=beta3,
                bias_correction1=bias_correction1,
                bias_correction2=bias_correction2,
                bias_correction3_sqrt=math.sqrt(bias_correction3),
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                eps=group['eps'],
                no_prox=group['no_prox'],
                caution=group['caution'],
            )

        return loss
def _multi_tensor_adan(
        params: List[Tensor],
        grads: List[Tensor],
        exp_avgs: List[Tensor],
        exp_avg_sqs: List[Tensor],
        exp_avg_diffs: List[Tensor],
        neg_pre_grads: List[Tensor],
        *,
        beta1: float,
        beta2: float,
        beta3: float,
        bias_correction1: float,
        bias_correction2: float,
        bias_correction3_sqrt: float,
        lr: float,
        weight_decay: float,
        eps: float,
        no_prox: bool,
        caution: bool,
):
    """Fused (torch._foreach) Adan update over lists of parameters.

    Mirrors _single_tensor_adan: maintains m_t (exp_avgs), diff_t (exp_avg_diffs) and
    n_t (exp_avg_sqs), then applies the two addcdiv updates with decoupled weight decay.
    All _foreach ops mutate their first argument in place, and `neg_pre_grads` doubles
    as scratch space, so statement ORDER here is load-bearing — do not reorder.
    On exit each neg_pre_grads entry holds -grad for use as the next step's previous grad.
    """
    if len(params) == 0:
        return

    # for memory saving, we use `neg_pre_grads` as a temp variable, in-place:
    # here it becomes (g_t - g_{t-1}), the gradient difference
    torch._foreach_add_(neg_pre_grads, grads)

    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)  # m_t

    torch._foreach_mul_(exp_avg_diffs, beta2)
    torch._foreach_add_(exp_avg_diffs, neg_pre_grads, alpha=1 - beta2)  # diff_t

    # reuse scratch again: neg_pre_grads becomes g_t + beta2 * (g_t - g_{t-1})
    torch._foreach_mul_(neg_pre_grads, beta2)
    torch._foreach_add_(neg_pre_grads, grads)
    torch._foreach_mul_(exp_avg_sqs, beta3)
    torch._foreach_addcmul_(exp_avg_sqs, neg_pre_grads, neg_pre_grads, value=1 - beta3)  # n_t

    # denom = sqrt(n_t) / sqrt(bias_correction3) + eps
    denom = torch._foreach_sqrt(exp_avg_sqs)
    torch._foreach_div_(denom, bias_correction3_sqrt)
    torch._foreach_add_(denom, eps)

    step_size_diff = lr * beta2 / bias_correction2
    step_size = lr / bias_correction1

    if caution:
        # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
        # NOTE: _foreach_maximum_ with a scalar arg is required here; the caller (Adan.step)
        # falls back to the single-tensor path when that overload is unavailable.
        masks = torch._foreach_mul(exp_avgs, grads)
        masks = [(m > 0).to(g.dtype) for m, g in zip(masks, grads)]
        mask_scale = [m.mean() for m in masks]
        torch._foreach_maximum_(mask_scale, 1e-3)
        torch._foreach_div_(masks, mask_scale)
        # out-of-place so the optimizer's stored exp_avg state is not modified
        exp_avgs = torch._foreach_mul(exp_avgs, masks)

    if no_prox:
        # decoupled decay applied before the update
        torch._foreach_mul_(params, 1 - lr * weight_decay)
        torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size)
        torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=-step_size_diff)
    else:
        # proximal form: update first, then shrink
        torch._foreach_addcdiv_(params, exp_avgs, denom, value=-step_size)
        torch._foreach_addcdiv_(params, exp_avg_diffs, denom, value=-step_size_diff)
        torch._foreach_div_(params, 1 + lr * weight_decay)

    # stash -g_t for the next step's gradient-difference computation
    torch._foreach_zero_(neg_pre_grads)
    torch._foreach_add_(neg_pre_grads, grads, alpha=-1.0)
+""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb + +This optimizer code was adapted from the following (starting with latest) +* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py +* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py +* https://github.com/cybertronai/pytorch-lamb + +Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is +similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX. + +In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU. + +Original copyrights for above sources are below. + +Modifications Copyright 2021 Ross Wightman +""" +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class Lamb(Optimizer):
    """Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB
    reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py

    LAMB was proposed in:
        - Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962
        - On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ

    Args:
        params: Iterable of parameters to optimize or dicts defining parameter groups.
        lr: Learning rate
        bias_correction: Apply Adam-style bias correction to the moment estimates.
        betas: Coefficients used for computing running averages of gradient and its norm.
        eps: Term added to the denominator to improve numerical stability.
        weight_decay: Weight decay
        grad_averaging: Whether apply (1-beta2) to grad when calculating running averages of gradient.
        max_grad_norm: Value used to clip global grad norm; None disables clipping.
        trust_clip: Enable LAMBC trust ratio clipping.
        always_adapt: Apply adaptive learning rate to 0.0 weight decay parameter.
        caution: Apply caution.
        decoupled_decay: Apply weight decay directly to weights (AdamW style) instead of the update.
    """

    def __init__(
            self,
            params: ParamsT,
            lr: float = 1e-3,
            bias_correction: bool = True,
            betas: Tuple[float, float] = (0.9, 0.999),
            eps: float = 1e-6,
            weight_decay: float = 0.01,
            grad_averaging: bool = True,
            max_grad_norm: Optional[float] = 1.0,
            trust_clip: bool = False,
            always_adapt: bool = False,
            caution: bool = False,
            decoupled_decay: bool = False,
    ):
        defaults = dict(
            lr=lr,
            bias_correction=bias_correction,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            grad_averaging=grad_averaging,
            max_grad_norm=max_grad_norm,
            trust_clip=trust_clip,
            always_adapt=always_adapt,
            caution=caution,
            decoupled_decay=decoupled_decay,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        # Older checkpoints may lack these options; restore defaults.
        for group in self.param_groups:
            group.setdefault('caution', False)
            group.setdefault('decoupled_decay', False)

    def _get_clip_grad_norm(self):
        """Return the factor to divide all grads by for global norm clipping.

        Returns None when clipping is disabled (max_grad_norm is None) or when no
        parameter currently has a gradient.
        """
        max_grad_norm = self.defaults['max_grad_norm']
        if max_grad_norm is None:
            return None

        norms = []
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instead.')
                norms.append(torch.linalg.vector_norm(grad))
        if not norms:
            # FIX: previously torch.stack([]) raised a RuntimeError when step() was
            # called with no gradients present; treat that case as "nothing to clip".
            return None
        global_norm = torch.linalg.vector_norm(torch.stack(norms))
        clip_global_norm = (global_norm / max_grad_norm).clamp_(min=1.0)
        return clip_global_norm

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        clip_grad_norm = self._get_clip_grad_norm()  # None if disabled

        for group in self.param_groups:
            bias_correction = 1 if group['bias_correction'] else 0
            beta1, beta2 = group['betas']
            grad_averaging = 1 if group['grad_averaging'] else 0
            beta3 = 1 - beta1 if grad_averaging else 1.0

            # assume same step across group now to simplify things
            # per parameter step can be easily support by making it tensor, or pass list into kernel
            if 'step' in group:
                group['step'] += 1
            else:
                group['step'] = 1

            if bias_correction:
                bias_correction1 = 1 - beta1 ** group['step']
                bias_correction2 = 1 - beta2 ** group['step']
            else:
                bias_correction1, bias_correction2 = 1.0, 1.0

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad

                if clip_grad_norm is not None:
                    # NOTE: in-place division of p.grad by the global clip factor
                    grad.div_(clip_grad_norm)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=beta3)  # m_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)  # v_t

                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                update = (exp_avg / bias_correction1).div_(denom)

                if group['caution']:
                    # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
                    mask = (update * grad > 0).to(grad.dtype)
                    mask.div_(mask.mean().clamp_(min=1e-3))
                    update.mul_(mask)

                weight_decay = group['weight_decay']
                if weight_decay != 0:
                    if group.get('decoupled_decay', False):
                        # AdamW-style: decay applied to weights, not the adaptive update
                        p.add_(p, alpha=-group['lr'] * weight_decay)
                    else:
                        update.add_(p, alpha=weight_decay)

                if weight_decay != 0 or group['always_adapt']:
                    # Layer-wise LR adaptation. By default, skip adaptation on parameters that are
                    # excluded from weight decay, unless always_adapt == True, then always enabled.
                    w_norm = p.norm(2.0)
                    g_norm = update.norm(2.0)
                    trust_ratio = w_norm / g_norm
                    # FIXME nested where required since logical and/or not working in PT XLA
                    # Set the ratio to 1.0 (no change) if either weight norm or grad norm is zero
                    trust_ratio = torch.where(
                        w_norm > 0,
                        torch.where(g_norm > 0, trust_ratio, 1.0),
                        1.0,
                    )
                    if group['trust_clip']:
                        # LAMBC trust clipping, upper bound fixed at one
                        trust_ratio = torch.clamp(trust_ratio, max=1.0)
                    update.mul_(trust_ratio)

                p.add_(update, alpha=-group['lr'])

        return loss
class Lion(Optimizer):
    r"""Lion (EvoLved Sign Momentum) optimizer.

    Paper: `Symbolic Discovery of Optimization Algorithms` - https://arxiv.org/abs/2302.06675
    """

    def __init__(
            self,
            params: ParamsT,
            lr: float = 1e-4,
            betas: Tuple[float, float] = (0.9, 0.99),
            weight_decay: float = 0.0,
            caution: bool = False,
            maximize: bool = False,
            foreach: Optional[bool] = None,
    ):
        """Initialize the hyperparameters.

        Args:
            params: iterable of parameters to optimize or dicts defining parameter groups
            lr: learning rate
            betas: coefficients for the update interpolation and the momentum EMA
            weight_decay: weight decay coefficient
            caution: apply caution ('Cautious Optimizers')
            maximize: maximize instead of minimize the objective
            foreach: force the fused torch._foreach path on/off (None = auto)
        """
        if not 0.0 <= lr:
            raise ValueError(f'Invalid learning rate: {lr}')
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
        super().__init__(params, dict(
            lr=lr,
            betas=betas,
            weight_decay=weight_decay,
            caution=caution,
            foreach=foreach,
            maximize=maximize,
        ))

    def __setstate__(self, state):
        super().__setstate__(state)
        # Fill in options that may be absent from older checkpoints.
        for key, default in (('caution', False), ('maximize', False), ('foreach', None)):
            for group in self.param_groups:
                group.setdefault(key, default)

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: A closure that reevaluates the model and returns the loss.

        Returns:
            the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            beta1, beta2 = group['betas']
            active_params, active_grads, momenta = [], [], []

            for param in group['params']:
                grad = param.grad
                if grad is None:
                    continue
                if grad.is_sparse:
                    raise RuntimeError('Lion does not support sparse gradients')
                active_params.append(param)
                active_grads.append(grad)

                state = self.state[param]
                if 'exp_avg' not in state:
                    # momentum buffer, lazily created on first use
                    state['exp_avg'] = torch.zeros_like(param, memory_format=torch.preserve_format)
                momenta.append(state['exp_avg'])

            lion(
                active_params,
                active_grads,
                momenta,
                beta1=beta1,
                beta2=beta2,
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                caution=group['caution'],
                maximize=group['maximize'],
                foreach=group['foreach'],
            )

        return loss
+ """ + if foreach is None: + try: + # cannot do foreach if this overload doesn't exist when caution enabled + foreach = not caution or 'Scalar' in torch.ops.aten._foreach_maximum_.overloads() + except: + foreach = False + + if foreach and torch.jit.is_scripting(): + raise RuntimeError('torch.jit.script not supported with foreach optimizers') + + if foreach and not torch.jit.is_scripting(): + func = _multi_tensor_lion + else: + func = _single_tensor_lion + + func( + params, + grads, + exp_avgs, + beta1=beta1, + beta2=beta2, + lr=lr, + weight_decay=weight_decay, + caution=caution, + maximize=maximize, + ) + + +def _single_tensor_lion( + params: List[torch.Tensor], + grads: List[torch.Tensor], + exp_avgs: List[torch.Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + caution: bool, + maximize: bool, +): + for i, param in enumerate(params): + grad = grads[i] if not maximize else -grads[i] + exp_avg = exp_avgs[i] + + if torch.is_complex(param): + grad = torch.view_as_real(grad) + exp_avg = torch.view_as_real(exp_avg) + param = torch.view_as_real(param) + + # Perform stepweight decay + param.mul_(1 - lr * weight_decay) + + # Weight update + update = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1).sign_() + + if caution: + # Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085 + mask = (update * grad > 0).to(grad.dtype) + mask.div_(mask.mean().clamp_(min=1e-3)) + update.mul_(mask) + + param.add_(update, alpha=-lr) + + # Decay the momentum running average coefficient + exp_avg.lerp_(grad, 1 - beta2) + + +def _multi_tensor_lion( + params: List[torch.Tensor], + grads: List[torch.Tensor], + exp_avgs: List[torch.Tensor], + *, + beta1: float, + beta2: float, + lr: float, + weight_decay: float, + caution: bool, + maximize: bool, +): + if len(params) == 0: + return + + if maximize: + grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment] + + grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in 
class MADGRAD(torch.optim.Optimizer):
    """
    MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic
    Optimization.

    .. _MADGRAD: https://arxiv.org/abs/2101.11075

    MADGRAD is a general purpose optimizer that can be used in place of SGD or
    Adam may converge faster and generalize better. Currently GPU-only.
    Typically, the same learning rate schedule that is used for SGD or Adam may
    be used. The overall learning rate is not comparable to either method and
    should be determined by a hyper-parameter sweep.

    MADGRAD requires less weight decay than other methods, often as little as
    zero. Momentum values used for SGD or Adam's beta1 should work here also.

    On sparse problems both weight_decay and momentum should be set to 0.

    Arguments:
        params (iterable):
            Iterable of parameters to optimize or dicts defining parameter groups.
        lr (float):
            Learning rate (default: 1e-2).
        momentum (float):
            Momentum value in the range [0,1) (default: 0.9).
        weight_decay (float):
            Weight decay, i.e. a L2 penalty (default: 0).
        eps (float):
            Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
        decoupled_decay (bool):
            Apply weight decay multiplicatively on the parameter (AdamW-style)
            instead of adding it to the gradient as an L2 penalty (default: False).
    """

    def __init__(
        self,
        params: _params_t,
        lr: float = 1e-2,
        momentum: float = 0.9,
        weight_decay: float = 0,
        eps: float = 1e-6,
        decoupled_decay: bool = False,
    ):
        if momentum < 0 or momentum >= 1:
            raise ValueError(f"Momentum {momentum} must be in the range [0,1]")
        if lr <= 0:
            raise ValueError(f"Learning rate {lr} must be positive")
        if weight_decay < 0:
            raise ValueError(f"Weight decay {weight_decay} must be non-negative")
        if eps < 0:
            # NOTE(review): message omits the offending value (f-string has no placeholder)
            raise ValueError(f"Eps must be non-negative")

        defaults = dict(
            lr=lr,
            eps=eps,
            momentum=momentum,
            weight_decay=weight_decay,
            decoupled_decay=decoupled_decay,
        )
        super().__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self) -> bool:
        # fp16 param/grad handling is not implemented here
        return False

    @property
    def supports_flat_params(self) -> bool:
        return True

    @torch.no_grad()
    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            eps = group['eps']
            # eps folded into lr -- presumably keeps the step finite when lr == 0; matches upstream impl
            lr = group['lr'] + eps
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            # averaging weight for the z -> p moving average (momentum form)
            ck = 1 - momentum

            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad
                if momentum != 0.0 and grad.is_sparse:
                    raise RuntimeError("momentum != 0 is not compatible with sparse gradients")

                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    # grad_sum_sq: running sum of lamb-weighted squared gradients
                    state['grad_sum_sq'] = torch.zeros_like(p)
                    # s: dual-averaged (lamb-weighted) gradient sum
                    state['s'] = torch.zeros_like(p)
                    if momentum != 0:
                        # x0: the initial iterate, anchor of the dual averaging
                        state['x0'] = torch.clone(p).detach()

                state['step'] += 1
                grad_sum_sq = state['grad_sum_sq']
                s = state['s']
                # per-step dual-averaging weight lambda_k = lr * sqrt(k)
                lamb = lr * math.sqrt(state['step'])

                # Apply weight decay
                if weight_decay != 0:
                    if group['decoupled_decay']:
                        # decoupled (AdamW-style): shrink params directly, using raw lr (no eps)
                        p.mul_(1.0 - group['lr'] * weight_decay)
                    else:
                        if grad.is_sparse:
                            raise RuntimeError("weight_decay option is not compatible with sparse gradients")
                        # classic L2 penalty folded into the gradient
                        grad.add_(p, alpha=weight_decay)

                if grad.is_sparse:
                    # Sparse path: touch only the coordinates present in grad.
                    grad = grad.coalesce()
                    grad_val = grad._values()

                    p_masked = p.sparse_mask(grad)
                    grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
                    s_masked = s.sparse_mask(grad)

                    # Compute x_0 from other known quantities
                    rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
                    x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)

                    # Dense + sparse op
                    grad_sq = grad * grad
                    grad_sum_sq.add_(grad_sq, alpha=lamb)
                    grad_sum_sq_masked.add_(grad_sq, alpha=lamb)

                    # cube-root denominator (MADGRAD uses pow(1/3), not sqrt)
                    rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)

                    s.add_(grad, alpha=lamb)
                    s_masked._values().add_(grad_val, alpha=lamb)

                    # update masked copy of p
                    p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)
                    # Copy updated masked p to dense p using an add operation
                    p_masked._values().add_(p_kp1_masked_vals, alpha=-1)
                    p.add_(p_masked, alpha=-1)
                else:
                    if momentum == 0:
                        # Compute x_0 from other known quantities
                        # (with no momentum, x0 is reconstructed instead of stored)
                        rms = grad_sum_sq.pow(1 / 3).add_(eps)
                        x0 = p.addcdiv(s, rms, value=1)
                    else:
                        x0 = state['x0']

                    # Accumulate second moments
                    grad_sum_sq.addcmul_(grad, grad, value=lamb)
                    rms = grad_sum_sq.pow(1 / 3).add_(eps)

                    # Update s
                    s.add_(grad, alpha=lamb)

                    # Step
                    if momentum == 0:
                        p.copy_(x0.addcdiv(s, rms, value=-1))
                    else:
                        # z is the dual-averaged iterate; p tracks it via EMA
                        z = x0.addcdiv(s, rms, value=-1)

                        # p is a moving average of z
                        p.mul_(1 - ck).add_(z, alpha=ck)

        return loss
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 2e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + schedule_decay (float, optional): momentum schedule decay (default: 4e-3) + + __ http://cs229.stanford.edu/proj2015/054_report.pdf + __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf + + Originally taken from: https://github.com/pytorch/pytorch/pull/1408 + NOTE: Has potential issues but does work well on some problems. + """ + + def __init__( + self, + params, + lr=2e-3, + betas=(0.9, 0.999), + eps=1e-8, + weight_decay=0, + schedule_decay=4e-3, + ): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + defaults = dict( + lr=lr, + betas=betas, + eps=eps, + weight_decay=weight_decay, + schedule_decay=schedule_decay, + ) + super(NAdamLegacy, self).__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + state['m_schedule'] = 1. 
+ state['exp_avg'] = torch.zeros_like(p) + state['exp_avg_sq'] = torch.zeros_like(p) + + # Warming momentum schedule + m_schedule = state['m_schedule'] + schedule_decay = group['schedule_decay'] + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + eps = group['eps'] + state['step'] += 1 + t = state['step'] + bias_correction2 = 1 - beta2 ** t + + if group['weight_decay'] != 0: + grad = grad.add(p, alpha=group['weight_decay']) + + momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay))) + momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay))) + m_schedule_new = m_schedule * momentum_cache_t + m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1 + state['m_schedule'] = m_schedule_new + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2) + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps) + p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new)) + p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next)) + + return loss diff --git a/janus/lib/python3.10/site-packages/timm/optim/nvnovograd.py b/janus/lib/python3.10/site-packages/timm/optim/nvnovograd.py new file mode 100644 index 0000000000000000000000000000000000000000..068e5aa2c1c9e88a3dcff406591b5adab8338e24 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/optim/nvnovograd.py @@ -0,0 +1,132 @@ +""" Nvidia NovoGrad Optimizer. 
class NvNovoGrad(Optimizer):
    """
    Implements Novograd algorithm.

    Novograd normalizes each parameter tensor's gradient by a *layer-wise*
    (scalar) second moment before applying momentum, unlike Adam's
    elementwise second moment.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.95, 0.98))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        grad_averaging: gradient averaging
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
    """

    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.95, 0.98),
        eps=1e-8,
        weight_decay=0,
        grad_averaging=False,
        amsgrad=False,
    ):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            grad_averaging=grad_averaging,
            amsgrad=amsgrad,
        )

        super(NvNovoGrad, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(NvNovoGrad, self).__setstate__(state)
        # older checkpoints may predate the amsgrad option
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Sparse gradients are not supported.')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    # (a scalar tensor -- one second moment per parameter tensor)
                    state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # squared L2 norm of the full gradient tensor
                norm = torch.sum(torch.pow(grad, 2))

                if exp_avg_sq == 0:
                    # first step: seed the moment instead of biasing toward zero
                    exp_avg_sq.copy_(norm)
                else:
                    exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)

                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                # NOTE(review): the ops below mutate p.grad in place (normalize,
                # decay, scale); callers expecting pristine grads after step()
                # should be aware
                grad.div_(denom)
                if group['weight_decay'] != 0:
                    grad.add_(p, alpha=group['weight_decay'])
                if group['grad_averaging']:
                    grad.mul_(1 - beta1)
                exp_avg.mul_(beta1).add_(grad)

                p.add_(exp_avg, alpha=-group['lr'])

        return loss
class RAdamLegacy(Optimizer):
    """ PyTorch RAdam optimizer

    NOTE: This impl has been deprecated in favour of torch.optim.AdamW and remains as a reference
    """
    def __init__(
        self,
        params,
        lr=1e-3,
        betas=(0.9, 0.999),
        eps=1e-8,
        weight_decay=0,
    ):
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            # 10-slot ring buffer caching (step, num_sma, step_size) per step;
            # shared by every param in the group since they depend only on step
            buffer=[[None, None, None] for _ in range(10)]
        )
        super(RAdamLegacy, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdamLegacy, self).__setstate__(state)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:

            for p in group['params']:
                if p.grad is None:
                    continue
                # compute in fp32 regardless of param dtype (half-precision safe)
                grad = p.grad.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                p_fp32 = p.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_fp32)
                else:
                    # keep moments in the fp32 working dtype
                    state['exp_avg'] = state['exp_avg'].type_as(p_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                # num_sma (length of the SMA approximation) and step_size depend
                # only on the step count, so look them up in the ring buffer first
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    num_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    num_sma_max = 2 / (1 - beta2) - 1
                    num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = num_sma

                    # more conservative since it's an approximated value
                    if num_sma >= 5:
                        # variance-rectified step (RAdam eq. for r_t), with bias correction
                        step_size = group['lr'] * math.sqrt(
                            (1 - beta2_t) *
                            (num_sma - 4) / (num_sma_max - 4) *
                            (num_sma - 2) / num_sma *
                            num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        # early steps: plain SGD-with-momentum style step (no adaptivity)
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    # decoupled decay applied to the fp32 master copy
                    p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr'])

                # more conservative since it's an approximated value
                if num_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    p_fp32.add_(exp_avg, alpha=-step_size)

                # write the fp32 result back into the (possibly lower-precision) param
                p.copy_(p_fp32)

        return loss
a/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/cosine_lr.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/cosine_lr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..619a927f7715b5bb561b0df752233acf548ce1ae Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/cosine_lr.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/multistep_lr.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/multistep_lr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e6fb15c893de5e65b91af2cf6f971df20f43bc4 Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/multistep_lr.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/plateau_lr.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/plateau_lr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c62d9518cc1ea3153568a93290d267f59d49390 Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/plateau_lr.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/poly_lr.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/poly_lr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f80e054ce5b5f4d005d634d0ce93dc15d30029b Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/poly_lr.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/scheduler.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc9bc22147313212bd8eb8df7ecdaa9df7eead42 
Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/scheduler.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/scheduler_factory.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/scheduler_factory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..802862791901e91170bb6679ee0f568a3f0da93b Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/scheduler_factory.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/step_lr.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/step_lr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1fdfb947f28fc6c73ea7fd2459f8af2f6696ab3 Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/step_lr.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/tanh_lr.cpython-310.pyc b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/tanh_lr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a5c6e1eeba2d208cca31d24ce4b8c8c7b8c5a58 Binary files /dev/null and b/janus/lib/python3.10/site-packages/timm/scheduler/__pycache__/tanh_lr.cpython-310.pyc differ diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/cosine_lr.py b/janus/lib/python3.10/site-packages/timm/scheduler/cosine_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..00dd9357d974f0781478459879cbbfe2efbeb425 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/scheduler/cosine_lr.py @@ -0,0 +1,117 @@ +""" Cosine Scheduler + +Cosine LR schedule with warmup, cycle/restarts, noise, k-decay. 
class CosineLRScheduler(Scheduler):
    """
    Cosine decay with restarts.
    This is described in the paper https://arxiv.org/abs/1608.03983.

    Inspiration from
    https://github.com/allenai/allennlp/blob/master/allennlp/training/learning_rate_schedulers/cosine.py

    k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909

    Args:
        optimizer: wrapped optimizer whose param group LRs are scheduled.
        t_initial: length of the first cycle (epochs or updates per ``t_in_epochs``).
        lr_min: floor LR reached at the end of each cycle.
        cycle_mul: multiplier applied to the cycle length after each restart.
        cycle_decay: multiplier applied to the max LR after each restart.
        cycle_limit: number of cycles to run before holding at ``lr_min``.
        warmup_t: number of warmup steps (linear ramp from ``warmup_lr_init``).
        warmup_prefix: if True, warmup is prepended and cycle time restarts at 0 after it.
        k_decay: exponent applied to cycle progress (k-decay paper).
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        t_initial: int,
        lr_min: float = 0.,
        cycle_mul: float = 1.,
        cycle_decay: float = 1.,
        cycle_limit: int = 1,
        warmup_t=0,
        warmup_lr_init=0,
        warmup_prefix=False,
        t_in_epochs=True,
        noise_range_t=None,
        noise_pct=0.67,
        noise_std=1.0,
        noise_seed=42,
        k_decay=1.0,
        initialize=True,
    ) -> None:
        super().__init__(
            optimizer,
            param_group_field="lr",
            t_in_epochs=t_in_epochs,
            noise_range_t=noise_range_t,
            noise_pct=noise_pct,
            noise_std=noise_std,
            noise_seed=noise_seed,
            initialize=initialize,
        )

        assert t_initial > 0
        assert lr_min >= 0
        if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1:
            # FIX: message previously referenced stale parameter names (t_mul/eta_mul)
            _logger.warning(
                "Cosine annealing scheduler will have no effect on the learning "
                "rate since t_initial = cycle_mul = cycle_decay = 1.")
        self.t_initial = t_initial
        self.lr_min = lr_min
        self.cycle_mul = cycle_mul
        self.cycle_decay = cycle_decay
        self.cycle_limit = cycle_limit
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        self.k_decay = k_decay
        if self.warmup_t:
            # per-group linear warmup increments toward each base LR
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]

    def _get_lr(self, t: int) -> List[float]:
        """Return the LR for each param group at schedule time ``t``."""
        if t < self.warmup_t:
            # linear warmup from warmup_lr_init toward each base LR
            lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
        else:
            if self.warmup_prefix:
                # warmup was prepended; shift t so cycle math starts at 0
                t = t - self.warmup_t

            if self.cycle_mul != 1:
                # geometric cycles: recover cycle index i from the cumulative
                # length sum t_initial * (1 - cycle_mul**i) / (1 - cycle_mul)
                i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
                t_i = self.cycle_mul ** i * self.t_initial
                t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
            else:
                # fixed-length cycles
                i = t // self.t_initial
                t_i = self.t_initial
                t_curr = t - (self.t_initial * i)

            # shrink the cycle's peak LR after each restart
            gamma = self.cycle_decay ** i
            lr_max_values = [v * gamma for v in self.base_values]
            k = self.k_decay

            if i < self.cycle_limit:
                # cosine from lr_max down to lr_min over the cycle, with
                # progress warped by the k-decay exponent
                lrs = [
                    self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 + math.cos(math.pi * t_curr ** k / t_i ** k))
                    for lr_max in lr_max_values
                ]
            else:
                # past the cycle limit: hold at the floor
                lrs = [self.lr_min for _ in self.base_values]

        return lrs

    def get_cycle_length(self, cycles=0):
        """Total schedule length (in t units) for the given number of cycles."""
        cycles = max(1, cycles or self.cycle_limit)
        if self.cycle_mul == 1.0:
            t = self.t_initial * cycles
        else:
            # geometric series sum of cycle lengths
            t = int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
        return t + self.warmup_t if self.warmup_prefix else t
class MultiStepLRScheduler(Scheduler):
    """Multi-step LR schedule with linear warmup and optional noise.

    Each param group's base LR is multiplied by ``decay_rate`` once for every
    milestone in ``decay_t`` that has been passed at time ``t``.
    """

    def __init__(
        self,
        optimizer: torch.optim.Optimizer,
        decay_t: List[int],
        decay_rate: float = 1.,
        warmup_t=0,
        warmup_lr_init=0,
        warmup_prefix=True,
        t_in_epochs=True,
        noise_range_t=None,
        noise_pct=0.67,
        noise_std=1.0,
        noise_seed=42,
        initialize=True,
    ) -> None:
        super().__init__(
            optimizer,
            param_group_field="lr",
            t_in_epochs=t_in_epochs,
            noise_range_t=noise_range_t,
            noise_pct=noise_pct,
            noise_std=noise_std,
            noise_seed=noise_seed,
            initialize=initialize,
        )

        self.decay_t = decay_t
        self.decay_rate = decay_rate
        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        self.warmup_prefix = warmup_prefix
        if not self.warmup_t:
            self.warmup_steps = [1] * len(self.base_values)
        else:
            # per-group linear increment from warmup_lr_init up to each base LR
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)

    def get_curr_decay_steps(self, t):
        # Count milestones passed by time t; self.decay_t is assumed sorted
        # ascending, so bisect gives the insertion point in O(log n).
        return bisect.bisect_right(self.decay_t, t + 1)

    def _get_lr(self, t: int) -> List[float]:
        if t < self.warmup_t:
            # still warming up: linear ramp
            return [self.warmup_lr_init + t * step for step in self.warmup_steps]
        if self.warmup_prefix:
            # warmup was prepended, so shift t before milestone comparison
            t = t - self.warmup_t
        scale = self.decay_rate ** self.get_curr_decay_steps(t)
        return [v * scale for v in self.base_values]
class PlateauLRScheduler(Scheduler):
    """Decay the LR by a factor every time the validation loss plateaus.

    Wraps ``torch.optim.lr_scheduler.ReduceLROnPlateau`` and layers timm's
    warmup and noise handling on top. Unlike other timm schedulers, ``step``
    is fully overridden and ``_get_lr`` is never used.
    """

    def __init__(
        self,
        optimizer,
        decay_rate=0.1,
        patience_t=10,
        verbose=True,
        threshold=1e-4,
        cooldown_t=0,
        warmup_t=0,
        warmup_lr_init=0,
        lr_min=0,
        mode='max',
        noise_range_t=None,
        noise_type='normal',
        noise_pct=0.67,
        noise_std=1.0,
        noise_seed=None,
        initialize=True,
    ):
        super().__init__(
            optimizer,
            'lr',
            noise_range_t=noise_range_t,
            noise_type=noise_type,
            noise_pct=noise_pct,
            noise_std=noise_std,
            noise_seed=noise_seed,
            initialize=initialize,
        )

        # delegate the plateau detection / LR reduction to the torch scheduler
        self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            patience=patience_t,
            factor=decay_rate,
            verbose=verbose,
            threshold=threshold,
            cooldown=cooldown_t,
            mode=mode,
            min_lr=lr_min
        )

        self.warmup_t = warmup_t
        self.warmup_lr_init = warmup_lr_init
        if self.warmup_t:
            # per-group linear warmup increments toward each base LR
            self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
            super().update_groups(self.warmup_lr_init)
        else:
            self.warmup_steps = [1 for _ in self.base_values]
        # pre-noise LRs saved so the base scheduler never sees noisy values
        self.restore_lr = None

    def state_dict(self):
        # only the wrapped scheduler carries state worth persisting
        return {
            'best': self.lr_scheduler.best,
            'last_epoch': self.lr_scheduler.last_epoch,
        }

    def load_state_dict(self, state_dict):
        self.lr_scheduler.best = state_dict['best']
        if 'last_epoch' in state_dict:
            self.lr_scheduler.last_epoch = state_dict['last_epoch']

    # override the base class step fn completely
    def step(self, epoch, metric=None):
        # NOTE(review): warmup test is inclusive (epoch <= warmup_t) whereas
        # sibling schedulers use t < warmup_t -- confirm this is intentional
        if epoch <= self.warmup_t:
            lrs = [self.warmup_lr_init + epoch * s for s in self.warmup_steps]
            super().update_groups(lrs)
        else:
            if self.restore_lr is not None:
                # restore actual LR from before our last noise perturbation before stepping base
                for i, param_group in enumerate(self.optimizer.param_groups):
                    param_group['lr'] = self.restore_lr[i]
                self.restore_lr = None

            self.lr_scheduler.step(metric, epoch)  # step the base scheduler

            if self._is_apply_noise(epoch):
                self._apply_noise(epoch)

    def step_update(self, num_updates: int, metric: float = None):
        # plateau scheduling is epoch-based only; per-update stepping is a no-op
        return None

    def _apply_noise(self, epoch):
        noise = self._calculate_noise(epoch)

        # apply the noise on top of previous LR, cache the old value so we can restore for normal
        # stepping of base scheduler
        restore_lr = []
        for i, param_group in enumerate(self.optimizer.param_groups):
            old_lr = float(param_group['lr'])
            restore_lr.append(old_lr)
            new_lr = old_lr + old_lr * noise
            param_group['lr'] = new_lr
        self.restore_lr = restore_lr

    def _get_lr(self, t: int) -> List[float]:
        assert False, 'should not be called as step is overridden'
+ +Hacked together by / Copyright 2021 Ross Wightman +""" +import math +import logging +from typing import List + +import torch + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class PolyLRScheduler(Scheduler): + """ Polynomial LR Scheduler w/ warmup, noise, and k-decay + + k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + t_initial: int, + power: float = 0.5, + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + k_decay=1.0, + initialize=True, + ) -> None: + super().__init__( + optimizer, + param_group_field="lr", + t_in_epochs=t_in_epochs, + noise_range_t=noise_range_t, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + initialize=initialize + ) + + assert t_initial > 0 + assert lr_min >= 0 + if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: + _logger.warning("Cosine annealing scheduler will have no effect on the learning " + "rate since t_initial = t_mul = eta_mul = 1.") + self.t_initial = t_initial + self.power = power + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + self.k_decay = k_decay + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t: int) -> List[float]: + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if 
self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + gamma = self.cycle_decay ** i + lr_max_values = [v * gamma for v in self.base_values] + k = self.k_decay + + if i < self.cycle_limit: + lrs = [ + self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + + return lrs + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + t = self.t_initial * cycles + else: + t = int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) + return t + self.warmup_t if self.warmup_prefix else t diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/scheduler.py b/janus/lib/python3.10/site-packages/timm/scheduler/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..583357f7c522a0ce5091e435ac04b8e468751342 --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/scheduler/scheduler.py @@ -0,0 +1,127 @@ +import abc +from abc import ABC +from typing import Any, Dict, List, Optional + +import torch + + +class Scheduler(ABC): + """ Parameter Scheduler Base Class + A scheduler base class that can be used to schedule any optimizer parameter groups. + + Unlike the builtin PyTorch schedulers, this is intended to be consistently called + * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value + * At the END of each optimizer update, after incrementing the update count, to calculate next update's value + + The schedulers built on this should try to remain as stateless as possible (for simplicity). 
+ + This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' + and -1 values for special behaviour. All epoch and update counts must be tracked in the training + code and explicitly passed in to the schedulers on the corresponding step or step_update call. + + Based on ideas from: + * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler + * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + param_group_field: str, + t_in_epochs: bool = True, + noise_range_t=None, + noise_type='normal', + noise_pct=0.67, + noise_std=1.0, + noise_seed=None, + initialize: bool = True, + ) -> None: + self.optimizer = optimizer + self.param_group_field = param_group_field + self._initial_param_group_field = f"initial_{param_group_field}" + if initialize: + for i, group in enumerate(self.optimizer.param_groups): + if param_group_field not in group: + raise KeyError(f"{param_group_field} missing from param_groups[{i}]") + group.setdefault(self._initial_param_group_field, group[param_group_field]) + else: + for i, group in enumerate(self.optimizer.param_groups): + if self._initial_param_group_field not in group: + raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") + self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] + self.metric = None # any point to having this for all? 
+ self.t_in_epochs = t_in_epochs + self.noise_range_t = noise_range_t + self.noise_pct = noise_pct + self.noise_type = noise_type + self.noise_std = noise_std + self.noise_seed = noise_seed if noise_seed is not None else 42 + self.update_groups(self.base_values) + + def state_dict(self) -> Dict[str, Any]: + return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + self.__dict__.update(state_dict) + + @abc.abstractmethod + def _get_lr(self, t: int) -> List[float]: + pass + + def _get_values(self, t: int, on_epoch: bool = True) -> Optional[List[float]]: + proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs) + if not proceed: + return None + return self._get_lr(t) + + def step(self, epoch: int, metric: float = None) -> None: + self.metric = metric + values = self._get_values(epoch, on_epoch=True) + if values is not None: + values = self._add_noise(values, epoch) + self.update_groups(values) + + def step_update(self, num_updates: int, metric: float = None): + self.metric = metric + values = self._get_values(num_updates, on_epoch=False) + if values is not None: + values = self._add_noise(values, num_updates) + self.update_groups(values) + + def update_groups(self, values): + if not isinstance(values, (list, tuple)): + values = [values] * len(self.optimizer.param_groups) + for param_group, value in zip(self.optimizer.param_groups, values): + if 'lr_scale' in param_group: + param_group[self.param_group_field] = value * param_group['lr_scale'] + else: + param_group[self.param_group_field] = value + + def _add_noise(self, lrs, t): + if self._is_apply_noise(t): + noise = self._calculate_noise(t) + lrs = [v + v * noise for v in lrs] + return lrs + + def _is_apply_noise(self, t) -> bool: + """Return True if scheduler in noise range.""" + apply_noise = False + if self.noise_range_t is not None: + if isinstance(self.noise_range_t, (list, tuple)): + 
apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] + else: + apply_noise = t >= self.noise_range_t + return apply_noise + + def _calculate_noise(self, t) -> float: + g = torch.Generator() + g.manual_seed(self.noise_seed + t) + if self.noise_type == 'normal': + while True: + # resample if noise out of percent limit, brute force but shouldn't spin much + noise = torch.randn(1, generator=g).item() + if abs(noise) < self.noise_pct: + return noise + else: + noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct + return noise diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/step_lr.py b/janus/lib/python3.10/site-packages/timm/scheduler/step_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..c205d437153f0960f864fddaa043f4028db3de3a --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/scheduler/step_lr.py @@ -0,0 +1,63 @@ +""" Step Scheduler + +Basic step LR schedule with warmup, noise. + +Hacked together by / Copyright 2020 Ross Wightman +""" +import math +import torch +from typing import List + + +from .scheduler import Scheduler + + +class StepLRScheduler(Scheduler): + """ + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + decay_t: float, + decay_rate: float = 1., + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=True, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, + param_group_field="lr", + t_in_epochs=t_in_epochs, + noise_range_t=noise_range_t, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + initialize=initialize, + ) + + self.decay_t = decay_t + self.decay_rate = decay_rate + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + if self.warmup_t: + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] + super().update_groups(self.warmup_lr_init) + else: + 
self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t: int) -> List[float]: + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + lrs = [v * (self.decay_rate ** (t // self.decay_t)) for v in self.base_values] + return lrs diff --git a/janus/lib/python3.10/site-packages/timm/scheduler/tanh_lr.py b/janus/lib/python3.10/site-packages/timm/scheduler/tanh_lr.py new file mode 100644 index 0000000000000000000000000000000000000000..932229262e32455fe5e60537b2c3000f0c1616da --- /dev/null +++ b/janus/lib/python3.10/site-packages/timm/scheduler/tanh_lr.py @@ -0,0 +1,114 @@ +""" TanH Scheduler + +TanH schedule with warmup, cycle/restarts, noise. + +Hacked together by / Copyright 2021 Ross Wightman +""" +import logging +import math +import numpy as np +import torch +from typing import List + +from .scheduler import Scheduler + + +_logger = logging.getLogger(__name__) + + +class TanhLRScheduler(Scheduler): + """ + Hyberbolic-Tangent decay with restarts. 
+ This is described in the paper https://arxiv.org/abs/1806.01593 + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + t_initial: int, + lb: float = -7., + ub: float = 3., + lr_min: float = 0., + cycle_mul: float = 1., + cycle_decay: float = 1., + cycle_limit: int = 1, + warmup_t=0, + warmup_lr_init=0, + warmup_prefix=False, + t_in_epochs=True, + noise_range_t=None, + noise_pct=0.67, + noise_std=1.0, + noise_seed=42, + initialize=True, + ) -> None: + super().__init__( + optimizer, + param_group_field="lr", + t_in_epochs=t_in_epochs, + noise_range_t=noise_range_t, + noise_pct=noise_pct, + noise_std=noise_std, + noise_seed=noise_seed, + initialize=initialize, + ) + + assert t_initial > 0 + assert lr_min >= 0 + assert lb < ub + assert cycle_limit >= 0 + assert warmup_t >= 0 + assert warmup_lr_init >= 0 + self.lb = lb + self.ub = ub + self.t_initial = t_initial + self.lr_min = lr_min + self.cycle_mul = cycle_mul + self.cycle_decay = cycle_decay + self.cycle_limit = cycle_limit + self.warmup_t = warmup_t + self.warmup_lr_init = warmup_lr_init + self.warmup_prefix = warmup_prefix + if self.warmup_t: + t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t) + self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v] + super().update_groups(self.warmup_lr_init) + else: + self.warmup_steps = [1 for _ in self.base_values] + + def _get_lr(self, t: int) -> List[float]: + if t < self.warmup_t: + lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] + else: + if self.warmup_prefix: + t = t - self.warmup_t + + if self.cycle_mul != 1: + i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) + t_i = self.cycle_mul ** i * self.t_initial + t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial + else: + i = t // self.t_initial + t_i = self.t_initial + t_curr = t - (self.t_initial * i) + + if i < self.cycle_limit: + gamma = self.cycle_decay ** i + lr_max_values = [v * 
gamma for v in self.base_values] + + tr = t_curr / t_i + lrs = [ + self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr)) + for lr_max in lr_max_values + ] + else: + lrs = [self.lr_min for _ in self.base_values] + return lrs + + def get_cycle_length(self, cycles=0): + cycles = max(1, cycles or self.cycle_limit) + if self.cycle_mul == 1.0: + t = self.t_initial * cycles + else: + t = int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul))) + return t + self.warmup_t if self.warmup_prefix else t