diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f44adc233a9e1b7f281ae375ba60d06196f1e08 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f90f913afc2b2290458be19bb309218ab9232338 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..817be29572084430ef84af33c45939ddc5814eee Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/einops.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/einops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7f4d56b5edd9bb7882f5e66307d023073ba1930 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/einops.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/packing.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/packing.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0027f3f4fd455bcdeb68b4785478a3907253bf97 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/packing.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/parsing.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/parsing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8eff9eb808063f703597e4702a8c6e7225197d2e Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/__pycache__/parsing.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/_torch_specific.py b/evalkit_tf446/lib/python3.10/site-packages/einops/_torch_specific.py new file mode 100644 index 0000000000000000000000000000000000000000..b8ab16f3d581d1f38aacafaebc6e55367287ed6e --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/_torch_specific.py @@ -0,0 +1,102 @@ +""" +Specialization of einops for torch. + +Unfortunately, torch's jit scripting mechanism isn't strong enough, +and to have scripting supported at least for layers, +a number of changes is required, and this layer helps. 
+ +Importantly, whole lib is designed so that you can't use it +""" +import warnings +from typing import Dict, List + +import torch +from einops.einops import TransformRecipe, _reconstruct_from_shape_uncached + + +class TorchJitBackend: + """ + Completely static backend that mimics part of normal backend functionality + but restricted to torch stuff only + """ + + @staticmethod + def reduce(x: torch.Tensor, operation: str, reduced_axes: List[int]): + if operation == 'min': + return x.amin(dim=reduced_axes) + elif operation == 'max': + return x.amax(dim=reduced_axes) + elif operation == 'sum': + return x.sum(dim=reduced_axes) + elif operation == 'mean': + return x.mean(dim=reduced_axes) + elif operation == 'prod': + for i in list(sorted(reduced_axes))[::-1]: + x = x.prod(dim=i) + return x + else: + raise NotImplementedError('Unknown reduction ', operation) + + @staticmethod + def transpose(x, axes: List[int]): + return x.permute(axes) + + @staticmethod + def stack_on_zeroth_dimension(tensors: List[torch.Tensor]): + return torch.stack(tensors) + + @staticmethod + def tile(x, repeats: List[int]): + return x.repeat(repeats) + + @staticmethod + def add_axes(x, n_axes: int, pos2len: Dict[int, int]): + repeats = [-1] * n_axes + for axis_position, axis_length in pos2len.items(): + x = torch.unsqueeze(x, axis_position) + repeats[axis_position] = axis_length + return x.expand(repeats) + + @staticmethod + def is_float_type(x): + return x.dtype in [torch.float16, torch.float32, torch.float64, torch.bfloat16] + + @staticmethod + def shape(x): + return x.shape + + @staticmethod + def reshape(x, shape: List[int]): + return x.reshape(shape) + + +# mirrors einops.einops._apply_recipe +def apply_for_scriptable_torch(recipe: TransformRecipe, tensor: torch.Tensor, reduction_type: str) -> torch.Tensor: + backend = TorchJitBackend + init_shapes, reduced_axes, axes_reordering, added_axes, final_shapes = \ + _reconstruct_from_shape_uncached(recipe, backend.shape(tensor)) + tensor = 
backend.reshape(tensor, init_shapes) + if len(reduced_axes) > 0: + tensor = backend.reduce(tensor, operation=reduction_type, reduced_axes=reduced_axes) + tensor = backend.transpose(tensor, axes_reordering) + if len(added_axes) > 0: + tensor = backend.add_axes(tensor, n_axes=len(axes_reordering) + len(added_axes), pos2len=added_axes) + return backend.reshape(tensor, final_shapes) + + +def allow_ops_in_compiled_graph(): + try: + from torch._dynamo import allow_in_graph + except ImportError: + from warnings import warn + warnings.warn("allow_ops_in_compiled_graph failed to import torch: ensure pytorch >=2.0", ImportWarning) + + from .einops import rearrange, reduce, repeat, einsum + from .packing import pack, unpack + + allow_in_graph(rearrange) + allow_in_graph(reduce) + allow_in_graph(repeat) + allow_in_graph(einsum) + allow_in_graph(pack) + allow_in_graph(unpack) diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__init__.py b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/__init__.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8bac816e7a18f9186c11046c0709c4c71943bf6 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/data_api_packing.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/data_api_packing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..71d589404ade6724c45091764f3002164c39fd8f Binary files /dev/null and 
b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/data_api_packing.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1574ae80be4a99266c11952d50f2bec9e8707cc Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/data_api_packing.py b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/data_api_packing.py new file mode 100644 index 0000000000000000000000000000000000000000..5e3e04c58c4a9d6f30452fabc86ccf91fd9b8852 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/data_api_packing.py @@ -0,0 +1,137 @@ +from typing import List, TypeVar, Tuple, Sequence + +from einops import EinopsError + +T = TypeVar('T') + +Shape = Tuple[int, ...] 
+ + +def pack(pattern: str, tensors: Sequence[T]) -> Tuple[T, List[Shape]]: + axes = pattern.split() + if len(axes) != len(set(axes)): + raise EinopsError(f'Duplicates in axes names in pack("{pattern}", ...)') + if '*' not in axes: + raise EinopsError(f'No *-axis in pack("{pattern}", ...)') + + # need some validation of identifiers + + n_axes_before = axes.index('*') + n_axes_after = len(axes) - n_axes_before - 1 + min_axes = n_axes_before + n_axes_after + + xp = tensors[0].__array_namespace__() + + reshaped_tensors: List[T] = [] + packed_shapes: List[Shape] = [] + for i, tensor in enumerate(tensors): + shape = tensor.shape + if len(shape) < min_axes: + raise EinopsError(f'packed tensor #{i} (enumeration starts with 0) has shape {shape}, ' + f'while pattern {pattern} assumes at least {min_axes} axes') + axis_after_packed_axes = len(shape) - n_axes_after + packed_shapes.append(shape[n_axes_before:]) + reshaped_tensors.append( + xp.reshape(tensor, (*shape[:n_axes_before], -1, *shape[axis_after_packed_axes:])) + ) + + return xp.concat(reshaped_tensors, axis=n_axes_before), packed_shapes + + +def prod(x: Shape) -> int: + result = 1 + for i in x: + result *= i + return result + + +def unpack(pattern: str, tensor: T, packed_shapes: List[Shape]) -> List[T]: + axes = pattern.split() + if len(axes) != len(set(axes)): + raise EinopsError(f'Duplicates in axes names in unpack("{pattern}", ...)') + if '*' not in axes: + raise EinopsError(f'No *-axis in unpack("{pattern}", ...)') + + # need some validation of identifiers + + input_shape = tensor.shape + if len(input_shape) != len(axes): + raise EinopsError(f'unpack({pattern}, ...) 
received input of wrong dim with shape {input_shape}') + + unpacked_axis = axes.index('*') + + lengths_of_composed_axes: List[int] = [ + -1 if -1 in p_shape else prod(p_shape) + for p_shape in packed_shapes + ] + + n_unknown_composed_axes = sum(x == -1 for x in lengths_of_composed_axes) + if n_unknown_composed_axes > 1: + raise EinopsError( + f"unpack({pattern}, ...) received more than one -1 in {packed_shapes} and can't infer dimensions" + ) + + # following manipulations allow to skip some shape verifications + # and leave them to backends + + # [[], [2, 3], [4], [-1, 5], [6]] < examples of packed_axis + # split positions when computed should be + # [0, 1, 7, 11, N-6 , N ], where N = length of axis + split_positions = [0] * len(packed_shapes) + [input_shape[unpacked_axis]] + if n_unknown_composed_axes == 0: + for i, x in enumerate(lengths_of_composed_axes[:-1]): + split_positions[i + 1] = split_positions[i] + x + else: + unknown_composed_axis: int = lengths_of_composed_axes.index(-1) + for i in range(unknown_composed_axis): + split_positions[i + 1] = split_positions[i] + lengths_of_composed_axes[i] + for j in range(unknown_composed_axis + 1, len(lengths_of_composed_axes))[::-1]: + split_positions[j] = split_positions[j + 1] + lengths_of_composed_axes[j] + + xp = tensor.__array_namespace__() + shape_start = input_shape[:unpacked_axis] + shape_end = input_shape[unpacked_axis + 1:] + slice_filler = (slice(None, None),) * unpacked_axis + return [ + xp.reshape( + # shortest way slice arbitrary axis + tensor[(*slice_filler, slice(split_positions[i], split_positions[i + 1]))], + (*shape_start, *element_shape, *shape_end) + ) + for i, element_shape in enumerate(packed_shapes) + ] + + +if __name__ == '__main__': + import numpy.array_api as np + + H = 100 + W = 101 + C = 3 + + r = np.zeros((H, W)) + g = np.zeros((H, W)) + b = np.zeros((H, W)) + embeddings = np.zeros((H, W, 32)) + + im = np.stack([r, g, b], axis=-1) + print(im.shape) + + image, shapes = pack('h w *', [r, g, 
b]) + print(image.shape, shapes) + + print(type(image)) + print(type(im)) + assert np.all(np.equal(image, im)) + + images_and_embedding, shapes = pack('h w *', [r, g, b, embeddings]) + print(images_and_embedding.shape, shapes) + r2, g2, b2, embeddings2 = unpack('h w *', images_and_embedding, shapes) + assert np.all(np.equal(r, r2)) + assert np.all(np.equal(g, g2)) + assert np.all(np.equal(b, b2)) + assert np.all(np.equal(embeddings, embeddings2)) + + print([x.shape for x in unpack('h w *', images_and_embedding, shapes[1:])]) + + print('all is fine') diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/indexing.py b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..35cef4b304e531e78b0f576d441b054bb07e0c35 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/experimental/indexing.py @@ -0,0 +1,393 @@ +""" + +Indexing one array with the other(s). + +Concept for discussion. + +Notation targets hard cases, not simple ones, like indexing of 1d-array with another 1d-array +(notation supports that, but you can't simplify arr[ind], and there is no reason to) + +Examples + +1. query for every token in sequence a token in the image. Images and sequences are paired + einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, [h_indices_bt, w_indices_bt]) + + this is equivalent, so you can pass indexers idependently or together + einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, np.asarray([h_indices_bt, w_indices_bt])) + + after some thinking I decided that having first axis for indexing variable is not too restrictive, + but should simplify mapping of such cases. + For this reason [...] part should always go first in indexer. + + This makes the largest difference with einindex https://github.com/malmaud/einindex, + which has almost identical grammar, but puts special dimension last, while we put it first. 
+ This trick allows naturally decomposing multiindex into individual dimensions or visa versa. + + +2. query for every token in the video the most suitable word in a (matching) sentence + einindex('b t h w <- seq b, [seq] t b h w', arr_tbc, [t_indices_bhw]) + + note, that only one indexer is used, but still it has to be enclosed in the list. + That's a price for being generic. Alternatively leading singleton dimension can be added. + + +3. (not supported now, future planning) + for every timeframe in a video, find the token with the highest norm (across h and w), and compose a new stack of them + indices_2bt = argmax(x_bthwc.norm(dim=-1), 'b t h w -> [h, w] b t') + selected_embeddings_btc = einindex('b t c <- b t h w c, [h, w] b t', x_bthwc, indices_2bt) + + while currently question is around 'how do we index', + it is important to pre-align that with a question 'what are natural ways to get indices'. + Most common are min/max. less common options: topk (works here), random sampling. + + + +Some important properties of this notation: +- support for multiple indexers, including using a single tensor to keep multiple indexers +- 'batch' indexing, when some axes of indexer and array should be matched +- universal (one-indexing-to-rule-them-all) +- extensible for (named) ellipses, including variadic number of indexers +- extensible for einops-style compositions and decompositions +- extensible for outer indexing when indexers are not aligned + +Current implementation based on python array api and uses loops, +because no appropriate indexing available in the standard. + +""" + +from typing import List, Union, TypeVar, Tuple + +from einops import EinopsError + +T = TypeVar('T') + + +class CompositionDecomposition: + def __init__( + self, + decomposed_shape: List[str], + composed_shape: List[List[str]], + ): + flat_shape = [] + for x in composed_shape: + flat_shape.extend(x) + + self.compose_transposition: Tuple[int, ...] 
= tuple([decomposed_shape.index(x) for x in flat_shape]) + self.decompose_transposition: Tuple[int, ...] = tuple([flat_shape.index(x) for x in decomposed_shape]) + self.composed_shape = composed_shape + self.decomposed_shape = decomposed_shape + + def decompose(self, x, known_axes_lengths: dict[str, int]): + xp = x.__array_namespace__() + shape = x.shape + + flat_shape = [] + + for i, axis_group in enumerate(self.composed_shape): + unknown_axis_name = None + known_sizes_prod = 1 + for axis_name in axis_group: + if axis_name in known_axes_lengths: + known_sizes_prod *= known_axes_lengths[axis_name] + else: + if unknown_axis_name is None: + unknown_axis_name = axis_name + else: + raise EinopsError("Can't infer the size") + + if unknown_axis_name is None: + assert shape[i] == known_sizes_prod + else: + known_axes_lengths[unknown_axis_name] = shape[i] // known_sizes_prod + + for axis in axis_group: + flat_shape.append(known_axes_lengths[axis]) + + x = xp.reshape(x, flat_shape) + return xp.permute_dims(x, self.decompose_transposition) + + def compose(self, x, known_axes_lengths: dict[str, int]): + xp = x.__array_namespace__() + + for axis_len, axis_name in zip(x.shape, self.decomposed_shape): + if axis_name in known_axes_lengths: + assert known_axes_lengths[axis_name] == axis_len + else: + known_axes_lengths[axis_name] = axis_len + + x = xp.permute_dims(x, self.compose_transposition) + new_shape = [] + for axis_group in self.composed_shape: + composed_axis_size = 1 + for axis_name in axis_group: + composed_axis_size *= known_axes_lengths[axis_name] + new_shape.append(composed_axis_size) + + return xp.reshape(x, tuple(new_shape)) + + +def arange_at_position(xp, n_axes, axis, axis_len, device=None): + x = xp.arange(axis_len, dtype=xp.int64, device=device) + shape = [1] * n_axes + shape[axis] = axis_len + x = xp.reshape(x, shape) + return x + + +class IndexingFormula: + + def __init__(self, pattern: str): + """ + :param pattern: example 'b t c <- b hsel wsel c, [hsel, 
wsel] b t' + """ + self.pattern = pattern + left, right = pattern.split('<-') + arg_split = right.index(',') + arr_pattern, ind_pattern = right[:arg_split], right[arg_split + 1:] + ind_pattern = ind_pattern.strip() + # print( + # arr_pattern, '\n', + # ind_pattern, + # ) + assert ind_pattern.startswith('['), 'composition axis should go first in indexer (second argument) [h w] i j k' + composition_start = ind_pattern.index('[') + composition_end = ind_pattern.index(']') + composition = ind_pattern[composition_start + 1: composition_end] + ind_other_axes = ind_pattern[composition_end + 1:] + + self.result_axes_names = left.split() + self.array_axes_names = arr_pattern.split() + self.indexing_axes_names = [x.strip() for x in composition.split(',')] + self.indexer_other_axes_names = ind_other_axes.split() + + for group_name, group in [ + ('result', self.result_axes_names), + ('array', self.array_axes_names), + ('indexer', self.indexing_axes_names + self.indexer_other_axes_names), + ]: + if len(set(group)) != len(group): + # need more verbosity, which axis, raise + raise EinopsError(f'{group_name} pattern ({group}) contains a duplicated axis') + + axis_groups = [ + self.result_axes_names, + self.array_axes_names, + self.indexing_axes_names, + self.indexer_other_axes_names, + ] + + all_axes = set() + for group in axis_groups: + all_axes.update(group) + + self.indexer_axes = [] + self.batch_axes = [] + self.result_and_index_axes = [] + self.result_and_array_axes = [] + + for axis in all_axes: + presence = tuple(axis in g for g in axis_groups) + # want match-case here. 
sweet dreams + if presence == (False, True, True, False): + self.indexer_axes.append(axis) + elif presence[2]: + raise EinopsError(f'Wrong usage of indexer variable {axis}') + elif presence == (True, True, False, True): + self.batch_axes.append(axis) + elif presence == (True, False, False, True): + self.result_and_index_axes.append(axis) + elif presence == (True, True, False, False): + self.result_and_array_axes.append(axis) + else: + # TODO better categorization of wrong usage patterns + raise EinopsError(f'{axis} is used incorrectly in {pattern}') + + assert set(self.indexer_axes) == set(self.indexing_axes_names) + # order of these variables matters, since we can't lose mapping here + self.indexer_axes = self.indexing_axes_names + + self.array_composition = CompositionDecomposition( + decomposed_shape=self.array_axes_names, + composed_shape=[self.batch_axes + self.indexer_axes, self.result_and_array_axes], + ) + + self.index_composition = CompositionDecomposition( + decomposed_shape=self.indexer_other_axes_names, + # single axis after composition + composed_shape=[self.batch_axes + self.result_and_index_axes], + ) + + self.result_composition = CompositionDecomposition( + decomposed_shape=self.result_axes_names, + composed_shape=[self.batch_axes + self.result_and_index_axes, self.result_and_array_axes], + ) + + def apply_to_array_api(self, arr: T, ind: Union[T, List[T]]): + known_axes_sizes: dict[str, int] = {} + xp = arr.__array_namespace__() + + if not isinstance(ind, list): + ind = [ind[i, ...] for i in range(ind.shape[0])] + + for indexer in ind: + assert len(indexer.shape) == len(self.indexer_other_axes_names) + + # step 1. transpose, reshapes of arr; learn its dimensions + arr_2d = self.array_composition.compose(arr, known_axes_sizes) + + # step 2. 
compute shifts and create an actual indexing array + shift = 1 + full_index = xp.zeros([1] * len(ind[0].shape), dtype=xp.int64, device=arr.device) + + # original order: [*batch-like axes, *indexing_axes,] + # now we need to traverse them in the opposite direction + + for axis_name, indexer in list(zip(self.indexing_axes_names, ind))[::-1]: + full_index = full_index + shift * (indexer % known_axes_sizes[axis_name]) + shift *= known_axes_sizes[axis_name] + + for axis_name in self.batch_axes[::-1]: + axis_id = self.indexer_other_axes_names.index(axis_name) + full_index = full_index + arange_at_position( + xp, len(self.indexer_other_axes_names), axis=axis_id, axis_len=known_axes_sizes[axis_name], + device=arr.device, + ) * shift + shift *= known_axes_sizes[axis_name] + + assert shift == arr_2d.shape[0] + + # step 3. Flatten index + full_index = self.index_composition.compose(full_index, known_axes_sizes) + + # step 4. indexing + # python array api lacks any integer indexing, so... I use loops. + # did you know that there is conceptual programming ... just like art? + # result_2d = arr_2d[full_index] + result_2d = xp.stack([arr_2d[full_index[i], :] for i in range(full_index.shape[0])]) + + # step 5. doing resulting + result = self.result_composition.decompose(result_2d, known_axes_sizes) + return result + + +def einindex(pattern: str, arr: T, /, ind: Union[T, List[T]]): + """ + Demonstrates how einindex should work. + Supports data-api compliant arrays. 
+ """ + formula = IndexingFormula(pattern) + return formula.apply_to_array_api(arr, ind) + + +def test_composition_and_decomposition(): + import numpy.array_api as np + x = np.arange(2 * 3 * 5 * 7) + x = np.reshape(x, (2, 3, 5, 7)) + comp = CompositionDecomposition( + decomposed_shape=['a', 'b', 'c', 'd'], + composed_shape=[['a', 'b'], ['c', 'd']], + ) + assert comp.compose(x, known_axes_lengths={}).shape == (2 * 3, 5 * 7) + + y = CompositionDecomposition( + decomposed_shape=['a', 'b', 'c', 'd'], + composed_shape=[['a', 'b'], [], ['c', 'd']], + ).compose(x, {}) + assert y.shape == (2 * 3, 1, 5 * 7) + assert np.all(np.reshape(x, (-1,)) == np.reshape(y, (-1,))) + + comp = CompositionDecomposition( + decomposed_shape=['a', 'b', 'e', 'c', 'd'], + composed_shape=[['e', 'c'], ['b'], ['a', 'd']], + ) + x = np.arange(2 * 3 * 5 * 7 * 3) + x = np.reshape(x, (2, 3, 5, 7, 3)) + + axes = {} + y = comp.compose(x, axes) + x2 = comp.decompose(y, axes) + assert np.all(x == x2) + + +def test_simple_indexing(): + import numpy.array_api as np + + # simple 2d test + arr = np.reshape(np.arange(5 * 7), (5, 7)) + ind = np.arange(7) % 5 + x = einindex('j <- i j, [i] j', arr, [ind]) + for j, i in enumerate(ind): + assert arr[i, j] == x[j] + + y = einindex('j <- j i, [i] j', np.permute_dims(arr, (1, 0)), [ind]) + for j, i in enumerate(ind): + assert arr[i, j] == y[j] + + +def test_multidimensional_indexing(): + import numpy.array_api as np + + embedding_bhwc = ( + + arange_at_position(np, 4, 0, 2) * 1000 + + arange_at_position(np, 4, 1, 3) * 100 + + arange_at_position(np, 4, 2, 5) * 10 + + arange_at_position(np, 4, 3, 7) * 1 + ) + + hindices_bt = np.reshape(np.arange(6), (2, 3)) % 3 + windices_bt = np.reshape(np.arange(6), (2, 3)) % 5 + + # imagine that you have pairs of image <> sentence + # your goal is to get most suitable token from image for every token in sentence + # thus for every token in sentence you compute best k and v + + result = einindex('c t b <- b h w c, [h, w] b t', 
embedding_bhwc, [hindices_bt, windices_bt]) + # example of using a single array for indexing multiple axes + hw_indices_bt = np.stack([hindices_bt, windices_bt]) + result2 = einindex('c t b <- b h w c, [h, w] b t', embedding_bhwc, hw_indices_bt) + assert np.all(result == result2) + + # check vs manual element computation + result_manual = result * 0 + for b in range(2): + for t in range(3): + for c in range(7): + h = hindices_bt[b, t] + w = windices_bt[b, t] + result_manual[c, t, b] = embedding_bhwc[b, h, w, c] + + assert np.all(result == result_manual) + + +def test_reverse_indexing(): + import numpy.array_api as np + + C, T, B = 2, 3, 5 + # G = GPU, batch-like varaible + G = 4 + H = 7 + W = 9 + + arr_gtbc = ( + + arange_at_position(np, 4, 0, G) * 1000 + + arange_at_position(np, 4, 1, T) * 100 + + arange_at_position(np, 4, 2, B) * 10 + + arange_at_position(np, 4, 3, C) * 1 + ) + + t_indices_gbhw = np.reshape(np.arange(G * B * H * W), (G, B, H, W)) % T + + result = einindex('g b c h w <- g t b c, [t] g b h w', arr_gtbc, [t_indices_gbhw]) + + result_manual = result * 0 + for g in range(G): + for b in range(B): + for c in range(C): + for h in range(H): + for w in range(W): + t = t_indices_gbhw[g, b, h, w] + result_manual[g, b, c, h, w] = arr_gtbc[g, t, b, c] + + assert np.all(result == result_manual) + + diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__init__.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..955aad82f787af1c2b389740c6a96f66047b0d71 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__init__.py @@ -0,0 +1,80 @@ +__author__ = 'Alex Rogozhnikov' + +import functools +from typing import Any + +from einops.einops import _apply_recipe + +from ..einops import TransformRecipe, _prepare_transformation_recipe +from .. 
import EinopsError + + +class RearrangeMixin: + """ + Rearrange layer behaves identically to einops.rearrange operation. + + :param pattern: str, rearrangement pattern + :param axes_lengths: any additional specification of dimensions + + See einops.rearrange for source_examples. + """ + + def __init__(self, pattern: str, **axes_lengths: Any) -> None: + super().__init__() + self.pattern = pattern + self.axes_lengths = axes_lengths + self._recipe = self.recipe() # checking parameters + + def __repr__(self) -> str: + params = repr(self.pattern) + for axis, length in self.axes_lengths.items(): + params += ', {}={}'.format(axis, length) + return '{}({})'.format(self.__class__.__name__, params) + + @functools.lru_cache(maxsize=1024) + def recipe(self) -> TransformRecipe: + try: + hashable_lengths = tuple(sorted(self.axes_lengths.items())) + return _prepare_transformation_recipe(self.pattern, operation='rearrange', axes_lengths=hashable_lengths) + except EinopsError as e: + raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e)) + + def _apply_recipe(self, x): + return _apply_recipe(self._recipe, x, reduction_type='rearrange') + + +class ReduceMixin: + """ + Reduce layer behaves identically to einops.reduce operation. + + :param pattern: str, rearrangement pattern + :param reduction: one of available reductions ('min', 'max', 'sum', 'mean', 'prod'), case-sensitive + :param axes_lengths: any additional specification of dimensions + + See einops.reduce for source_examples. 
+ """ + + def __init__(self, pattern: str, reduction: str, **axes_lengths: Any): + super().__init__() + self.pattern = pattern + self.reduction = reduction + self.axes_lengths = axes_lengths + self._recipe = self.recipe() # checking parameters + + def __repr__(self): + params = '{!r}, {!r}'.format(self.pattern, self.reduction) + for axis, length in self.axes_lengths.items(): + params += ', {}={}'.format(axis, length) + return '{}({})'.format(self.__class__.__name__, params) + + @functools.lru_cache(maxsize=1024) + def recipe(self) -> TransformRecipe: + try: + hashable_lengths = tuple(sorted(self.axes_lengths.items())) + return _prepare_transformation_recipe( + self.pattern, operation=self.reduction, axes_lengths=hashable_lengths) + except EinopsError as e: + raise EinopsError(' Error while preparing {!r}\n {}'.format(self, e)) + + def _apply_recipe(self, x): + return _apply_recipe(self._recipe, x, reduction_type=self.reduction) diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/__init__.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c4698ddf9f9a8d25cfa2bb56e3ad7728c8f8c92 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d0acb6db1bda4f17c3e40f32d316c1342a48f13 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/chainer.cpython-310.pyc 
b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/chainer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0158d802733f8aab5f34ebcb48f1f83159c89208 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/chainer.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/flax.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/flax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ca6aca48faf8a2fa8dc90f2b21bcc719b594388 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/flax.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/gluon.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/gluon.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0466a93a34b567cebf595d4e32f6937161cc1cd Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/gluon.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/keras.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/keras.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af638d67808fada33079dd88973c160a4f62f1b6 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/keras.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..104ea3a798af3a98260ce50a038b43010416428e Binary files /dev/null and 
b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a0e008378c9ab3ed66b275fd03472941d830802 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c65c8e0c95ae98d64df417b00ed68d07d5f79eae Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/torch.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/torch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5496b3c7e6a72c863df308d1860c83bfe7f57e9 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/__pycache__/torch.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/_einmix.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/_einmix.py new file mode 100644 index 0000000000000000000000000000000000000000..dc9fd88d6d24d563ef270272f9ea9848f0dd50da --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/_einmix.py @@ -0,0 +1,176 @@ +from typing import Any, List, Optional, Dict + +from einops import EinopsError +from einops.parsing import ParsedExpression +import warnings +import string +from ..einops import _product + + +def 
_report_axes(axes: set, report_message: str): + if len(axes) > 0: + raise EinopsError(report_message.format(axes)) + + +class _EinmixMixin: + def __init__(self, pattern: str, weight_shape: str, bias_shape: Optional[str]=None, **axes_lengths: Any): + """ + EinMix - Einstein summation with automated tensor management and axis packing/unpacking. + + EinMix is an advanced tool, helpful tutorial: + https://github.com/arogozhnikov/einops/blob/master/docs/3-einmix-layer.ipynb + + Imagine taking einsum with two arguments, one of each input, and one - tensor with weights + >>> einsum('time batch channel_in, channel_in channel_out -> time batch channel_out', input, weight) + + This layer manages weights for you, syntax highlights separate role of weight matrix + >>> EinMix('time batch channel_in -> time batch channel_out', weight_shape='channel_in channel_out') + But otherwise it is the same einsum under the hood. + + Simple linear layer with bias term (you have one like that in your framework) + >>> EinMix('t b cin -> t b cout', weight_shape='cin cout', bias_shape='cout', cin=10, cout=20) + There is restriction to mix the last axis. Let's mix along height + >>> EinMix('h w c-> hout w c', weight_shape='h hout', bias_shape='hout', h=32, hout=32) + Channel-wise multiplication (like one used in normalizations) + >>> EinMix('t b c -> t b c', weight_shape='c', c=128) + Separate dense layer within each head, no connection between different heads + >>> EinMix('t b (head cin) -> t b (head cout)', weight_shape='head cin cout', ...) + + ... ah yes, you need to specify all dimensions of weight shape/bias shape in parameters. 
+ + Use cases: + - when channel dimension is not last, use EinMix, not transposition + - patch/segment embeddings + - when need only within-group connections to reduce number of weights and computations + - perfect as a part of sequential models + - next-gen MLPs (follow tutorial to learn more) + + Uniform He initialization is applied to weight tensor and encounters for number of elements mixed. + + Parameters + :param pattern: transformation pattern, left side - dimensions of input, right side - dimensions of output + :param weight_shape: axes of weight. A tensor of this shape is created, stored, and optimized in a layer + :param bias_shape: axes of bias added to output. Weights of this shape are created and stored. If `None` (the default), no bias is added. + :param axes_lengths: dimensions of weight tensor + """ + super().__init__() + self.pattern = pattern + self.weight_shape = weight_shape + self.bias_shape = bias_shape + self.axes_lengths = axes_lengths + self.initialize_einmix(pattern=pattern, weight_shape=weight_shape, bias_shape=bias_shape, axes_lengths=axes_lengths) + + def initialize_einmix(self, pattern: str, weight_shape: str, bias_shape: Optional[str], axes_lengths: dict): + left_pattern, right_pattern = pattern.split('->') + left = ParsedExpression(left_pattern) + right = ParsedExpression(right_pattern) + weight = ParsedExpression(weight_shape) + _report_axes( + set.difference(right.identifiers, {*left.identifiers, *weight.identifiers}), + 'Unrecognized identifiers on the right side of EinMix {}' + ) + + if left.has_ellipsis or right.has_ellipsis or weight.has_ellipsis: + raise EinopsError('Ellipsis is not supported in EinMix (right now)') + if any(x.has_non_unitary_anonymous_axes for x in [left, right, weight]): + raise EinopsError('Anonymous axes (numbers) are not allowed in EinMix') + if '(' in weight_shape or ')' in weight_shape: + raise EinopsError(f'Parenthesis is not allowed in weight shape: {weight_shape}') + + pre_reshape_pattern = None + 
pre_reshape_lengths = None + post_reshape_pattern = None + if any(len(group) != 1 for group in left.composition): + names: List[str] = [] + for group in left.composition: + names += group + composition = ' '.join(names) + pre_reshape_pattern = f'{left_pattern}->{composition}' + pre_reshape_lengths = {name: length for name, length in axes_lengths.items() if name in names} + + if any(len(group) != 1 for group in right.composition): + names = [] + for group in right.composition: + names += group + composition = ' '.join(names) + post_reshape_pattern = f'{composition}->{right_pattern}' + + self._create_rearrange_layers(pre_reshape_pattern, pre_reshape_lengths, post_reshape_pattern, {}) + + for axis in weight.identifiers: + if axis not in axes_lengths: + raise EinopsError('Dimension {} of weight should be specified'.format(axis)) + _report_axes( + set.difference(set(axes_lengths), {*left.identifiers, *weight.identifiers}), + 'Axes {} are not used in pattern', + ) + _report_axes( + set.difference(weight.identifiers, {*left.identifiers, *right.identifiers}), + 'Weight axes {} are redundant' + ) + if len(weight.identifiers) == 0: + warnings.warn('EinMix: weight has no dimensions (means multiplication by a number)') + + _weight_shape = [axes_lengths[axis] for axis, in weight.composition] + # single output element is a combination of fan_in input elements + _fan_in = _product([axes_lengths[axis] for axis, in weight.composition if axis not in right.identifiers]) + if bias_shape is not None: + if not isinstance(bias_shape, str): + raise EinopsError('bias shape should be string specifying which axes bias depends on') + bias = ParsedExpression(bias_shape) + _report_axes( + set.difference(bias.identifiers, right.identifiers), + 'Bias axes {} not present in output' + ) + _report_axes( + set.difference(bias.identifiers, set(axes_lengths)), + 'Sizes not provided for bias axes {}', + ) + + _bias_shape = [] + for axes in right.composition: + for axis in axes: + if axis in 
bias.identifiers: + _bias_shape.append(axes_lengths[axis]) + else: + _bias_shape.append(1) + else: + _bias_shape = None + + weight_bound = (3 / _fan_in) ** 0.5 + bias_bound = (1 / _fan_in) ** 0.5 + self._create_parameters(_weight_shape, weight_bound, _bias_shape, bias_bound) + + # rewrite einsum expression with single-letter latin identifiers so that + # expression will be understood by any framework + mapped_identifiers = {*left.identifiers, *right.identifiers, *weight.identifiers} + mapping2letters = {k: letter for letter, k in zip(string.ascii_lowercase, mapped_identifiers)} + + def write_flat(axes: list): + return ''.join(mapping2letters[axis] for axis in axes) + + self.einsum_pattern: str = '{},{}->{}'.format( + write_flat(left.flat_axes_order()), + write_flat(weight.flat_axes_order()), + write_flat(right.flat_axes_order()), + ) + + def _create_rearrange_layers(self, + pre_reshape_pattern: Optional[str], + pre_reshape_lengths: Optional[Dict], + post_reshape_pattern: Optional[str], + post_reshape_lengths: Optional[Dict]): + raise NotImplementedError('Should be defined in framework implementations') + + def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound): + """ Shape and implementations """ + raise NotImplementedError('Should be defined in framework implementations') + + def __repr__(self): + params = repr(self.pattern) + params += f", '{self.weight_shape}'" + if self.bias_shape is not None: + params += f", '{self.bias_shape}'" + for axis, length in self.axes_lengths.items(): + params += ', {}={}'.format(axis, length) + return '{}({})'.format(self.__class__.__name__, params) diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/chainer.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/chainer.py new file mode 100644 index 0000000000000000000000000000000000000000..0214bc323c4ea019128cefb00b72c9ece8d0f70b --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/chainer.py @@ -0,0 +1,53 @@ 
+from typing import Optional, Dict, cast + +import chainer + +from . import RearrangeMixin, ReduceMixin +from ._einmix import _EinmixMixin + +__author__ = 'Alex Rogozhnikov' + + +class Rearrange(RearrangeMixin, chainer.Link): + def __call__(self, x): + return self._apply_recipe(x) + + +class Reduce(ReduceMixin, chainer.Link): + def __call__(self, x): + return self._apply_recipe(x) + + +class EinMix(_EinmixMixin, chainer.Link): + def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound): + uniform = chainer.variable.initializers.Uniform + with self.init_scope(): + self.weight = chainer.variable.Parameter(uniform(weight_bound), weight_shape) + if bias_shape is not None: + self.bias = chainer.variable.Parameter(uniform(bias_bound), bias_shape) + else: + self.bias = None + + def _create_rearrange_layers(self, + pre_reshape_pattern: Optional[str], + pre_reshape_lengths: Optional[Dict], + post_reshape_pattern: Optional[str], + post_reshape_lengths: Optional[Dict], + ): + self.pre_rearrange = None + if pre_reshape_pattern is not None: + self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths)) + + self.post_rearrange = None + if post_reshape_pattern is not None: + self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths)) + + def __call__(self, input): + if self.pre_rearrange is not None: + input = self.pre_rearrange(input) + result = chainer.functions.einsum(self.einsum_pattern, input, self.weight) + if self.bias is not None: + result = result + self.bias + if self.post_rearrange is not None: + result = self.post_rearrange(result) + return result diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/flax.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/flax.py new file mode 100644 index 0000000000000000000000000000000000000000..abd4ec5b42243f44a37202cfb19aa8f1d56803ee --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/flax.py @@ -0,0 
+1,80 @@ +from dataclasses import field +from typing import Optional, Dict, cast + +import flax.linen as nn +import jax +import jax.numpy as jnp + +from . import RearrangeMixin, ReduceMixin +from ._einmix import _EinmixMixin + +__author__ = 'Alex Rogozhnikov' + + +class Reduce(nn.Module): + pattern: str + reduction: str + sizes: dict = field(default_factory=lambda: {}) + + def setup(self): + self.reducer = ReduceMixin(self.pattern, self.reduction, **self.sizes) + + def __call__(self, input): + return self.reducer._apply_recipe(input) + + +class Rearrange(nn.Module): + pattern: str + sizes: dict = field(default_factory=lambda: {}) + + def setup(self): + self.rearranger = RearrangeMixin(self.pattern, **self.sizes) + + def __call__(self, input): + return self.rearranger._apply_recipe(input) + + +class EinMix(nn.Module, _EinmixMixin): + pattern: str + weight_shape: str + bias_shape: Optional[str] = None + sizes: dict = field(default_factory=lambda: {}) + + def setup(self): + self.initialize_einmix( + pattern=self.pattern, + weight_shape=self.weight_shape, + bias_shape=self.bias_shape, + axes_lengths=self.sizes, + ) + + def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound): + self.weight = self.param("weight", jax.nn.initializers.uniform(weight_bound), weight_shape) + + if bias_shape is not None: + self.bias = self.param("bias", jax.nn.initializers.uniform(bias_bound), bias_shape) + else: + self.bias = None + + def _create_rearrange_layers(self, + pre_reshape_pattern: Optional[str], + pre_reshape_lengths: Optional[Dict], + post_reshape_pattern: Optional[str], + post_reshape_lengths: Optional[Dict]): + self.pre_rearrange = None + if pre_reshape_pattern is not None: + self.pre_rearrange = Rearrange(pre_reshape_pattern, sizes=cast(dict, pre_reshape_lengths)) + + self.post_rearrange = None + if post_reshape_pattern is not None: + self.post_rearrange = Rearrange(post_reshape_pattern, sizes=cast(dict, post_reshape_lengths)) + + def __call__(self, 
input): + if self.pre_rearrange is not None: + input = self.pre_rearrange(input) + result = jnp.einsum(self.einsum_pattern, input, self.weight) + if self.bias is not None: + result += self.bias + if self.post_rearrange is not None: + result = self.post_rearrange(result) + return result diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/gluon.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/gluon.py new file mode 100644 index 0000000000000000000000000000000000000000..141cb1ba9dae37e4f1036f0c2c39daa836b038c0 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/gluon.py @@ -0,0 +1,50 @@ +from typing import Optional, Dict + +import mxnet + +from . import RearrangeMixin, ReduceMixin +from ._einmix import _EinmixMixin + +__author__ = 'Alex Rogozhnikov' + + +class Rearrange(RearrangeMixin, mxnet.gluon.HybridBlock): + def hybrid_forward(self, F, x): + return self._apply_recipe(x) + + +class Reduce(ReduceMixin, mxnet.gluon.HybridBlock): + def hybrid_forward(self, F, x): + return self._apply_recipe(x) + + +class EinMix(_EinmixMixin, mxnet.gluon.HybridBlock): + def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound): + with self.name_scope(): + + self.weight = self.params.get(name='weight', shape=weight_shape, + init=mxnet.initializer.Uniform(weight_bound), + ) + if bias_shape is not None: + self.bias = self.params.get(name='bias', shape=bias_shape, + init=mxnet.initializer.Uniform(bias_bound), + ) + else: + self.bias = None + + def _create_rearrange_layers(self, + pre_reshape_pattern: Optional[str], + pre_reshape_lengths: Optional[Dict], + post_reshape_pattern: Optional[str], + post_reshape_lengths: Optional[Dict]): + if (pre_reshape_pattern is not None) or (post_reshape_pattern is not None): + raise NotImplementedError("EinMix in mxnet/gluon doesn't support axis group/ungroup " + "because einsum in gluon defined only for mx.np.ndarrays") + + def hybrid_forward(self, F, x, *args, **kwargs): + # 
mxnet.np can't work with 'usual' ndarrays; .data() is a standard way to get within in gluon + # .as_np_mndarray makes the necessary conversion + result = mxnet.np.einsum(self.einsum_pattern, x.as_np_ndarray(), self.weight.data()) + if self.bias is not None: + result += self.bias.data() + return result diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/keras.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/keras.py new file mode 100644 index 0000000000000000000000000000000000000000..e2533a2f77433196f0d78b1573afb4d681c95f76 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/keras.py @@ -0,0 +1,9 @@ +__author__ = 'Alex Rogozhnikov' + +from ..layers.tensorflow import Rearrange, Reduce, EinMix + +keras_custom_objects = { + Rearrange.__name__: Rearrange, + Reduce.__name__: Reduce, + EinMix.__name__: EinMix, +} diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/oneflow.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/oneflow.py new file mode 100644 index 0000000000000000000000000000000000000000..2885404db2c1560620f5f1800512b6b03b9e4396 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/oneflow.py @@ -0,0 +1,53 @@ +from typing import Optional, Dict, cast + +import oneflow as flow + +from . 
import RearrangeMixin, ReduceMixin +from ._einmix import _EinmixMixin + +__author__ = 'Tianhe Ren & Depeng Liang' + + +class Rearrange(RearrangeMixin, flow.nn.Module): + def forward(self, input): + return self._apply_recipe(input) + + +class Reduce(ReduceMixin, flow.nn.Module): + def forward(self, input): + return self._apply_recipe(input) + + +class EinMix(_EinmixMixin, flow.nn.Module): + def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound): + self.weight = flow.nn.Parameter(flow.zeros(weight_shape).uniform_(-weight_bound, weight_bound), + requires_grad=True) + if bias_shape is not None: + self.bias = flow.nn.Parameter(flow.zeros(bias_shape).uniform_(-bias_bound, bias_bound), + requires_grad=True) + else: + self.bias = None + + def _create_rearrange_layers(self, + pre_reshape_pattern: Optional[str], + pre_reshape_lengths: Optional[Dict], + post_reshape_pattern: Optional[str], + post_reshape_lengths: Optional[Dict], + ): + self.pre_rearrange = None + if pre_reshape_pattern is not None: + self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths)) + + self.post_rearrange = None + if post_reshape_pattern is not None: + self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths)) + + def forward(self, input): + if self.pre_rearrange is not None: + input = self.pre_rearrange(input) + result = flow.einsum(self.einsum_pattern, input, self.weight) + if self.bias is not None: + result += self.bias + if self.post_rearrange is not None: + result = self.post_rearrange(result) + return result diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/paddle.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/paddle.py new file mode 100644 index 0000000000000000000000000000000000000000..c3335604a4b8997d5a07b7c3e846b8ee9231aaeb --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/paddle.py @@ -0,0 +1,59 @@ +from typing import Optional, Dict, cast + 
+import paddle + +from . import RearrangeMixin, ReduceMixin +from ._einmix import _EinmixMixin + +__author__ = 'PaddlePaddle' + + +class Rearrange(RearrangeMixin, paddle.nn.Layer): + def forward(self, input): + return self._apply_recipe(input) + + +class Reduce(ReduceMixin, paddle.nn.Layer): + def forward(self, input): + return self._apply_recipe(input) + + +class EinMix(_EinmixMixin, paddle.nn.Layer): + def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound): + self.weight = self.create_parameter( + weight_shape, + default_initializer=paddle.nn.initializer.Uniform(-weight_bound, weight_bound) + ) + + if bias_shape is not None: + self.bias = self.create_parameter( + bias_shape, + default_initializer=paddle.nn.initializer.Uniform(-bias_bound, bias_bound) + ) + else: + self.bias = None + + def _create_rearrange_layers(self, + pre_reshape_pattern: Optional[str], + pre_reshape_lengths: Optional[Dict], + post_reshape_pattern: Optional[str], + post_reshape_lengths: Optional[Dict], + ): + self.pre_rearrange = None + if pre_reshape_pattern is not None: + self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths)) + + self.post_rearrange = None + if post_reshape_pattern is not None: + self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths)) + + def forward(self, input): + if self.pre_rearrange is not None: + input = self.pre_rearrange(input) + + result = paddle.einsum(self.einsum_pattern, input, self.weight) + if self.bias is not None: + result += self.bias + if self.post_rearrange is not None: + result = self.post_rearrange(result) + return result \ No newline at end of file diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/tensorflow.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/tensorflow.py new file mode 100644 index 0000000000000000000000000000000000000000..c89a71ad60480dbd237f99d366aedbf402e64c0b --- /dev/null +++ 
b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/tensorflow.py @@ -0,0 +1,85 @@ +from typing import List, Optional, Dict, cast + +import tensorflow as tf +from tensorflow.keras.layers import Layer + +from .._backends import UnknownSize +from . import RearrangeMixin, ReduceMixin +from ._einmix import _EinmixMixin +from ..einops import TransformRecipe, _reconstruct_from_shape_uncached + +__author__ = 'Alex Rogozhnikov' + + +def _compute_output_shape(recipe: TransformRecipe, input_shape) -> List[Optional[int]]: + input_shape = [UnknownSize() if d is None else int(d) for d in input_shape] + init_shapes, reduced_axes, axes_reordering, added_axes, final_shape = \ + _reconstruct_from_shape_uncached(recipe, input_shape) + output_shape: List[Optional[int]] = [None if isinstance(d, UnknownSize) else int(d) for d in final_shape] + return output_shape + + +class Rearrange(RearrangeMixin, Layer): + def compute_output_shape(self, input_shape): + return _compute_output_shape(self.recipe(), input_shape) + + def call(self, inputs): + return self._apply_recipe(inputs) + + def get_config(self): + return {'pattern': self.pattern, **self.axes_lengths} + + +class Reduce(ReduceMixin, Layer): + def compute_output_shape(self, input_shape): + return _compute_output_shape(self.recipe(), input_shape) + + def call(self, inputs): + return self._apply_recipe(inputs) + + def get_config(self): + return {'pattern': self.pattern, 'reduction': self.reduction, **self.axes_lengths} + + +class EinMix(_EinmixMixin, Layer): + def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound): + self.weight = tf.Variable(tf.random_uniform_initializer(-weight_bound, weight_bound)(shape=weight_shape), + trainable=True) + if bias_shape is not None: + self.bias = tf.Variable(tf.random_uniform_initializer(-bias_bound, bias_bound)(shape=bias_shape), + trainable=True) + else: + self.bias = None + + def _create_rearrange_layers(self, + pre_reshape_pattern: Optional[str], + 
pre_reshape_lengths: Optional[Dict], + post_reshape_pattern: Optional[str], + post_reshape_lengths: Optional[Dict], + ): + self.pre_rearrange = None + if pre_reshape_pattern is not None: + self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths)) + + self.post_rearrange = None + if post_reshape_pattern is not None: + self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths)) + + def build(self, input_shape): + pass + + def call(self, inputs): + if self.pre_rearrange is not None: + inputs = self.pre_rearrange(inputs) + result = tf.einsum(self.einsum_pattern, inputs, self.weight) + if self.bias is not None: + result = result + self.bias + if self.post_rearrange is not None: + result = self.post_rearrange(result) + return result + + def get_config(self): + return {'pattern': self.pattern, + 'weight_shape': self.weight_shape, + 'bias_shape': self.bias_shape, + **self.axes_lengths} diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/layers/torch.py b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/torch.py new file mode 100644 index 0000000000000000000000000000000000000000..319924156122ce53799d2d8e46389e9c60b9d4eb --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/layers/torch.py @@ -0,0 +1,62 @@ +from typing import Optional, Dict, cast + +import torch + +from . 
import RearrangeMixin, ReduceMixin +from ._einmix import _EinmixMixin +from .._torch_specific import apply_for_scriptable_torch + +__author__ = 'Alex Rogozhnikov' + + +class Rearrange(RearrangeMixin, torch.nn.Module): + def forward(self, input): + return apply_for_scriptable_torch(self._recipe, input, reduction_type='rearrange') + + def _apply_recipe(self, x): + # overriding parent method to prevent it's scripting + pass + + +class Reduce(ReduceMixin, torch.nn.Module): + def forward(self, input): + return apply_for_scriptable_torch(self._recipe, input, reduction_type=self.reduction) + + def _apply_recipe(self, x): + # overriding parent method to prevent it's scripting + pass + + +class EinMix(_EinmixMixin, torch.nn.Module): + def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound): + self.weight = torch.nn.Parameter(torch.zeros(weight_shape).uniform_(-weight_bound, weight_bound), + requires_grad=True) + if bias_shape is not None: + self.bias = torch.nn.Parameter(torch.zeros(bias_shape).uniform_(-bias_bound, bias_bound), + requires_grad=True) + else: + self.bias = None + + def _create_rearrange_layers(self, + pre_reshape_pattern: Optional[str], + pre_reshape_lengths: Optional[Dict], + post_reshape_pattern: Optional[str], + post_reshape_lengths: Optional[Dict], + ): + self.pre_rearrange = None + if pre_reshape_pattern is not None: + self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths)) + + self.post_rearrange = None + if post_reshape_pattern is not None: + self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths)) + + def forward(self, input): + if self.pre_rearrange is not None: + input = self.pre_rearrange(input) + result = torch.einsum(self.einsum_pattern, input, self.weight) + if self.bias is not None: + result += self.bias + if self.post_rearrange is not None: + result = self.post_rearrange(result) + return result diff --git 
a/evalkit_tf446/lib/python3.10/site-packages/einops/parsing.py b/evalkit_tf446/lib/python3.10/site-packages/einops/parsing.py new file mode 100644 index 0000000000000000000000000000000000000000..df0f4c53032f4289e0990e6404acfc31df9a045b --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/einops/parsing.py @@ -0,0 +1,149 @@ +from einops import EinopsError +import keyword +import warnings +from typing import List, Optional, Set, Tuple, Union + +_ellipsis: str = '…' # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated + + +class AnonymousAxis(object): + """Important thing: all instances of this class are not equal to each other """ + + def __init__(self, value: str): + self.value = int(value) + if self.value <= 1: + if self.value == 1: + raise EinopsError('No need to create anonymous axis of length 1. Report this as an issue') + else: + raise EinopsError('Anonymous axis should have positive length, not {}'.format(self.value)) + + def __repr__(self): + return "{}-axis".format(str(self.value)) + + +class ParsedExpression: + """ + non-mutable structure that contains information about one side of expression (e.g. 'b c (h w)') + and keeps some information important for downstream + """ + def __init__(self, expression: str, *, allow_underscore: bool = False, + allow_duplicates: bool = False): + self.has_ellipsis: bool = False + self.has_ellipsis_parenthesized: Optional[bool] = None + self.identifiers: Set[str] = set() + # that's axes like 2, 3, 4 or 5. Axes with size 1 are exceptional and replaced with empty composition + self.has_non_unitary_anonymous_axes: bool = False + # composition keeps structure of composite axes, see how different corner cases are handled in tests + self.composition: List[Union[List[str], str]] = [] + if '.' in expression: + if '...' 
not in expression: + raise EinopsError('Expression may contain dots only inside ellipsis (...)') + if str.count(expression, '...') != 1 or str.count(expression, '.') != 3: + raise EinopsError( + 'Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor ') + expression = expression.replace('...', _ellipsis) + self.has_ellipsis = True + + bracket_group: Optional[List[str]] = None + + def add_axis_name(x): + if x in self.identifiers: + if not (allow_underscore and x == "_") and not allow_duplicates: + raise EinopsError('Indexing expression contains duplicate dimension "{}"'.format(x)) + if x == _ellipsis: + self.identifiers.add(_ellipsis) + if bracket_group is None: + self.composition.append(_ellipsis) + self.has_ellipsis_parenthesized = False + else: + bracket_group.append(_ellipsis) + self.has_ellipsis_parenthesized = True + else: + is_number = str.isdecimal(x) + if is_number and int(x) == 1: + # handling the case of anonymous axis of length 1 + if bracket_group is None: + self.composition.append([]) + else: + pass # no need to think about 1s inside parenthesis + return + is_axis_name, reason = self.check_axis_name_return_reason(x, allow_underscore=allow_underscore) + if not (is_number or is_axis_name): + raise EinopsError('Invalid axis identifier: {}\n{}'.format(x, reason)) + if is_number: + x = AnonymousAxis(x) + self.identifiers.add(x) + if is_number: + self.has_non_unitary_anonymous_axes = True + if bracket_group is None: + self.composition.append([x]) + else: + bracket_group.append(x) + + current_identifier = None + for char in expression: + if char in '() ': + if current_identifier is not None: + add_axis_name(current_identifier) + current_identifier = None + if char == '(': + if bracket_group is not None: + raise EinopsError("Axis composition is one-level (brackets inside brackets not allowed)") + bracket_group = [] + elif char == ')': + if bracket_group is None: + raise EinopsError('Brackets are not balanced') + 
self.composition.append(bracket_group) + bracket_group = None + elif str.isalnum(char) or char in ['_', _ellipsis]: + if current_identifier is None: + current_identifier = char + else: + current_identifier += char + else: + raise EinopsError("Unknown character '{}'".format(char)) + + if bracket_group is not None: + raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression)) + if current_identifier is not None: + add_axis_name(current_identifier) + + def flat_axes_order(self) -> List: + result = [] + for composed_axis in self.composition: + assert isinstance(composed_axis, list), 'does not work with ellipsis' + for axis in composed_axis: + result.append(axis) + return result + + def has_composed_axes(self) -> bool: + # this will ignore 1 inside brackets + for axes in self.composition: + if isinstance(axes, list) and len(axes) > 1: + return True + return False + + @staticmethod + def check_axis_name_return_reason(name: str, allow_underscore: bool = False) -> Tuple[bool, str]: + if not str.isidentifier(name): + return False, 'not a valid python identifier' + elif name[0] == '_' or name[-1] == '_': + if name == '_' and allow_underscore: + return True, '' + return False, 'axis name should should not start or end with underscore' + else: + if keyword.iskeyword(name): + warnings.warn("It is discouraged to use axes names that are keywords: {}".format(name), RuntimeWarning) + if name in ['axis']: + warnings.warn("It is discouraged to use 'axis' as an axis name " + "and will raise an error in future", FutureWarning) + return True, '' + + @staticmethod + def check_axis_name(name: str) -> bool: + """ + Valid axes names are python identifiers except keywords, + and additionally should not start or end with underscore + """ + is_valid, _reason = ParsedExpression.check_axis_name_return_reason(name) + return is_valid diff --git a/evalkit_tf446/lib/python3.10/site-packages/einops/py.typed b/evalkit_tf446/lib/python3.10/site-packages/einops/py.typed new file 
mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/__pycache__/request_validator.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/__pycache__/request_validator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03a3a34b6f0af68487fb339a02e82440b8f76ff6 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/__pycache__/request_validator.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/__init__.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..361f0500f1743a4b823cdc3635bc2cc00f10e08f Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/access_token.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/access_token.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9043f26cb024d5bb873f74fe9c42a5120a34dd6 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/access_token.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/authorization.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/authorization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea65fedecf838431062c355a7ec6c07bf010fcbe Binary files 
/dev/null and b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/authorization.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/base.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08b0f3f5dd18ba9926b2f979ce0cbbc2859e4853 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/base.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/pre_configured.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/pre_configured.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4da7ea7ca10b82d0ed7d81ba07a9729e788e3c34 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/pre_configured.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/request_token.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/request_token.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61eb09ffd14074cfd52b043168a3393e56561967 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/request_token.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/resource.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/resource.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..1f48d5c8e423edc877ddc93db07e8e183c67cc8c Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/resource.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/signature_only.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/signature_only.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78d86418619b940e5754ef34f3d2b7796e429713 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/__pycache__/signature_only.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/request_token.py b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/request_token.py new file mode 100644 index 0000000000000000000000000000000000000000..0323cfb845a7836eab0e181cc9229031788e957e --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/oauthlib/oauth1/rfc5849/endpoints/request_token.py @@ -0,0 +1,209 @@ +# -*- coding: utf-8 -*- +""" +oauthlib.oauth1.rfc5849.endpoints.request_token +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This module is an implementation of the request token provider logic of +OAuth 1.0 RFC 5849. It validates the correctness of request token requests, +creates and persists tokens as well as create the proper response to be +returned to the client. +""" +import logging + +from oauthlib.common import urlencode + +from .. import errors +from .base import BaseEndpoint + +log = logging.getLogger(__name__) + + +class RequestTokenEndpoint(BaseEndpoint): + + """An endpoint responsible for providing OAuth 1 request tokens. + + Typical use is to instantiate with a request validator and invoke the + ``create_request_token_response`` from a view function. 
The tuple returned + has all information necessary (body, status, headers) to quickly form + and return a proper response. See :doc:`/oauth1/validator` for details on which + validator methods to implement for this endpoint. + """ + + def create_request_token(self, request, credentials): + """Create and save a new request token. + + :param request: OAuthlib request. + :type request: oauthlib.common.Request + :param credentials: A dict of extra token credentials. + :returns: The token as an urlencoded string. + """ + token = { + 'oauth_token': self.token_generator(), + 'oauth_token_secret': self.token_generator(), + 'oauth_callback_confirmed': 'true' + } + token.update(credentials) + self.request_validator.save_request_token(token, request) + return urlencode(token.items()) + + def create_request_token_response(self, uri, http_method='GET', body=None, + headers=None, credentials=None): + """Create a request token response, with a new request token if valid. + + :param uri: The full URI of the token request. + :param http_method: A valid HTTP verb, i.e. GET, POST, PUT, HEAD, etc. + :param body: The request body as a string. + :param headers: The request headers as a dict. + :param credentials: A list of extra credentials to include in the token. + :returns: A tuple of 3 elements. + 1. A dict of headers to set on the response. + 2. The response body as a string. + 3. The response status code as an integer. + + An example of a valid request:: + + >>> from your_validator import your_validator + >>> from oauthlib.oauth1 import RequestTokenEndpoint + >>> endpoint = RequestTokenEndpoint(your_validator) + >>> h, b, s = endpoint.create_request_token_response( + ... 'https://your.provider/request_token?foo=bar', + ... headers={ + ... 'Authorization': 'OAuth realm=movies user, oauth_....' + ... }, + ... credentials={ + ... 'my_specific': 'argument', + ... 
}) + >>> h + {'Content-Type': 'application/x-www-form-urlencoded'} + >>> b + 'oauth_token=lsdkfol23w54jlksdef&oauth_token_secret=qwe089234lkjsdf&oauth_callback_confirmed=true&my_specific=argument' + >>> s + 200 + + An response to invalid request would have a different body and status:: + + >>> b + 'error=invalid_request&description=missing+callback+uri' + >>> s + 400 + + The same goes for an an unauthorized request: + + >>> b + '' + >>> s + 401 + """ + resp_headers = {'Content-Type': 'application/x-www-form-urlencoded'} + try: + request = self._create_request(uri, http_method, body, headers) + valid, processed_request = self.validate_request_token_request( + request) + if valid: + token = self.create_request_token(request, credentials or {}) + return resp_headers, token, 200 + else: + return {}, None, 401 + except errors.OAuth1Error as e: + return resp_headers, e.urlencoded, e.status_code + + def validate_request_token_request(self, request): + """Validate a request token request. + + :param request: OAuthlib request. + :type request: oauthlib.common.Request + :raises: OAuth1Error if the request is invalid. + :returns: A tuple of 2 elements. + 1. The validation result (True or False). + 2. The request object. + """ + self._check_transport_security(request) + self._check_mandatory_parameters(request) + + if request.realm: + request.realms = request.realm.split(' ') + else: + request.realms = self.request_validator.get_default_realms( + request.client_key, request) + if not self.request_validator.check_realms(request.realms): + raise errors.InvalidRequestError( + description='Invalid realm {}. 
Allowed are {!r}.'.format( + request.realms, self.request_validator.realms)) + + if not request.redirect_uri: + raise errors.InvalidRequestError( + description='Missing callback URI.') + + if not self.request_validator.validate_timestamp_and_nonce( + request.client_key, request.timestamp, request.nonce, request, + request_token=request.resource_owner_key): + return False, request + + # The server SHOULD return a 401 (Unauthorized) status code when + # receiving a request with invalid client credentials. + # Note: This is postponed in order to avoid timing attacks, instead + # a dummy client is assigned and used to maintain near constant + # time request verification. + # + # Note that early exit would enable client enumeration + valid_client = self.request_validator.validate_client_key( + request.client_key, request) + if not valid_client: + request.client_key = self.request_validator.dummy_client + + # Note that `realm`_ is only used in authorization headers and how + # it should be interpreted is not included in the OAuth spec. + # However they could be seen as a scope or realm to which the + # client has access and as such every client should be checked + # to ensure it is authorized access to that scope or realm. + # .. _`realm`: https://tools.ietf.org/html/rfc2617#section-1.2 + # + # Note that early exit would enable client realm access enumeration. + # + # The require_realm indicates this is the first step in the OAuth + # workflow where a client requests access to a specific realm. + # This first step (obtaining request token) need not require a realm + # and can then be identified by checking the require_resource_owner + # flag and absence of realm. + # + # Clients obtaining an access token will not supply a realm and it will + # not be checked. Instead the previously requested realm should be + # transferred from the request token to the access token. 
+ # + # Access to protected resources will always validate the realm but note + # that the realm is now tied to the access token and not provided by + # the client. + valid_realm = self.request_validator.validate_requested_realms( + request.client_key, request.realms, request) + + # Callback is normally never required, except for requests for + # a Temporary Credential as described in `Section 2.1`_ + # .._`Section 2.1`: https://tools.ietf.org/html/rfc5849#section-2.1 + valid_redirect = self.request_validator.validate_redirect_uri( + request.client_key, request.redirect_uri, request) + if not request.redirect_uri: + raise NotImplementedError('Redirect URI must either be provided ' + 'or set to a default during validation.') + + valid_signature = self._check_signature(request) + + # log the results to the validator_log + # this lets us handle internal reporting and analysis + request.validator_log['client'] = valid_client + request.validator_log['realm'] = valid_realm + request.validator_log['callback'] = valid_redirect + request.validator_log['signature'] = valid_signature + + # We delay checking validity until the very end, using dummy values for + # calculations and fetching secrets/keys to ensure the flow of every + # request remains almost identical regardless of whether valid values + # have been supplied. 
This ensures near constant time execution and + # prevents malicious users from guessing sensitive information + v = all((valid_client, valid_realm, valid_redirect, valid_signature)) + if not v: + log.info("[Failure] request verification failed.") + log.info("Valid client: %s.", valid_client) + log.info("Valid realm: %s.", valid_realm) + log.info("Valid callback: %s.", valid_redirect) + log.info("Valid signature: %s.", valid_signature) + return v, request diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/__init__.py b/evalkit_tf446/lib/python3.10/site-packages/timm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c5f797b156d939831ba0173ce29e33583b0a05a3 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/timm/__init__.py @@ -0,0 +1,4 @@ +from .version import __version__ +from .models import create_model, list_models, is_model, list_modules, model_entrypoint, \ + is_scriptable, is_exportable, set_scriptable, set_exportable, has_pretrained_cfg_key, is_pretrained_cfg_key, \ + get_pretrained_cfg_value, is_model_pretrained diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__init__.py b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ee4958eb562bcfe06a5da72be4b76ee610a0ccc --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__init__.py @@ -0,0 +1,15 @@ +from .adabelief import AdaBelief +from .adafactor import Adafactor +from .adahessian import Adahessian +from .adamp import AdamP +from .adamw import AdamW +from .lamb import Lamb +from .lars import Lars +from .lookahead import Lookahead +from .madgrad import MADGRAD +from .nadam import Nadam +from .nvnovograd import NvNovoGrad +from .radam import RAdam +from .rmsprop_tf import RMSpropTF +from .sgdp import SGDP +from .optim_factory import create_optimizer, create_optimizer_v2, optimizer_kwargs diff --git 
a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adabelief.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adabelief.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cdcd2a0b4a736aeb27014d9c7c77245a7e45d78 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adabelief.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adahessian.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adahessian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..252b25a9a998e120af3e51130bef40bc42b63437 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adahessian.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adamp.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adamp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18b5d99745fdcd2ae3e7e814faeafe2e3d17527b Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adamp.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adamw.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adamw.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae78f33f4fee3459206ed57210d58056ecea6ed7 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/adamw.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/lamb.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/lamb.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..85140231464fc339a96d94b710253d2bd96a73c8 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/lamb.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/lars.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/lars.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8db087f5423a99083d90ca898ffd44b21cb2a30d Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/lars.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc5a285d3324e27ec5e3def85acc6f5d247afee2 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..079044160f869d59e46574255fb3742935404f18 Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/nvnovograd.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/nvnovograd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1453d88d7bb2ebfbc2d919ea6af7dcbfefd7b2bb Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/nvnovograd.cpython-310.pyc differ diff --git 
a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2114ffc2c67ae53ddf697941139b9e2a5a2c056c Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/radam.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/radam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac69b019e691e82502b676d5f1b48a06a0d2506c Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/radam.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/rmsprop_tf.cpython-310.pyc b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/rmsprop_tf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..319ca8e0dcb437db1350389b9b7a154c0f30542e Binary files /dev/null and b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/__pycache__/rmsprop_tf.cpython-310.pyc differ diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/adabelief.py b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/adabelief.py new file mode 100644 index 0000000000000000000000000000000000000000..951d715cc0b605df2f7313c95840b7784c4d0a70 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/adabelief.py @@ -0,0 +1,201 @@ +import math +import torch +from torch.optim.optimizer import Optimizer + + +class AdaBelief(Optimizer): + r"""Implements AdaBelief algorithm. 
Modified from Adam in PyTorch + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-16) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + decoupled_decay (boolean, optional): (default: True) If set as True, then + the optimizer uses decoupled weight decay as in AdamW + fixed_decay (boolean, optional): (default: False) This is used when weight_decouple + is set as True. + When fixed_decay == True, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay$. + When fixed_decay == False, the weight decay is performed as + $W_{new} = W_{old} - W_{old} \times decay \times lr$. Note that in this case, the + weight decay ratio decreases with learning rate (lr). 
+ rectify (boolean, optional): (default: True) If set as True, then perform the rectified + update similar to RAdam + degenerated_to_sgd (boolean, optional) (default:True) If set as True, then perform SGD update + when variance of gradient is high + reference: AdaBelief Optimizer, adapting stepsizes by the belief in observed gradients, NeurIPS 2020 + + For a complete table of recommended hyperparameters, see https://github.com/juntang-zhuang/Adabelief-Optimizer' + For example train/args for EfficientNet see these gists + - link to train_scipt: https://gist.github.com/juntang-zhuang/0a501dd51c02278d952cf159bc233037 + - link to args.yaml: https://gist.github.com/juntang-zhuang/517ce3c27022b908bb93f78e4f786dc3 + """ + + def __init__( + self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-16, weight_decay=0, amsgrad=False, + decoupled_decay=True, fixed_decay=False, rectify=True, degenerated_to_sgd=True): + + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + + if isinstance(params, (list, tuple)) and len(params) > 0 and isinstance(params[0], dict): + for param in params: + if 'betas' in param and (param['betas'][0] != betas[0] or param['betas'][1] != betas[1]): + param['buffer'] = [[None, None, None] for _ in range(10)] + + defaults = dict( + lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad, + degenerated_to_sgd=degenerated_to_sgd, decoupled_decay=decoupled_decay, rectify=rectify, + fixed_decay=fixed_decay, buffer=[[None, None, None] for _ in range(10)]) + super(AdaBelief, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdaBelief, self).__setstate__(state) + for group in self.param_groups: + 
group.setdefault('amsgrad', False) + + @torch.no_grad() + def reset(self): + for group in self.param_groups: + for p in group['params']: + state = self.state[p] + amsgrad = group['amsgrad'] + + # State initialization + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_var'] = torch.zeros_like(p) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError( + 'AdaBelief does not support sparse gradients, please consider SparseAdam instead') + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + amsgrad = group['amsgrad'] + beta1, beta2 = group['betas'] + state = self.state[p] + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p_fp32) + # Exponential moving average of squared gradient values + state['exp_avg_var'] = torch.zeros_like(p_fp32) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. 
values + state['max_exp_avg_var'] = torch.zeros_like(p_fp32) + + # perform weight decay, check if decoupled weight decay + if group['decoupled_decay']: + if not group['fixed_decay']: + p_fp32.mul_(1.0 - group['lr'] * group['weight_decay']) + else: + p_fp32.mul_(1.0 - group['weight_decay']) + else: + if group['weight_decay'] != 0: + grad.add_(p_fp32, alpha=group['weight_decay']) + + # get current state variable + exp_avg, exp_avg_var = state['exp_avg'], state['exp_avg_var'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Update first and second moment running average + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + grad_residual = grad - exp_avg + exp_avg_var.mul_(beta2).addcmul_(grad_residual, grad_residual, value=1 - beta2) + + if amsgrad: + max_exp_avg_var = state['max_exp_avg_var'] + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_var, exp_avg_var.add_(group['eps']), out=max_exp_avg_var) + + # Use the max. for normalizing running avg. 
of gradient + denom = (max_exp_avg_var.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_var.add_(group['eps']).sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + # update + if not group['rectify']: + # Default update + step_size = group['lr'] / bias_correction1 + p_fp32.addcdiv_(exp_avg, denom, value=-step_size) + else: + # Rectified update, forked from RAdam + buffered = group['buffer'][int(state['step'] % 10)] + if state['step'] == buffered[0]: + num_sma, step_size = buffered[1], buffered[2] + else: + buffered[0] = state['step'] + beta2_t = beta2 ** state['step'] + num_sma_max = 2 / (1 - beta2) - 1 + num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t) + buffered[1] = num_sma + + # more conservative since it's an approximated value + if num_sma >= 5: + step_size = math.sqrt( + (1 - beta2_t) * + (num_sma - 4) / (num_sma_max - 4) * + (num_sma - 2) / num_sma * + num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step']) + elif group['degenerated_to_sgd']: + step_size = 1.0 / (1 - beta1 ** state['step']) + else: + step_size = -1 + buffered[2] = step_size + + if num_sma >= 5: + denom = exp_avg_var.sqrt().add_(group['eps']) + p_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr']) + elif step_size > 0: + p_fp32.add_(exp_avg, alpha=-step_size * group['lr']) + + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/adafactor.py b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/adafactor.py new file mode 100644 index 0000000000000000000000000000000000000000..06057433a9bffa555bdc13b27a1c56cff26acf15 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/adafactor.py @@ -0,0 +1,167 @@ +""" Adafactor Optimizer + +Lifted from https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py + +Original header/copyright below. + +""" +# Copyright (c) Facebook, Inc. 
and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +import torch +import math + + +class Adafactor(torch.optim.Optimizer): + """Implements Adafactor algorithm. + This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` + (see https://arxiv.org/abs/1804.04235) + + Note that this optimizer internally adjusts the learning rate depending on the + *scale_parameter*, *relative_step* and *warmup_init* options. + + To use a manual (external) learning rate schedule you should set `scale_parameter=False` and + `relative_step=False`. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): external learning rate (default: None) + eps (tuple[float, float]): regularization constants for square gradient + and parameter scale respectively (default: (1e-30, 1e-3)) + clip_threshold (float): threshold of root mean square of final gradient update (default: 1.0) + decay_rate (float): coefficient used to compute running averages of square gradient (default: -0.8) + beta1 (float): coefficient used for computing running averages of gradient (default: None) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + scale_parameter (bool): if True, learning rate is scaled by root mean square of parameter (default: True) + warmup_init (bool): time-dependent learning rate computation depends on + whether warm-up initialization is being used (default: False) + """ + + def __init__(self, params, lr=None, eps=1e-30, eps_scale=1e-3, clip_threshold=1.0, + decay_rate=-0.8, betas=None, weight_decay=0.0, scale_parameter=True, warmup_init=False): + relative_step = not lr + if warmup_init and not relative_step: + raise ValueError('warmup_init requires relative_step=True') + + beta1 = None if betas is None else betas[0] # make it compat with standard betas arg + defaults = 
dict(lr=lr, eps=eps, eps_scale=eps_scale, clip_threshold=clip_threshold, decay_rate=decay_rate, + beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, + relative_step=relative_step, warmup_init=warmup_init) + super(Adafactor, self).__init__(params, defaults) + + @staticmethod + def _get_lr(param_group, param_state): + if param_group['relative_step']: + min_step = 1e-6 * param_state['step'] if param_group['warmup_init'] else 1e-2 + lr_t = min(min_step, 1.0 / math.sqrt(param_state['step'])) + param_scale = 1.0 + if param_group['scale_parameter']: + param_scale = max(param_group['eps_scale'], param_state['RMS']) + param_group['lr'] = lr_t * param_scale + return param_group['lr'] + + @staticmethod + def _get_options(param_group, param_shape): + factored = len(param_shape) >= 2 + use_first_moment = param_group['beta1'] is not None + return factored, use_first_moment + + @staticmethod + def _rms(tensor): + return tensor.norm(2) / (tensor.numel() ** 0.5) + + def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col): + r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1) + c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt() + return torch.mul(r_factor, c_factor) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.dtype in {torch.float16, torch.bfloat16}: + grad = grad.float() + if grad.is_sparse: + raise RuntimeError('Adafactor does not support sparse gradients.') + + state = self.state[p] + + factored, use_first_moment = self._get_options(group, grad.shape) + # State Initialization + if len(state) == 0: + state['step'] = 0 + + if use_first_moment: + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(grad) + if factored: + state['exp_avg_sq_row'] = torch.zeros(grad.shape[:-1]).to(grad) + state['exp_avg_sq_col'] = torch.zeros(grad.shape[:-2] + grad.shape[-1:]).to(grad) + else: + state['exp_avg_sq'] = torch.zeros_like(grad) + + state['RMS'] = 0 + else: + if use_first_moment: + state['exp_avg'] = state['exp_avg'].to(grad) + if factored: + state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad) + state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad) + else: + state['exp_avg_sq'] = state['exp_avg_sq'].to(grad) + + p_fp32 = p + if p.dtype in {torch.float16, torch.bfloat16}: + p_fp32 = p_fp32.float() + + state['step'] += 1 + state['RMS'] = self._rms(p_fp32) + lr_t = self._get_lr(group, state) + + beta2t = 1.0 - math.pow(state['step'], group['decay_rate']) + update = grad ** 2 + group['eps'] + if factored: + exp_avg_sq_row = state['exp_avg_sq_row'] + exp_avg_sq_col = state['exp_avg_sq_col'] + + exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=1.0 - beta2t) + exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=1.0 - beta2t) + + # Approximation of exponential moving average of square of gradient + update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) + update.mul_(grad) + else: + exp_avg_sq = state['exp_avg_sq'] + + exp_avg_sq.mul_(beta2t).add_(update, alpha=1.0 - beta2t) + update = 
exp_avg_sq.rsqrt().mul_(grad) + + update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0)) + update.mul_(lr_t) + + if use_first_moment: + exp_avg = state['exp_avg'] + exp_avg.mul_(group['beta1']).add_(update, alpha=1 - group['beta1']) + update = exp_avg + + if group['weight_decay'] != 0: + p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * lr_t) + + p_fp32.add_(-update) + if p.dtype in {torch.float16, torch.bfloat16}: + p.copy_(p_fp32) + + return loss diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/adahessian.py b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/adahessian.py new file mode 100644 index 0000000000000000000000000000000000000000..985c67ca686a65f61f5c5b1a7db3e5bba815a19b --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/adahessian.py @@ -0,0 +1,156 @@ +""" AdaHessian Optimizer + +Lifted from https://github.com/davda54/ada-hessian/blob/master/ada_hessian.py +Originally licensed MIT, Copyright 2020, David Samuel +""" +import torch + + +class Adahessian(torch.optim.Optimizer): + """ + Implements the AdaHessian algorithm from "ADAHESSIAN: An Adaptive Second OrderOptimizer for Machine Learning" + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float, optional): learning rate (default: 0.1) + betas ((float, float), optional): coefficients used for computing running averages of gradient and the + squared hessian trace (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0) + hessian_power (float, optional): exponent of the hessian trace (default: 1.0) + update_each (int, optional): compute the hessian trace approximation only after *this* number of steps + (to save time) (default: 1) + n_samples (int, optional): how many times to sample `z` for the approximation of the hessian trace 
(default: 1) + """ + + def __init__(self, params, lr=0.1, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, + hessian_power=1.0, update_each=1, n_samples=1, avg_conv_kernel=False): + if not 0.0 <= lr: + raise ValueError(f"Invalid learning rate: {lr}") + if not 0.0 <= eps: + raise ValueError(f"Invalid epsilon value: {eps}") + if not 0.0 <= betas[0] < 1.0: + raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}") + if not 0.0 <= betas[1] < 1.0: + raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}") + if not 0.0 <= hessian_power <= 1.0: + raise ValueError(f"Invalid Hessian power value: {hessian_power}") + + self.n_samples = n_samples + self.update_each = update_each + self.avg_conv_kernel = avg_conv_kernel + + # use a separate generator that deterministically generates the same `z`s across all GPUs in case of distributed training + self.seed = 2147483647 + self.generator = torch.Generator().manual_seed(self.seed) + + defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, hessian_power=hessian_power) + super(Adahessian, self).__init__(params, defaults) + + for p in self.get_params(): + p.hess = 0.0 + self.state[p]["hessian step"] = 0 + + @property + def is_second_order(self): + return True + + def get_params(self): + """ + Gets all parameters in all param_groups with gradients + """ + + return (p for group in self.param_groups for p in group['params'] if p.requires_grad) + + def zero_hessian(self): + """ + Zeros out the accumalated hessian traces. + """ + + for p in self.get_params(): + if not isinstance(p.hess, float) and self.state[p]["hessian step"] % self.update_each == 0: + p.hess.zero_() + + @torch.no_grad() + def set_hessian(self): + """ + Computes the Hutchinson approximation of the hessian trace and accumulates it for each trainable parameter. 
+ """ + + params = [] + for p in filter(lambda p: p.grad is not None, self.get_params()): + if self.state[p]["hessian step"] % self.update_each == 0: # compute the trace only each `update_each` step + params.append(p) + self.state[p]["hessian step"] += 1 + + if len(params) == 0: + return + + if self.generator.device != params[0].device: # hackish way of casting the generator to the right device + self.generator = torch.Generator(params[0].device).manual_seed(self.seed) + + grads = [p.grad for p in params] + + for i in range(self.n_samples): + # Rademacher distribution {-1.0, 1.0} + zs = [torch.randint(0, 2, p.size(), generator=self.generator, device=p.device) * 2.0 - 1.0 for p in params] + h_zs = torch.autograd.grad( + grads, params, grad_outputs=zs, only_inputs=True, retain_graph=i < self.n_samples - 1) + for h_z, z, p in zip(h_zs, zs, params): + p.hess += h_z * z / self.n_samples # approximate the expected values of z*(H@z) + + @torch.no_grad() + def step(self, closure=None): + """ + Performs a single optimization step. 
    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step.
        Arguments:
            closure (callable, optional) -- a closure that reevaluates the model and returns the loss (default: None)
        """

        loss = None
        if closure is not None:
            loss = closure()

        self.zero_hessian()
        self.set_hessian()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None or p.hess is None:
                    continue

                if self.avg_conv_kernel and p.dim() == 4:
                    # Average the hessian trace over the conv kernel spatial dims.
                    p.hess = torch.abs(p.hess).mean(dim=[2, 3], keepdim=True).expand_as(p.hess).clone()

                # Perform correct stepweight decay as in AdamW
                p.mul_(1 - group['lr'] * group['weight_decay'])

                state = self.state[p]

                # State initialization
                # NOTE: a freshly-seen parameter already has exactly one state
                # entry (the "hessian step" counter set in __init__), hence the
                # comparison against 1 rather than 0.
                if len(state) == 1:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of Hessian diagonal square values
                    state['exp_hessian_diag_sq'] = torch.zeros_like(p)

                exp_avg, exp_hessian_diag_sq = state['exp_avg'], state['exp_hessian_diag_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(p.grad, alpha=1 - beta1)
                exp_hessian_diag_sq.mul_(beta2).addcmul_(p.hess, p.hess, value=1 - beta2)

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                # hessian_power < 1 interpolates between SGD-like (k=0) and
                # full second-order (k=1) preconditioning.
                k = group['hessian_power']
                denom = (exp_hessian_diag_sq / bias_correction2).pow_(k / 2).add_(group['eps'])

                # make update
                step_size = group['lr'] / bias_correction1
                p.addcdiv_(exp_avg, denom, value=-step_size)

        return loss
def projection(p, grad, perturb, delta: float, wd_ratio: float, eps: float):
    """Remove the radial component from ``perturb`` when the gradient is
    (nearly) orthogonal to the weights, per the AdamP paper.

    The channel-wise view is tried first, then the layer-wise view. On the
    first view whose max |cosine similarity| between gradient and weights is
    below ``delta / sqrt(dim)``, ``perturb`` is projected onto the tangent
    space of ``p`` (in place) and ``wd_ratio`` is returned as the effective
    weight-decay scale; otherwise ``perturb`` is returned unchanged with a
    scale of 1.
    """
    # Shape that broadcasts per-row norms back over the parameter tensor.
    broadcast_shape = (-1,) + (1,) * (len(p.shape) - 1)

    # Equivalent inlined forms of the module-level _channel_view / _layer_view.
    views = (lambda t: t.reshape(t.size(0), -1), lambda t: t.reshape(1, -1))
    for view in views:
        param_view = view(p)
        grad_view = view(grad)
        cos = F.cosine_similarity(grad_view, param_view, dim=1, eps=eps).abs_()

        # FIXME this is a problem for PyTorch XLA
        if cos.max() < delta / math.sqrt(param_view.size(1)):
            unit = p / param_view.norm(p=2, dim=1).add_(eps).reshape(broadcast_shape)
            perturb -= unit * view(unit * perturb).sum(dim=1).reshape(broadcast_shape)
            return perturb, wd_ratio

    return perturb, 1.
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step: standard Adam moments,
        optionally Nesterov-corrected, followed by the AdamP projection that
        suppresses the radial (norm-growing) component of the update.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue

                grad = p.grad
                beta1, beta2 = group['betas']
                nesterov = group['nesterov']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)

                # Adam
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']

                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1

                if nesterov:
                    # Nesterov look-ahead blend of current grad and momentum.
                    perturb = (beta1 * exp_avg + (1 - beta1) * grad) / denom
                else:
                    perturb = exp_avg / denom

                # Projection — only meaningful for >=2D tensors (weight matrices,
                # conv kernels); biases/norm scales are left untouched.
                wd_ratio = 1.
                if len(p.shape) > 1:
                    perturb, wd_ratio = projection(p, grad, perturb, group['delta'], group['wd_ratio'], group['eps'])

                # Weight decay, scaled down by wd_ratio when a projection happened.
                if group['weight_decay'] > 0:
                    p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio)

                # Step
                p.add_(perturb, alpha=-step_size)

        return loss
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups + lr (float, optional): learning rate (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability (default: 1e-8) + weight_decay (float, optional): weight decay coefficient (default: 1e-2) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) + + .. _Adam\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _Decoupled Weight Decay Regularization: + https://arxiv.org/abs/1711.05101 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, + weight_decay=1e-2, amsgrad=False): + if not 0.0 <= lr: + raise ValueError("Invalid learning rate: {}".format(lr)) + if not 0.0 <= eps: + raise ValueError("Invalid epsilon value: {}".format(eps)) + if not 0.0 <= betas[0] < 1.0: + raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) + if not 0.0 <= betas[1] < 1.0: + raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) + defaults = dict(lr=lr, betas=betas, eps=eps, + weight_decay=weight_decay, amsgrad=amsgrad) + super(AdamW, self).__init__(params, defaults) + + def __setstate__(self, state): + super(AdamW, self).__setstate__(state) + for group in self.param_groups: + group.setdefault('amsgrad', False) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + + # Perform stepweight decay + p.data.mul_(1 - group['lr'] * group['weight_decay']) + + # Perform optimization step + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead') + amsgrad = group['amsgrad'] + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + if amsgrad: + # Maintains max of all exp. moving avg. of sq. grad. values + state['max_exp_avg_sq'] = torch.zeros_like(p) + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + if amsgrad: + max_exp_avg_sq = state['max_exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + bias_correction1 = 1 - beta1 ** state['step'] + bias_correction2 = 1 - beta2 ** state['step'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) + if amsgrad: + # Maintains the maximum of all 2nd moment running avg. till now + torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) + # Use the max. for normalizing running avg. 
of gradient + denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + else: + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + + step_size = group['lr'] / bias_correction1 + + p.addcdiv_(exp_avg, denom, value=-step_size) + + return loss diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/lamb.py b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/lamb.py new file mode 100644 index 0000000000000000000000000000000000000000..12c7c49b8a01ef793c97654ac938259ca6508449 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/lamb.py @@ -0,0 +1,192 @@ +""" PyTorch Lamb optimizer w/ behaviour similar to NVIDIA FusedLamb + +This optimizer code was adapted from the following (starting with latest) +* https://github.com/HabanaAI/Model-References/blob/2b435114fe8e31f159b1d3063b8280ae37af7423/PyTorch/nlp/bert/pretraining/lamb.py +* https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py +* https://github.com/cybertronai/pytorch-lamb + +Use FusedLamb if you can (GPU). The reason for including this variant of Lamb is to have a version that is +similar in behaviour to APEX FusedLamb if you aren't using NVIDIA GPUs or cannot install/use APEX. + +In addition to some cleanup, this Lamb impl has been modified to support PyTorch XLA and has been tested on TPU. + +Original copyrights for above sources are below. + +Modifications Copyright 2021 Ross Wightman +""" +# Copyright (c) 2021, Habana Labs Ltd. All rights reserved. + +# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2019 cybertronai +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +import math + +import torch +from torch.optim import Optimizer + + +class Lamb(Optimizer): + """Implements a pure pytorch variant of FuseLAMB (NvLamb variant) optimizer from apex.optimizers.FusedLAMB + reference: https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py + + LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. 
+ + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its norm. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + grad_averaging (bool, optional): whether apply (1-beta2) to grad when + calculating running averages of gradient. (default: True) + max_grad_norm (float, optional): value used to clip global grad norm (default: 1.0) + trust_clip (bool): enable LAMBC trust ratio clipping (default: False) + always_adapt (boolean, optional): Apply adaptive learning rate to 0.0 + weight decay parameter (default: False) + + .. _Large Batch Optimization for Deep Learning - Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__( + self, params, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-6, + weight_decay=0.01, grad_averaging=True, max_grad_norm=1.0, trust_clip=False, always_adapt=False): + defaults = dict( + lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, + grad_averaging=grad_averaging, max_grad_norm=max_grad_norm, + trust_clip=trust_clip, always_adapt=always_adapt) + super().__init__(params, defaults) + + @torch.no_grad() + def step(self, closure=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. 
+ """ + loss = None + if closure is not None: + with torch.enable_grad(): + loss = closure() + + device = self.param_groups[0]['params'][0].device + one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly + global_grad_norm = torch.zeros(1, device=device) + for group in self.param_groups: + for p in group['params']: + if p.grad is None: + continue + grad = p.grad + if grad.is_sparse: + raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.') + global_grad_norm.add_(grad.pow(2).sum()) + + global_grad_norm = torch.sqrt(global_grad_norm) + # FIXME it'd be nice to remove explicit tensor conversion of scalars when torch.where promotes + # scalar types properly https://github.com/pytorch/pytorch/issues/9190 + max_grad_norm = torch.tensor(self.defaults['max_grad_norm'], device=device) + clip_global_grad_norm = torch.where( + global_grad_norm > max_grad_norm, + global_grad_norm / max_grad_norm, + one_tensor) + + for group in self.param_groups: + bias_correction = 1 if group['bias_correction'] else 0 + beta1, beta2 = group['betas'] + grad_averaging = 1 if group['grad_averaging'] else 0 + beta3 = 1 - beta1 if grad_averaging else 1.0 + + # assume same step across group now to simplify things + # per parameter step can be easily support by making it tensor, or pass list into kernel + if 'step' in group: + group['step'] += 1 + else: + group['step'] = 1 + + if bias_correction: + bias_correction1 = 1 - beta1 ** group['step'] + bias_correction2 = 1 - beta2 ** group['step'] + else: + bias_correction1, bias_correction2 = 1.0, 1.0 + + for p in group['params']: + if p.grad is None: + continue + grad = p.grad.div_(clip_global_grad_norm) + state = self.state[p] + + # State initialization + if len(state) == 0: + # Exponential moving average of gradient valuesa + state['exp_avg'] = torch.zeros_like(p) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p) + + 
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + + # Decay the first and second moment running average coefficient + exp_avg.mul_(beta1).add_(grad, alpha=beta3) # m_t + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2) # v_t + + denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps']) + update = (exp_avg / bias_correction1).div_(denom) + + weight_decay = group['weight_decay'] + if weight_decay != 0: + update.add_(p, alpha=weight_decay) + + if weight_decay != 0 or group['always_adapt']: + # Layer-wise LR adaptation. By default, skip adaptation on parameters that are + # excluded from weight decay, unless always_adapt == True, then always enabled. + w_norm = p.norm(2.0) + g_norm = update.norm(2.0) + # FIXME nested where required since logical and/or not working in PT XLA + trust_ratio = torch.where( + w_norm > 0, + torch.where(g_norm > 0, w_norm / g_norm, one_tensor), + one_tensor, + ) + if group['trust_clip']: + # LAMBC trust clipping, upper bound fixed at one + trust_ratio = torch.minimum(trust_ratio, one_tensor) + update.mul_(trust_ratio) + + p.add_(update, alpha=-group['lr']) + + return loss diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/lars.py b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/lars.py new file mode 100644 index 0000000000000000000000000000000000000000..38ca9e0b5cb90855104ce7b5ff358cb7fa343f12 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/lars.py @@ -0,0 +1,135 @@ +""" PyTorch LARS / LARC Optimizer + +An implementation of LARS (SGD) + LARC in PyTorch + +Based on: + * PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 + * NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py + +Additional cleanup and modifications to properly support PyTorch XLA. 
+ +Copyright 2021 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer + + +class Lars(Optimizer): + """ LARS for PyTorch + + Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf + + Args: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups. + lr (float, optional): learning rate (default: 1.0). + momentum (float, optional): momentum factor (default: 0) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + dampening (float, optional): dampening for momentum (default: 0) + nesterov (bool, optional): enables Nesterov momentum (default: False) + trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001) + eps (float): eps for division denominator (default: 1e-8) + trust_clip (bool): enable LARC trust ratio clipping (default: False) + always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False) + """ + + def __init__( + self, + params, + lr=1.0, + momentum=0, + dampening=0, + weight_decay=0, + nesterov=False, + trust_coeff=0.001, + eps=1e-8, + trust_clip=False, + always_adapt=False, + ): + if lr < 0.0: + raise ValueError(f"Invalid learning rate: {lr}") + if momentum < 0.0: + raise ValueError(f"Invalid momentum value: {momentum}") + if weight_decay < 0.0: + raise ValueError(f"Invalid weight_decay value: {weight_decay}") + if nesterov and (momentum <= 0 or dampening != 0): + raise ValueError("Nesterov momentum requires a momentum and zero dampening") + + defaults = dict( + lr=lr, + momentum=momentum, + dampening=dampening, + weight_decay=weight_decay, + nesterov=nesterov, + trust_coeff=trust_coeff, + eps=eps, + trust_clip=trust_clip, + always_adapt=always_adapt, + ) + super().__init__(params, defaults) + + def __setstate__(self, state): + super().__setstate__(state) + for group in self.param_groups: + group.setdefault("nesterov", False) + + @torch.no_grad() + def 
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Computes the LARS trust ratio per parameter (when weight decay is
        active or ``always_adapt`` is set), scales the gradient by it, then
        applies a standard SGD-with-momentum update.

        Args:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        device = self.param_groups[0]['params'][0].device
        one_tensor = torch.tensor(1.0, device=device)  # because torch.where doesn't handle scalars correctly

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            trust_coeff = group['trust_coeff']
            eps = group['eps']

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad

                # apply LARS LR adaptation, LARC clipping, weight decay
                # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py
                if weight_decay != 0 or group['always_adapt']:
                    w_norm = p.norm(2.0)
                    g_norm = grad.norm(2.0)
                    trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps)
                    # FIXME nested where required since logical and/or not working in PT XLA
                    trust_ratio = torch.where(
                        w_norm > 0,
                        torch.where(g_norm > 0, trust_ratio, one_tensor),
                        one_tensor,
                    )
                    if group['trust_clip']:
                        # LARC mode: cap the adaptive lr at the group lr.
                        trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor)
                    # NOTE: both ops mutate p.grad in place.
                    grad.add_(p, alpha=weight_decay)
                    grad.mul_(trust_ratio)

                # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100
                if momentum != 0:
                    param_state = self.state[p]
                    if 'momentum_buffer' not in param_state:
                        buf = param_state['momentum_buffer'] = torch.clone(grad).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(grad, alpha=1. - dampening)
                    if nesterov:
                        grad = grad.add(buf, alpha=momentum)
                    else:
                        grad = buf

                p.add_(grad, alpha=-group['lr'])

        return loss
- dampening) + if nesterov: + grad = grad.add(buf, alpha=momentum) + else: + grad = buf + + p.add_(grad, alpha=-group['lr']) + + return loss \ No newline at end of file diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/lookahead.py b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/lookahead.py new file mode 100644 index 0000000000000000000000000000000000000000..462c3acd247016a94acd39a27dd44f29ae854d31 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/lookahead.py @@ -0,0 +1,61 @@ +""" Lookahead Optimizer Wrapper. +Implementation modified from: https://github.com/alphadl/lookahead.pytorch +Paper: `Lookahead Optimizer: k steps forward, 1 step back` - https://arxiv.org/abs/1907.08610 + +Hacked together by / Copyright 2020 Ross Wightman +""" +import torch +from torch.optim.optimizer import Optimizer +from collections import defaultdict + + +class Lookahead(Optimizer): + def __init__(self, base_optimizer, alpha=0.5, k=6): + # NOTE super().__init__() not called on purpose + if not 0.0 <= alpha <= 1.0: + raise ValueError(f'Invalid slow update rate: {alpha}') + if not 1 <= k: + raise ValueError(f'Invalid lookahead steps: {k}') + defaults = dict(lookahead_alpha=alpha, lookahead_k=k, lookahead_step=0) + self._base_optimizer = base_optimizer + self.param_groups = base_optimizer.param_groups + self.defaults = base_optimizer.defaults + self.defaults.update(defaults) + self.state = defaultdict(dict) + # manually add our defaults to the param groups + for name, default in defaults.items(): + for group in self._base_optimizer.param_groups: + group.setdefault(name, default) + + @torch.no_grad() + def update_slow(self, group): + for fast_p in group["params"]: + if fast_p.grad is None: + continue + param_state = self._base_optimizer.state[fast_p] + if 'lookahead_slow_buff' not in param_state: + param_state['lookahead_slow_buff'] = torch.empty_like(fast_p) + param_state['lookahead_slow_buff'].copy_(fast_p) + slow = 
param_state['lookahead_slow_buff'] + slow.add_(fast_p - slow, alpha=group['lookahead_alpha']) + fast_p.copy_(slow) + + def sync_lookahead(self): + for group in self._base_optimizer.param_groups: + self.update_slow(group) + + @torch.no_grad() + def step(self, closure=None): + loss = self._base_optimizer.step(closure) + for group in self._base_optimizer.param_groups: + group['lookahead_step'] += 1 + if group['lookahead_step'] % group['lookahead_k'] == 0: + self.update_slow(group) + return loss + + def state_dict(self): + return self._base_optimizer.state_dict() + + def load_state_dict(self, state_dict): + self._base_optimizer.load_state_dict(state_dict) + self.param_groups = self._base_optimizer.param_groups diff --git a/evalkit_tf446/lib/python3.10/site-packages/timm/optim/madgrad.py b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/madgrad.py new file mode 100644 index 0000000000000000000000000000000000000000..a76713bf27ed1daf0ce598ac5f25c6238c7fdb57 --- /dev/null +++ b/evalkit_tf446/lib/python3.10/site-packages/timm/optim/madgrad.py @@ -0,0 +1,184 @@ +""" PyTorch MADGRAD optimizer + +MADGRAD: https://arxiv.org/abs/2101.11075 + +Code from: https://github.com/facebookresearch/madgrad +""" +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import math +from typing import TYPE_CHECKING, Any, Callable, Optional + +import torch +import torch.optim + +if TYPE_CHECKING: + from torch.optim.optimizer import _params_t +else: + _params_t = Any + + +class MADGRAD(torch.optim.Optimizer): + """ + MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic + Optimization. + + .. _MADGRAD: https://arxiv.org/abs/2101.11075 + + MADGRAD is a general purpose optimizer that can be used in place of SGD or + Adam may converge faster and generalize better. Currently GPU-only. 
    Typically, the same learning rate schedule that is used for SGD or Adam may
    be used. The overall learning rate is not comparable to either method and
    should be determined by a hyper-parameter sweep.

    MADGRAD requires less weight decay than other methods, often as little as
    zero. Momentum values used for SGD or Adam's beta1 should work here also.

    On sparse problems both weight_decay and momentum should be set to 0.

    Arguments:
        params (iterable):
            Iterable of parameters to optimize or dicts defining parameter groups.
        lr (float):
            Learning rate (default: 1e-2).
        momentum (float):
            Momentum value in the range [0,1) (default: 0.9).
        weight_decay (float):
            Weight decay, i.e. a L2 penalty (default: 0).
        eps (float):
            Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
    """

    def __init__(
        self,
        params: _params_t,
        lr: float = 1e-2,
        momentum: float = 0.9,
        weight_decay: float = 0,
        eps: float = 1e-6,
        decoupled_decay: bool = False,
    ):
        # Validate hyper-parameters up front so a bad config fails at
        # construction time rather than mid-training.
        if momentum < 0 or momentum >= 1:
            raise ValueError(f"Momentum {momentum} must be in the range [0,1]")
        if lr <= 0:
            raise ValueError(f"Learning rate {lr} must be positive")
        if weight_decay < 0:
            raise ValueError(f"Weight decay {weight_decay} must be non-negative")
        if eps < 0:
            raise ValueError(f"Eps must be non-negative")

        defaults = dict(
            lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay)
        super().__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self) -> bool:
        # Advertises (lack of) fp16 support to wrappers (e.g. fairseq-style trainers).
        return False

    @property
    def supports_flat_params(self) -> bool:
        return True

    @torch.no_grad()
    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            eps = group['eps']
            # eps is folded into the effective lr to keep the update finite when lr -> 0.
            lr = group['lr'] + eps
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            ck = 1 - momentum

            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad
                if momentum != 0.0 and grad.is_sparse:
                    raise RuntimeError("momentum != 0 is not compatible with sparse gradients")

                state = self.state[p]
                if len(state) == 0:
                    # grad_sum_sq accumulates lamb-weighted squared gradients;
                    # s accumulates lamb-weighted gradients; x0 is the momentum anchor.
                    state['step'] = 0
                    state['grad_sum_sq'] = torch.zeros_like(p)
                    state['s'] = torch.zeros_like(p)
                    if momentum != 0:
                        state['x0'] = torch.clone(p).detach()

                state['step'] += 1
                grad_sum_sq = state['grad_sum_sq']
                s = state['s']
                # lamb grows with sqrt(step), per the MADGRAD dual-averaging scheme.
                lamb = lr * math.sqrt(state['step'])

                # Apply weight decay
                if weight_decay != 0:
                    if group['decoupled_decay']:
                        # AdamW-style decay applied directly to the parameter.
                        p.mul_(1.0 - group['lr'] * weight_decay)
                    else:
                        if grad.is_sparse:
                            raise RuntimeError("weight_decay option is not compatible with sparse gradients")
                        # Classic L2 penalty folded into the gradient.
                        grad.add_(p, alpha=weight_decay)

                if grad.is_sparse:
                    # Sparse path: only touch coordinates present in this gradient.
                    grad = grad.coalesce()
                    grad_val = grad._values()

                    p_masked = p.sparse_mask(grad)
                    grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
                    s_masked = s.sparse_mask(grad)

                    # Compute x_0 from other known quantities
                    rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
                    x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)

                    # Dense + sparse op
                    grad_sq = grad * grad
                    grad_sum_sq.add_(grad_sq, alpha=lamb)
                    grad_sum_sq_masked.add_(grad_sq, alpha=lamb)

                    rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)

                    s.add_(grad, alpha=lamb)
                    s_masked._values().add_(grad_val, alpha=lamb)

                    # update masked copy of p
                    p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)
                    # Copy updated masked p to dense p using an add operation
                    p_masked._values().add_(p_kp1_masked_vals, alpha=-1)
                    p.add_(p_masked, alpha=-1)
                else:
                    if momentum == 0:
                        # Compute x_0 from other known quantities
                        rms = grad_sum_sq.pow(1 / 3).add_(eps)
                        x0 = p.addcdiv(s, rms, value=1)
                    else:
                        x0 = state['x0']

                    # Accumulate second moments
                    grad_sum_sq.addcmul_(grad, grad, value=lamb)
                    # NOTE: cube root (not square root) normalization is the MADGRAD signature.
                    rms = grad_sum_sq.pow(1 / 3).add_(eps)

                    # Update s
                    s.add_(grad, alpha=lamb)

                    # Step
                    if momentum == 0:
                        p.copy_(x0.addcdiv(s, rms, value=-1))
                    else:
                        z = x0.addcdiv(s, rms, value=-1)

                        # p is a moving average of z
                        p.mul_(1 - ck).add_(z, alpha=ck)

        return loss
class Nadam(Optimizer):
    """Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).

    It has been proposed in `Incorporating Nesterov Momentum into Adam`__.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 2e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        schedule_decay (float, optional): momentum schedule decay (default: 4e-3)

    __ http://cs229.stanford.edu/proj2015/054_report.pdf
    __ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf

    Originally taken from: https://github.com/pytorch/pytorch/pull/1408
    NOTE: Has potential issues but does work well on some problems.
    """

    def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, schedule_decay=4e-3):
        # Only lr is validated here; betas/eps are trusted as-is (upstream behavior).
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        defaults = dict(
            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, schedule_decay=schedule_decay)
        super(Nadam, self).__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # m_schedule is the running product of momentum_cache terms.
                    state['m_schedule'] = 1.
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)

                # Warming momentum schedule
                m_schedule = state['m_schedule']
                schedule_decay = group['schedule_decay']
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                eps = group['eps']
                state['step'] += 1
                t = state['step']
                bias_correction2 = 1 - beta2 ** t

                if group['weight_decay'] != 0:
                    # L2 penalty folded into the gradient (non-decoupled).
                    grad = grad.add(p, alpha=group['weight_decay'])

                # Momentum schedule for steps t and t+1 (Nesterov look-ahead).
                momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay)))
                momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
                m_schedule_new = m_schedule * momentum_cache_t
                m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
                state['m_schedule'] = m_schedule_new

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2)

                denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
                # Two-part Nesterov update: current gradient term + look-ahead momentum term.
                p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new))
                p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next))

        return loss
class NvNovoGrad(Optimizer):
    """
    Implements Novograd algorithm.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.95, 0.98))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        grad_averaging: gradient averaging
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)
    """

    def __init__(self, params, lr=1e-3, betas=(0.95, 0.98), eps=1e-8,
                 weight_decay=0, grad_averaging=False, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay,
                        grad_averaging=grad_averaging,
                        amsgrad=amsgrad)

        super(NvNovoGrad, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Keep optimizers pickled before the amsgrad option existed loadable.
        super(NvNovoGrad, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Sparse gradients are not supported.')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values.
                    # NOTE: a scalar (0-dim) tensor — Novograd normalizes per-layer,
                    # not per-element like Adam.
                    state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Per-layer squared gradient norm.
                norm = torch.sum(torch.pow(grad, 2))

                if exp_avg_sq == 0:
                    # First step: seed the second moment instead of biasing toward zero.
                    exp_avg_sq.copy_(norm)
                else:
                    exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)

                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                # NOTE: grad is normalized and decayed in place, then folded into
                # the first-moment buffer; order of these ops is significant.
                grad.div_(denom)
                if group['weight_decay'] != 0:
                    grad.add_(p, alpha=group['weight_decay'])
                if group['grad_averaging']:
                    grad.mul_(1 - beta1)
                exp_avg.mul_(beta1).add_(grad)

                p.add_(exp_avg, alpha=-group['lr'])

        return loss
no_decay.append(param) + else: + decay.append(param) + + return [ + {'params': no_decay, 'weight_decay': 0.}, + {'params': decay, 'weight_decay': weight_decay}] + + +def _group(it, size): + it = iter(it) + return iter(lambda: tuple(islice(it, size)), ()) + + +def _layer_map(model, layers_per_group=12, num_groups=None): + def _in_head(n, hp): + if not hp: + return True + elif isinstance(hp, (tuple, list)): + return any([n.startswith(hpi) for hpi in hp]) + else: + return n.startswith(hp) + + head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None) + names_trunk = [] + names_head = [] + for n, _ in model.named_parameters(): + names_head.append(n) if _in_head(n, head_prefix) else names_trunk.append(n) + + # group non-head layers + num_trunk_layers = len(names_trunk) + if num_groups is not None: + layers_per_group = -(num_trunk_layers // -num_groups) + names_trunk = list(_group(names_trunk, layers_per_group)) + + num_trunk_groups = len(names_trunk) + layer_map = {n: i for i, l in enumerate(names_trunk) for n in l} + layer_map.update({n: num_trunk_groups for n in names_head}) + return layer_map + + +def param_groups_layer_decay( + model: nn.Module, + weight_decay: float = 0.05, + no_weight_decay_list: Tuple[str] = (), + layer_decay: float = .75, + end_layer_decay: Optional[float] = None, + verbose: bool = False, +): + """ + Parameter groups for layer-wise lr decay & weight decay + Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58 + """ + no_weight_decay_list = set(no_weight_decay_list) + param_group_names = {} # NOTE for debugging + param_groups = {} + + if hasattr(model, 'group_matcher'): + # FIXME interface needs more work + layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True) + else: + # fallback + layer_map = _layer_map(model) + num_layers = max(layer_map.values()) + 1 + layer_max = num_layers - 1 + layer_scales = list(layer_decay ** (layer_max - i) for i in range(num_layers)) + + 
for name, param in model.named_parameters(): + if not param.requires_grad: + continue + + # no decay: all 1D parameters and model specific ones + if param.ndim == 1 or name in no_weight_decay_list: + g_decay = "no_decay" + this_decay = 0. + else: + g_decay = "decay" + this_decay = weight_decay + + layer_id = layer_map.get(name, layer_max) + group_name = "layer_%d_%s" % (layer_id, g_decay) + + if group_name not in param_groups: + this_scale = layer_scales[layer_id] + param_group_names[group_name] = { + "lr_scale": this_scale, + "weight_decay": this_decay, + "param_names": [], + } + param_groups[group_name] = { + "lr_scale": this_scale, + "weight_decay": this_decay, + "params": [], + } + + param_group_names[group_name]["param_names"].append(name) + param_groups[group_name]["params"].append(param) + + if verbose: + import json + _logger.info("parameter groups: \n%s" % json.dumps(param_group_names, indent=2)) + + return list(param_groups.values()) + + +def optimizer_kwargs(cfg): + """ cfg/argparse to kwargs helper + Convert optimizer args in argparse args or cfg like object to keyword args for updated create fn. + """ + kwargs = dict( + opt=cfg.opt, + lr=cfg.lr, + weight_decay=cfg.weight_decay, + momentum=cfg.momentum) + if getattr(cfg, 'opt_eps', None) is not None: + kwargs['eps'] = cfg.opt_eps + if getattr(cfg, 'opt_betas', None) is not None: + kwargs['betas'] = cfg.opt_betas + if getattr(cfg, 'layer_decay', None) is not None: + kwargs['layer_decay'] = cfg.layer_decay + if getattr(cfg, 'opt_args', None) is not None: + kwargs.update(cfg.opt_args) + return kwargs + + +def create_optimizer(args, model, filter_bias_and_bn=True): + """ Legacy optimizer factory for backwards compatibility. + NOTE: Use create_optimizer_v2 for new code. 
def create_optimizer(args, model, filter_bias_and_bn=True):
    """ Legacy optimizer factory for backwards compatibility.
    NOTE: Use create_optimizer_v2 for new code.
    """
    return create_optimizer_v2(
        model,
        **optimizer_kwargs(cfg=args),
        filter_bias_and_bn=filter_bias_and_bn,
    )


def create_optimizer_v2(
        model_or_params,
        opt: str = 'sgd',
        lr: Optional[float] = None,
        weight_decay: float = 0.,
        momentum: float = 0.9,
        filter_bias_and_bn: bool = True,
        layer_decay: Optional[float] = None,
        param_group_fn: Optional[Callable] = None,
        **kwargs):
    """ Create an optimizer.

    TODO currently the model is passed in and all parameters are selected for optimization.
    For more general use an interface that allows selection of parameters to optimize and lr groups, one of:
      * a filter fn interface that further breaks params into groups in a weight_decay compatible fashion
      * expose the parameters interface and leave it up to caller

    Args:
        model_or_params (nn.Module): model containing parameters to optimize
        opt: name of optimizer to create
        lr: initial learning rate
        weight_decay: weight decay to apply in optimizer
        momentum: momentum for momentum based optimizers (others may use betas via kwargs)
        filter_bias_and_bn: filter out bias, bn and other 1d params from weight decay
        layer_decay: layer-wise learning rate decay factor (enables layer-decay param groups)
        param_group_fn: optional callable producing param groups from the model
        **kwargs: extra optimizer specific kwargs to pass through

    Returns:
        Optimizer

    Raises:
        ValueError: if *opt* does not name a known optimizer.
    """
    if isinstance(model_or_params, nn.Module):
        # a model was passed in, extract parameters and add weight decays to appropriate layers
        no_weight_decay = {}
        if hasattr(model_or_params, 'no_weight_decay'):
            no_weight_decay = model_or_params.no_weight_decay()

        if param_group_fn:
            parameters = param_group_fn(model_or_params)
        elif layer_decay is not None:
            parameters = param_groups_layer_decay(
                model_or_params,
                weight_decay=weight_decay,
                layer_decay=layer_decay,
                no_weight_decay_list=no_weight_decay)
            # decay is baked into the groups; zero it at optimizer level
            weight_decay = 0.
        elif weight_decay and filter_bias_and_bn:
            parameters = param_groups_weight_decay(model_or_params, weight_decay, no_weight_decay)
            weight_decay = 0.
        else:
            parameters = model_or_params.parameters()
    else:
        # iterable of parameters or param groups passed in
        parameters = model_or_params

    opt_lower = opt.lower()
    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'

    opt_args = dict(weight_decay=weight_decay, **kwargs)
    if lr is not None:
        opt_args.setdefault('lr', lr)

    # basic SGD & related
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        # NOTE 'sgd' refers to SGD + nesterov momentum for legacy / backwards compat reasons
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=momentum, nesterov=True, **opt_args)

    # adaptive
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'nadam':
        try:
            # NOTE PyTorch >= 1.10 has a native implementation, class name is NAdam
            # (the previous `optim.Nadam` spelling always raised AttributeError, so
            # the native optimizer was never actually used).
            optimizer = optim.NAdam(parameters, **opt_args)
        except AttributeError:
            optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamax':
        optimizer = optim.Adamax(parameters, **opt_args)
    elif opt_lower == 'adabelief':
        optimizer = AdaBelief(parameters, rectify=False, **opt_args)
    elif opt_lower == 'radabelief':
        optimizer = AdaBelief(parameters, rectify=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adagrad':
        opt_args.setdefault('eps', 1e-8)
        optimizer = optim.Adagrad(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'lamb':
        optimizer = Lamb(parameters, **opt_args)
    elif opt_lower == 'lambc':
        optimizer = Lamb(parameters, trust_clip=True, **opt_args)
    elif opt_lower == 'larc':
        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, **opt_args)
    elif opt_lower == 'lars':
        optimizer = Lars(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'nlarc':
        optimizer = Lars(parameters, momentum=momentum, trust_clip=True, nesterov=True, **opt_args)
    elif opt_lower == 'nlars':
        optimizer = Lars(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'madgrad':
        optimizer = MADGRAD(parameters, momentum=momentum, **opt_args)
    elif opt_lower == 'madgradw':
        optimizer = MADGRAD(parameters, momentum=momentum, decoupled_decay=True, **opt_args)
    elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=momentum, **opt_args)

    # second order
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)

    # NVIDIA fused optimizers, require APEX to be installed
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)

    else:
        # previously `assert False and "..."` (message-less assert, stripped under -O)
        # followed by an unreachable bare `raise ValueError`
        raise ValueError(f'Invalid optimizer: {opt}')

    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)

    return optimizer


class RAdam(Optimizer):
    """RAdam Optimizer.
    Implementation lifted from: https://github.com/LiyuanLucasLiu/RAdam
    Paper: `On the Variance of the Adaptive Learning Rate and Beyond` - https://arxiv.org/abs/1908.03265
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        # `buffer` caches (step, num_sma, step_size) keyed by step % 10 to avoid
        # recomputing the rectification term for every parameter.
        defaults = dict(
            lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
            buffer=[[None, None, None] for _ in range(10)])
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    @torch.no_grad()
    def step(self, closure=None):
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:

            for p in group['params']:
                if p.grad is None:
                    continue
                # work in fp32 even for fp16 params
                grad = p.grad.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                p_fp32 = p.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    num_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    num_sma_max = 2 / (1 - beta2) - 1
                    num_sma = num_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = num_sma

                    # more conservative since it's an approximated value
                    if num_sma >= 5:
                        step_size = group['lr'] * math.sqrt(
                            (1 - beta2_t) *
                            (num_sma - 4) / (num_sma_max - 4) *
                            (num_sma - 2) / num_sma *
                            num_sma_max / (num_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    p_fp32.add_(p_fp32, alpha=-group['weight_decay'] * group['lr'])

                # more conservative since it's an approximated value
                if num_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    # variance not yet tractable: plain SGD-with-momentum step
                    p_fp32.add_(exp_avg, alpha=-step_size)

                p.copy_(p_fp32)

        return loss
class RMSpropTF(Optimizer):
    """Implements RMSprop algorithm (TensorFlow style epsilon)

    NOTE: This is a direct cut-and-paste of PyTorch RMSprop with eps applied before sqrt
    and a few other modifications to closer match Tensorflow for matching hyper-params.

    Noteworthy changes include:
    1. Epsilon applied inside square-root
    2. square_avg initialized to ones
    3. LR scaling of update accumulated in momentum buffer

    Proposed by G. Hinton in his
    `course `_.

    The centered version first appears in `Generating Sequences
    With Recurrent Neural Networks `_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        momentum (float, optional): momentum factor (default: 0)
        alpha (float, optional): smoothing (decay) constant (default: 0.9)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-10)
        centered (bool, optional) : if ``True``, compute the centered RMSProp,
            the gradient is normalized by an estimation of its variance
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        decoupled_decay (bool, optional): decoupled weight decay as per https://arxiv.org/abs/1711.05101
        lr_in_momentum (bool, optional): learning rate scaling is included in the momentum buffer
            update as per defaults in Tensorflow

    """

    def __init__(self, params, lr=1e-2, alpha=0.9, eps=1e-10, weight_decay=0, momentum=0., centered=False,
                 decoupled_decay=False, lr_in_momentum=True):
        # Validate all numeric hyper-parameters up front.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= momentum:
            raise ValueError("Invalid momentum value: {}".format(momentum))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= alpha:
            raise ValueError("Invalid alpha value: {}".format(alpha))

        defaults = dict(
            lr=lr, momentum=momentum, alpha=alpha, eps=eps, centered=centered, weight_decay=weight_decay,
            decoupled_decay=decoupled_decay, lr_in_momentum=lr_in_momentum)
        super(RMSpropTF, self).__init__(params, defaults)

    def __setstate__(self, state):
        # Keep optimizers pickled before these options existed loadable.
        super(RMSpropTF, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('momentum', 0)
            group.setdefault('centered', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('RMSprop does not support sparse gradients')
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    state['square_avg'] = torch.ones_like(p)  # PyTorch inits to zero
                    if group['momentum'] > 0:
                        state['momentum_buffer'] = torch.zeros_like(p)
                    if group['centered']:
                        state['grad_avg'] = torch.zeros_like(p)

                square_avg = state['square_avg']
                one_minus_alpha = 1. - group['alpha']

                state['step'] += 1

                if group['weight_decay'] != 0:
                    if group['decoupled_decay']:
                        # AdamW-style decay applied directly to the parameter.
                        p.mul_(1. - group['lr'] * group['weight_decay'])
                    else:
                        grad = grad.add(p, alpha=group['weight_decay'])

                # Tensorflow order of ops for updating squared avg
                square_avg.add_(grad.pow(2) - square_avg, alpha=one_minus_alpha)
                # square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)  # PyTorch original

                if group['centered']:
                    grad_avg = state['grad_avg']
                    grad_avg.add_(grad - grad_avg, alpha=one_minus_alpha)
                    avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).add(group['eps']).sqrt_()  # eps in sqrt
                    # grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha)  # PyTorch original
                else:
                    avg = square_avg.add(group['eps']).sqrt_()  # eps moved in sqrt

                if group['momentum'] > 0:
                    buf = state['momentum_buffer']
                    # Tensorflow accumulates the LR scaling in the momentum buffer
                    if group['lr_in_momentum']:
                        buf.mul_(group['momentum']).addcdiv_(grad, avg, value=group['lr'])
                        p.add_(-buf)
                    else:
                        # PyTorch scales the param update by LR
                        buf.mul_(group['momentum']).addcdiv_(grad, avg)
                        p.add_(buf, alpha=-group['lr'])
                else:
                    p.addcdiv_(grad, avg, value=-group['lr'])

        return loss
class SGDP(Optimizer):
    """SGD with Projection (SGDP).

    SGD variant that projects out the component of the update parallel to the
    weight vector for scale-invariant (>1-D) parameters, slowing weight-norm
    growth (see https://arxiv.org/abs/2006.08217). Relies on the shared
    ``projection`` helper from ``.adamp``.

    Arguments mirror ``torch.optim.SGD`` plus:
        eps: numerical stability term passed to the projection.
        delta: cosine-similarity threshold for applying the projection.
        wd_ratio: weight-decay reduction ratio applied when projection triggers.
    """

    def __init__(self, params, lr=required, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False, eps=1e-8, delta=0.1, wd_ratio=0.1):
        defaults = dict(
            lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay,
            nesterov=nesterov, eps=eps, delta=delta, wd_ratio=wd_ratio)
        super(SGDP, self).__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step; returns the closure loss if given."""
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['momentum'] = torch.zeros_like(p)

                # SGD
                buf = state['momentum']
                buf.mul_(momentum).add_(grad, alpha=1. - dampening)
                if nesterov:
                    d_p = grad + momentum * buf
                else:
                    d_p = buf

                # Projection: only for multi-dimensional (scale-invariant) params.
                wd_ratio = 1.
                if len(p.shape) > 1:
                    d_p, wd_ratio = projection(p, grad, d_p, group['delta'], group['wd_ratio'], group['eps'])

                # Weight decay, scaled by wd_ratio when the projection fired;
                # the 1/(1-momentum) factor compensates for momentum accumulation.
                if weight_decay != 0:
                    p.mul_(1. - group['lr'] * group['weight_decay'] * wd_ratio / (1-momentum))

                # Step
                p.add_(d_p, alpha=-group['lr'])

        return loss