ZTWHHH commited on
Commit
b971833
·
verified ·
1 Parent(s): f9ca870

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. janus/lib/python3.10/site-packages/distutils-precedence.pth +3 -0
  2. janus/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc +0 -0
  3. janus/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc +0 -0
  4. janus/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc +0 -0
  5. janus/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc +0 -0
  6. janus/lib/python3.10/site-packages/einops/experimental/indexing.py +398 -0
  7. janus/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc +0 -0
  8. janus/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc +0 -0
  9. janus/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc +0 -0
  10. janus/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc +0 -0
  11. janus/lib/python3.10/site-packages/einops/layers/oneflow.py +54 -0
  12. janus/lib/python3.10/site-packages/einops/parsing.py +152 -0
  13. janus/lib/python3.10/site-packages/timm/layers/__init__.py +62 -0
  14. janus/lib/python3.10/site-packages/timm/layers/attention2d.py +351 -0
  15. janus/lib/python3.10/site-packages/timm/layers/attention_pool.py +105 -0
  16. janus/lib/python3.10/site-packages/timm/layers/bottleneck_attn.py +157 -0
  17. janus/lib/python3.10/site-packages/timm/layers/classifier.py +283 -0
  18. janus/lib/python3.10/site-packages/timm/layers/conv2d_same.py +110 -0
  19. janus/lib/python3.10/site-packages/timm/layers/create_conv2d.py +36 -0
  20. janus/lib/python3.10/site-packages/timm/layers/create_norm.py +60 -0
  21. janus/lib/python3.10/site-packages/timm/layers/drop.py +182 -0
  22. janus/lib/python3.10/site-packages/timm/layers/eca.py +145 -0
  23. janus/lib/python3.10/site-packages/timm/layers/evo_norm.py +352 -0
  24. janus/lib/python3.10/site-packages/timm/layers/filter_response_norm.py +68 -0
  25. janus/lib/python3.10/site-packages/timm/layers/format.py +58 -0
  26. janus/lib/python3.10/site-packages/timm/layers/global_context.py +67 -0
  27. janus/lib/python3.10/site-packages/timm/layers/grid.py +49 -0
  28. janus/lib/python3.10/site-packages/timm/layers/grn.py +39 -0
  29. janus/lib/python3.10/site-packages/timm/layers/halo_attn.py +233 -0
  30. janus/lib/python3.10/site-packages/timm/layers/helpers.py +43 -0
  31. janus/lib/python3.10/site-packages/timm/layers/interpolate.py +68 -0
  32. janus/lib/python3.10/site-packages/timm/layers/layer_scale.py +38 -0
  33. janus/lib/python3.10/site-packages/timm/layers/mixed_conv2d.py +51 -0
  34. janus/lib/python3.10/site-packages/timm/layers/padding.py +87 -0
  35. janus/lib/python3.10/site-packages/timm/layers/patch_embed.py +307 -0
  36. janus/lib/python3.10/site-packages/timm/layers/pool2d_same.py +73 -0
  37. janus/lib/python3.10/site-packages/timm/layers/pos_embed_sincos.py +443 -0
  38. janus/lib/python3.10/site-packages/timm/layers/split_batchnorm.py +75 -0
  39. janus/lib/python3.10/site-packages/timm/layers/squeeze_excite.py +102 -0
  40. janus/lib/python3.10/site-packages/timm/layers/trace_utils.py +13 -0
  41. janus/lib/python3.10/site-packages/timm/optim/__pycache__/__init__.cpython-310.pyc +0 -0
  42. janus/lib/python3.10/site-packages/timm/optim/__pycache__/_param_groups.cpython-310.pyc +0 -0
  43. janus/lib/python3.10/site-packages/timm/optim/__pycache__/adopt.cpython-310.pyc +0 -0
  44. janus/lib/python3.10/site-packages/timm/optim/__pycache__/lion.cpython-310.pyc +0 -0
  45. janus/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc +0 -0
  46. janus/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc +0 -0
  47. janus/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc +0 -0
  48. janus/lib/python3.10/site-packages/timm/optim/__pycache__/radam.cpython-310.pyc +0 -0
  49. janus/lib/python3.10/site-packages/timm/optim/__pycache__/sgdp.cpython-310.pyc +0 -0
  50. janus/lib/python3.10/site-packages/timm/optim/__pycache__/sgdw.cpython-310.pyc +0 -0
janus/lib/python3.10/site-packages/distutils-precedence.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2638ce9e2500e572a5e0de7faed6661eb569d1b696fcba07b0dd223da5f5d224
3
+ size 151
janus/lib/python3.10/site-packages/einops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (692 Bytes). View file
 
janus/lib/python3.10/site-packages/einops/__pycache__/_backends.cpython-310.pyc ADDED
Binary file (28 kB). View file
 
janus/lib/python3.10/site-packages/einops/__pycache__/_torch_specific.cpython-310.pyc ADDED
Binary file (4.18 kB). View file
 
janus/lib/python3.10/site-packages/einops/experimental/__pycache__/indexing.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
janus/lib/python3.10/site-packages/einops/experimental/indexing.py ADDED
@@ -0,0 +1,398 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+
3
+ Indexing one array with the other(s).
4
+
5
+ Concept for discussion.
6
+
7
+ Notation targets hard cases, not simple ones, like indexing of 1d-array with another 1d-array
8
+ (notation supports that, but you can't simplify arr[ind], and there is no reason to)
9
+
10
+ Examples
11
+
12
+ 1. query for every token in sequence a token in the image. Images and sequences are paired
13
+ einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, [h_indices_bt, w_indices_bt])
14
+
15
+ this is equivalent, so you can pass indexers independently or together
16
+ einindex('b t c <- b h w c, [h, w] b t', arr_bhwc, np.asarray([h_indices_bt, w_indices_bt]))
17
+
18
+ after some thinking I decided that having first axis for indexing variable is not too restrictive,
19
+ but should simplify mapping of such cases.
20
+ For this reason [...] part should always go first in indexer.
21
+
22
+ This makes the largest difference with einindex https://github.com/malmaud/einindex,
23
+ which has almost identical grammar, but puts special dimension last, while we put it first.
24
+ This trick allows naturally decomposing multiindex into individual dimensions or vice versa.
25
+
26
+
27
+ 2. query for every token in the video the most suitable word in a (matching) sentence
28
+ einindex('b t h w <- seq b, [seq] t b h w', arr_tbc, [t_indices_bhw])
29
+
30
+ note, that only one indexer is used, but still it has to be enclosed in the list.
31
+ That's a price for being generic. Alternatively leading singleton dimension can be added.
32
+
33
+
34
+ 3. (not supported now, future planning)
35
+ for every timeframe in a video, find the token with the highest norm (across h and w), and compose a new stack of them
36
+ indices_2bt = argmax(x_bthwc.norm(dim=-1), 'b t h w -> [h, w] b t')
37
+ selected_embeddings_btc = einindex('b t c <- b t h w c, [h, w] b t', x_bthwc, indices_2bt)
38
+
39
+ while currently question is around 'how do we index',
40
+ it is important to pre-align that with a question 'what are natural ways to get indices'.
41
+ Most common are min/max. less common options: topk (works here), random sampling.
42
+
43
+
44
+
45
+ Some important properties of this notation:
46
+ - support for multiple indexers, including using a single tensor to keep multiple indexers
47
+ - 'batch' indexing, when some axes of indexer and array should be matched
48
+ - universal (one-indexing-to-rule-them-all)
49
+ - extensible for (named) ellipses, including variadic number of indexers
50
+ - extensible for einops-style compositions and decompositions
51
+ - extensible for outer indexing when indexers are not aligned
52
+
53
+ Current implementation based on python array api and uses loops,
54
+ because no appropriate indexing available in the standard.
55
+
56
+ """
57
+
58
+ from typing import List, Union, TypeVar, Tuple
59
+
60
+ from einops import EinopsError
61
+
62
+ T = TypeVar("T")
63
+
64
+
65
+ class CompositionDecomposition:
66
+ def __init__(
67
+ self,
68
+ decomposed_shape: List[str],
69
+ composed_shape: List[List[str]],
70
+ ):
71
+ flat_shape = []
72
+ for x in composed_shape:
73
+ flat_shape.extend(x)
74
+
75
+ self.compose_transposition: Tuple[int, ...] = tuple([decomposed_shape.index(x) for x in flat_shape])
76
+ self.decompose_transposition: Tuple[int, ...] = tuple([flat_shape.index(x) for x in decomposed_shape])
77
+ self.composed_shape = composed_shape
78
+ self.decomposed_shape = decomposed_shape
79
+
80
+ def decompose(self, x, known_axes_lengths: dict[str, int]):
81
+ xp = x.__array_namespace__()
82
+ shape = x.shape
83
+
84
+ flat_shape = []
85
+
86
+ for i, axis_group in enumerate(self.composed_shape):
87
+ unknown_axis_name = None
88
+ known_sizes_prod = 1
89
+ for axis_name in axis_group:
90
+ if axis_name in known_axes_lengths:
91
+ known_sizes_prod *= known_axes_lengths[axis_name]
92
+ else:
93
+ if unknown_axis_name is None:
94
+ unknown_axis_name = axis_name
95
+ else:
96
+ raise EinopsError("Can't infer the size")
97
+
98
+ if unknown_axis_name is None:
99
+ assert shape[i] == known_sizes_prod
100
+ else:
101
+ known_axes_lengths[unknown_axis_name] = shape[i] // known_sizes_prod
102
+
103
+ for axis in axis_group:
104
+ flat_shape.append(known_axes_lengths[axis])
105
+
106
+ x = xp.reshape(x, flat_shape)
107
+ return xp.permute_dims(x, self.decompose_transposition)
108
+
109
+ def compose(self, x, known_axes_lengths: dict[str, int]):
110
+ xp = x.__array_namespace__()
111
+
112
+ for axis_len, axis_name in zip(x.shape, self.decomposed_shape):
113
+ if axis_name in known_axes_lengths:
114
+ assert known_axes_lengths[axis_name] == axis_len
115
+ else:
116
+ known_axes_lengths[axis_name] = axis_len
117
+
118
+ x = xp.permute_dims(x, self.compose_transposition)
119
+ new_shape = []
120
+ for axis_group in self.composed_shape:
121
+ composed_axis_size = 1
122
+ for axis_name in axis_group:
123
+ composed_axis_size *= known_axes_lengths[axis_name]
124
+ new_shape.append(composed_axis_size)
125
+
126
+ return xp.reshape(x, tuple(new_shape))
127
+
128
+
129
+ def arange_at_position(xp, n_axes, axis, axis_len, device=None):
130
+ x = xp.arange(axis_len, dtype=xp.int64, device=device)
131
+ shape = [1] * n_axes
132
+ shape[axis] = axis_len
133
+ x = xp.reshape(x, shape)
134
+ return x
135
+
136
+
137
+ class IndexingFormula:
138
+ def __init__(self, pattern: str):
139
+ """
140
+ :param pattern: example 'b t c <- b hsel wsel c, [hsel, wsel] b t'
141
+ """
142
+ self.pattern = pattern
143
+ left, right = pattern.split("<-")
144
+ arg_split = right.index(",")
145
+ arr_pattern, ind_pattern = right[:arg_split], right[arg_split + 1 :]
146
+ ind_pattern = ind_pattern.strip()
147
+ # print(
148
+ # arr_pattern, '\n',
149
+ # ind_pattern,
150
+ # )
151
+ assert ind_pattern.startswith("["), "composition axis should go first in indexer (second argument) [h w] i j k"
152
+ composition_start = ind_pattern.index("[")
153
+ composition_end = ind_pattern.index("]")
154
+ composition = ind_pattern[composition_start + 1 : composition_end]
155
+ ind_other_axes = ind_pattern[composition_end + 1 :]
156
+
157
+ self.result_axes_names = left.split()
158
+ self.array_axes_names = arr_pattern.split()
159
+ self.indexing_axes_names = [x.strip() for x in composition.split(",")]
160
+ self.indexer_other_axes_names = ind_other_axes.split()
161
+
162
+ for group_name, group in [
163
+ ("result", self.result_axes_names),
164
+ ("array", self.array_axes_names),
165
+ ("indexer", self.indexing_axes_names + self.indexer_other_axes_names),
166
+ ]:
167
+ if len(set(group)) != len(group):
168
+ # need more verbosity, which axis, raise
169
+ raise EinopsError(f"{group_name} pattern ({group}) contains a duplicated axis")
170
+
171
+ axis_groups = [
172
+ self.result_axes_names,
173
+ self.array_axes_names,
174
+ self.indexing_axes_names,
175
+ self.indexer_other_axes_names,
176
+ ]
177
+
178
+ all_axes = set()
179
+ for group in axis_groups:
180
+ all_axes.update(group)
181
+
182
+ self.indexer_axes = []
183
+ self.batch_axes = []
184
+ self.result_and_index_axes = []
185
+ self.result_and_array_axes = []
186
+
187
+ for axis in all_axes:
188
+ presence = tuple(axis in g for g in axis_groups)
189
+ # want match-case here. sweet dreams
190
+ if presence == (False, True, True, False):
191
+ self.indexer_axes.append(axis)
192
+ elif presence[2]:
193
+ raise EinopsError(f"Wrong usage of indexer variable {axis}")
194
+ elif presence == (True, True, False, True):
195
+ self.batch_axes.append(axis)
196
+ elif presence == (True, False, False, True):
197
+ self.result_and_index_axes.append(axis)
198
+ elif presence == (True, True, False, False):
199
+ self.result_and_array_axes.append(axis)
200
+ else:
201
+ # TODO better categorization of wrong usage patterns
202
+ raise EinopsError(f"{axis} is used incorrectly in {pattern}")
203
+
204
+ assert set(self.indexer_axes) == set(self.indexing_axes_names)
205
+ # order of these variables matters, since we can't lose mapping here
206
+ self.indexer_axes = self.indexing_axes_names
207
+
208
+ self.array_composition = CompositionDecomposition(
209
+ decomposed_shape=self.array_axes_names,
210
+ composed_shape=[self.batch_axes + self.indexer_axes, self.result_and_array_axes],
211
+ )
212
+
213
+ self.index_composition = CompositionDecomposition(
214
+ decomposed_shape=self.indexer_other_axes_names,
215
+ # single axis after composition
216
+ composed_shape=[self.batch_axes + self.result_and_index_axes],
217
+ )
218
+
219
+ self.result_composition = CompositionDecomposition(
220
+ decomposed_shape=self.result_axes_names,
221
+ composed_shape=[self.batch_axes + self.result_and_index_axes, self.result_and_array_axes],
222
+ )
223
+
224
+ def apply_to_array_api(self, arr: T, ind: Union[T, List[T]]):
225
+ known_axes_sizes: dict[str, int] = {}
226
+ xp = arr.__array_namespace__()
227
+
228
+ if not isinstance(ind, list):
229
+ ind = [ind[i, ...] for i in range(ind.shape[0])]
230
+
231
+ for indexer in ind:
232
+ assert len(indexer.shape) == len(self.indexer_other_axes_names)
233
+
234
+ # step 1. transpose, reshapes of arr; learn its dimensions
235
+ arr_2d = self.array_composition.compose(arr, known_axes_sizes)
236
+
237
+ # step 2. compute shifts and create an actual indexing array
238
+ shift = 1
239
+ full_index = xp.zeros([1] * len(ind[0].shape), dtype=xp.int64, device=arr.device)
240
+
241
+ # original order: [*batch-like axes, *indexing_axes,]
242
+ # now we need to traverse them in the opposite direction
243
+
244
+ for axis_name, indexer in list(zip(self.indexing_axes_names, ind))[::-1]:
245
+ full_index = full_index + shift * (indexer % known_axes_sizes[axis_name])
246
+ shift *= known_axes_sizes[axis_name]
247
+
248
+ for axis_name in self.batch_axes[::-1]:
249
+ axis_id = self.indexer_other_axes_names.index(axis_name)
250
+ full_index = (
251
+ full_index
252
+ + arange_at_position(
253
+ xp,
254
+ len(self.indexer_other_axes_names),
255
+ axis=axis_id,
256
+ axis_len=known_axes_sizes[axis_name],
257
+ device=arr.device,
258
+ )
259
+ * shift
260
+ )
261
+ shift *= known_axes_sizes[axis_name]
262
+
263
+ assert shift == arr_2d.shape[0]
264
+
265
+ # step 3. Flatten index
266
+ full_index = self.index_composition.compose(full_index, known_axes_sizes)
267
+
268
+ # step 4. indexing
269
+ # python array api lacks any integer indexing, so... I use loops.
270
+ # did you know that there is conceptual programming ... just like art?
271
+ # result_2d = arr_2d[full_index]
272
+ result_2d = xp.stack([arr_2d[full_index[i], :] for i in range(full_index.shape[0])])
273
+
274
+ # step 5. doing resulting
275
+ result = self.result_composition.decompose(result_2d, known_axes_sizes)
276
+ return result
277
+
278
+
279
def einindex(pattern: str, arr: T, /, ind: Union[T, List[T]]):
    """Index ``arr`` with indexer(s) ``ind`` according to ``pattern``.

    Demonstrates how einindex should work.
    Supports data-api compliant arrays.
    """
    return IndexingFormula(pattern).apply_to_array_api(arr, ind)
286
+
287
+
288
def test_composition_and_decomposition():
    """Round-trip check: compose then decompose recovers the original array."""
    # numpy.array_api was removed in NumPy 2.0; the main numpy namespace is
    # array-API compatible there (reshape / permute_dims / __array_namespace__).
    import numpy as np

    x = np.arange(2 * 3 * 5 * 7)
    x = np.reshape(x, (2, 3, 5, 7))
    comp = CompositionDecomposition(
        decomposed_shape=["a", "b", "c", "d"],
        composed_shape=[["a", "b"], ["c", "d"]],
    )
    assert comp.compose(x, known_axes_lengths={}).shape == (2 * 3, 5 * 7)

    # an empty group produces a singleton axis
    y = CompositionDecomposition(
        decomposed_shape=["a", "b", "c", "d"],
        composed_shape=[["a", "b"], [], ["c", "d"]],
    ).compose(x, {})
    assert y.shape == (2 * 3, 1, 5 * 7)
    assert np.all(np.reshape(x, (-1,)) == np.reshape(y, (-1,)))

    # arbitrary regrouping must still round-trip exactly
    comp = CompositionDecomposition(
        decomposed_shape=["a", "b", "e", "c", "d"],
        composed_shape=[["e", "c"], ["b"], ["a", "d"]],
    )
    x = np.arange(2 * 3 * 5 * 7 * 3)
    x = np.reshape(x, (2, 3, 5, 7, 3))

    axes = {}
    y = comp.compose(x, axes)
    x2 = comp.decompose(y, axes)
    assert np.all(x == x2)
317
+
318
+
319
def test_simple_indexing():
    """einindex on a 2d array matches element-by-element lookup."""
    # numpy.array_api was removed in NumPy 2.0; use the array-API compatible
    # main numpy namespace instead.
    import numpy as np

    # simple 2d test
    arr = np.reshape(np.arange(5 * 7), (5, 7))
    ind = np.arange(7) % 5
    x = einindex("j <- i j, [i] j", arr, [ind])
    for j, i in enumerate(ind):
        assert arr[i, j] == x[j]

    # same result when the array is given transposed
    y = einindex("j <- j i, [i] j", np.permute_dims(arr, (1, 0)), [ind])
    for j, i in enumerate(ind):
        assert arr[i, j] == y[j]
332
+
333
+
334
def test_multidimensional_indexing():
    """Two indexers ([h, w]) passed as a list or one stacked array agree."""
    # numpy.array_api was removed in NumPy 2.0; use the array-API compatible
    # main numpy namespace instead.
    import numpy as np

    # digits encode position: value 1000*b + 100*h + 10*w + c
    embedding_bhwc = (
        +arange_at_position(np, 4, 0, 2) * 1000
        + arange_at_position(np, 4, 1, 3) * 100
        + arange_at_position(np, 4, 2, 5) * 10
        + arange_at_position(np, 4, 3, 7) * 1
    )

    hindices_bt = np.reshape(np.arange(6), (2, 3)) % 3
    windices_bt = np.reshape(np.arange(6), (2, 3)) % 5

    # imagine that you have pairs of image <> sentence
    # your goal is to get most suitable token from image for every token in sentence
    # thus for every token in sentence you compute best k and v

    result = einindex("c t b <- b h w c, [h, w] b t", embedding_bhwc, [hindices_bt, windices_bt])
    # example of using a single array for indexing multiple axes
    hw_indices_bt = np.stack([hindices_bt, windices_bt])
    result2 = einindex("c t b <- b h w c, [h, w] b t", embedding_bhwc, hw_indices_bt)
    assert np.all(result == result2)

    # check vs manual element computation
    result_manual = result * 0
    for b in range(2):
        for t in range(3):
            for c in range(7):
                h = hindices_bt[b, t]
                w = windices_bt[b, t]
                result_manual[c, t, b] = embedding_bhwc[b, h, w, c]

    assert np.all(result == result_manual)
367
+
368
+
369
def test_reverse_indexing():
    """Batch axes (g, b) are matched while t is selected by the indexer."""
    # numpy.array_api was removed in NumPy 2.0; use the array-API compatible
    # main numpy namespace instead.
    import numpy as np

    C, T, B = 2, 3, 5
    # G = GPU, batch-like variable
    G = 4
    H = 7
    W = 9

    # digits encode position: value 1000*g + 100*t + 10*b + c
    arr_gtbc = (
        +arange_at_position(np, 4, 0, G) * 1000
        + arange_at_position(np, 4, 1, T) * 100
        + arange_at_position(np, 4, 2, B) * 10
        + arange_at_position(np, 4, 3, C) * 1
    )

    t_indices_gbhw = np.reshape(np.arange(G * B * H * W), (G, B, H, W)) % T

    result = einindex("g b c h w <- g t b c, [t] g b h w", arr_gtbc, [t_indices_gbhw])

    # check vs manual element computation
    result_manual = result * 0
    for g in range(G):
        for b in range(B):
            for c in range(C):
                for h in range(H):
                    for w in range(W):
                        t = t_indices_gbhw[g, b, h, w]
                        result_manual[g, b, c, h, w] = arr_gtbc[g, t, b, c]

    assert np.all(result == result_manual)
janus/lib/python3.10/site-packages/einops/layers/__pycache__/_einmix.cpython-310.pyc ADDED
Binary file (7.89 kB). View file
 
janus/lib/python3.10/site-packages/einops/layers/__pycache__/oneflow.cpython-310.pyc ADDED
Binary file (2.15 kB). View file
 
janus/lib/python3.10/site-packages/einops/layers/__pycache__/paddle.cpython-310.pyc ADDED
Binary file (2.14 kB). View file
 
janus/lib/python3.10/site-packages/einops/layers/__pycache__/tensorflow.cpython-310.pyc ADDED
Binary file (3.44 kB). View file
 
janus/lib/python3.10/site-packages/einops/layers/oneflow.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Dict, cast
2
+
3
+ import oneflow as flow
4
+
5
+ from . import RearrangeMixin, ReduceMixin
6
+ from ._einmix import _EinmixMixin
7
+
8
+ __author__ = "Tianhe Ren & Depeng Liang"
9
+
10
+
11
class Rearrange(RearrangeMixin, flow.nn.Module):
    """oneflow layer applying an einops rearrange pattern to its input."""

    def forward(self, input):
        # recipe is prepared by RearrangeMixin from the constructor pattern
        return self._apply_recipe(input)
14
+
15
+
16
class Reduce(ReduceMixin, flow.nn.Module):
    """oneflow layer applying an einops reduce pattern to its input."""

    def forward(self, input):
        # recipe is prepared by ReduceMixin from the constructor pattern
        return self._apply_recipe(input)
19
+
20
+
21
class EinMix(_EinmixMixin, flow.nn.Module):
    """oneflow EinMix layer: optional rearrange -> einsum with a learned
    weight -> optional bias -> optional rearrange.

    _EinmixMixin parses the pattern and calls the two _create_* hooks below.
    """

    def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
        # weight initialized uniformly in [-weight_bound, weight_bound]
        self.weight = flow.nn.Parameter(
            flow.zeros(weight_shape).uniform_(-weight_bound, weight_bound), requires_grad=True
        )
        # bias is optional: bias_shape is None when the pattern has no bias
        if bias_shape is not None:
            self.bias = flow.nn.Parameter(flow.zeros(bias_shape).uniform_(-bias_bound, bias_bound), requires_grad=True)
        else:
            self.bias = None

    def _create_rearrange_layers(
        self,
        pre_reshape_pattern: Optional[str],
        pre_reshape_lengths: Optional[Dict],
        post_reshape_pattern: Optional[str],
        post_reshape_lengths: Optional[Dict],
    ):
        # rearranges around the einsum are only built when the mixin
        # decided the pattern requires them
        self.pre_rearrange = None
        if pre_reshape_pattern is not None:
            self.pre_rearrange = Rearrange(pre_reshape_pattern, **cast(dict, pre_reshape_lengths))

        self.post_rearrange = None
        if post_reshape_pattern is not None:
            self.post_rearrange = Rearrange(post_reshape_pattern, **cast(dict, post_reshape_lengths))

    def forward(self, input):
        # optional pre-reshape -> einsum mix -> optional bias -> post-reshape
        if self.pre_rearrange is not None:
            input = self.pre_rearrange(input)
        result = flow.einsum(self.einsum_pattern, input, self.weight)
        if self.bias is not None:
            result += self.bias
        if self.post_rearrange is not None:
            result = self.post_rearrange(result)
        return result
janus/lib/python3.10/site-packages/einops/parsing.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from einops import EinopsError
2
+ import keyword
3
+ import warnings
4
+ from typing import List, Optional, Set, Tuple, Union
5
+
6
+ _ellipsis: str = "…" # NB, this is a single unicode symbol. String is used as it is not a list, but can be iterated
7
+
8
+
9
class AnonymousAxis(object):
    """Important thing: all instances of this class are not equal to each other"""

    def __init__(self, value: str):
        self.value = int(value)
        # Length 1 has a dedicated representation (empty composition) and
        # non-positive lengths are nonsensical — reject both with guard clauses.
        if self.value == 1:
            raise EinopsError("No need to create anonymous axis of length 1. Report this as an issue")
        if self.value < 1:
            raise EinopsError("Anonymous axis should have positive length, not {}".format(self.value))

    def __repr__(self):
        return "{}-axis".format(str(self.value))
22
+
23
+
24
class ParsedExpression:
    """
    non-mutable structure that contains information about one side of expression (e.g. 'b c (h w)')
    and keeps some information important for downstream
    """

    def __init__(self, expression: str, *, allow_underscore: bool = False, allow_duplicates: bool = False):
        self.has_ellipsis: bool = False
        self.has_ellipsis_parenthesized: Optional[bool] = None
        self.identifiers: Set[str] = set()
        # that's axes like 2, 3, 4 or 5. Axes with size 1 are exceptional and replaced with empty composition
        self.has_non_unitary_anonymous_axes: bool = False
        # composition keeps structure of composite axes, see how different corner cases are handled in tests
        self.composition: List[Union[List[str], str]] = []
        # dots are only valid as a single three-dot ellipsis, which is
        # normalized to the one-character _ellipsis symbol before parsing
        if "." in expression:
            if "..." not in expression:
                raise EinopsError("Expression may contain dots only inside ellipsis (...)")
            if str.count(expression, "...") != 1 or str.count(expression, ".") != 3:
                raise EinopsError(
                    "Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor "
                )
            expression = expression.replace("...", _ellipsis)
            self.has_ellipsis = True

        # non-None while parsing inside '(...)'; collects that group's axes
        bracket_group: Optional[List[str]] = None

        def add_axis_name(x):
            # validate a completed identifier and record it in the composition
            if x in self.identifiers:
                if not (allow_underscore and x == "_") and not allow_duplicates:
                    raise EinopsError('Indexing expression contains duplicate dimension "{}"'.format(x))
            if x == _ellipsis:
                self.identifiers.add(_ellipsis)
                if bracket_group is None:
                    self.composition.append(_ellipsis)
                    self.has_ellipsis_parenthesized = False
                else:
                    bracket_group.append(_ellipsis)
                    self.has_ellipsis_parenthesized = True
            else:
                is_number = str.isdecimal(x)
                if is_number and int(x) == 1:
                    # handling the case of anonymous axis of length 1
                    if bracket_group is None:
                        self.composition.append([])
                    else:
                        pass  # no need to think about 1s inside parenthesis
                    return
                is_axis_name, reason = self.check_axis_name_return_reason(x, allow_underscore=allow_underscore)
                if not (is_number or is_axis_name):
                    raise EinopsError("Invalid axis identifier: {}\n{}".format(x, reason))
                if is_number:
                    x = AnonymousAxis(x)
                self.identifiers.add(x)
                if is_number:
                    self.has_non_unitary_anonymous_axes = True
                if bracket_group is None:
                    self.composition.append([x])
                else:
                    bracket_group.append(x)

        # single left-to-right scan; identifiers are accumulated character
        # by character and flushed on space/parenthesis boundaries
        current_identifier = None
        for char in expression:
            if char in "() ":
                if current_identifier is not None:
                    add_axis_name(current_identifier)
                current_identifier = None
                if char == "(":
                    if bracket_group is not None:
                        raise EinopsError("Axis composition is one-level (brackets inside brackets not allowed)")
                    bracket_group = []
                elif char == ")":
                    if bracket_group is None:
                        raise EinopsError("Brackets are not balanced")
                    self.composition.append(bracket_group)
                    bracket_group = None
            elif str.isalnum(char) or char in ["_", _ellipsis]:
                if current_identifier is None:
                    current_identifier = char
                else:
                    current_identifier += char
            else:
                raise EinopsError("Unknown character '{}'".format(char))

        if bracket_group is not None:
            raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression))
        # flush a trailing identifier not followed by a delimiter
        if current_identifier is not None:
            add_axis_name(current_identifier)

    def flat_axes_order(self) -> List:
        """Return all axes in order, ignoring the grouping structure."""
        result = []
        for composed_axis in self.composition:
            assert isinstance(composed_axis, list), "does not work with ellipsis"
            for axis in composed_axis:
                result.append(axis)
        return result

    def has_composed_axes(self) -> bool:
        """True if any group composes two or more axes."""
        # this will ignore 1 inside brackets
        for axes in self.composition:
            if isinstance(axes, list) and len(axes) > 1:
                return True
        return False

    @staticmethod
    def check_axis_name_return_reason(name: str, allow_underscore: bool = False) -> Tuple[bool, str]:
        """Validate an axis name; return (is_valid, reason-when-invalid)."""
        if not str.isidentifier(name):
            return False, "not a valid python identifier"
        elif name[0] == "_" or name[-1] == "_":
            if name == "_" and allow_underscore:
                return True, ""
            return False, "axis name should should not start or end with underscore"
        else:
            if keyword.iskeyword(name):
                warnings.warn("It is discouraged to use axes names that are keywords: {}".format(name), RuntimeWarning)
            if name in ["axis"]:
                warnings.warn(
                    "It is discouraged to use 'axis' as an axis name " "and will raise an error in future",
                    FutureWarning,
                )
            return True, ""

    @staticmethod
    def check_axis_name(name: str) -> bool:
        """
        Valid axes names are python identifiers except keywords,
        and additionally should not start or end with underscore
        """
        is_valid, _reason = ParsedExpression.check_axis_name_return_reason(name)
        return is_valid
janus/lib/python3.10/site-packages/timm/layers/__init__.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .activations import *
2
+ from .adaptive_avgmax_pool import \
3
+ adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d
4
+ from .attention2d import MultiQueryAttention2d, Attention2d, MultiQueryAttentionV2
5
+ from .attention_pool import AttentionPoolLatent
6
+ from .attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding
7
+ from .blur_pool import BlurPool2d, create_aa
8
+ from .classifier import create_classifier, ClassifierHead, NormMlpClassifierHead, ClNormMlpClassifierHead
9
+ from .cond_conv2d import CondConv2d, get_condconv_initializer
10
+ from .config import is_exportable, is_scriptable, is_no_jit, use_fused_attn, \
11
+ set_exportable, set_scriptable, set_no_jit, set_layer_config, set_fused_attn, \
12
+ set_reentrant_ckpt, use_reentrant_ckpt
13
+ from .conv2d_same import Conv2dSame, conv2d_same
14
+ from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct
15
+ from .create_act import create_act_layer, get_act_layer, get_act_fn
16
+ from .create_attn import get_attn, create_attn
17
+ from .create_conv2d import create_conv2d
18
+ from .create_norm import get_norm_layer, create_norm_layer
19
+ from .create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer
20
+ from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path
21
+ from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn
22
+ from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\
23
+ EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a
24
+ from .fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm
25
+ from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d
26
+ from .format import Format, get_channel_dim, get_spatial_dim, nchw_to, nhwc_to
27
+ from .gather_excite import GatherExcite
28
+ from .global_context import GlobalContext
29
+ from .grid import ndgrid, meshgrid
30
+ from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple
31
+ from .hybrid_embed import HybridEmbed, HybridEmbedWithSize
32
+ from .inplace_abn import InplaceAbn
33
+ from .layer_scale import LayerScale, LayerScale2d
34
+ from .linear import Linear
35
+ from .mixed_conv2d import MixedConv2d
36
+ from .mlp import Mlp, GluMlp, GatedMlp, SwiGLU, SwiGLUPacked, ConvMlp, GlobalResponseNormMlp
37
+ from .non_local_attn import NonLocalAttn, BatNonLocalAttn
38
+ from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm, RmsNorm2d, SimpleNorm, SimpleNorm2d
39
+ from .norm_act import BatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d,\
40
+ SyncBatchNormAct, convert_sync_batchnorm, FrozenBatchNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d
41
+ from .padding import get_padding, get_same_padding, pad_same
42
+ from .patch_dropout import PatchDropout
43
+ from .patch_embed import PatchEmbed, PatchEmbedWithSize, resample_patch_embed
44
+ from .pool2d_same import AvgPool2dSame, create_pool2d
45
+ from .pos_embed import resample_abs_pos_embed, resample_abs_pos_embed_nhwc
46
+ from .pos_embed_rel import RelPosMlp, RelPosBias, RelPosBiasTf, gen_relative_position_index, gen_relative_log_coords, \
47
+ resize_rel_pos_bias_table, resize_rel_pos_bias_table_simple, resize_rel_pos_bias_table_levit
48
+ from .pos_embed_sincos import pixel_freq_bands, freq_bands, build_sincos2d_pos_embed, build_fourier_pos_embed, \
49
+ build_rotary_pos_embed, apply_rot_embed, apply_rot_embed_cat, apply_rot_embed_list, apply_keep_indices_nlc, \
50
+ FourierEmbed, RotaryEmbedding, RotaryEmbeddingCat
51
+ from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite
52
+ from .selective_kernel import SelectiveKernel
53
+ from .separable_conv import SeparableConv2d, SeparableConvNormAct
54
+ from .space_to_depth import SpaceToDepth, DepthToSpace
55
+ from .split_attn import SplitAttn
56
+ from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model
57
+ from .std_conv import StdConv2d, StdConv2dSame, ScaledStdConv2d, ScaledStdConv2dSame
58
+ from .test_time_pool import TestTimePoolHead, apply_test_time_pool
59
+ from .trace_utils import _assert, _float_to_int
60
+ from .typing import LayerType, PadType
61
+ from .weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_, \
62
+ init_weight_jax, init_weight_vit
janus/lib/python3.10/site-packages/timm/layers/attention2d.py ADDED
@@ -0,0 +1,351 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional, Type, Union
2
+
3
+ import torch
4
+ from torch import nn as nn
5
+ from torch.nn import functional as F
6
+
7
+ from .config import use_fused_attn
8
+ from .create_conv2d import create_conv2d
9
+ from .helpers import to_2tuple
10
+ from .pool2d_same import create_pool2d
11
+
12
+
13
class MultiQueryAttentionV2(nn.Module):
    """Multi Query Attention (accelerator-optimized layout).

    Fast Transformer Decoding: One Write-Head is All You Need
    https://arxiv.org/pdf/1911.02150.pdf

    A single key/value head is shared across all query heads. The einsum
    contractions are arranged so that contracted indices come last and the
    remaining indices keep their input order, removing several explicit
    transposes (roughly 3x faster than a naive V1 implementation).
    """

    def __init__(
            self,
            dim: int,
            dim_out: Optional[int] = None,
            num_heads: int = 8,
            key_dim: int = 64,
            value_dim: int = 64,
            attn_drop: float = 0.,
            proj_drop: float = 0.,
    ):
        """Initializer.

        Args:
            dim: Input channel dim.
            dim_out: Output channel dim, defaults to dim.
            num_heads: Number of query heads (keys/values are shared).
            key_dim: Per-head key/query width.
            value_dim: Per-head value width.
            attn_drop: Dropout applied to the attention matrix.
            proj_drop: Dropout applied to the output projection.
        """
        super().__init__()
        dim_out = dim_out or dim
        self.num_heads = num_heads
        self.key_dim = key_dim
        self.value_dim = value_dim
        self.scale = key_dim ** -0.5

        # Projections are kept as raw weight tensors and applied via einsum.
        self.query_proj = nn.Parameter(torch.randn([self.num_heads, self.key_dim, dim]))
        self.key_proj = nn.Parameter(torch.randn([dim, self.key_dim]))
        self.value_proj = nn.Parameter(torch.randn([dim, self.value_dim]))
        self.attn_drop = nn.Dropout(attn_drop)
        self.out_proj = nn.Parameter(torch.randn([dim_out, self.num_heads, self.value_dim]))
        self.proj_drop = nn.Dropout(proj_drop)

    def _reshape_input(self, t):
        """Flatten spatial dims: (B, C, *spatial) -> (B, N, C)."""
        batch, chs = t.shape[0], t.shape[1]
        return t.reshape(batch, chs, -1).transpose(1, 2)

    def forward(self, x, m: Optional[torch.Tensor] = None):
        """Attend from x (query source) to memory m; m defaults to x (self-attention)."""
        b, _, h, w = x.shape
        mem = x if m is None else m

        x_seq = self._reshape_input(x)    # (b, n, d)
        m_seq = self._reshape_input(mem)  # (b, m, d)

        # q: (b, n, heads, k); k/v have no head axis — shared across heads.
        q = torch.einsum('bnd,hkd->bnhk', x_seq, self.query_proj)
        k = torch.einsum('bmd,dk->bmk', m_seq, self.key_proj)
        v = torch.einsum('bmd,dv->bmv', m_seq, self.value_proj)

        scores = torch.einsum('bnhk,bmk->bnhm', q, k) * self.scale
        attn = self.attn_drop(scores.softmax(dim=-1))

        o = torch.einsum('bnhm,bmv->bnhv', attn, v)
        out = torch.einsum('bnhv,dhv->bdn', o, self.out_proj)
        out = self.proj_drop(out)
        # restore NCHW using the query's spatial extent
        return out.reshape(b, -1, h, w)
80
+
81
+
82
class MultiQueryAttention2d(nn.Module):
    """Multi Query Attention with spatial downsampling.

    3 parameters are introduced for the spatial downsampling:
    1. kv_stride: downsampling factor on Key and Values only.
    2. query_strides: horizontal & vertical strides on Query only.

    This is an optimized version.
    1. Projections in Attention is explicit written out as 1x1 Conv2D.
    2. Additional reshapes are introduced to bring a up to 3x speed up.
    """
    # Declared Final so the fused/non-fused branch folds under torch.jit.script.
    fused_attn: torch.jit.Final[bool]

    def __init__(
            self,
            dim: int,
            dim_out: Optional[int] = None,
            num_heads: int = 8,
            key_dim: Optional[int] = None,
            value_dim: Optional[int] = None,
            query_strides: int = 1,
            kv_stride: int = 1,
            dw_kernel_size: int = 3,
            dilation: int = 1,
            padding: Union[str, int, List[int]] = '',
            attn_drop: float = 0.,
            proj_drop: float = 0.,
            norm_layer: Type[nn.Module] = nn.BatchNorm2d,
            use_bias: bool = False,
    ):
        """Initializer.

        Args:
            num_heads: Number of attention heads.
            key_dim: Size of the attention key dimension (defaults to dim // num_heads).
            value_dim: Size of the attention value dimension (defaults to dim // num_heads).
            query_strides: Vertical stride size for query only.
            kv_stride: Key and value stride size.
            dw_kernel_size: Spatial dimension of the depthwise kernel.
            dilation: Dilation for the key/value depthwise downsampling convs.
            padding: Padding mode forwarded to create_conv2d ('' = default, 'same' = TF-style).
            attn_drop: Dropout on the attention matrix.
            proj_drop: Dropout on the output projection.
            norm_layer: Norm applied after each spatial downsample.
            use_bias: Add bias to the 1x1 projection convs.
        """
        super().__init__()
        dim_out = dim_out or dim
        self.num_heads = num_heads
        self.key_dim = key_dim or dim // num_heads
        self.value_dim = value_dim or dim // num_heads
        self.query_strides = to_2tuple(query_strides)
        self.kv_stride = kv_stride
        self.has_query_strides = any([s > 1 for s in self.query_strides])
        self.scale = self.key_dim ** -0.5
        self.fused_attn = use_fused_attn()
        self.drop = attn_drop

        # Query path: optional avg-pool downsample -> norm -> 1x1 proj to heads * key_dim.
        self.query = nn.Sequential()
        if self.has_query_strides:
            # FIXME dilation
            if padding == 'same':
                self.query.add_module('down_pool', create_pool2d(
                    'avg',
                    kernel_size=self.query_strides,
                    padding='same',
                ))
            else:
                # no pad if not 'same' as kern=stride=even
                self.query.add_module('down_pool', nn.AvgPool2d(kernel_size=query_strides))
            self.query.add_module('norm', norm_layer(dim))
        self.query.add_module('proj', create_conv2d(
            dim,
            self.num_heads * self.key_dim,
            kernel_size=1,
            bias=use_bias,
        ))

        # Key path: optional depthwise strided downsample -> norm -> 1x1 proj to a
        # single shared key head (multi-query: one K/V head for all Q heads).
        self.key = nn.Sequential()
        if kv_stride > 1:
            self.key.add_module('down_conv', create_conv2d(
                dim,
                dim,
                kernel_size=dw_kernel_size,
                stride=kv_stride,
                dilation=dilation,
                padding=padding,
                depthwise=True,
            ))
            self.key.add_module('norm', norm_layer(dim))
        self.key.add_module('proj', create_conv2d(
            dim,
            self.key_dim,
            kernel_size=1,
            # NOTE(review): padding is passed to this 1x1 proj but not to the
            # query/value projs — looks unintentional, though harmless for k=1; confirm.
            padding=padding,
            bias=use_bias,
        ))

        # Value path: mirrors the key path, projecting to a single shared value head.
        self.value = nn.Sequential()
        if kv_stride > 1:
            self.value.add_module('down_conv', create_conv2d(
                dim,
                dim,
                kernel_size=dw_kernel_size,
                stride=kv_stride,
                dilation=dilation,
                padding=padding,
                depthwise=True,
            ))
            self.value.add_module('norm', norm_layer(dim))
        self.value.add_module('proj', create_conv2d(
            dim,
            self.value_dim,
            kernel_size=1,
            bias=use_bias,
        ))

        self.attn_drop = nn.Dropout(attn_drop)

        # Output path: optional bilinear upsample (undoes query stride) -> 1x1 proj -> dropout.
        self.output = nn.Sequential()
        if self.has_query_strides:
            self.output.add_module('upsample', nn.Upsample(scale_factor=self.query_strides, mode='bilinear', align_corners=False))
        self.output.add_module('proj', create_conv2d(
            self.value_dim * self.num_heads,
            dim_out,
            kernel_size=1,
            bias=use_bias,
        ))
        self.output.add_module('drop', nn.Dropout(proj_drop))

        # Eager toggle: einsum attention path vs matmul / fused SDPA path.
        self.einsum = False

    def init_weights(self):
        # using xavier appeared to improve stability for mobilenetv4 hybrid w/ this layer
        nn.init.xavier_uniform_(self.query.proj.weight)
        nn.init.xavier_uniform_(self.key.proj.weight)
        nn.init.xavier_uniform_(self.value.proj.weight)
        if self.kv_stride > 1:
            nn.init.xavier_uniform_(self.key.down_conv.weight)
            nn.init.xavier_uniform_(self.value.down_conv.weight)
        nn.init.xavier_uniform_(self.output.proj.weight)

    def _reshape_input(self, t: torch.Tensor):
        """Reshapes a tensor to three dimensions, keeping the batch and channels.

        (B, C, H, W) -> (B, H*W, C), with an extra singleton head axis
        ((B, 1, H*W, C)) on the non-einsum path so the shared K/V head
        broadcasts against the per-head query in scaled_dot_product_attention.
        """
        s = t.shape
        t = t.reshape(s[0], s[1], -1).transpose(1, 2)
        if self.einsum:
            return t
        else:
            return t.unsqueeze(1).contiguous()

    def _reshape_projected_query(self, t: torch.Tensor, num_heads: int, key_dim: int):
        """Reshapes projected query: [b, n, n, h x k] -> [b, n x n, h, k]."""
        s = t.shape
        t = t.reshape(s[0], num_heads, key_dim, -1)
        if self.einsum:
            # einsum path wants (b, l, h, k)
            return t.permute(0, 3, 1, 2).contiguous()
        else:
            # matmul/SDPA path wants (b, h, l, k)
            return t.transpose(-1, -2).contiguous()

    def _reshape_output(self, t: torch.Tensor, num_heads: int, h_px: int, w_px: int):
        """Reshape output: [b, n x n x h, k] -> [b, n, n, h*k] back to NCHW."""
        s = t.shape
        feat_dim = s[-1] * num_heads
        if not self.einsum:
            # (b, h, l, k) -> (b, l, h, k) so heads fold into the channel dim
            t = t.transpose(1, 2)
        return t.reshape(s[0], h_px, w_px, feat_dim).permute(0, 3, 1, 2).contiguous()

    def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
        """Run layer computation."""
        # NOTE(review): `s` is bound but unused below.
        B, C, H, W = s = x.shape

        q = self.query(x)
        # desired q shape: [b, h, k, n x n] - [b, l, h, k]
        q = self._reshape_projected_query(q, self.num_heads, self.key_dim)

        k = self.key(x)
        # output shape of k: [b, k, p], p = m x m
        k = self._reshape_input(k)

        v = self.value(x)
        # output shape of v: [ b, p, k], p = m x m
        v = self._reshape_input(v)

        # desired q shape: [b, n x n, h, k]
        # desired k shape: [b, m x m, k]
        # desired logits shape: [b, n x n, h, m x m]
        if self.einsum:
            attn = torch.einsum('blhk,bpk->blhp', q, k) * self.scale
            if attn_mask is not None:
                # NOTE: assumes mask is float and in correct shape
                attn = attn + attn_mask
            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)
            o = torch.einsum('blhp,bpk->blhk', attn, v)
        else:
            if self.fused_attn:
                # k/v carry a singleton head dim and broadcast against q's heads
                o = F.scaled_dot_product_attention(
                    q, k, v,
                    attn_mask=attn_mask,
                    dropout_p=self.attn_drop.p if self.training else 0.
                )
            else:
                q = q * self.scale
                attn = q @ k.transpose(-1, -2)
                if attn_mask is not None:
                    # NOTE: assumes mask is float and in correct shape
                    attn = attn + attn_mask
                attn = attn.softmax(dim=-1)
                attn = self.attn_drop(attn)
                o = attn @ v

        # reshape o into [b, hk, n, n,]
        o = self._reshape_output(o, self.num_heads, H // self.query_strides[0], W // self.query_strides[1])
        x = self.output(o)
        return x
292
+
293
+
294
class Attention2d(nn.Module):
    fused_attn: torch.jit.Final[bool]

    """ multi-head attention for 2D NCHW tensors"""
    def __init__(
            self,
            dim: int,
            dim_out: Optional[int] = None,
            num_heads: int = 32,
            bias: bool = True,
            expand_first: bool = False,
            head_first: bool = False,
            attn_drop: float = 0.,
            proj_drop: float = 0.
    ):
        """
        Args:
            dim: Input channel dim.
            dim_out: Output channel dim, defaults to dim.
            num_heads: Number of attention heads.
            bias: Add bias to the qkv and output 1x1 convs.
            expand_first: Run attention at dim_out width (True) or dim width (False).
            head_first: qkv channel layout is (head, 3*dim_head) instead of (3, head, dim_head).
            attn_drop: Dropout on the attention matrix.
            proj_drop: Dropout on the output projection.
        """
        super().__init__()
        dim_out = dim_out or dim
        # width the attention op itself runs at
        dim_attn = dim_out if expand_first else dim
        self.num_heads = num_heads
        self.dim_head = dim_attn // num_heads
        self.head_first = head_first
        self.fused_attn = use_fused_attn()

        self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
        B, C, H, W = x.shape

        if self.head_first:
            # (B, heads, 3 * dim_head, N) -> chunk along the per-head channel axis
            q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2)
        else:
            # (B, 3, heads, dim_head, N) -> unbind q/k/v
            q, k, v = self.qkv(x).reshape(B, 3, self.num_heads, self.dim_head, -1).unbind(1)

        if self.fused_attn:
            # SDPA expects (B, heads, N, dim_head); transpose from channels-last-of-heads layout
            x = torch.nn.functional.scaled_dot_product_attention(
                q.transpose(-1, -2).contiguous(),
                k.transpose(-1, -2).contiguous(),
                v.transpose(-1, -2).contiguous(),
                attn_mask=attn_mask,
                dropout_p=self.attn_drop.p if self.training else 0.,
            ).transpose(-1, -2).reshape(B, -1, H, W)
        else:
            q = q.transpose(-1, -2)
            v = v.transpose(-1, -2)
            # k is left as (B, heads, dim_head, N), so q @ k yields (B, heads, N, N);
            # after the transpose above q.size(-1) == dim_head, giving the usual 1/sqrt(d) scale
            attn = q @ k * q.size(-1) ** -0.5
            if attn_mask is not None:
                # NOTE: assumes mask is float and in correct shape
                attn = attn + attn_mask
            attn = attn.softmax(dim=-1)
            attn = self.attn_drop(attn)
            x = (attn @ v).transpose(-1, -2).reshape(B, -1, H, W)

        x = self.proj(x)
        x = self.proj_drop(x)
        return x
janus/lib/python3.10/site-packages/timm/layers/attention_pool.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+
7
+ from .config import use_fused_attn
8
+ from .mlp import Mlp
9
+ from .weight_init import trunc_normal_tf_
10
+
11
+
12
class AttentionPoolLatent(nn.Module):
    """ Attention pooling w/ latent query

    A small learned set of latent tokens cross-attends over the input sequence,
    followed by a residual MLP; the result is optionally reduced to a single
    vector via token/avg pooling.
    """
    fused_attn: torch.jit.Final[bool]

    def __init__(
            self,
            in_features: int,
            out_features: Optional[int] = None,
            embed_dim: Optional[int] = None,
            num_heads: int = 8,
            feat_size: Optional[int] = None,
            mlp_ratio: float = 4.0,
            qkv_bias: bool = True,
            qk_norm: bool = False,
            latent_len: int = 1,
            latent_dim: Optional[int] = None,
            pos_embed: str = '',
            pool_type: str = 'token',
            norm_layer: Optional[nn.Module] = None,
            drop: float = 0.0,
    ):
        """
        Args:
            in_features: Input sequence channel dim.
            out_features: Output dim; defaults to in_features.
            embed_dim: Attention width; defaults to in_features.
            num_heads: Number of attention heads.
            feat_size: Sequence length, required when pos_embed == 'abs'.
            mlp_ratio: Hidden ratio of the post-attention MLP.
            qkv_bias: Add bias to q and kv projections.
            qk_norm: Apply per-head norm to q and k.
            latent_len: Number of learned latent query tokens.
            latent_dim: Latent dim (init scale only); defaults to embed_dim.
            pos_embed: '' for none, 'abs' for learned absolute position embedding.
            pool_type: 'token' (first latent), 'avg', or '' for no pooling.
            norm_layer: Norm layer factory for qk-norm and the pre-MLP norm.
            drop: Projection dropout rate.
        """
        super().__init__()
        embed_dim = embed_dim or in_features
        out_features = out_features or in_features
        assert embed_dim % num_heads == 0
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads
        self.feat_size = feat_size
        self.scale = self.head_dim ** -0.5
        self.pool = pool_type
        self.fused_attn = use_fused_attn()

        if pos_embed == 'abs':
            assert feat_size is not None
            self.pos_embed = nn.Parameter(torch.zeros(feat_size, in_features))
        else:
            self.pos_embed = None

        self.latent_dim = latent_dim or embed_dim
        self.latent_len = latent_len
        self.latent = nn.Parameter(torch.zeros(1, self.latent_len, embed_dim))

        # NOTE(review): q/kv/proj are all sized from embed_dim while norm uses
        # out_features — these only agree when out_features == embed_dim (the
        # default when neither is passed); confirm intended for other configs.
        self.q = nn.Linear(embed_dim, embed_dim, bias=qkv_bias)
        self.kv = nn.Linear(embed_dim, embed_dim * 2, bias=qkv_bias)
        self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
        self.proj = nn.Linear(embed_dim, embed_dim)
        self.proj_drop = nn.Dropout(drop)

        self.norm = norm_layer(out_features) if norm_layer is not None else nn.Identity()
        self.mlp = Mlp(embed_dim, int(embed_dim * mlp_ratio))

        self.init_weights()

    def init_weights(self):
        if self.pos_embed is not None:
            trunc_normal_tf_(self.pos_embed, std=self.pos_embed.shape[1] ** -0.5)
        trunc_normal_tf_(self.latent, std=self.latent_dim ** -0.5)

    def forward(self, x):
        B, N, C = x.shape

        if self.pos_embed is not None:
            # FIXME interpolate
            x = x + self.pos_embed.unsqueeze(0).to(x.dtype)

        # latent tokens are the (broadcast) queries; input sequence provides k/v
        q_latent = self.latent.expand(B, -1, -1)
        q = self.q(q_latent).reshape(B, self.latent_len, self.num_heads, self.head_dim).transpose(1, 2)

        kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        k, v = kv.unbind(0)

        q, k = self.q_norm(q), self.k_norm(k)

        if self.fused_attn:
            x = F.scaled_dot_product_attention(q, k, v)
        else:
            q = q * self.scale
            attn = q @ k.transpose(-2, -1)
            attn = attn.softmax(dim=-1)
            x = attn @ v
        x = x.transpose(1, 2).reshape(B, self.latent_len, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        # residual MLP over the attended latents
        x = x + self.mlp(self.norm(x))

        # optional pool if latent seq_len > 1 and pooled output is desired
        if self.pool == 'token':
            x = x[:, 0]
        elif self.pool == 'avg':
            x = x.mean(1)
        return x
janus/lib/python3.10/site-packages/timm/layers/bottleneck_attn.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Bottleneck Self Attention (Bottleneck Transformers)
2
+
3
+ Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605
4
+
5
+ @misc{2101.11605,
6
+ Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani},
7
+ Title = {Bottleneck Transformers for Visual Recognition},
8
+ Year = {2021},
9
+ }
10
+
11
+ Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
12
+
13
+ This impl is a WIP but given that it is based on the ref gist likely not too far off.
14
+
15
+ Hacked together by / Copyright 2021 Ross Wightman
16
+ """
17
+ from typing import List
18
+
19
+ import torch
20
+ import torch.nn as nn
21
+ import torch.nn.functional as F
22
+
23
+ from .helpers import to_2tuple, make_divisible
24
+ from .weight_init import trunc_normal_
25
+ from .trace_utils import _assert
26
+
27
+
28
def rel_logits_1d(q, rel_k, permute_mask: List[int]):
    """ Compute relative logits along one dimension

    As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
    Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925

    Args:
        q: (batch, height, width, dim) query tensor. NOTE: the docstring used to
            claim a separate heads dim, but the code unpacks exactly 4 dims —
            callers (PosEmbedRel) fold heads into batch and swap H/W themselves
            when computing logits along the other axis.
        rel_k: (2 * width - 1, dim) learned relative position embeddings.
        permute_mask: permutation applied to the (batch, height, height, width, width)
            result, e.g. (0, 1, 3, 2, 4) -> (batch, height, width, height, width).

    Returns:
        Relative logits tensor of rank 5, axes ordered per permute_mask.
    """
    B, H, W, dim = q.shape
    # logits for every (position, relative offset) pair: -> (B * H, W, 2 * W - 1)
    x = (q @ rel_k.transpose(-1, -2))
    x = x.reshape(-1, W, 2 * W - 1)

    # pad to shift from relative to absolute indexing
    x_pad = F.pad(x, [0, 1]).flatten(1)
    x_pad = F.pad(x_pad, [0, W - 1])

    # reshape and slice out the padded elements; row i now holds logits of
    # position i against absolute positions 0..W-1
    x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1)
    x = x_pad[:, :W, W - 1:]

    # reshape and tile: the 1d logits are shared across the orthogonal axis
    x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1)
    return x.permute(permute_mask)
54
+
55
+
56
class PosEmbedRel(nn.Module):
    """ Relative Position Embedding
    As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
    Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925
    """
    def __init__(self, feat_size, dim_head, scale):
        """
        Args:
            feat_size: Feature map size, int or (H, W).
            dim_head: Per-head query/key dim.
            scale: Init std for the embeddings (typically dim_head ** -0.5).
        """
        super().__init__()
        self.height, self.width = to_2tuple(feat_size)
        self.dim_head = dim_head
        # one learned embedding per possible relative offset along each axis
        self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale)
        self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale)

    def forward(self, q):
        # q: (B, H*W, dim_head); B typically already folds in attention heads
        B, HW, _ = q.shape

        # relative logits in width dimension.
        q = q.reshape(B, self.height, self.width, -1)
        rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))

        # relative logits in height dimension.
        # transpose H/W so rel_logits_1d runs along H; the permute mask restores
        # the (B, H, W, H, W) axis order so both terms align
        q = q.transpose(1, 2)
        rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))

        rel_logits = rel_logits_h + rel_logits_w
        # flatten both spatial pairs -> (B, H*W, H*W) bias for the attn matrix
        rel_logits = rel_logits.reshape(B, HW, HW)
        return rel_logits
82
+
83
+
84
class BottleneckAttn(nn.Module):
    """ Bottleneck Attention
    Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605

    The internal dimensions of the attention module are controlled by the interaction of several arguments.
    * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
    * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
    * the query and key (qk) dimensions are determined by
        * num_heads * dim_head if dim_head is not None
        * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None
    * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used

    Args:
        dim (int): input dimension to the module
        dim_out (int): output dimension of the module, same as dim if not set
        stride (int): output stride of the module, avg pool used if stride == 2 (default: 1).
        num_heads (int): parallel attention heads (default: 4)
        dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
        qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
        qkv_bias (bool): add bias to q, k, and v projections
        scale_pos_embed (bool): scale the position embedding as well as Q @ K
    """
    def __init__(
            self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None,
            qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False):
        super().__init__()
        assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required'
        dim_out = dim_out or dim
        assert dim_out % num_heads == 0
        self.num_heads = num_heads
        self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
        self.dim_head_v = dim_out // self.num_heads
        self.dim_out_qk = num_heads * self.dim_head_qk
        self.dim_out_v = num_heads * self.dim_head_v
        self.scale = self.dim_head_qk ** -0.5
        self.scale_pos_embed = scale_pos_embed

        # single 1x1 conv produces q, k and v, split along channels in forward()
        self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias)

        # NOTE I'm only supporting relative pos embedding for now
        self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale)

        # stride == 2 is realized by avg-pooling the attention output
        self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity()

        self.reset_parameters()

    def reset_parameters(self):
        trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5)  # fan-in
        trunc_normal_(self.pos_embed.height_rel, std=self.scale)
        trunc_normal_(self.pos_embed.width_rel, std=self.scale)

    def forward(self, x):
        B, C, H, W = x.shape
        # input spatial size must match the size the rel pos embedding was built for
        _assert(H == self.pos_embed.height, '')
        _assert(W == self.pos_embed.width, '')

        x = self.qkv(x)  # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W

        # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v
        # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted.
        q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1)
        # heads are folded into the batch dim: (B * heads, H*W, dim_head)
        q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2)
        k = k.reshape(B * self.num_heads, self.dim_head_qk, -1)  # no transpose, for q @ k
        v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2)

        if self.scale_pos_embed:
            attn = (q @ k + self.pos_embed(q)) * self.scale  # B * num_heads, H * W, H * W
        else:
            attn = (q @ k) * self.scale + self.pos_embed(q)
        attn = attn.softmax(dim=-1)

        out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W)  # B, dim_out, H, W
        out = self.pool(out)
        return out
janus/lib/python3.10/site-packages/timm/layers/classifier.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Classifier head and layer factory
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ from collections import OrderedDict
6
+ from functools import partial
7
+ from typing import Optional, Union, Callable
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ from torch.nn import functional as F
12
+
13
+ from .adaptive_avgmax_pool import SelectAdaptivePool2d
14
+ from .create_act import get_act_layer
15
+ from .create_norm import get_norm_layer
16
+
17
+
18
def _create_pool(
        num_features: int,
        num_classes: int,
        pool_type: str = 'avg',
        use_conv: bool = False,
        input_fmt: Optional[str] = None,
):
    """Create the global pooling module and report the pooled feature count.

    Args:
        num_features: Channel count entering the pool.
        num_classes: Unused here; kept for signature symmetry with create_classifier.
        pool_type: Pool type, '' disables pooling (pass-through).
        use_conv: Classifier will be a 1x1 conv, so don't flatten after pooling.
        input_fmt: Input tensor layout passed through to SelectAdaptivePool2d.

    Returns:
        Tuple of (global_pool module, num_pooled_features).
    """
    flatten_in_pool = not use_conv  # flatten when we use a Linear layer after pooling
    if not pool_type:
        flatten_in_pool = False  # disable flattening if pooling is pass-through (no pooling)
    global_pool = SelectAdaptivePool2d(
        pool_type=pool_type,
        flatten=flatten_in_pool,
        input_fmt=input_fmt,
    )
    # feat_mult() > 1 for concat-style pools (e.g. 'catavgmax')
    num_pooled_features = num_features * global_pool.feat_mult()
    return global_pool, num_pooled_features
35
+
36
+
37
+ def _create_fc(num_features, num_classes, use_conv=False):
38
+ if num_classes <= 0:
39
+ fc = nn.Identity() # pass-through (no classifier)
40
+ elif use_conv:
41
+ fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
42
+ else:
43
+ fc = nn.Linear(num_features, num_classes, bias=True)
44
+ return fc
45
+
46
+
47
def create_classifier(
        num_features: int,
        num_classes: int,
        pool_type: str = 'avg',
        use_conv: bool = False,
        input_fmt: str = 'NCHW',
        drop_rate: Optional[float] = None,
):
    """Build the global-pool and classifier modules for a model head.

    Returns:
        (global_pool, fc) when drop_rate is None, otherwise
        (global_pool, dropout, fc) with a Dropout module inserted.
    """
    pool, pooled_features = _create_pool(
        num_features,
        num_classes,
        pool_type,
        use_conv=use_conv,
        input_fmt=input_fmt,
    )
    classifier = _create_fc(
        pooled_features,
        num_classes,
        use_conv=use_conv,
    )
    if drop_rate is None:
        return pool, classifier
    return pool, nn.Dropout(drop_rate), classifier
71
+
72
+
73
class ClassifierHead(nn.Module):
    """Classifier head w/ configurable global pooling and dropout."""

    def __init__(
            self,
            in_features: int,
            num_classes: int,
            pool_type: str = 'avg',
            drop_rate: float = 0.,
            use_conv: bool = False,
            input_fmt: str = 'NCHW',
    ):
        """
        Args:
            in_features: The number of input features.
            num_classes: The number of classes for the final classifier layer (output).
            pool_type: Global pooling type, pooling disabled if empty string ('').
            drop_rate: Pre-classifier dropout rate.
            use_conv: Use a 1x1 conv instead of a linear layer for the classifier.
            input_fmt: Input tensor layout ('NCHW' or 'NHWC').
        """
        super(ClassifierHead, self).__init__()
        self.in_features = in_features
        self.use_conv = use_conv
        self.input_fmt = input_fmt

        global_pool, fc = create_classifier(
            in_features,
            num_classes,
            pool_type,
            use_conv=use_conv,
            input_fmt=input_fmt,
        )
        self.global_pool = global_pool
        self.drop = nn.Dropout(drop_rate)
        self.fc = fc
        # conv classifier leaves NCHW output; flatten to (B, C) only when pooled
        self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity()

    def reset(self, num_classes: int, pool_type: Optional[str] = None):
        """Rebuild the classifier (and optionally the pool) for a new num_classes."""
        if pool_type is not None and pool_type != self.global_pool.pool_type:
            # pool type changed -> rebuild pool + fc together so feat mult stays consistent
            self.global_pool, self.fc = create_classifier(
                self.in_features,
                num_classes,
                pool_type=pool_type,
                use_conv=self.use_conv,
                input_fmt=self.input_fmt,
            )
            self.flatten = nn.Flatten(1) if self.use_conv and pool_type else nn.Identity()
        else:
            # pool unchanged -> only the final fc needs replacing
            num_pooled_features = self.in_features * self.global_pool.feat_mult()
            self.fc = _create_fc(
                num_pooled_features,
                num_classes,
                use_conv=self.use_conv,
            )

    def forward(self, x, pre_logits: bool = False):
        # pre_logits=True returns pooled features without applying the classifier
        x = self.global_pool(x)
        x = self.drop(x)
        if pre_logits:
            return self.flatten(x)
        x = self.fc(x)
        return self.flatten(x)
134
+
135
+
136
class NormMlpClassifierHead(nn.Module):
    """ A Pool -> Norm -> Mlp Classifier Head for '2D' NCHW tensors
    """
    def __init__(
            self,
            in_features: int,
            num_classes: int,
            hidden_size: Optional[int] = None,
            pool_type: str = 'avg',
            drop_rate: float = 0.,
            norm_layer: Union[str, Callable] = 'layernorm2d',
            act_layer: Union[str, Callable] = 'tanh',
    ):
        """
        Args:
            in_features: The number of input features.
            num_classes: The number of classes for the final classifier layer (output).
            hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None.
            pool_type: Global pooling type, pooling disabled if empty string ('').
            drop_rate: Pre-classifier dropout rate.
            norm_layer: Normalization layer type.
            act_layer: MLP activation layer type (only used if hidden_size is not None).
        """
        super().__init__()
        self.in_features = in_features
        self.hidden_size = hidden_size
        self.num_features = in_features
        # no pooling -> features stay NCHW, so use 1x1 convs instead of linears
        self.use_conv = not pool_type
        norm_layer = get_norm_layer(norm_layer)
        act_layer = get_act_layer(act_layer)
        linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear

        self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
        self.norm = norm_layer(in_features)
        self.flatten = nn.Flatten(1) if pool_type else nn.Identity()
        if hidden_size:
            # pre-logits MLP: fc -> act, output width becomes hidden_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', linear_layer(in_features, hidden_size)),
                ('act', act_layer()),
            ]))
            self.num_features = hidden_size
        else:
            self.pre_logits = nn.Identity()
        self.drop = nn.Dropout(drop_rate)
        self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

    def reset(self, num_classes: int, pool_type: Optional[str] = None):
        """Rebuild the classifier for a new num_classes, optionally changing the pool.

        If the pool change flips between pooled (linear) and unpooled (1x1 conv)
        modes, the pre-logits fc weights are copied into a layer of the other type.
        """
        if pool_type is not None:
            self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
            self.flatten = nn.Flatten(1) if pool_type else nn.Identity()
        self.use_conv = self.global_pool.is_identity()
        linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear
        if self.hidden_size:
            # convert pre-logits fc if its layer type no longer matches use_conv
            if ((isinstance(self.pre_logits.fc, nn.Conv2d) and not self.use_conv) or
                    (isinstance(self.pre_logits.fc, nn.Linear) and self.use_conv)):
                with torch.no_grad():
                    new_fc = linear_layer(self.in_features, self.hidden_size)
                    # conv (O, I, 1, 1) and linear (O, I) weights hold the same values
                    new_fc.weight.copy_(self.pre_logits.fc.weight.reshape(new_fc.weight.shape))
                    new_fc.bias.copy_(self.pre_logits.fc.bias)
                    self.pre_logits.fc = new_fc
        self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

    def forward(self, x, pre_logits: bool = False):
        x = self.global_pool(x)
        x = self.norm(x)
        x = self.flatten(x)
        x = self.pre_logits(x)
        x = self.drop(x)
        if pre_logits:
            return x
        x = self.fc(x)
        return x
208
+
209
+
210
class ClNormMlpClassifierHead(nn.Module):
    """ A Pool -> Norm -> Mlp Classifier Head for n-D NxxC tensors
    """
    def __init__(
            self,
            in_features: int,
            num_classes: int,
            hidden_size: Optional[int] = None,
            pool_type: str = 'avg',
            drop_rate: float = 0.,
            norm_layer: Union[str, Callable] = 'layernorm',
            act_layer: Union[str, Callable] = 'gelu',
            input_fmt: str = 'NHWC',
    ):
        """
        Args:
            in_features: The number of input features.
            num_classes: The number of classes for the final classifier layer (output).
            hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None.
            pool_type: Global pooling type, pooling disabled if empty string ('').
            drop_rate: Pre-classifier dropout rate.
            norm_layer: Normalization layer type.
            act_layer: MLP activation layer type (only used if hidden_size is not None).
            input_fmt: Channels-last layout, 'NHWC' (pool over dims 1,2) or 'NLC' (dim 1).
        """
        super().__init__()
        self.in_features = in_features
        self.hidden_size = hidden_size
        self.num_features = in_features
        assert pool_type in ('', 'avg', 'max', 'avgmax')
        self.pool_type = pool_type
        assert input_fmt in ('NHWC', 'NLC')
        # which axes to reduce for the channels-last layout
        self.pool_dim = 1 if input_fmt == 'NLC' else (1, 2)
        norm_layer = get_norm_layer(norm_layer)
        act_layer = get_act_layer(act_layer)

        self.norm = norm_layer(in_features)
        if hidden_size:
            # pre-logits MLP: fc -> act, output width becomes hidden_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(in_features, hidden_size)),
                ('act', act_layer()),
            ]))
            self.num_features = hidden_size
        else:
            self.pre_logits = nn.Identity()
        self.drop = nn.Dropout(drop_rate)
        self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

    def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False):
        """Swap the classifier for a new num_classes; reset_other also clears norm/pre-logits."""
        if pool_type is not None:
            self.pool_type = pool_type
        if reset_other:
            self.pre_logits = nn.Identity()
            self.norm = nn.Identity()
        self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

    def _global_pool(self, x):
        # reduce the spatial/sequence axes per pool_type; '' is a pass-through
        if self.pool_type:
            if self.pool_type == 'avg':
                x = x.mean(dim=self.pool_dim)
            elif self.pool_type == 'max':
                x = x.amax(dim=self.pool_dim)
            elif self.pool_type == 'avgmax':
                x = 0.5 * (x.amax(dim=self.pool_dim) + x.mean(dim=self.pool_dim))
        return x

    def forward(self, x, pre_logits: bool = False):
        x = self._global_pool(x)
        x = self.norm(x)
        x = self.pre_logits(x)
        x = self.drop(x)
        if pre_logits:
            return x
        x = self.fc(x)
        return x
janus/lib/python3.10/site-packages/timm/layers/conv2d_same.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Conv2d w/ Same Padding
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from typing import Tuple, Optional
9
+
10
+ from .config import is_exportable, is_scriptable
11
+ from .padding import pad_same, pad_same_arg, get_padding_value
12
+
13
+
14
+ _USE_EXPORT_CONV = False
15
+
16
+
17
def conv2d_same(
        x,
        weight: torch.Tensor,
        bias: Optional[torch.Tensor] = None,
        stride: Tuple[int, int] = (1, 1),
        padding: Tuple[int, int] = (0, 0),
        dilation: Tuple[int, int] = (1, 1),
        groups: int = 1,
):
    """Apply F.conv2d with TF-style dynamic 'SAME' padding.

    The `padding` argument is accepted for signature compatibility but ignored;
    padding is computed from the input size each call and the conv runs with (0, 0).
    """
    padded = pad_same(x, weight.shape[-2:], stride, dilation)
    return F.conv2d(padded, weight, bias, stride, (0, 0), dilation, groups)
28
+
29
+
30
class Conv2dSame(nn.Conv2d):
    """nn.Conv2d wrapper applying Tensorflow-like 'SAME' padding dynamically per input."""

    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=True,
    ):
        # `padding` is ignored; padding is computed per-input in forward (base conv uses 0)
        super().__init__(
            in_channels, out_channels, kernel_size,
            stride, 0, dilation, groups, bias,
        )

    def forward(self, x):
        return conv2d_same(
            x, self.weight, self.bias,
            self.stride, self.padding, self.dilation, self.groups,
        )
55
+
56
+
57
class Conv2dSameExport(nn.Conv2d):
    """ONNX-export friendly Tensorflow-like 'SAME' convolution wrapper for 2D convolutions.

    Caches an nn.ZeroPad2d module keyed on the input spatial size so the padding is
    traceable for export.

    NOTE: This does not currently work with torch.jit.script
    """

    # pylint: disable=unused-argument
    def __init__(
            self,
            in_channels,
            out_channels,
            kernel_size,
            stride=1,
            padding=0,
            dilation=1,
            groups=1,
            bias=True,
    ):
        # `padding` is ignored; the pad module built in forward supplies it instead
        super().__init__(
            in_channels, out_channels, kernel_size,
            stride, 0, dilation, groups, bias,
        )
        self.pad = None  # lazily built ZeroPad2d for the last-seen input size
        self.pad_input_size = (0, 0)

    def forward(self, x):
        input_size = x.size()[-2:]
        # FIX: the original built self.pad only once (when None) and reused it for all
        # later inputs, producing stale padding when the spatial size changed even
        # though pad_input_size was recorded for exactly that comparison. Rebuild the
        # pad module whenever the input's spatial size differs from the cached one.
        if self.pad is None or input_size != self.pad_input_size:
            pad_arg = pad_same_arg(input_size, self.weight.size()[-2:], self.stride, self.dilation)
            self.pad = nn.ZeroPad2d(pad_arg)
            self.pad_input_size = input_size

        x = self.pad(x)
        return F.conv2d(
            x, self.weight, self.bias,
            self.stride, self.padding, self.dilation, self.groups,
        )
94
+
95
+
96
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
    """Create a Conv2d, choosing static padding vs a dynamic 'SAME' variant.

    The `padding` kwarg ('', 'same', 'valid', or an int) is resolved by
    get_padding_value; dynamic padding selects Conv2dSame (or the export-friendly
    variant when exporting with _USE_EXPORT_CONV enabled).
    """
    padding = kwargs.pop('padding', '')
    kwargs.setdefault('bias', False)
    padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
    if not is_dynamic:
        return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
    if _USE_EXPORT_CONV and is_exportable():
        # older PyTorch ver needed this to export same padding reasonably
        assert not is_scriptable()  # Conv2DSameExport does not work with jit
        return Conv2dSameExport(in_chs, out_chs, kernel_size, **kwargs)
    return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
109
+
110
+
janus/lib/python3.10/site-packages/timm/layers/create_conv2d.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Create Conv2d Factory Method
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+
6
+ from .mixed_conv2d import MixedConv2d
7
+ from .cond_conv2d import CondConv2d
8
+ from .conv2d_same import create_conv2d_pad
9
+
10
+
11
def create_conv2d(in_channels, out_channels, kernel_size, **kwargs):
    """Select a 2d convolution implementation based on arguments.

    Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
    Used extensively by EfficientNet, MobileNetv3 and related networks.
    """
    if isinstance(kernel_size, list):
        # A list of kernel sizes selects MixedConv2d (one size per channel split).
        # Only lists trigger this path; ints/tuples/other iterables fall through to
        # a normal conv where they specify (h, w).
        assert 'num_experts' not in kwargs  # MixNet + CondConv combo not supported currently
        if 'groups' in kwargs:
            groups = kwargs.pop('groups')
            if groups == in_channels:
                kwargs['depthwise'] = True
            else:
                assert groups == 1
        return MixedConv2d(in_channels, out_channels, kernel_size, **kwargs)

    depthwise = kwargs.pop('depthwise', False)
    # for DW, out_channels must be a multiple of in_channels (out_channels % groups == 0)
    groups = in_channels if depthwise else kwargs.pop('groups', 1)
    if kwargs.get('num_experts', 0) > 0:
        return CondConv2d(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
    return create_conv2d_pad(in_channels, out_channels, kernel_size, groups=groups, **kwargs)
janus/lib/python3.10/site-packages/timm/layers/create_norm.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Norm Layer Factory
2
+
3
+ Create norm modules by string (to mirror create_act and creat_norm-act fns)
4
+
5
+ Copyright 2022 Ross Wightman
6
+ """
7
+ import functools
8
+ import types
9
+ from typing import Type
10
+
11
+ import torch.nn as nn
12
+
13
+ from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm, RmsNorm2d, SimpleNorm, SimpleNorm2d
14
+ from torchvision.ops.misc import FrozenBatchNorm2d
15
+
16
# Lowercase name -> norm layer class; keys match the normalized lookups done by
# get_norm_layer (underscores stripped, lowercased).
_NORM_MAP = {
    'batchnorm': nn.BatchNorm2d,
    'batchnorm2d': nn.BatchNorm2d,
    'batchnorm1d': nn.BatchNorm1d,
    'groupnorm': GroupNorm,
    'groupnorm1': GroupNorm1,
    'layernorm': LayerNorm,
    'layernorm2d': LayerNorm2d,
    'rmsnorm': RmsNorm,
    'rmsnorm2d': RmsNorm2d,
    'simplenorm': SimpleNorm,
    'simplenorm2d': SimpleNorm2d,
    'frozenbatchnorm2d': FrozenBatchNorm2d,
}
# Set of all known norm layer types (for isinstance-style membership checks).
_NORM_TYPES = set(_NORM_MAP.values())
31
+
32
+
33
def create_norm_layer(layer_name, num_features, **kwargs):
    """Instantiate a norm layer (resolved by name/type/partial) over `num_features` channels."""
    return get_norm_layer(layer_name)(num_features, **kwargs)
37
+
38
+
39
def get_norm_layer(norm_layer):
    """Resolve a norm layer spec to a callable constructor (or None).

    Accepts None, a name string (looked up in _NORM_MAP after stripping
    underscores and lowercasing), a class/function, or a functools.partial.
    A partial is unbound and re-bound so its keyword args survive the lookup.

    Returns:
        The resolved constructor, or None for None / empty-string input.

    Raises:
        KeyError: if a non-empty name string is not present in _NORM_MAP.
    """
    if norm_layer is None:
        return None
    assert isinstance(norm_layer, (type, str, types.FunctionType, functools.partial))
    norm_kwargs = {}

    # unbind partial fn, so args can be rebound later
    if isinstance(norm_layer, functools.partial):
        norm_kwargs.update(norm_layer.keywords)
        norm_layer = norm_layer.func

    if isinstance(norm_layer, str):
        if not norm_layer:
            return None
        layer_name = norm_layer.replace('_', '').lower()
        norm_layer = _NORM_MAP[layer_name]
    # FIX: removed the original's no-op `else: norm_layer = norm_layer` branch;
    # non-string callables simply pass through unchanged.

    if norm_kwargs:
        norm_layer = functools.partial(norm_layer, **norm_kwargs)  # bind/rebind args
    return norm_layer
janus/lib/python3.10/site-packages/timm/layers/drop.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ DropBlock, DropPath
2
+
3
+ PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
4
+
5
+ Papers:
6
+ DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
7
+
8
+ Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
9
+
10
+ Code:
11
+ DropBlock impl inspired by two Tensorflow impl that I liked:
12
+ - https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
13
+ - https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
14
+
15
+ Hacked together by / Copyright 2020 Ross Wightman
16
+ """
17
+ import torch
18
+ import torch.nn as nn
19
+ import torch.nn.functional as F
20
+
21
+ from .grid import ndgrid
22
+
23
+
24
def drop_block_2d(
        x,
        drop_prob: float = 0.1,
        block_size: int = 7,
        gamma_scale: float = 1.0,
        with_noise: bool = False,
        inplace: bool = False,
        batchwise: bool = False
):
    """DropBlock with edge-valid seed mask. See https://arxiv.org/pdf/1810.12890.pdf

    Includes an experimental gaussian noise option; tested on a few training runs
    with success, but needs further validation / runtime optimization.
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # seed_drop_rate, the gamma parameter of the paper
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
        (W - block_size + 1) * (H - block_size + 1))

    # Force block seeds to lie fully inside the feature map.
    w_i, h_i = ndgrid(torch.arange(W, device=x.device), torch.arange(H, device=x.device))
    valid_w = (w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)
    valid_h = (h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2)
    valid_block = torch.reshape(valid_w & valid_h, (1, 1, H, W)).to(dtype=x.dtype)

    if batchwise:
        # one mask for the whole batch, quite a bit faster
        uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
    else:
        uniform_noise = torch.rand_like(x)
    block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
    # max-pool of the negated mask expands each dropped seed into a full block
    block_mask = -F.max_pool2d(
        -block_mask,
        kernel_size=clipped_block_size,
        stride=1,
        padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
        if inplace:
            x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
        else:
            x = x * block_mask + normal_noise * (1 - block_mask)
        return x
    # rescale kept activations so the expected activation sum is preserved
    normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
    if inplace:
        x.mul_(block_mask * normalize_scale)
        return x
    return x * block_mask * normalize_scale
76
+
77
+
78
def drop_block_fast_2d(
        x: torch.Tensor,
        drop_prob: float = 0.1,
        block_size: int = 7,
        gamma_scale: float = 1.0,
        with_noise: bool = False,
        inplace: bool = False,
):
    """DropBlock (fast variant). See https://arxiv.org/pdf/1810.12890.pdf

    Simplified w/ experimental gaussian-noise option; does not constrain seed
    blocks to be fully inside the feature map (no valid-block mask at edges).
    """
    B, C, H, W = x.shape
    total_size = W * H
    clipped_block_size = min(block_size, min(W, H))
    # seed drop rate (the paper's gamma)
    gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
        (W - block_size + 1) * (H - block_size + 1))

    # Bernoulli seeds expanded to full blocks via max pooling
    block_mask = torch.empty_like(x).bernoulli_(gamma)
    block_mask = F.max_pool2d(
        block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)

    if with_noise:
        normal_noise = torch.empty_like(x).normal_()
        if inplace:
            x.mul_(1. - block_mask).add_(normal_noise * block_mask)
        else:
            x = x * (1. - block_mask) + normal_noise * block_mask
        return x

    keep_mask = 1 - block_mask
    # rescale kept activations to preserve the expected activation sum
    normalize_scale = (keep_mask.numel() / keep_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype)
    if inplace:
        x.mul_(keep_mask * normalize_scale)
        return x
    return x * keep_mask * normalize_scale
115
+
116
+
117
class DropBlock2d(nn.Module):
    """DropBlock regularization module. See https://arxiv.org/pdf/1810.12890.pdf"""

    def __init__(
            self,
            drop_prob: float = 0.1,
            block_size: int = 7,
            gamma_scale: float = 1.0,
            with_noise: bool = False,
            inplace: bool = False,
            batchwise: bool = False,
            fast: bool = True):
        super().__init__()
        self.drop_prob = drop_prob
        self.gamma_scale = gamma_scale
        self.block_size = block_size
        self.with_noise = with_noise
        self.inplace = inplace
        self.batchwise = batchwise
        self.fast = fast  # FIXME finish comparisons of fast vs not

    def forward(self, x):
        # no-op unless training with a non-zero drop probability
        if not self.training or not self.drop_prob:
            return x
        if self.fast:
            return drop_block_fast_2d(
                x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace)
        return drop_block_2d(
            x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
148
+
149
+
150
def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
    """Drop paths (Stochastic Depth) per sample, for use in the main path of residual blocks.

    Same as the DropConnect impl for EfficientNet-style networks, though 'Drop Connect'
    is a different form of dropout in a separate paper; see discussion in
    https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 — hence the
    'drop path' naming with a drop (not survival) probability.
    """
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    # per-sample broadcastable mask shape: (B, 1, 1, ...) matching x.ndim
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    keep_mask = x.new_empty(mask_shape).bernoulli_(keep_prob)
    if keep_prob > 0.0 and scale_by_keep:
        keep_mask.div_(keep_prob)
    return x * keep_mask
168
+
169
+
170
class DropPath(nn.Module):
    """Per-sample stochastic depth module; thin wrapper around drop_path()."""

    def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
        super().__init__()
        self.drop_prob = drop_prob
        self.scale_by_keep = scale_by_keep

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)

    def extra_repr(self):
        return f'drop_prob={round(self.drop_prob,3):0.3f}'
janus/lib/python3.10/site-packages/timm/layers/eca.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ECA module from ECAnet
3
+
4
+ paper: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks
5
+ https://arxiv.org/abs/1910.03151
6
+
7
+ Original ECA model borrowed from https://github.com/BangguWu/ECANet
8
+
9
+ Modified circular ECA implementation and adaption for use in timm package
10
+ by Chris Ha https://github.com/VRandme
11
+
12
+ Original License:
13
+
14
+ MIT License
15
+
16
+ Copyright (c) 2019 BangguWu, Qilong Wang
17
+
18
+ Permission is hereby granted, free of charge, to any person obtaining a copy
19
+ of this software and associated documentation files (the "Software"), to deal
20
+ in the Software without restriction, including without limitation the rights
21
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
22
+ copies of the Software, and to permit persons to whom the Software is
23
+ furnished to do so, subject to the following conditions:
24
+
25
+ The above copyright notice and this permission notice shall be included in all
26
+ copies or substantial portions of the Software.
27
+
28
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
33
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34
+ SOFTWARE.
35
+ """
36
+ import math
37
+ from torch import nn
38
+ import torch.nn.functional as F
39
+
40
+
41
+ from .create_act import create_act_layer
42
+ from .helpers import make_divisible
43
+
44
+
45
class EcaModule(nn.Module):
    """Efficient Channel Attention module (ECA-Net, https://arxiv.org/abs/1910.03151).

    Args:
        channels: channel count of the input feature map; when given, the 1d conv
            kernel size is derived adaptively from it via gamma/beta (see paper),
            otherwise `kernel_size` is used directly.
        kernel_size: explicit/fallback kernel size for the channel conv (default 3)
        gamma, beta: parameters of the adaptive kernel-size mapping
            (https://arxiv.org/pdf/1910.03151.pdf)
        act_layer: optional non-linearity between two convs, enables conv bias;
            part of the experimental 'mlp' mode
        gate_layer: gating non-linearity to use
        rd_ratio, rd_channels, rd_divisor, use_mlp: options for the timm 'mlp'
            experiment (not in the paper)
    """
    def __init__(
            self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid',
            rd_ratio=1/8, rd_channels=None, rd_divisor=8, use_mlp=False):
        super().__init__()
        if channels is not None:
            # adaptive kernel size: odd and at least 3
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        assert kernel_size % 2 == 1
        padding = (kernel_size - 1) // 2
        if use_mlp:
            # NOTE 'mlp' mode is a timm experiment, not in paper
            assert channels is not None
            if rd_channels is None:
                rd_channels = make_divisible(channels * rd_ratio, divisor=rd_divisor)
            act_layer = act_layer or nn.ReLU
            self.conv = nn.Conv1d(1, rd_channels, kernel_size=1, padding=0, bias=True)
            self.act = create_act_layer(act_layer)
            self.conv2 = nn.Conv1d(rd_channels, 1, kernel_size=kernel_size, padding=padding, bias=True)
        else:
            self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
            self.act = None
            self.conv2 = None
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        # squeeze spatial dims -> (B, 1, C) for the 1d conv across channels
        attn = x.mean((2, 3)).view(x.shape[0], 1, -1)
        attn = self.conv(attn)
        if self.conv2 is not None:
            attn = self.conv2(self.act(attn))
        attn = self.gate(attn).view(x.shape[0], -1, 1, 1)
        return x * attn.expand_as(x)
92
+
93
+
94
+ EfficientChannelAttn = EcaModule # alias
95
+
96
+
97
class CecaModule(nn.Module):
    """Circular Efficient Channel Attention module.

    ECA variant whose 1d channel conv uses circular rather than zero padding.
    Channels have no inherent ordering or locality, so there is no reason the
    channels at either "edge" should not be adapted to each other circularly;
    this increases connectivity and possibly accuracy/robustness without a
    significant cost in parameters, throughput, or latency.

    Args:
        channels: channel count; when given, the kernel size is derived
            adaptively from it via gamma/beta (see paper
            https://arxiv.org/pdf/1910.03151.pdf), otherwise `kernel_size` is used.
        kernel_size: explicit/fallback kernel size (default 3)
        gamma, beta: parameters of the adaptive kernel-size mapping
        act_layer: optional non-linearity after conv; enables conv bias (experiment)
        gate_layer: gating non-linearity to use
    """

    def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1, act_layer=None, gate_layer='sigmoid'):
        super().__init__()
        if channels is not None:
            t = int(abs(math.log(channels, 2) + beta) / gamma)
            kernel_size = max(t if t % 2 else t + 1, 3)
        has_act = act_layer is not None
        assert kernel_size % 2 == 1

        # PyTorch circular padding mode is buggy as of pytorch 1.4
        # (see https://github.com/pytorch/pytorch/pull/17240), so circular
        # padding is applied manually via F.pad in forward.
        self.padding = (kernel_size - 1) // 2
        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=0, bias=has_act)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        attn = x.mean((2, 3)).view(x.shape[0], 1, -1)
        # manual circular padding; F.pad does not appear to be affected by the bug
        attn = F.pad(attn, (self.padding, self.padding), mode='circular')
        attn = self.conv(attn)
        attn = self.gate(attn).view(x.shape[0], -1, 1, 1)
        return x * attn.expand_as(x)
+
144
+
145
+ CircularEfficientChannelAttn = CecaModule
janus/lib/python3.10/site-packages/timm/layers/evo_norm.py ADDED
@@ -0,0 +1,352 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ EvoNorm in PyTorch
2
+
3
+ Based on `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967
4
+ @inproceedings{NEURIPS2020,
5
+ author = {Liu, Hanxiao and Brock, Andy and Simonyan, Karen and Le, Quoc},
6
+ booktitle = {Advances in Neural Information Processing Systems},
7
+ editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
8
+ pages = {13539--13550},
9
+ publisher = {Curran Associates, Inc.},
10
+ title = {Evolving Normalization-Activation Layers},
11
+ url = {https://proceedings.neurips.cc/paper/2020/file/9d4c03631b8b0c85ae08bf05eda37d0f-Paper.pdf},
12
+ volume = {33},
13
+ year = {2020}
14
+ }
15
+
16
+ An attempt at getting decent performing EvoNorms running in PyTorch.
17
+ While faster than other PyTorch impl, still quite a ways off the built-in BatchNorm
18
+ in terms of memory usage and throughput on GPUs.
19
+
20
+ I'm testing these modules on TPU w/ PyTorch XLA. Promising start but
21
+ currently working around some issues with builtin torch/tensor.var/std. Unlike
22
+ GPU, similar train speeds for EvoNormS variants and BatchNorm.
23
+
24
+ Hacked together by / Copyright 2020 Ross Wightman
25
+ """
26
+ from typing import Sequence, Union
27
+
28
+ import torch
29
+ import torch.nn as nn
30
+ import torch.nn.functional as F
31
+
32
+ from .create_act import create_act_layer
33
+ from .trace_utils import _assert
34
+
35
+
36
def instance_std(x, eps: float = 1e-5):
    """Per-instance (H, W) std of a NCHW tensor, broadcast back to the input shape."""
    var = x.float().var(dim=(2, 3), unbiased=False, keepdim=True)
    return var.add(eps).sqrt().to(x.dtype).expand(x.shape)
39
+
40
+
41
def instance_std_tpu(x, eps: float = 1e-5):
    """Instance std via manual variance (workaround for torch.var issues on TPU/XLA)."""
    return manual_var(x, dim=(2, 3)).add(eps).sqrt().expand(x.shape)
44
+ # instance_std = instance_std_tpu
45
+
46
+
47
def instance_rms(x, eps: float = 1e-5):
    """Per-instance (H, W) RMS of a NCHW tensor, broadcast back to the input shape."""
    mean_sq = x.float().square().mean(dim=(2, 3), keepdim=True)
    return mean_sq.add(eps).sqrt().to(x.dtype).expand(x.shape)
50
+
51
+
52
def manual_var(x, dim: Union[int, Sequence[int]], diff_sqm: bool = False):
    """Population variance over `dim`, computed without torch.var (TPU/XLA workaround)."""
    mean = x.mean(dim=dim, keepdim=True)
    if diff_sqm:
        # E[x^2] - E[x]^2: faster on TPU but can be less numerically stable
        return ((x * x).mean(dim=dim, keepdim=True) - mean * mean).clamp(0)
    return ((x - mean) * (x - mean)).mean(dim=dim, keepdim=True)
60
+
61
+
62
def group_std(x, groups: int = 32, eps: float = 1e-5, flatten: bool = False):
    """Per-group std over a NCHW tensor, broadcast back to the input shape."""
    B, C, H, W = x.shape
    x_dtype = x.dtype
    _assert(C % groups == 0, '')
    if flatten:
        grouped = x.reshape(B, groups, -1)  # FIXME simpler shape causing TPU / XLA issues
        std = grouped.float().var(dim=2, unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype)
    else:
        grouped = x.reshape(B, groups, C // groups, H, W)
        std = grouped.float().var(dim=(2, 3, 4), unbiased=False, keepdim=True).add(eps).sqrt().to(x_dtype)
    return std.expand(grouped.shape).reshape(B, C, H, W)
73
+
74
+
75
def group_std_tpu(x, groups: int = 32, eps: float = 1e-5, diff_sqm: bool = False, flatten: bool = False):
    """Per-group std via manual variance.

    Workaround for stability / odd behaviour of .var and .std under PyTorch XLA
    on TPU; the manual variance impls produce much better results there.
    """
    B, C, H, W = x.shape
    _assert(C % groups == 0, '')
    if flatten:
        grouped = x.reshape(B, groups, -1)  # FIXME simpler shape causing TPU / XLA issues
        var = manual_var(grouped, dim=-1, diff_sqm=diff_sqm)
    else:
        grouped = x.reshape(B, groups, C // groups, H, W)
        var = manual_var(grouped, dim=(2, 3, 4), diff_sqm=diff_sqm)
    return var.add(eps).sqrt().expand(grouped.shape).reshape(B, C, H, W)
87
+ #group_std = group_std_tpu # FIXME TPU temporary
88
+
89
+
90
def group_rms(x, groups: int = 32, eps: float = 1e-5):
    """Per-group RMS over a NCHW tensor, broadcast back to the input shape."""
    B, C, H, W = x.shape
    _assert(C % groups == 0, '')
    x_dtype = x.dtype
    grouped = x.reshape(B, groups, C // groups, H, W)
    rms = grouped.float().square().mean(dim=(2, 3, 4), keepdim=True).add(eps).sqrt_().to(x_dtype)
    return rms.expand(grouped.shape).reshape(B, C, H, W)
97
+
98
+
99
class EvoNorm2dB0(nn.Module):
    """EvoNorm-B0: batch-variance normalization with v-gated non-linearity.

    See `Evolving Normalization-Activation Layers` - https://arxiv.org/abs/2004.02967
    """

    def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-3, **_):
        super().__init__()
        self.apply_act = apply_act  # apply activation (non-linearity)
        self.momentum = momentum
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)
        if self.v is not None:
            nn.init.ones_(self.v)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        chan_view = (1, -1, 1, 1)
        if self.v is not None:
            if self.training:
                var = x.float().var(dim=(0, 2, 3), unbiased=False)
                n = x.numel() / x.shape[1]
                # EMA update of running variance w/ unbiased correction factor n/(n-1)
                self.running_var.copy_(
                    self.running_var * (1 - self.momentum) +
                    var.detach() * self.momentum * (n / (n - 1)))
            else:
                var = self.running_var
            left = var.add(self.eps).sqrt_().to(x_dtype).view(chan_view).expand_as(x)
            v = self.v.to(x_dtype).view(chan_view)
            right = x * v + instance_std(x, self.eps)
            x = x / left.max(right)
        return x * self.weight.to(x_dtype).view(chan_view) + self.bias.to(x_dtype).view(chan_view)
136
+
137
+
138
class EvoNorm2dB1(nn.Module):
    """EvoNorm-B1 (batch variant). See https://arxiv.org/abs/2004.02967"""

    def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_):
        super().__init__()
        self.apply_act = apply_act  # apply activation (non-linearity)
        self.momentum = momentum
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        chan_view = (1, -1, 1, 1)
        if self.apply_act:
            if self.training:
                var = x.float().var(dim=(0, 2, 3), unbiased=False)
                n = x.numel() / x.shape[1]
                # EMA update of running variance w/ unbiased correction n/(n-1)
                self.running_var.copy_(
                    self.running_var * (1 - self.momentum) +
                    var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1)))
            else:
                var = self.running_var
            left = var.to(x_dtype).view(chan_view).add(self.eps).sqrt_()
            right = (x + 1) * instance_rms(x, self.eps)
            x = x / left.max(right)
        return x * self.weight.view(chan_view).to(x_dtype) + self.bias.view(chan_view).to(x_dtype)
171
+
172
+
173
class EvoNorm2dB2(nn.Module):
    """EvoNorm-B2 (batch variant). See https://arxiv.org/abs/2004.02967"""

    def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, **_):
        super().__init__()
        self.apply_act = apply_act  # apply activation (non-linearity)
        self.momentum = momentum
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        chan_view = (1, -1, 1, 1)
        if self.apply_act:
            if self.training:
                var = x.float().var(dim=(0, 2, 3), unbiased=False)
                n = x.numel() / x.shape[1]
                # EMA update of running variance w/ unbiased correction n/(n-1)
                self.running_var.copy_(
                    self.running_var * (1 - self.momentum) +
                    var.detach().to(self.running_var.dtype) * self.momentum * (n / (n - 1)))
            else:
                var = self.running_var
            left = var.to(x_dtype).view(chan_view).add(self.eps).sqrt_()
            right = instance_rms(x, self.eps) - x
            x = x / left.max(right)
        return x * self.weight.view(chan_view).to(x_dtype) + self.bias.view(chan_view).to(x_dtype)
206
+
207
+
208
class EvoNorm2dS0(nn.Module):
    """EvoNorm-S0: group-std normalization with v-gated sigmoid (sample variant).

    See https://arxiv.org/abs/2004.02967
    """

    def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-5, **_):
        super().__init__()
        self.apply_act = apply_act  # apply activation (non-linearity)
        if group_size:
            # derive group count from a requested per-group channel size
            assert num_features % group_size == 0
            self.groups = num_features // group_size
        else:
            self.groups = groups
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.v = nn.Parameter(torch.ones(num_features)) if apply_act else None
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)
        if self.v is not None:
            nn.init.ones_(self.v)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        chan_view = (1, -1, 1, 1)
        if self.v is not None:
            v = self.v.view(chan_view).to(x_dtype)
            x = x * (x * v).sigmoid() / group_std(x, self.groups, self.eps)
        return x * self.weight.view(chan_view).to(x_dtype) + self.bias.view(chan_view).to(x_dtype)
237
+
238
+
239
class EvoNorm2dS0a(EvoNorm2dS0):
    """EvoNorm-S0a: S0 variant that always divides by group std (gating optional)."""

    def __init__(self, num_features, groups=32, group_size=None, apply_act=True, eps=1e-3, **_):
        super().__init__(
            num_features, groups=groups, group_size=group_size, apply_act=apply_act, eps=eps)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        chan_view = (1, -1, 1, 1)
        denom = group_std(x, self.groups, self.eps)
        if self.v is not None:
            v = self.v.view(chan_view).to(x_dtype)
            x = x * (x * v).sigmoid()
        x = x / denom
        return x * self.weight.view(chan_view).to(x_dtype) + self.bias.view(chan_view).to(x_dtype)
254
+
255
+
256
class EvoNorm2dS1(nn.Module):
    """EvoNorm-S1: act(x) / group_std(x) (sample variant). See https://arxiv.org/abs/2004.02967"""

    def __init__(
            self, num_features, groups=32, group_size=None,
            apply_act=True, act_layer=None, eps=1e-5, **_):
        super().__init__()
        act_layer = act_layer or nn.SiLU
        self.apply_act = apply_act  # apply activation (non-linearity)
        if act_layer is not None and apply_act:
            self.act = create_act_layer(act_layer)
        else:
            self.act = nn.Identity()
        if group_size:
            # derive group count from a requested per-group channel size
            assert num_features % group_size == 0
            self.groups = num_features // group_size
        else:
            self.groups = groups
        self.eps = eps
        self.pre_act_norm = False
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        chan_view = (1, -1, 1, 1)
        if self.apply_act:
            x = self.act(x) / group_std(x, self.groups, self.eps)
        return x * self.weight.view(chan_view).to(x_dtype) + self.bias.view(chan_view).to(x_dtype)
289
+
290
+
291
class EvoNorm2dS1a(EvoNorm2dS1):
    """EvoNorm-S1a: S1 variant that always applies act/group-std (ignores apply_act in forward)."""

    def __init__(
            self, num_features, groups=32, group_size=None,
            apply_act=True, act_layer=None, eps=1e-3, **_):
        super().__init__(
            num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        chan_view = (1, -1, 1, 1)
        x = self.act(x) / group_std(x, self.groups, self.eps)
        return x * self.weight.view(chan_view).to(x_dtype) + self.bias.view(chan_view).to(x_dtype)
304
+
305
+
306
class EvoNorm2dS2(nn.Module):
    """EvoNorm-S2: act(x) / group_rms(x) (sample variant). See https://arxiv.org/abs/2004.02967"""

    def __init__(
            self, num_features, groups=32, group_size=None,
            apply_act=True, act_layer=None, eps=1e-5, **_):
        super().__init__()
        act_layer = act_layer or nn.SiLU
        self.apply_act = apply_act  # apply activation (non-linearity)
        if act_layer is not None and apply_act:
            self.act = create_act_layer(act_layer)
        else:
            self.act = nn.Identity()
        if group_size:
            # derive group count from a requested per-group channel size
            assert num_features % group_size == 0
            self.groups = num_features // group_size
        else:
            self.groups = groups
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        chan_view = (1, -1, 1, 1)
        if self.apply_act:
            x = self.act(x) / group_rms(x, self.groups, self.eps)
        return x * self.weight.view(chan_view).to(x_dtype) + self.bias.view(chan_view).to(x_dtype)
338
+
339
+
340
class EvoNorm2dS2a(EvoNorm2dS2):
    """EvoNorm-S2a: S2 variant that always applies act/group-rms (ignores apply_act in forward)."""

    def __init__(
            self, num_features, groups=32, group_size=None,
            apply_act=True, act_layer=None, eps=1e-3, **_):
        super().__init__(
            num_features, groups=groups, group_size=group_size, apply_act=apply_act, act_layer=act_layer, eps=eps)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        x_dtype = x.dtype
        chan_view = (1, -1, 1, 1)
        x = self.act(x) / group_rms(x, self.groups, self.eps)
        return x * self.weight.view(chan_view).to(x_dtype) + self.bias.view(chan_view).to(x_dtype)
janus/lib/python3.10/site-packages/timm/layers/filter_response_norm.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Filter Response Norm in PyTorch
2
+
3
+ Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737
4
+
5
+ Hacked together by / Copyright 2021 Ross Wightman
6
+ """
7
+ import torch
8
+ import torch.nn as nn
9
+
10
+ from .create_act import create_act_layer
11
+ from .trace_utils import _assert
12
+
13
+
14
def inv_instance_rms(x, eps: float = 1e-5):
    """ Reciprocal RMS per instance and channel over spatial dims (2, 3) of an NCHW tensor.

    Computed in float32 for stability, cast back to the input dtype, and expanded to x's shape.
    """
    mean_sq = x.square().float().mean(dim=(2, 3), keepdim=True)
    inv_rms = torch.rsqrt(mean_sq + eps).to(dtype=x.dtype)
    return inv_rms.expand(x.shape)
17
+
18
+
19
class FilterResponseNormTlu2d(nn.Module):
    """ Filter Response Norm with TLU activation (https://arxiv.org/abs/1911.09737).

    Normalizes NCHW input by per-instance RMS, applies per-channel affine, then a
    learned-threshold max (TLU) when `apply_act` is set.
    """
    def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_):
        super(FilterResponseNormTlu2d, self).__init__()
        self.apply_act = apply_act  # apply activation (non-linearity)
        self.rms = rms
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        # learned TLU threshold; omitted entirely when no activation requested
        self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None
        self.reset_parameters()

    def reset_parameters(self):
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)
        if self.tau is not None:
            nn.init.zeros_(self.tau)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        orig_dtype = x.dtype
        affine_shape = (1, -1, 1, 1)
        x = x * inv_instance_rms(x, self.eps)
        scale = self.weight.view(affine_shape).to(dtype=orig_dtype)
        shift = self.bias.view(affine_shape).to(dtype=orig_dtype)
        x = x * scale + shift
        if self.tau is None:
            return x
        # TLU: elementwise max against the learned per-channel threshold
        return torch.maximum(x, self.tau.reshape(affine_shape).to(dtype=orig_dtype))
43
+
44
+
45
class FilterResponseNormAct2d(nn.Module):
    """ Filter Response Norm followed by a configurable activation (default ReLU).

    Same normalization as the TLU variant but with a standard activation layer instead
    of a learned threshold.
    """
    def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_):
        super(FilterResponseNormAct2d, self).__init__()
        if apply_act and act_layer is not None:
            self.act = create_act_layer(act_layer, inplace=inplace)
        else:
            self.act = nn.Identity()
        self.rms = rms
        self.eps = eps
        self.weight = nn.Parameter(torch.ones(num_features))
        self.bias = nn.Parameter(torch.zeros(num_features))
        self.reset_parameters()

    def reset_parameters(self):
        # identity affine transform
        nn.init.ones_(self.weight)
        nn.init.zeros_(self.bias)

    def forward(self, x):
        _assert(x.dim() == 4, 'expected 4D input')
        orig_dtype = x.dtype
        affine_shape = (1, -1, 1, 1)
        x = x * inv_instance_rms(x, self.eps)
        scale = self.weight.view(affine_shape).to(dtype=orig_dtype)
        shift = self.bias.view(affine_shape).to(dtype=orig_dtype)
        return self.act(x * scale + shift)
janus/lib/python3.10/site-packages/timm/layers/format.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+ from typing import Union
3
+
4
+ import torch
5
+
6
+
7
class Format(str, Enum):
    """ Canonical tensor memory-layout identifiers used by timm layers.

    A str-subclass Enum, so members compare equal to their plain string names
    (e.g. Format.NHWC == 'NHWC').
    """
    NCHW = 'NCHW'  # 2D spatial, channels-first
    NHWC = 'NHWC'  # 2D spatial, channels-last
    NCL = 'NCL'    # 1D / sequence, channels-first
    NLC = 'NLC'    # 1D / sequence, channels-last


# Accepted anywhere a layout is specified: a Format member or its string name.
FormatT = Union[str, Format]
15
+
16
+
17
def get_spatial_dim(fmt: FormatT):
    """ Return the tuple of spatial dim indices for the given tensor format. """
    fmt = Format(fmt)
    if fmt is Format.NLC:
        return (1,)
    if fmt is Format.NCL:
        return (2,)
    if fmt is Format.NHWC:
        return (1, 2)
    return (2, 3)  # NCHW
28
+
29
+
30
def get_channel_dim(fmt: FormatT):
    """ Return the channel dim index for the given tensor format. """
    fmt = Format(fmt)
    if fmt is Format.NHWC:
        return 3
    if fmt is Format.NLC:
        return 2
    return 1  # NCHW / NCL
39
+
40
+
41
def nchw_to(x: torch.Tensor, fmt: Format):
    """ Convert an NCHW tensor to the requested format (no-op for NCHW). """
    if fmt == Format.NHWC:
        return x.permute(0, 2, 3, 1)
    if fmt == Format.NLC:
        # flatten spatial dims to a sequence, then move channels last
        return x.flatten(2).transpose(1, 2)
    if fmt == Format.NCL:
        return x.flatten(2)
    return x
49
+
50
+
51
def nhwc_to(x: torch.Tensor, fmt: Format):
    """ Convert an NHWC tensor to the requested format (no-op for NHWC). """
    if fmt == Format.NCHW:
        return x.permute(0, 3, 1, 2)
    if fmt == Format.NLC:
        # merge H and W into one sequence dim, channels stay last
        return x.flatten(1, 2)
    if fmt == Format.NCL:
        return x.flatten(1, 2).transpose(1, 2)
    return x
janus/lib/python3.10/site-packages/timm/layers/global_context.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Global Context Attention Block
2
+
3
+ Paper: `GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond`
4
+ - https://arxiv.org/abs/1904.11492
5
+
6
+ Official code consulted as reference: https://github.com/xvjiarui/GCNet
7
+
8
+ Hacked together by / Copyright 2021 Ross Wightman
9
+ """
10
+ from torch import nn as nn
11
+ import torch.nn.functional as F
12
+
13
+ from .create_act import create_act_layer, get_act_layer
14
+ from .helpers import make_divisible
15
+ from .mlp import ConvMlp
16
+ from .norm import LayerNorm2d
17
+
18
+
19
class GlobalContext(nn.Module):
    """ Global Context attention block (GCNet, https://arxiv.org/abs/1904.11492).

    Pools a per-image context vector — either attention-weighted (1x1 conv + softmax
    over spatial positions) or plain global average — then fuses it back into the
    input via a sigmoid-gated scale branch and/or an additive branch, each a ConvMlp.
    """

    def __init__(self, channels, use_attn=True, fuse_add=False, fuse_scale=True, init_last_zero=False,
                 rd_ratio=1./8, rd_channels=None, rd_divisor=1, act_layer=nn.ReLU, gate_layer='sigmoid'):
        super(GlobalContext, self).__init__()
        act_layer = get_act_layer(act_layer)

        # 1x1 conv producing a single-channel spatial attention map for context pooling
        self.conv_attn = nn.Conv2d(channels, 1, kernel_size=1, bias=True) if use_attn else None

        if rd_channels is None:
            # reduced (bottleneck) width for the fusion MLPs
            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
        if fuse_add:
            self.mlp_add = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
        else:
            self.mlp_add = None
        if fuse_scale:
            self.mlp_scale = ConvMlp(channels, rd_channels, act_layer=act_layer, norm_layer=LayerNorm2d)
        else:
            self.mlp_scale = None

        self.gate = create_act_layer(gate_layer)
        # NOTE(review): init_last_zero is stored but only mlp_add's fc2 is zeroed in
        # reset_parameters regardless of the flag — confirm intended behavior upstream.
        self.init_last_zero = init_last_zero
        self.reset_parameters()

    def reset_parameters(self):
        if self.conv_attn is not None:
            nn.init.kaiming_normal_(self.conv_attn.weight, mode='fan_in', nonlinearity='relu')
        if self.mlp_add is not None:
            # zero-init last projection so the additive branch starts as a no-op
            nn.init.zeros_(self.mlp_add.fc2.weight)

    def forward(self, x):
        B, C, H, W = x.shape

        if self.conv_attn is not None:
            # attention-weighted pooling of all spatial positions into a (B, C, 1, 1) context
            attn = self.conv_attn(x).reshape(B, 1, H * W)  # (B, 1, H * W)
            attn = F.softmax(attn, dim=-1).unsqueeze(3)  # (B, 1, H * W, 1)
            context = x.reshape(B, C, H * W).unsqueeze(1) @ attn
            context = context.view(B, C, 1, 1)
        else:
            # fall back to plain global average pooling
            context = x.mean(dim=(2, 3), keepdim=True)

        if self.mlp_scale is not None:
            # multiplicative fusion: channel-wise gate derived from the context
            mlp_x = self.mlp_scale(context)
            x = x * self.gate(mlp_x)
        if self.mlp_add is not None:
            # additive fusion: broadcast context term added to every position
            mlp_x = self.mlp_add(context)
            x = x + mlp_x

        return x
janus/lib/python3.10/site-packages/timm/layers/grid.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Tuple
2
+
3
+ import torch
4
+
5
+
6
def ndgrid(*tensors) -> Tuple[torch.Tensor, ...]:
    """generate N-D grid in dimension order.

    The ndgrid function is like meshgrid except that the order of the first two input arguments are switched.

    That is, the statement
    [X1,X2,X3] = ndgrid(x1,x2,x3)

    produces the same result as

    [X2,X1,X3] = meshgrid(x2,x1,x3)

    This naming is based on MATLAB, the purpose is to avoid confusion due to torch's change to make
    torch.meshgrid behaviour move from matching ndgrid ('ij') indexing to numpy meshgrid defaults of ('xy').

    """
    try:
        # PyTorch >= 1.10: request matrix ('ij') indexing explicitly
        return torch.meshgrid(*tensors, indexing='ij')
    except TypeError:
        # old PyTorch < 1.10 will follow this path as it does not have indexing arg,
        # the old behaviour of meshgrid was 'ij'
        return torch.meshgrid(*tensors)
28
+
29
+
30
def meshgrid(*tensors) -> Tuple[torch.Tensor, ...]:
    """generate N-D grid in spatial dim order.

    The meshgrid function is similar to ndgrid except that the order of the
    first two input and output arguments is switched.

    That is, the statement

    [X,Y,Z] = meshgrid(x,y,z)
    produces the same result as

    [Y,X,Z] = ndgrid(y,x,z)
    Because of this, meshgrid is better suited to problems in two- or three-dimensional Cartesian space,
    while ndgrid is better suited to multidimensional problems that aren't spatially based.
    """

    # NOTE: this will throw in PyTorch < 1.10 as meshgrid did not support indexing arg or have
    # capability of generating grid in xy order before then.
    return torch.meshgrid(*tensors, indexing='xy')
49
+
janus/lib/python3.10/site-packages/timm/layers/grn.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Global Response Normalization Module
2
+
3
+ Based on the GRN layer presented in
4
+ `ConvNeXt-V2 - Co-designing and Scaling ConvNets with Masked Autoencoders` - https://arxiv.org/abs/2301.00808
5
+
6
+ This implementation
7
+ * works for both NCHW and NHWC tensor layouts
8
+ * uses affine param names matching existing torch norm layers
9
+ * slightly improves eager mode performance via fused addcmul
10
+
11
+ Hacked together by / Copyright 2023 Ross Wightman
12
+ """
13
+
14
+ import torch
15
+ from torch import nn as nn
16
+
17
+
18
class GlobalResponseNorm(nn.Module):
    """ Global Response Normalization (ConvNeXt-V2, https://arxiv.org/abs/2301.00808).

    Works for NHWC (channels_last=True) and NCHW layouts. Affine params are
    zero-initialized, so the layer starts out as an identity mapping.
    """
    def __init__(self, dim, eps=1e-6, channels_last=True):
        super().__init__()
        self.eps = eps
        if channels_last:
            # NHWC: spatial dims at positions 1, 2; channels last
            self.spatial_dim = (1, 2)
            self.channel_dim = -1
            self.wb_shape = (1, 1, 1, -1)
        else:
            # NCHW layout
            self.spatial_dim = (2, 3)
            self.channel_dim = 1
            self.wb_shape = (1, -1, 1, 1)

        self.weight = nn.Parameter(torch.zeros(dim))
        self.bias = nn.Parameter(torch.zeros(dim))

    def forward(self, x):
        # per-channel spatial L2 norm, normalized by its mean across channels
        global_feat = x.norm(p=2, dim=self.spatial_dim, keepdim=True)
        norm_feat = global_feat / (global_feat.mean(dim=self.channel_dim, keepdim=True) + self.eps)
        scale = self.weight.view(self.wb_shape)
        shift = self.bias.view(self.wb_shape)
        # residual plus fused shift + scale * (x * norm) via addcmul
        return x + torch.addcmul(shift, scale, x * norm_feat)
janus/lib/python3.10/site-packages/timm/layers/halo_attn.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Halo Self Attention
2
+
3
+ Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
4
+ - https://arxiv.org/abs/2103.12731
5
+
6
+ @misc{2103.12731,
7
+ Author = {Ashish Vaswani and Prajit Ramachandran and Aravind Srinivas and Niki Parmar and Blake Hechtman and
8
+ Jonathon Shlens},
9
+ Title = {Scaling Local Self-Attention for Parameter Efficient Visual Backbones},
10
+ Year = {2021},
11
+ }
12
+
13
+ Status:
14
+ This impl is a WIP, there is no official ref impl and some details in paper weren't clear to me.
15
+ The attention mechanism works but it's slow as implemented.
16
+
17
+ Hacked together by / Copyright 2021 Ross Wightman
18
+ """
19
+ from typing import List
20
+
21
+ import torch
22
+ from torch import nn
23
+ import torch.nn.functional as F
24
+
25
+ from .helpers import make_divisible
26
+ from .weight_init import trunc_normal_
27
+ from .trace_utils import _assert
28
+
29
+
30
def rel_logits_1d(q, rel_k, permute_mask: List[int]):
    """ Compute relative logits along one dimension

    As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
    Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925

    Args:
        q: (batch, height, width, dim)
        rel_k: (2 * window - 1, dim)
        permute_mask: permute output dim according to this
    """
    B, H, W, dim = q.shape
    rel_size = rel_k.shape[0]
    # rel_k covers offsets -(win_size-1)..(win_size-1), hence (rel_size + 1) // 2
    win_size = (rel_size + 1) // 2

    # dot q against every relative-position embedding: (B*H, W, rel_size)
    x = (q @ rel_k.transpose(-1, -2))
    x = x.reshape(-1, W, rel_size)

    # pad to shift from relative to absolute indexing (the standard "skew" trick)
    x_pad = F.pad(x, [0, 1]).flatten(1)
    x_pad = F.pad(x_pad, [0, rel_size - W])

    # reshape and slice out the padded elements
    x_pad = x_pad.reshape(-1, W + 1, rel_size)
    x = x_pad[:, :W, win_size - 1:]

    # reshape and tile so every query row sees logits for all win_size key rows
    x = x.reshape(B, H, 1, W, win_size).expand(-1, -1, win_size, -1, -1)
    return x.permute(permute_mask)
59
+
60
+
61
class PosEmbedRel(nn.Module):
    """ Relative Position Embedding
    As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2
    Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925

    Produces 2D relative-position logits by summing independent height and width
    1D relative logits computed via rel_logits_1d.
    """
    def __init__(self, block_size, win_size, dim_head, scale):
        """
        Args:
            block_size (int): block size
            win_size (int): neighbourhood window size
            dim_head (int): attention head dim
            scale (float): scale factor (for init)
        """
        super().__init__()
        self.block_size = block_size
        self.dim_head = dim_head
        # one learnable embedding per relative offset in each axis: 2 * win_size - 1 entries
        self.height_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)
        self.width_rel = nn.Parameter(torch.randn(win_size * 2 - 1, dim_head) * scale)

    def forward(self, q):
        # q: (B, BB, HW, dim_head) where HW == block_size ** 2
        B, BB, HW, _ = q.shape

        # relative logits in width dimension.
        q = q.reshape(-1, self.block_size, self.block_size, self.dim_head)
        rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4))

        # relative logits in height dimension (transpose spatial axes, reuse the 1D routine).
        q = q.transpose(1, 2)
        rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2))

        rel_logits = rel_logits_h + rel_logits_w
        rel_logits = rel_logits.reshape(B, BB, HW, -1)
        return rel_logits
95
+
96
+
97
class HaloAttn(nn.Module):
    """ Halo Attention

    Paper: `Scaling Local Self-Attention for Parameter Efficient Visual Backbones`
        - https://arxiv.org/abs/2103.12731

    The internal dimensions of the attention module are controlled by the interaction of several arguments.
      * the output dimension of the module is specified by dim_out, which falls back to input dim if not set
      * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim
      * the query and key (qk) dimensions are determined by
        * num_heads * dim_head if dim_head is not None
        * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None
      * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used

    Args:
        dim (int): input dimension to the module
        dim_out (int): output dimension of the module, same as dim if not set
        feat_size (Tuple[int, int]): size of input feature_map (not used, for arg compat with bottle/lambda)
        stride: output stride of the module, query downscaled if > 1 (default: 1).
        num_heads: parallel attention heads (default: 8).
        dim_head: dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set
        block_size (int): size of blocks. (default: 8)
        halo_size (int): size of halo overlap. (default: 3)
        qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0)
        qkv_bias (bool) : add bias to q, k, and v projections
        avg_down (bool): use average pool downsample instead of strided query blocks
        scale_pos_embed (bool): scale the position embedding as well as Q @ K
    """
    def __init__(
            self, dim, dim_out=None, feat_size=None, stride=1, num_heads=8, dim_head=None, block_size=8, halo_size=3,
            qk_ratio=1.0, qkv_bias=False, avg_down=False, scale_pos_embed=False):
        super().__init__()
        dim_out = dim_out or dim
        assert dim_out % num_heads == 0
        assert stride in (1, 2)
        self.num_heads = num_heads
        # qk head dim: explicit, or derived from dim_out * qk_ratio rounded to a multiple of 8
        self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads
        self.dim_head_v = dim_out // self.num_heads
        self.dim_out_qk = num_heads * self.dim_head_qk
        self.dim_out_v = num_heads * self.dim_head_v
        self.scale = self.dim_head_qk ** -0.5
        self.scale_pos_embed = scale_pos_embed
        # block_size_ds is the (possibly downsampled) query block size
        self.block_size = self.block_size_ds = block_size
        self.halo_size = halo_size
        self.win_size = block_size + halo_size * 2  # neighbourhood window size
        self.block_stride = 1
        use_avg_pool = False
        if stride > 1:
            # downsample via strided q conv when block divides evenly, else avg-pool the output
            use_avg_pool = avg_down or block_size % stride != 0
            self.block_stride = 1 if use_avg_pool else stride
            self.block_size_ds = self.block_size // self.block_stride

        # FIXME not clear if this stride behaviour is what the paper intended
        # Also, the paper mentions using a 3D conv for dealing with the blocking/gather, and leaving
        # data in unfolded block form. I haven't wrapped my head around how that'd look.
        self.q = nn.Conv2d(dim, self.dim_out_qk, 1, stride=self.block_stride, bias=qkv_bias)
        self.kv = nn.Conv2d(dim, self.dim_out_qk + self.dim_out_v, 1, bias=qkv_bias)

        self.pos_embed = PosEmbedRel(
            block_size=self.block_size_ds, win_size=self.win_size, dim_head=self.dim_head_qk, scale=self.scale)

        self.pool = nn.AvgPool2d(2, 2) if use_avg_pool else nn.Identity()

        self.reset_parameters()

    def reset_parameters(self):
        std = self.q.weight.shape[1] ** -0.5  # fan-in
        trunc_normal_(self.q.weight, std=std)
        trunc_normal_(self.kv.weight, std=std)
        trunc_normal_(self.pos_embed.height_rel, std=self.scale)
        trunc_normal_(self.pos_embed.width_rel, std=self.scale)

    def forward(self, x):
        B, C, H, W = x.shape
        _assert(H % self.block_size == 0, '')
        _assert(W % self.block_size == 0, '')
        num_h_blocks = H // self.block_size
        num_w_blocks = W // self.block_size
        num_blocks = num_h_blocks * num_w_blocks

        q = self.q(x)
        # unfold queries into non-overlapping blocks
        q = q.reshape(
            -1, self.dim_head_qk,
            num_h_blocks, self.block_size_ds, num_w_blocks, self.block_size_ds).permute(0, 1, 3, 5, 2, 4)
        # B, num_heads * dim_head * block_size ** 2, num_blocks
        q = q.reshape(B * self.num_heads, self.dim_head_qk, -1, num_blocks).transpose(1, 3)
        # B * num_heads, num_blocks, block_size ** 2, dim_head

        kv = self.kv(x)
        # Generate overlapping windows for kv. This approach is good for GPU and CPU. However, unfold() is not
        # lowered for PyTorch XLA so it will be very slow. See code at bottom of file for XLA friendly approach.
        # FIXME figure out how to switch impl between this and conv2d if XLA being used.
        kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size])
        kv = kv.unfold(2, self.win_size, self.block_size).unfold(3, self.win_size, self.block_size).reshape(
            B * self.num_heads, self.dim_head_qk + self.dim_head_v, num_blocks, -1).permute(0, 2, 3, 1)
        k, v = torch.split(kv, [self.dim_head_qk, self.dim_head_v], dim=-1)
        # B * num_heads, num_blocks, win_size ** 2, dim_head_qk or dim_head_v

        if self.scale_pos_embed:
            attn = (q @ k.transpose(-1, -2) + self.pos_embed(q)) * self.scale
        else:
            attn = (q @ k.transpose(-1, -2)) * self.scale + self.pos_embed(q)
        # B * num_heads, num_blocks, block_size ** 2, win_size ** 2
        attn = attn.softmax(dim=-1)

        out = (attn @ v).transpose(1, 3)  # B * num_heads, dim_head_v, block_size ** 2, num_blocks
        # fold attended blocks back into a spatial feature map
        out = out.reshape(-1, self.block_size_ds, self.block_size_ds, num_h_blocks, num_w_blocks)
        out = out.permute(0, 3, 1, 4, 2).contiguous().view(
            B, self.dim_out_v, H // self.block_stride, W // self.block_stride)
        # B, dim_out, H // block_stride, W // block_stride
        out = self.pool(out)
        return out
211
+
212
+
213
+ """ Three alternatives for overlapping windows.
214
+
215
+ `.unfold().unfold()` is same speed as stride tricks with similar clarity as F.unfold()
216
+
217
+ if is_xla:
218
+ # This code achieves haloing on PyTorch XLA with reasonable runtime trade-off, it is
219
+ # EXTREMELY slow for backward on a GPU though so I need a way of selecting based on environment.
220
+ WW = self.win_size ** 2
221
+ pw = torch.eye(WW, dtype=x.dtype, device=x.device).reshape(WW, 1, self.win_size, self.win_size)
222
+ kv = F.conv2d(kv.reshape(-1, 1, H, W), pw, stride=self.block_size, padding=self.halo_size)
223
+ elif self.stride_tricks:
224
+ kv = F.pad(kv, [self.halo_size, self.halo_size, self.halo_size, self.halo_size]).contiguous()
225
+ kv = kv.as_strided((
226
+ B, self.dim_out_qk + self.dim_out_v, self.win_size, self.win_size, num_h_blocks, num_w_blocks),
227
+ stride=(kv.stride(0), kv.stride(1), kv.shape[-1], 1, self.block_size * kv.shape[-1], self.block_size))
228
+ else:
229
+ kv = F.unfold(kv, kernel_size=self.win_size, stride=self.block_size, padding=self.halo_size)
230
+
231
+ kv = kv.reshape(
232
+ B * self.num_heads, self.dim_head_qk + self.dim_head_v, -1, num_blocks).transpose(1, 3)
233
+ """
janus/lib/python3.10/site-packages/timm/layers/helpers.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Layer/Module Helpers
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ from itertools import repeat
6
+ import collections.abc
7
+
8
+
9
+ # From PyTorch internals
10
def _ntuple(n):
    """ Return a converter mapping a scalar to an n-tuple.

    Non-string iterables pass through unchanged (as tuples); strings and other
    scalars are repeated n times. (Pattern borrowed from PyTorch internals.)
    """
    def parse(x):
        if not isinstance(x, collections.abc.Iterable) or isinstance(x, str):
            return (x,) * n
        return tuple(x)
    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
23
+
24
+
25
def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
    """ Round v to the nearest multiple of divisor, at least min_value.

    If rounding down would drop below round_limit * v (more than ~10% loss by
    default), bump up by one divisor step instead.
    """
    min_value = min_value or divisor
    rounded = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if rounded < round_limit * v:
        # round-down lost too much; step up to the next multiple
        rounded += divisor
    return rounded
32
+
33
+
34
def extend_tuple(x, n):
    """ Pad (or truncate) x to an n-tuple, repeating the last element as filler. """
    x = tuple(x) if isinstance(x, (tuple, list)) else (x,)
    shortfall = n - len(x)
    if shortfall <= 0:
        return x[:n]
    return x + (x[-1],) * shortfall
janus/lib/python3.10/site-packages/timm/layers/interpolate.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Interpolation helpers for timm layers
2
+
3
+ RegularGridInterpolator from https://github.com/sbarratt/torch_interpolations
4
+ Copyright Shane Barratt, Apache 2.0 license
5
+ """
6
+ import torch
7
+ from itertools import product
8
+
9
+
10
class RegularGridInterpolator:
    """ Interpolate data defined on a rectilinear grid with even or uneven spacing.
    Produces similar results to scipy RegularGridInterpolator or interp2d
    in 'linear' mode.

    Taken from https://github.com/sbarratt/torch_interpolations
    """

    def __init__(self, points, values):
        # points: sequence of 1D coordinate tensors, one per grid dimension
        # values: N-D tensor of grid values, shape matching the point counts
        self.points = points
        self.values = values

        assert isinstance(self.points, tuple) or isinstance(self.points, list)
        assert isinstance(self.values, torch.Tensor)

        self.ms = list(self.values.shape)
        self.n = len(self.points)  # number of grid dimensions

        assert len(self.ms) == self.n

        for i, p in enumerate(self.points):
            assert isinstance(p, torch.Tensor)
            assert p.shape[0] == self.values.shape[i]

    def __call__(self, points_to_interp):
        """ Evaluate at K query points given as a list of n 1D tensors (one per dim). """
        assert self.points is not None
        assert self.values is not None

        assert len(points_to_interp) == len(self.points)
        K = points_to_interp[0].shape[0]
        for x in points_to_interp:
            assert x.shape[0] == K

        idxs = []
        dists = []
        overalls = []
        for p, x in zip(self.points, points_to_interp):
            # locate each query between its two neighbouring grid lines
            idx_right = torch.bucketize(x, p)
            idx_right[idx_right >= p.shape[0]] = p.shape[0] - 1  # clamp above-range queries
            idx_left = (idx_right - 1).clamp(0, p.shape[0] - 1)
            dist_left = x - p[idx_left]
            dist_right = p[idx_right] - x
            # negative distances occur for out-of-range queries; zero them (extrapolate flat)
            dist_left[dist_left < 0] = 0.
            dist_right[dist_right < 0] = 0.
            # query exactly on a grid line: use weight 1 on both sides to avoid 0/0
            both_zero = (dist_left == 0) & (dist_right == 0)
            dist_left[both_zero] = dist_right[both_zero] = 1.

            idxs.append((idx_left, idx_right))
            dists.append((dist_left, dist_right))
            overalls.append(dist_left + dist_right)

        # multilinear interpolation: weighted sum over all 2**n corner combinations
        numerator = 0.
        for indexer in product([0, 1], repeat=self.n):
            as_s = [idx[onoff] for onoff, idx in zip(indexer, idxs)]
            # weight of a corner is the distance to the OPPOSITE side, hence 1 - onoff
            bs_s = [dist[1 - onoff] for onoff, dist in zip(indexer, dists)]
            numerator += self.values[as_s] * \
                torch.prod(torch.stack(bs_s), dim=0)
        denominator = torch.prod(torch.stack(overalls), dim=0)
        return numerator / denominator
janus/lib/python3.10/site-packages/timm/layers/layer_scale.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
class LayerScale(nn.Module):
    """ LayerScale on tensors with channels in last-dim.

    Multiplies the input by a learnable per-channel gamma, optionally in place.
    """
    def __init__(
            self,
            dim: int,
            init_values: float = 1e-5,
            inplace: bool = False,
    ) -> None:
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.inplace:
            return x.mul_(self.gamma)
        return x * self.gamma
20
+
21
+
22
class LayerScale2d(nn.Module):
    """ LayerScale for tensors with torch 2D NCHW layout.

    Same as LayerScale but gamma is broadcast over the channel dim of NCHW input.
    """
    def __init__(
            self,
            dim: int,
            init_values: float = 1e-5,
            inplace: bool = False,
    ):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x):
        per_channel = self.gamma.view(1, -1, 1, 1)
        if self.inplace:
            return x.mul_(per_channel)
        return x * per_channel
38
+
janus/lib/python3.10/site-packages/timm/layers/mixed_conv2d.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ PyTorch Mixed Convolution
2
+
3
+ Paper: MixConv: Mixed Depthwise Convolutional Kernels (https://arxiv.org/abs/1907.09595)
4
+
5
+ Hacked together by / Copyright 2020 Ross Wightman
6
+ """
7
+
8
+ import torch
9
+ from torch import nn as nn
10
+
11
+ from .conv2d_same import create_conv2d_pad
12
+
13
+
14
def _split_channels(num_chan, num_groups):
    """ Split num_chan into num_groups near-equal parts; any remainder goes to the first split. """
    base = num_chan // num_groups
    split = [base] * num_groups
    split[0] += num_chan - base * num_groups
    return split
18
+
19
+
20
class MixedConv2d(nn.ModuleDict):
    """ Mixed Grouped Convolution

    Based on MDConv and GroupedConv in MixNet impl:
      https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mixnet/custom_layers.py

    Splits input channels across several parallel convs with different kernel sizes,
    then concatenates the results (MixConv, https://arxiv.org/abs/1907.09595).
    """
    def __init__(self, in_channels, out_channels, kernel_size=3,
                 stride=1, padding='', dilation=1, depthwise=False, **kwargs):
        super(MixedConv2d, self).__init__()

        # one conv branch per kernel size
        kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size]
        num_groups = len(kernel_size)
        in_splits = _split_channels(in_channels, num_groups)
        out_splits = _split_channels(out_channels, num_groups)
        self.in_channels = sum(in_splits)
        self.out_channels = sum(out_splits)
        for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_size, in_splits, out_splits)):
            conv_groups = in_ch if depthwise else 1
            # use add_module to keep key space clean
            self.add_module(
                str(idx),
                create_conv2d_pad(
                    in_ch, out_ch, k, stride=stride,
                    padding=padding, dilation=dilation, groups=conv_groups, **kwargs)
            )
        self.splits = in_splits

    def forward(self, x):
        # split channels, run each chunk through its own conv, concat back together
        x_split = torch.split(x, self.splits, 1)
        x_out = [c(x_split[i]) for i, c in enumerate(self.values())]
        x = torch.cat(x_out, 1)
        return x
janus/lib/python3.10/site-packages/timm/layers/padding.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Padding Helpers
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ import math
6
+ from typing import List, Tuple, Union
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+
11
+ from .helpers import to_2tuple
12
+
13
+
14
+ # Calculate symmetric padding for a convolution
15
def get_padding(kernel_size: int, stride: int = 1, dilation: int = 1, **_) -> Union[int, List[int]]:
    """ Calculate symmetric padding for a convolution.

    Accepts scalars or per-dim tuples/lists; tuple args recurse per dimension.
    """
    if any(isinstance(v, (tuple, list)) for v in (kernel_size, stride, dilation)):
        # expand all args to 2-tuples and compute padding per dimension
        kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation)
        return [get_padding(*a) for a in zip(kernel_size, stride, dilation)]
    return ((stride - 1) + dilation * (kernel_size - 1)) // 2
21
+
22
+
23
+ # Calculate asymmetric TensorFlow-like 'SAME' padding for a convolution
24
def get_same_padding(x: int, kernel_size: int, stride: int, dilation: int):
    """ Total asymmetric TensorFlow-'SAME' padding needed along one dim of size x.

    Tensor input takes the traceable torch path; plain ints use math.ceil.
    """
    if isinstance(x, torch.Tensor):
        return torch.clamp(((x / stride).ceil() - 1) * stride + (kernel_size - 1) * dilation + 1 - x, min=0)
    return max((math.ceil(x / stride) - 1) * stride + (kernel_size - 1) * dilation + 1 - x, 0)
29
+
30
+
31
+ # Can SAME padding for given args be done statically?
32
def is_static_pad(kernel_size: int, stride: int = 1, dilation: int = 1, **_):
    """ True when TF-'SAME' padding for these args can be expressed as a static pad.

    Requires stride 1 and an even total pad (so it splits symmetrically).
    """
    if any(isinstance(v, (tuple, list)) for v in (kernel_size, stride, dilation)):
        kernel_size, stride, dilation = to_2tuple(kernel_size), to_2tuple(stride), to_2tuple(dilation)
        return all(is_static_pad(*a) for a in zip(kernel_size, stride, dilation))
    return stride == 1 and (dilation * (kernel_size - 1)) % 2 == 0
37
+
38
+
39
def pad_same_arg(
        input_size: List[int],
        kernel_size: List[int],
        stride: List[int],
        dilation: List[int] = (1, 1),
) -> List[int]:
    """ Build an F.pad argument list [w_left, w_right, h_top, h_bottom] for TF-'SAME' padding. """
    total_h = get_same_padding(input_size[0], kernel_size[0], stride[0], dilation[0])
    total_w = get_same_padding(input_size[1], kernel_size[1], stride[1], dilation[1])
    # TF convention: extra pixel of an odd total goes on the right / bottom
    return [total_w // 2, total_w - total_w // 2, total_h // 2, total_h - total_h // 2]
50
+
51
+
52
+ # Dynamically pad input x with 'SAME' padding for conv with specified args
53
def pad_same(
        x,
        kernel_size: List[int],
        stride: List[int],
        dilation: List[int] = (1, 1),
        value: float = 0,
):
    """ Dynamically pad x (spatial dims last two) with TF-'SAME' semantics for the given conv geometry. """
    in_h, in_w = x.size()[-2:]
    total_h = get_same_padding(in_h, kernel_size[0], stride[0], dilation[0])
    total_w = get_same_padding(in_w, kernel_size[1], stride[1], dilation[1])
    # odd totals put the extra pixel on the right / bottom (TF convention)
    return F.pad(
        x,
        (total_w // 2, total_w - total_w // 2, total_h // 2, total_h - total_h // 2),
        value=value,
    )
65
+
66
+
67
def get_padding_value(padding, kernel_size, **kwargs) -> Tuple[Tuple, bool]:
    """ Resolve a padding spec to (padding, is_dynamic).

    String specs: 'same' -> static symmetric pad when possible, else (0, dynamic=True);
    'valid' -> 0; anything else -> PyTorch-style symmetric padding. Non-string specs
    pass through unchanged.
    """
    dynamic = False
    if not isinstance(padding, str):
        return padding, dynamic

    # for any string padding, the padding will be calculated for you, one of three ways
    padding = padding.lower()
    if padding == 'same':
        # TF compatible 'SAME' padding, has a performance and GPU memory allocation impact
        if is_static_pad(kernel_size, **kwargs):
            # static case, no extra overhead
            padding = get_padding(kernel_size, **kwargs)
        else:
            # dynamic 'SAME' padding, has runtime/GPU memory overhead
            padding = 0
            dynamic = True
    elif padding == 'valid':
        # 'VALID' padding, same as padding=0
        padding = 0
    else:
        # Default to PyTorch style 'same'-ish symmetric padding
        padding = get_padding(kernel_size, **kwargs)
    return padding, dynamic
janus/lib/python3.10/site-packages/timm/layers/patch_embed.py ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Image to Patch Embedding using Conv2d
2
+
3
+ A convolution based approach to patchifying a 2D image w/ embedding projection.
4
+
5
+ Based on code in:
6
+ * https://github.com/google-research/vision_transformer
7
+ * https://github.com/google-research/big_vision/tree/main/big_vision
8
+
9
+ Hacked together by / Copyright 2020 Ross Wightman
10
+ """
11
+ import logging
12
+ import math
13
+ from typing import Callable, List, Optional, Tuple, Union
14
+
15
+ import torch
16
+ from torch import nn as nn
17
+ import torch.nn.functional as F
18
+
19
+ from .format import Format, nchw_to
20
+ from .helpers import to_2tuple
21
+ from .trace_utils import _assert
22
+
23
+ _logger = logging.getLogger(__name__)
24
+
25
+
26
class PatchEmbed(nn.Module):
    """ 2D Image to Patch Embedding

    Projects an NCHW image into non-overlapping patch tokens via a strided Conv2d,
    optionally flattening to NLC or converting to another output format.
    """
    output_fmt: Format
    dynamic_img_pad: torch.jit.Final[bool]  # Final so torchscript can branch statically

    def __init__(
            self,
            img_size: Optional[int] = 224,
            patch_size: int = 16,
            in_chans: int = 3,
            embed_dim: int = 768,
            norm_layer: Optional[Callable] = None,
            flatten: bool = True,
            output_fmt: Optional[str] = None,
            bias: bool = True,
            strict_img_size: bool = True,
            dynamic_img_pad: bool = False,
    ):
        """
        Args:
            img_size: Input image size (int or (H, W)); None disables size checks.
            patch_size: Patch size (int or (H, W)), also the conv kernel & stride.
            in_chans: Number of input image channels.
            embed_dim: Embedding (output channel) dimension.
            norm_layer: Optional normalization layer applied to the output.
            flatten: Flatten spatial dims to NLC (ignored if output_fmt is set).
            output_fmt: Explicit output format string (disables flatten path).
            bias: Enable bias in the projection conv.
            strict_img_size: Require input to exactly match img_size.
            dynamic_img_pad: Pad input at runtime to a multiple of patch_size.
        """
        super().__init__()
        self.patch_size = to_2tuple(patch_size)
        self.img_size, self.grid_size, self.num_patches = self._init_img_size(img_size)

        if output_fmt is not None:
            # explicit output format takes precedence and disables legacy flatten
            self.flatten = False
            self.output_fmt = Format(output_fmt)
        else:
            # flatten spatial dim and transpose to channels last, kept for bwd compat
            self.flatten = flatten
            self.output_fmt = Format.NCHW
        self.strict_img_size = strict_img_size
        self.dynamic_img_pad = dynamic_img_pad

        # kernel == stride == patch size -> non-overlapping patch projection
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def _init_img_size(self, img_size: Union[int, Tuple[int, int]]):
        """Derive (img_size, grid_size, num_patches) from an image size; all None if img_size is None."""
        assert self.patch_size
        if img_size is None:
            return None, None, None
        img_size = to_2tuple(img_size)
        # floor division: partial patches at the edge are dropped (unless dynamic_img_pad)
        grid_size = tuple([s // p for s, p in zip(img_size, self.patch_size)])
        num_patches = grid_size[0] * grid_size[1]
        return img_size, grid_size, num_patches

    def set_input_size(
            self,
            img_size: Optional[Union[int, Tuple[int, int]]] = None,
            patch_size: Optional[Union[int, Tuple[int, int]]] = None,
    ):
        """Change input image and/or patch size after construction.

        A new patch size rebuilds the projection conv, resampling the existing
        weights via resample_patch_embed so pretrained filters are reused.
        """
        new_patch_size = None
        if patch_size is not None:
            new_patch_size = to_2tuple(patch_size)
        if new_patch_size is not None and new_patch_size != self.patch_size:
            with torch.no_grad():
                new_proj = nn.Conv2d(
                    self.proj.in_channels,
                    self.proj.out_channels,
                    kernel_size=new_patch_size,
                    stride=new_patch_size,
                    bias=self.proj.bias is not None,
                )
                new_proj.weight.copy_(resample_patch_embed(self.proj.weight, new_patch_size, verbose=True))
                if self.proj.bias is not None:
                    new_proj.bias.copy_(self.proj.bias)
            self.proj = new_proj
            self.patch_size = new_patch_size
        img_size = img_size or self.img_size
        # recompute grid/num_patches if either the image size or patch size changed
        if img_size != self.img_size or new_patch_size is not None:
            self.img_size, self.grid_size, self.num_patches = self._init_img_size(img_size)

    def feat_ratio(self, as_scalar=True) -> Union[Tuple[int, int], int]:
        """Input-to-feature downsample ratio; max of the two dims when as_scalar."""
        if as_scalar:
            return max(self.patch_size)
        else:
            return self.patch_size

    def dynamic_feat_size(self, img_size: Tuple[int, int]) -> Tuple[int, int]:
        """ Get grid (feature) size for given image size taking account of dynamic padding.
        NOTE: must be torchscript compatible so using fixed tuple indexing
        """
        if self.dynamic_img_pad:
            # padding rounds partial patches up
            return math.ceil(img_size[0] / self.patch_size[0]), math.ceil(img_size[1] / self.patch_size[1])
        else:
            return img_size[0] // self.patch_size[0], img_size[1] // self.patch_size[1]

    def forward(self, x):
        B, C, H, W = x.shape
        if self.img_size is not None:
            if self.strict_img_size:
                # exact size required
                _assert(H == self.img_size[0], f"Input height ({H}) doesn't match model ({self.img_size[0]}).")
                _assert(W == self.img_size[1], f"Input width ({W}) doesn't match model ({self.img_size[1]}).")
            elif not self.dynamic_img_pad:
                # relaxed check: size may vary but must tile evenly into patches
                _assert(
                    H % self.patch_size[0] == 0,
                    f"Input height ({H}) should be divisible by patch size ({self.patch_size[0]})."
                )
                _assert(
                    W % self.patch_size[1] == 0,
                    f"Input width ({W}) should be divisible by patch size ({self.patch_size[1]})."
                )
        if self.dynamic_img_pad:
            # pad bottom/right up to the next multiple of patch size
            pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0]
            pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1]
            x = F.pad(x, (0, pad_w, 0, pad_h))
        x = self.proj(x)
        if self.flatten:
            x = x.flatten(2).transpose(1, 2)  # NCHW -> NLC
        elif self.output_fmt != Format.NCHW:
            x = nchw_to(x, self.output_fmt)
        x = self.norm(x)
        return x
138
+
139
+
140
class PatchEmbedWithSize(PatchEmbed):
    """ 2D Image to Patch Embedding that also returns the projected feature grid size.
    """
    output_fmt: Format

    def __init__(
            self,
            img_size: Optional[int] = 224,
            patch_size: int = 16,
            in_chans: int = 3,
            embed_dim: int = 768,
            norm_layer: Optional[Callable] = None,
            flatten: bool = True,
            output_fmt: Optional[str] = None,
            bias: bool = True,
    ):
        super().__init__(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            norm_layer=norm_layer,
            flatten=flatten,
            output_fmt=output_fmt,
            bias=bias,
        )

    def forward(self, x) -> Tuple[torch.Tensor, List[int]]:
        """Embed patches and return (tokens, (feat_h, feat_w))."""
        B, C, H, W = x.shape
        if self.img_size is not None:
            # input must tile evenly into patches (no dynamic padding here)
            _assert(H % self.patch_size[0] == 0, f"Input image height ({H}) must be divisible by patch size ({self.patch_size[0]}).")
            _assert(W % self.patch_size[1] == 0, f"Input image width ({W}) must be divisible by patch size ({self.patch_size[1]}).")

        projected = self.proj(x)
        feat_size = projected.shape[-2:]
        if self.flatten:
            projected = projected.flatten(2).transpose(1, 2)  # NCHW -> NLC
        elif self.output_fmt != Format.NCHW:
            projected = nchw_to(projected, self.output_fmt)
        return self.norm(projected), feat_size
181
+
182
+
183
+ def resample_patch_embed(
184
+ patch_embed,
185
+ new_size: List[int],
186
+ interpolation: str = 'bicubic',
187
+ antialias: bool = True,
188
+ verbose: bool = False,
189
+ ):
190
+ """Resample the weights of the patch embedding kernel to target resolution.
191
+ We resample the patch embedding kernel by approximately inverting the effect
192
+ of patch resizing.
193
+
194
+ Code based on:
195
+ https://github.com/google-research/big_vision/blob/b00544b81f8694488d5f36295aeb7972f3755ffe/big_vision/models/proj/flexi/vit.py
196
+
197
+ With this resizing, we can for example load a B/8 filter into a B/16 model
198
+ and, on 2x larger input image, the result will match.
199
+
200
+ Args:
201
+ patch_embed: original parameter to be resized.
202
+ new_size (tuple(int, int): target shape (height, width)-only.
203
+ interpolation (str): interpolation for resize
204
+ antialias (bool): use anti-aliasing filter in resize
205
+ verbose (bool): log operation
206
+ Returns:
207
+ Resized patch embedding kernel.
208
+ """
209
+ import numpy as np
210
+ try:
211
+ from torch import vmap
212
+ except ImportError:
213
+ from functorch import vmap
214
+
215
+ assert len(patch_embed.shape) == 4, "Four dimensions expected"
216
+ assert len(new_size) == 2, "New shape should only be hw"
217
+ old_size = patch_embed.shape[-2:]
218
+ if tuple(old_size) == tuple(new_size):
219
+ return patch_embed
220
+
221
+ if verbose:
222
+ _logger.info(f"Resize patch embedding {patch_embed.shape} to {new_size}, w/ {interpolation} interpolation.")
223
+
224
+ def resize(x_np, _new_size):
225
+ x_tf = torch.Tensor(x_np)[None, None, ...]
226
+ x_upsampled = F.interpolate(
227
+ x_tf, size=_new_size, mode=interpolation, antialias=antialias)[0, 0, ...].numpy()
228
+ return x_upsampled
229
+
230
+ def get_resize_mat(_old_size, _new_size):
231
+ mat = []
232
+ for i in range(np.prod(_old_size)):
233
+ basis_vec = np.zeros(_old_size)
234
+ basis_vec[np.unravel_index(i, _old_size)] = 1.
235
+ mat.append(resize(basis_vec, _new_size).reshape(-1))
236
+ return np.stack(mat).T
237
+
238
+ resize_mat = get_resize_mat(old_size, new_size)
239
+ resize_mat_pinv = torch.tensor(np.linalg.pinv(resize_mat.T), device=patch_embed.device)
240
+
241
+ def resample_kernel(kernel):
242
+ resampled_kernel = resize_mat_pinv @ kernel.reshape(-1)
243
+ return resampled_kernel.reshape(new_size)
244
+
245
+ v_resample_kernel = vmap(vmap(resample_kernel, 0, 0), 1, 1)
246
+ orig_dtype = patch_embed.dtype
247
+ patch_embed = patch_embed.float()
248
+ patch_embed = v_resample_kernel(patch_embed)
249
+ patch_embed = patch_embed.to(orig_dtype)
250
+ return patch_embed
251
+
252
+
253
+ # def divs(n, m=None):
254
+ # m = m or n // 2
255
+ # if m == 1:
256
+ # return [1]
257
+ # if n % m == 0:
258
+ # return [m] + divs(n, m - 1)
259
+ # return divs(n, m - 1)
260
+ #
261
+ #
262
+ # class FlexiPatchEmbed(nn.Module):
263
+ # """ 2D Image to Patch Embedding w/ Flexible Patch sizes (FlexiViT)
264
+ # FIXME WIP
265
+ # """
266
+ # def __init__(
267
+ # self,
268
+ # img_size=240,
269
+ # patch_size=16,
270
+ # in_chans=3,
271
+ # embed_dim=768,
272
+ # base_img_size=240,
273
+ # base_patch_size=32,
274
+ # norm_layer=None,
275
+ # flatten=True,
276
+ # bias=True,
277
+ # ):
278
+ # super().__init__()
279
+ # self.img_size = to_2tuple(img_size)
280
+ # self.patch_size = to_2tuple(patch_size)
281
+ # self.num_patches = 0
282
+ #
283
+ # # full range for 240 = (5, 6, 8, 10, 12, 14, 15, 16, 20, 24, 30, 40, 48)
284
+ # self.seqhw = (6, 8, 10, 12, 14, 15, 16, 20, 24, 30)
285
+ #
286
+ # self.base_img_size = to_2tuple(base_img_size)
287
+ # self.base_patch_size = to_2tuple(base_patch_size)
288
+ # self.base_grid_size = tuple([i // p for i, p in zip(self.base_img_size, self.base_patch_size)])
289
+ # self.base_num_patches = self.base_grid_size[0] * self.base_grid_size[1]
290
+ #
291
+ # self.flatten = flatten
292
+ # self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=bias)
293
+ # self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
294
+ #
295
+ # def forward(self, x):
296
+ # B, C, H, W = x.shape
297
+ #
298
+ # if self.patch_size == self.base_patch_size:
299
+ # weight = self.proj.weight
300
+ # else:
301
+ # weight = resample_patch_embed(self.proj.weight, self.patch_size)
302
+ # patch_size = self.patch_size
303
+ # x = F.conv2d(x, weight, bias=self.proj.bias, stride=patch_size)
304
+ # if self.flatten:
305
+ # x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
306
+ # x = self.norm(x)
307
+ # return x
janus/lib/python3.10/site-packages/timm/layers/pool2d_same.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ AvgPool2d w/ Same Padding
2
+
3
+ Hacked together by / Copyright 2020 Ross Wightman
4
+ """
5
+ import torch
6
+ import torch.nn as nn
7
+ import torch.nn.functional as F
8
+ from typing import List, Tuple, Optional
9
+
10
+ from .helpers import to_2tuple
11
+ from .padding import pad_same, get_padding_value
12
+
13
+
14
def avg_pool2d_same(x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
                    ceil_mode: bool = False, count_include_pad: bool = True):
    """Average pool with dynamic TF 'SAME' padding applied ahead of the pool op."""
    # FIXME how to deal with count_include_pad vs not for external padding?
    padded = pad_same(x, kernel_size, stride)
    return F.avg_pool2d(padded, kernel_size, stride, (0, 0), ceil_mode, count_include_pad)
19
+
20
+
21
class AvgPool2dSame(nn.AvgPool2d):
    """ Tensorflow like 'SAME' wrapper for 2D average pooling
    """
    def __init__(self, kernel_size: int, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
        # `padding` is accepted for interface compat but forced to 0; the actual
        # 'SAME' padding is applied dynamically in forward().
        super(AvgPool2dSame, self).__init__(
            to_2tuple(kernel_size), to_2tuple(stride), (0, 0), ceil_mode, count_include_pad)

    def forward(self, x):
        padded = pad_same(x, self.kernel_size, self.stride)
        return F.avg_pool2d(
            padded, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)
33
+
34
+
35
def max_pool2d_same(
        x, kernel_size: List[int], stride: List[int], padding: List[int] = (0, 0),
        dilation: List[int] = (1, 1), ceil_mode: bool = False):
    """Max pool with dynamic TF 'SAME' padding; pads with -inf so padding never wins the max."""
    padded = pad_same(x, kernel_size, stride, value=-float('inf'))
    return F.max_pool2d(padded, kernel_size, stride, (0, 0), dilation, ceil_mode)
40
+
41
+
42
class MaxPool2dSame(nn.MaxPool2d):
    """ Tensorflow like 'SAME' wrapper for 2D max pooling

    Padding is applied dynamically in forward() (with -inf fill so padded
    positions never win the max); the base class padding is forced to 0.
    """
    def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False):
        kernel_size = to_2tuple(kernel_size)
        stride = to_2tuple(stride)
        dilation = to_2tuple(dilation)
        # FIX: ceil_mode must be passed by keyword. nn.MaxPool2d's signature is
        # (kernel_size, stride, padding, dilation, return_indices, ceil_mode), so
        # passing ceil_mode positionally after dilation set return_indices instead
        # and silently left ceil_mode at its default False.
        super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode=ceil_mode)

    def forward(self, x):
        # pad with -inf so the padding cannot influence the max
        x = pad_same(x, self.kernel_size, self.stride, value=-float('inf'))
        return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode)
54
+
55
+
56
def create_pool2d(pool_type, kernel_size, stride=None, **kwargs):
    """Factory for 2D pooling layers.

    Selects the dynamic 'SAME'-padding variants (AvgPool2dSame / MaxPool2dSame)
    when the padding spec requires runtime padding, otherwise the standard
    torch.nn pooling modules with a resolved static padding.
    """
    stride = stride or kernel_size
    padding = kwargs.pop('padding', '')
    padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, **kwargs)
    if pool_type == 'avg':
        if is_dynamic:
            return AvgPool2dSame(kernel_size, stride=stride, **kwargs)
        return nn.AvgPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
    if pool_type == 'max':
        if is_dynamic:
            return MaxPool2dSame(kernel_size, stride=stride, **kwargs)
        return nn.MaxPool2d(kernel_size, stride=stride, padding=padding, **kwargs)
    assert False, f'Unsupported pool type {pool_type}'
janus/lib/python3.10/site-packages/timm/layers/pos_embed_sincos.py ADDED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Sin-cos, fourier, rotary position embedding modules and functions
2
+
3
+ Hacked together by / Copyright 2022 Ross Wightman
4
+ """
5
+ import math
6
+ from typing import List, Tuple, Optional, Union
7
+
8
+ import torch
9
+ from torch import nn as nn
10
+
11
+ from .grid import ndgrid
12
+ from .trace_utils import _assert
13
+
14
+
15
def pixel_freq_bands(
        num_bands: int,
        max_freq: float = 224.,
        linear_bands: bool = True,
        device: Optional[torch.device] = None,
):
    """Frequency bands (scaled by pi) for pixel-coordinate position encodings.

    Linear mode spaces bands evenly in [1, max_freq / 2]; otherwise bands are
    log (power-of-two) spaced as 2**[0 .. log2(max_freq) - 1].
    """
    if linear_bands:
        bands = torch.linspace(1.0, max_freq / 2, num_bands, dtype=torch.float32, device=device)
    else:
        exponents = torch.linspace(0, math.log(max_freq, 2) - 1, num_bands, dtype=torch.float32, device=device)
        bands = 2 ** exponents
    return bands * torch.pi
26
+
27
+
28
def freq_bands(
        num_bands: int,
        temperature: float = 10000.,
        step: int = 2,
        device: Optional[torch.device] = None,
) -> torch.Tensor:
    """Inverse-frequency bands (transformer sin-cos style): 1 / T**(i / num_bands)
    for i in range(0, num_bands, step)."""
    indices = torch.arange(0, num_bands, step, dtype=torch.int64, device=device)
    exponents = indices.to(torch.float32) / num_bands
    return 1. / (temperature ** exponents)
37
+
38
+
39
def build_sincos2d_pos_embed(
        feat_shape: List[int],
        dim: int = 64,
        temperature: float = 10000.,
        reverse_coord: bool = False,
        interleave_sin_cos: bool = False,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None
) -> torch.Tensor:
    """Build a fixed 2D sin-cos position embedding for a flattened feature grid.

    Args:
        feat_shape: (H, W) of the feature grid to embed.
        dim: embedding dim; must be divisible by 4 (sin + cos for each of 2 coords).
        temperature: frequency temperature for the bands.
        reverse_coord: stack grid order W, H instead of H, W
        interleave_sin_cos: sin, cos, sin, cos stack instead of sin, sin, cos, cos
        dtype: output dtype.
        device: output device.

    Returns:
        Tensor of shape (H * W, dim).
    """
    assert dim % 4 == 0, 'Embed dimension must be divisible by 4 for sin-cos 2D position embedding'
    pos_dim = dim // 4  # bands per coordinate (each band yields a sin and a cos)
    bands = freq_bands(pos_dim, temperature=temperature, step=1, device=device)

    if reverse_coord:
        feat_shape = feat_shape[::-1]  # stack W, H instead of H, W
    # (num_positions, 2) coordinate grid, flattened row-major
    grid = torch.stack(ndgrid([
        torch.arange(s, device=device, dtype=torch.int64).to(torch.float32)
        for s in feat_shape
    ])).flatten(1).transpose(0, 1)
    # outer product of each coordinate with the frequency bands
    pos2 = grid.unsqueeze(-1) * bands.unsqueeze(0)
    # FIXME add support for unflattened spatial dim?

    stack_dim = 2 if interleave_sin_cos else 1  # stack sin, cos, sin, cos  instead of sin sin cos cos
    pos_emb = torch.stack([torch.sin(pos2), torch.cos(pos2)], dim=stack_dim).flatten(1)
    return pos_emb.to(dtype=dtype)
78
+
79
+
80
def build_fourier_pos_embed(
        feat_shape: List[int],
        bands: Optional[torch.Tensor] = None,
        num_bands: int = 64,
        max_res: int = 224,
        temperature: float = 10000.,
        linear_bands: bool = False,
        include_grid: bool = False,
        in_pixels: bool = True,
        ref_feat_shape: Optional[List[int]] = None,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None,
) -> List[torch.Tensor]:
    """Build sin/cos Fourier position features for an n-dim feature grid.

    Args:
        feat_shape: Feature shape for embedding.
        bands: Pre-calculated frequency bands.
        num_bands: Number of frequency bands (determines output dim).
        max_res: Maximum resolution for pixel based freq.
        temperature: Temperature for non-pixel freq.
        linear_bands: Linear band spacing for pixel based freq.
        include_grid: Include the spatial grid in output.
        in_pixels: Output in pixel freq.
        ref_feat_shape: Reference feature shape for resize / fine-tune.
        dtype: Output dtype.
        device: Output device.

    Returns:
        [pos_sin, pos_cos], prefixed by the coordinate grid when include_grid is True.
    """
    if bands is None:
        # no cached bands supplied, generate from the band args
        if in_pixels:
            bands = pixel_freq_bands(
                num_bands,
                float(max_res),
                linear_bands=linear_bands,
                device=device,
            )
        else:
            bands = freq_bands(
                num_bands,
                temperature=temperature,
                step=1,
                device=device,
            )
    else:
        # cached bands supplied; inherit their device/dtype unless explicitly set
        if device is None:
            device = bands.device
        if dtype is None:
            dtype = bands.dtype

    if in_pixels:
        # normalized coordinates in [-1, 1]
        t = [torch.linspace(-1., 1., steps=s, device=device, dtype=torch.float32) for s in feat_shape]
    else:
        # integer position coordinates
        t = [torch.arange(s, device=device, dtype=torch.int64).to(torch.float32) for s in feat_shape]

    if ref_feat_shape is not None:
        # eva's scheme for resizing rope embeddings (ref shape = pretrain)
        t = [x / f * r for x, f, r in zip(t, feat_shape, ref_feat_shape)]

    grid = torch.stack(ndgrid(t), dim=-1)
    grid = grid.unsqueeze(-1)
    # outer product of every grid coordinate with the frequency bands
    pos = grid * bands

    pos_sin, pos_cos = pos.sin().to(dtype=dtype), pos.cos().to(dtype)
    out = [grid, pos_sin, pos_cos] if include_grid else [pos_sin, pos_cos]
    return out
148
+
149
+
150
class FourierEmbed(nn.Module):
    """Concatenate Fourier (sin/cos) position features onto input features.

    Produces either a channels-first map with position features appended to the
    channel dim (keep_spatial=True) or flattened (B, H*W, C + pos) tokens.
    """

    def __init__(
            self,
            max_res: int = 224,
            num_bands: int = 64,
            concat_grid=True,
            keep_spatial=False,
    ):
        """
        Args:
            max_res: Maximum resolution (max_freq) for the pixel frequency bands.
            num_bands: Number of frequency bands.
            concat_grid: Also concatenate the raw coordinate grid.
            keep_spatial: Keep NCHW layout instead of flattening to tokens.
        """
        super().__init__()
        self.max_res = max_res
        self.num_bands = num_bands
        self.concat_grid = concat_grid
        self.keep_spatial = keep_spatial
        # FIX: pixel_freq_bands signature is (num_bands, max_freq); the args were
        # previously passed as (max_res, num_bands), creating max_res bands with
        # max_freq=num_bands instead of num_bands bands with max_freq=max_res.
        self.register_buffer(
            'bands',
            pixel_freq_bands(num_bands, float(max_res)),
            persistent=False,
        )

    def forward(self, x):
        B, C = x.shape[:2]
        feat_shape = x.shape[2:]
        # sin/cos (and optionally grid) features for the spatial shape of x
        emb = build_fourier_pos_embed(
            feat_shape,
            self.bands,
            include_grid=self.concat_grid,
            dtype=x.dtype,
            device=x.device,
        )
        emb = torch.cat(emb, dim=-1)
        emb = emb.transpose(-1, -2).flatten(len(feat_shape))
        batch_expand = (B,) + (-1,) * (x.ndim - 1)

        # FIXME support nD
        if self.keep_spatial:
            # append position features along the channel dim, keep NCHW
            x = torch.cat([x, emb.unsqueeze(0).expand(batch_expand).permute(0, 3, 1, 2)], dim=1)
        else:
            # channels-last concat, then flatten spatial dims to tokens
            x = torch.cat([x.permute(0, 2, 3, 1), emb.unsqueeze(0).expand(batch_expand)], dim=-1)
            x = x.reshape(B, feat_shape.numel(), -1)

        return x
192
+
193
+
194
def rot(x):
    """Rotate channel pairs: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)."""
    evens = x[..., ::2]
    odds = x[..., 1::2]
    return torch.stack([-odds, evens], -1).reshape(x.shape)
196
+
197
+
198
def apply_rot_embed(x: torch.Tensor, sin_emb, cos_emb):
    """Apply a rotary embedding given separate sin/cos tensors.

    3D embeds are treated as per-batch and broadcast over dim 1 of x.
    """
    if sin_emb.ndim == 3:
        sin_b = sin_emb.unsqueeze(1).expand_as(x)
        cos_b = cos_emb.unsqueeze(1).expand_as(x)
        return x * cos_b + rot(x) * sin_b
    return x * cos_emb + rot(x) * sin_emb
202
+
203
+
204
def apply_rot_embed_list(x: List[torch.Tensor], sin_emb, cos_emb):
    """Apply a rotary embedding to each tensor in a list (a lone tensor is wrapped)."""
    tensors = [x] if isinstance(x, torch.Tensor) else x
    return [t * cos_emb + rot(t) * sin_emb for t in tensors]
208
+
209
+
210
def apply_rot_embed_cat(x: torch.Tensor, emb):
    """Apply a rotary embedding whose sin & cos halves are concatenated on the last dim."""
    sin_emb, cos_emb = emb.tensor_split(2, -1)
    if sin_emb.ndim == 3:
        # per-batch embeds: broadcast over dim 1 of x
        sin_b = sin_emb.unsqueeze(1).expand_as(x)
        cos_b = cos_emb.unsqueeze(1).expand_as(x)
        return x * cos_b + rot(x) * sin_b
    return x * cos_emb + rot(x) * sin_emb
215
+
216
+
217
def apply_keep_indices_nlc(x, pos_embed, keep_indices):
    """Gather per-sample position embeddings for kept token indices (NLC layout).

    x is only used for its batch size; keep_indices is (B, K) into pos_embed rows.
    """
    batch_size = x.shape[0]
    embed_dim = pos_embed.shape[-1]
    batched = pos_embed.unsqueeze(0).expand(batch_size, -1, -1)
    gather_idx = keep_indices.unsqueeze(-1).expand(-1, -1, embed_dim)
    return batched.gather(1, gather_idx)
221
+
222
+
223
def build_rotary_pos_embed(
        feat_shape: List[int],
        bands: Optional[torch.Tensor] = None,
        dim: int = 64,
        max_res: int = 224,
        temperature: float = 10000.,
        linear_bands: bool = False,
        in_pixels: bool = True,
        ref_feat_shape: Optional[List[int]] = None,
        dtype: torch.dtype = torch.float32,
        device: Optional[torch.device] = None,
):
    """Build flattened sin/cos tensors for rotary (RoPE) position embedding.

    Args:
        feat_shape: Spatial shape of the target tensor for embedding.
        bands: Optional pre-generated frequency bands
        dim: Output dimension of embedding tensor.
        max_res: Maximum resolution for pixel mode.
        temperature: Temperature (inv freq) for non-pixel mode
        linear_bands: Linearly (instead of log) spaced bands for pixel mode
        in_pixels: Pixel vs language (inv freq) mode.
        ref_feat_shape: Reference feature shape for resize / fine-tune.
        dtype: Output dtype.
        device: Output device.

    Returns:
        (sin_emb, cos_emb), each flattened to (prod(feat_shape), -1) with values
        repeated in adjacent pairs to line up with rot()'s channel pairing.
    """
    sin_emb, cos_emb = build_fourier_pos_embed(
        feat_shape,
        bands=bands,
        num_bands=dim // 4,
        max_res=max_res,
        temperature=temperature,
        linear_bands=linear_bands,
        in_pixels=in_pixels,
        ref_feat_shape=ref_feat_shape,
        device=device,
        dtype=dtype,
    )
    num_spatial_dim = 1
    # this would be much nicer as a .numel() call to torch.Size(), but torchscript sucks
    for x in feat_shape:
        num_spatial_dim *= x
    # repeat_interleave duplicates each band value for the (even, odd) channel pair
    sin_emb = sin_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1)
    cos_emb = cos_emb.reshape(num_spatial_dim, -1).repeat_interleave(2, -1)
    return sin_emb, cos_emb
270
+
271
+
272
class RotaryEmbedding(nn.Module):
    """ Rotary position embedding

    NOTE: This is my initial attempt at impl rotary embedding for spatial use, it has not
    been well tested, and will likely change. It will be moved to its own file.

    The following impl/resources were referenced for this impl:
    * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
    * https://blog.eleuther.ai/rotary-embeddings/
    """

    def __init__(
            self,
            dim,
            max_res=224,
            temperature=10000,
            in_pixels=True,
            linear_bands: bool = False,
            feat_shape: Optional[List[int]] = None,
            ref_feat_shape: Optional[List[int]] = None,
    ):
        """
        Args:
            dim: Embedding dimension (dim // 4 frequency bands are generated).
            max_res: Max resolution (max_freq) for pixel mode bands.
            temperature: Inverse-frequency temperature for non-pixel mode.
            in_pixels: Pixel (normalized coord) vs language (inv freq) mode.
            linear_bands: Linearly spaced bands for pixel mode.
            feat_shape: If given, pre-compute and cache sin/cos for this shape.
            ref_feat_shape: Reference shape for resize / fine-tune rescaling.
        """
        super().__init__()
        self.dim = dim
        self.max_res = max_res
        self.temperature = temperature
        self.in_pixels = in_pixels
        self.feat_shape = feat_shape
        self.ref_feat_shape = ref_feat_shape

        if feat_shape is None:
            # only cache the frequency bands; sin/cos are rebuilt per call
            if in_pixels:
                bands = pixel_freq_bands(
                    dim // 4,
                    float(max_res),
                    linear_bands=linear_bands,
                )
            else:
                bands = freq_bands(
                    dim // 4,
                    temperature=temperature,
                    step=1,
                )
            self.register_buffer(
                'bands',
                bands,
                persistent=False,
            )
            self.pos_embed_sin = None
            self.pos_embed_cos = None
        else:
            # cache full sin/cos embeddings if shape provided up front
            emb_sin, emb_cos = build_rotary_pos_embed(
                feat_shape=feat_shape,
                dim=dim,
                max_res=max_res,
                linear_bands=linear_bands,
                in_pixels=in_pixels,
                ref_feat_shape=self.ref_feat_shape,
            )
            self.bands = None
            self.register_buffer(
                'pos_embed_sin',
                emb_sin,
                persistent=False,
            )
            self.register_buffer(
                'pos_embed_cos',
                emb_cos,
                persistent=False,
            )

    def get_embed(self, shape: Optional[List[int]] = None):
        """Return (sin, cos) embeds for `shape`, rebuilding from cached bands if needed."""
        if self.bands is not None:
            # rebuild embeddings every call, use if target shape changes
            assert shape is not None
            return build_rotary_pos_embed(
                shape,
                self.bands,
                in_pixels=self.in_pixels,
                # FIX: ref_feat_shape was dropped on this rebuild path (unlike
                # RotaryEmbeddingCat.get_embed); the rescale it controls is not
                # captured in the cached bands, so it was silently lost.
                ref_feat_shape=self.ref_feat_shape,
            )
        else:
            return self.pos_embed_sin, self.pos_embed_cos

    def forward(self, x):
        # assuming channel-first tensor where spatial dim are >= 2
        sin_emb, cos_emb = self.get_embed(x.shape[2:])
        return apply_rot_embed(x, sin_emb, cos_emb)
360
+
361
+
362
class RotaryEmbeddingCat(nn.Module):
    """ Rotary position embedding w/ concatenated sin & cos

    The following impl/resources were referenced for this impl:
    * https://github.com/lucidrains/vit-pytorch/blob/6f3a5fcf0bca1c5ec33a35ef48d97213709df4ba/vit_pytorch/rvt.py
    * https://blog.eleuther.ai/rotary-embeddings/
    """

    def __init__(
            self,
            dim,
            max_res=224,
            temperature=10000,
            in_pixels=True,
            linear_bands: bool = False,
            feat_shape: Optional[List[int]] = None,
            ref_feat_shape: Optional[List[int]] = None,
    ):
        """
        Args:
            dim: Embedding dimension (dim // 4 frequency bands are generated).
            max_res: Max resolution (max_freq) for pixel mode bands.
            temperature: Inverse-frequency temperature for non-pixel mode.
            in_pixels: Pixel (normalized coord) vs language (inv freq) mode.
            linear_bands: Linearly spaced bands for pixel mode.
            feat_shape: If given, pre-compute and cache the embedding for this shape.
            ref_feat_shape: Reference shape for resize / fine-tune rescaling.
        """
        super().__init__()
        self.dim = dim
        self.max_res = max_res
        self.temperature = temperature
        self.in_pixels = in_pixels
        self.feat_shape = feat_shape
        self.ref_feat_shape = ref_feat_shape

        if feat_shape is None:
            # only cache the frequency bands; the embedding is rebuilt per call
            if in_pixels:
                bands = pixel_freq_bands(
                    dim // 4,
                    float(max_res),
                    linear_bands=linear_bands,
                )
            else:
                bands = freq_bands(
                    dim // 4,
                    temperature=temperature,
                    step=1,
                )
            self.register_buffer(
                'bands',
                bands,
                persistent=False,
            )
            self.pos_embed = None
        else:
            # cache full sin/cos embeddings if shape provided up front
            embeds = build_rotary_pos_embed(
                feat_shape=feat_shape,
                dim=dim,
                max_res=max_res,
                linear_bands=linear_bands,
                in_pixels=in_pixels,
                ref_feat_shape=self.ref_feat_shape,
            )
            self.bands = None
            # single buffer with sin & cos concatenated on the last dim
            self.register_buffer(
                'pos_embed',
                torch.cat(embeds, -1),
                persistent=False,
            )

    def get_embed(self, shape: Optional[List[int]] = None):
        """Return the concatenated (sin | cos) embed for `shape`, rebuilding from
        cached bands when no pre-computed embedding exists."""
        if self.bands is not None and shape is not None:
            # rebuild embeddings every call, use if target shape changes
            embeds = build_rotary_pos_embed(
                shape,
                self.bands,
                in_pixels=self.in_pixels,
                ref_feat_shape=self.ref_feat_shape,
            )
            return torch.cat(embeds, -1)
        elif self.pos_embed is not None:
            return self.pos_embed
        else:
            assert False, "get_embed() requires pre-computed pos_embed or valid shape w/ pre-computed bands"

    def forward(self, x):
        # assuming channel-first tensor where spatial dim are >= 2
        pos_embed = self.get_embed(x.shape[2:])
        return apply_rot_embed_cat(x, pos_embed)
janus/lib/python3.10/site-packages/timm/layers/split_batchnorm.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Split BatchNorm
2
+
3
+ A PyTorch BatchNorm layer that splits input batch into N equal parts and passes each through
4
+ a separate BN layer. The first split is passed through the parent BN layers with weight/bias
5
+ keys the same as the original BN. All other splits pass through BN sub-layers under the '.aux_bn'
6
+ namespace.
7
+
8
+ This allows easily removing the auxiliary BN layers after training to efficiently
9
+ achieve the 'Auxiliary BatchNorm' as described in the AdvProp Paper, section 4.2,
10
+ 'Disentangled Learning via An Auxiliary BN'
11
+
12
+ Hacked together by / Copyright 2020 Ross Wightman
13
+ """
14
+ import torch
15
+ import torch.nn as nn
16
+
17
+
18
class SplitBatchNorm2d(torch.nn.BatchNorm2d):
    """BatchNorm2d that routes equal batch splits through separate BN layers.

    Split 0 uses this (parent) layer's params/stats; splits 1..num_splits-1 each
    use a dedicated BatchNorm2d under `.aux_bn` (AdvProp 'Auxiliary BN').
    In eval mode all input goes through the parent layer only.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,
                 track_running_stats=True, num_splits=2):
        super().__init__(num_features, eps, momentum, affine, track_running_stats)
        assert num_splits > 1, 'Should have at least one aux BN layer (num_splits at least 2)'
        self.num_splits = num_splits
        self.aux_bn = nn.ModuleList([
            nn.BatchNorm2d(num_features, eps, momentum, affine, track_running_stats)
            for _ in range(num_splits - 1)
        ])

    def forward(self, input: torch.Tensor):
        if not self.training:
            # aux BN only relevant while training
            return super().forward(input)
        split_size = input.shape[0] // self.num_splits
        assert input.shape[0] == split_size * self.num_splits, "batch size must be evenly divisible by num_splits"
        chunks = input.split(split_size)
        outputs = [super().forward(chunks[0])]
        outputs += [bn(chunk) for bn, chunk in zip(self.aux_bn, chunks[1:])]
        return torch.cat(outputs, dim=0)
39
+
40
+
41
def convert_splitbn_model(module, num_splits=2):
    """
    Recursively traverse module and its children to replace all instances of
    ``torch.nn.modules.batchnorm._BatchNorm`` with `SplitBatchnorm2d`.
    Args:
        module (torch.nn.Module): input module
        num_splits: number of separate batchnorm layers to split input across
    Example::
        >>> # model is an instance of torch.nn.Module
        >>> model = timm.models.convert_splitbn_model(model, num_splits=2)
    """
    mod = module
    if isinstance(module, torch.nn.modules.instancenorm._InstanceNorm):
        # InstanceNorm subclasses _BatchNorm; must be excluded before the BN check below
        return module
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        mod = SplitBatchNorm2d(
            module.num_features, module.eps, module.momentum, module.affine,
            module.track_running_stats, num_splits=num_splits)
        # parent split shares (aliases) the original running stats tensors
        mod.running_mean = module.running_mean
        mod.running_var = module.running_var
        mod.num_batches_tracked = module.num_batches_tracked
        if module.affine:
            mod.weight.data = module.weight.data.clone().detach()
            mod.bias.data = module.bias.data.clone().detach()
        for aux in mod.aux_bn:
            # aux splits start from independent copies so they can diverge
            aux.running_mean = module.running_mean.clone()
            aux.running_var = module.running_var.clone()
            aux.num_batches_tracked = module.num_batches_tracked.clone()
            if module.affine:
                aux.weight.data = module.weight.data.clone().detach()
                aux.bias.data = module.bias.data.clone().detach()
    for name, child in module.named_children():
        # rebuild children in place (on either the original or the replacement module)
        mod.add_module(name, convert_splitbn_model(child, num_splits=num_splits))
    del module
    return mod
janus/lib/python3.10/site-packages/timm/layers/squeeze_excite.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Squeeze-and-Excitation Channel Attention
2
+
3
+ An SE implementation originally based on PyTorch SE-Net impl.
4
+ Has since evolved with additional functionality / configuration.
5
+
6
+ Paper: `Squeeze-and-Excitation Networks` - https://arxiv.org/abs/1709.01507
7
+
8
+ Also included is Effective Squeeze-Excitation (ESE).
9
+ Paper: `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
10
+
11
+ Hacked together by / Copyright 2021 Ross Wightman
12
+ """
13
+ from torch import nn as nn
14
+
15
+ from .create_act import create_act_layer
16
+ from .helpers import make_divisible
17
+
18
+
19
class SEModule(nn.Module):
    """ SE Module as defined in original SE-Nets with a few additions
    Additions include:
        * divisor can be specified to keep channels % div == 0 (default: 8)
        * reduction channels can be specified directly by arg (if rd_channels is set)
        * reduction channels can be specified by float rd_ratio (default: 1/16)
        * global max pooling can be added to the squeeze aggregation
        * customizable activation, normalization, and gate layer
    """
    def __init__(
            self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8, add_maxpool=False,
            bias=True, act_layer=nn.ReLU, norm_layer=None, gate_layer='sigmoid'):
        super().__init__()
        self.add_maxpool = add_maxpool
        if not rd_channels:
            # Derive reduction width from the ratio, rounded to the divisor.
            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
        self.fc1 = nn.Conv2d(channels, rd_channels, kernel_size=1, bias=bias)
        self.bn = norm_layer(rd_channels) if norm_layer else nn.Identity()
        self.act = create_act_layer(act_layer, inplace=True)
        self.fc2 = nn.Conv2d(rd_channels, channels, kernel_size=1, bias=bias)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        # Squeeze: global average over spatial dims (optionally blended with max).
        pooled = x.mean((2, 3), keepdim=True)
        if self.add_maxpool:
            # experimental codepath, may remove or change
            pooled = 0.5 * pooled + 0.5 * x.amax((2, 3), keepdim=True)
        # Excite: bottleneck MLP implemented with 1x1 convs.
        attn = self.fc2(self.act(self.bn(self.fc1(pooled))))
        return x * self.gate(attn)


SqueezeExcite = SEModule  # alias
53
+
54
+
55
class EffectiveSEModule(nn.Module):
    """ 'Effective Squeeze-Excitation

    A parameter-light SE variant using a single full-width 1x1 conv (no
    channel reduction), from `CenterMask : Real-Time Anchor-Free Instance
    Segmentation` - https://arxiv.org/abs/1911.06667
    """
    def __init__(self, channels, add_maxpool=False, gate_layer='hard_sigmoid', **_):
        super().__init__()
        self.add_maxpool = add_maxpool
        self.fc = nn.Conv2d(channels, channels, kernel_size=1, padding=0)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        # Squeeze: global average pool, optionally blended with global max pool.
        pooled = x.mean((2, 3), keepdim=True)
        if self.add_maxpool:
            # experimental codepath, may remove or change
            pooled = 0.5 * pooled + 0.5 * x.amax((2, 3), keepdim=True)
        return x * self.gate(self.fc(pooled))


EffectiveSqueezeExcite = EffectiveSEModule  # alias
75
+
76
+
77
class SqueezeExciteCl(nn.Module):
    """ SE Module for channels-last (NHWC) tensors, built on nn.Linear.
    Additions include:
        * divisor can be specified to keep channels % div == 0 (default: 8)
        * reduction channels can be specified directly by arg (if rd_channels is set)
        * reduction channels can be specified by float rd_ratio (default: 1/16)
        * global max pooling can be added to the squeeze aggregation
        * customizable activation, normalization, and gate layer
    """
    def __init__(
            self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8,
            bias=True, act_layer=nn.ReLU, gate_layer='sigmoid'):
        super().__init__()
        if not rd_channels:
            # Derive reduction width from the ratio, rounded to the divisor.
            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
        self.fc1 = nn.Linear(channels, rd_channels, bias=bias)
        self.act = create_act_layer(act_layer, inplace=True)
        self.fc2 = nn.Linear(rd_channels, channels, bias=bias)
        self.gate = create_act_layer(gate_layer)

    def forward(self, x):
        attn = x.mean((1, 2), keepdims=True)  # FIXME avg dim [1:n-1], don't assume 2D NHWC
        attn = self.fc2(self.act(self.fc1(attn)))
        return x * self.gate(attn)
janus/lib/python3.10/site-packages/timm/layers/trace_utils.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Prefer torch's tracing-friendly _assert; fall back to a plain assert wrapper
# on older torch versions that don't export it.
try:
    from torch import _assert
except ImportError:
    def _assert(condition: bool, message: str):
        # Same contract as torch._assert: raise AssertionError(message) when
        # condition is falsy. NOTE: stripped under `python -O`, unlike torch's.
        assert condition, message
6
+
7
+
8
+ def _float_to_int(x: float) -> int:
9
+ """
10
+ Symbolic tracing helper to substitute for inbuilt `int`.
11
+ Hint: Inbuilt `int` can't accept an argument of type `Proxy`
12
+ """
13
+ return int(x)
janus/lib/python3.10/site-packages/timm/optim/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.62 kB). View file
 
janus/lib/python3.10/site-packages/timm/optim/__pycache__/_param_groups.cpython-310.pyc ADDED
Binary file (3.91 kB). View file
 
janus/lib/python3.10/site-packages/timm/optim/__pycache__/adopt.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
janus/lib/python3.10/site-packages/timm/optim/__pycache__/lion.cpython-310.pyc ADDED
Binary file (5.85 kB). View file
 
janus/lib/python3.10/site-packages/timm/optim/__pycache__/lookahead.cpython-310.pyc ADDED
Binary file (2.65 kB). View file
 
janus/lib/python3.10/site-packages/timm/optim/__pycache__/nadam.cpython-310.pyc ADDED
Binary file (3.36 kB). View file
 
janus/lib/python3.10/site-packages/timm/optim/__pycache__/optim_factory.cpython-310.pyc ADDED
Binary file (582 Bytes). View file
 
janus/lib/python3.10/site-packages/timm/optim/__pycache__/radam.cpython-310.pyc ADDED
Binary file (2.97 kB). View file
 
janus/lib/python3.10/site-packages/timm/optim/__pycache__/sgdp.cpython-310.pyc ADDED
Binary file (2.03 kB). View file
 
janus/lib/python3.10/site-packages/timm/optim/__pycache__/sgdw.cpython-310.pyc ADDED
Binary file (6.35 kB). View file