introvoyz041 committed on
Commit
e11c92f
·
verified ·
1 Parent(s): 45ed5f3

Migrated from GitHub

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. data/.pylintrc +55 -0
  2. data/LICENSE +7 -0
  3. data/MANIFEST.in +1 -0
  4. data/MARBLE/__init__.py +6 -0
  5. data/MARBLE/dataloader.py +97 -0
  6. data/MARBLE/default_params.yaml +25 -0
  7. data/MARBLE/dynamics.py +208 -0
  8. data/MARBLE/geometry.py +602 -0
  9. data/MARBLE/layers.py +130 -0
  10. data/MARBLE/lib/__init__.py +1 -0
  11. data/MARBLE/lib/cknn.py +146 -0
  12. data/MARBLE/lib/ptu_dijkstra.pyx +689 -0
  13. data/MARBLE/main.py +469 -0
  14. data/MARBLE/plotting.py +751 -0
  15. data/MARBLE/postprocessing.py +83 -0
  16. data/MARBLE/preprocessing.py +223 -0
  17. data/MARBLE/smoothing.py +63 -0
  18. data/MARBLE/utils.py +277 -0
  19. data/doc/Makefile +26 -0
  20. data/doc/assets/illustration_for_github.png +3 -0
  21. data/doc/index_readme.md +272 -0
  22. data/doc/source/conf.py +42 -0
  23. data/doc/source/dataloader.rst +4 -0
  24. data/doc/source/dynamics.rst +4 -0
  25. data/doc/source/geometry.rst +4 -0
  26. data/doc/source/index.rst +15 -0
  27. data/doc/source/layers.rst +4 -0
  28. data/doc/source/main.rst +4 -0
  29. data/doc/source/plotting.rst +4 -0
  30. data/doc/source/postprocessing.rst +4 -0
  31. data/doc/source/preprocessing.rst +4 -0
  32. data/doc/source/utils.rst +4 -0
  33. data/environment.yml +159 -0
  34. data/environment_osx_arm.yml +111 -0
  35. data/environment_osx_intel.yml +131 -0
  36. data/environment_windows_native.yml +119 -0
  37. data/examples/RNN/RNN.ipynb +0 -0
  38. data/examples/RNN/RNN_scripts/__init__.py +10 -0
  39. data/examples/RNN/RNN_scripts/clustering.py +467 -0
  40. data/examples/RNN/RNN_scripts/dms.py +270 -0
  41. data/examples/RNN/RNN_scripts/helpers.py +507 -0
  42. data/examples/RNN/RNN_scripts/modules.py +1273 -0
  43. data/examples/RNN/RNN_scripts/ranktwo.py +538 -0
  44. data/examples/macaque_reaching/convert_spikes_to_firing_rates.py +150 -0
  45. data/examples/macaque_reaching/iframe_figures/figure_54.html +0 -0
  46. data/examples/macaque_reaching/kinematic_decoding.ipynb +657 -0
  47. data/examples/macaque_reaching/macaque_reaching_helpers.py +218 -0
  48. data/examples/macaque_reaching/plot_MARBLE_representations.ipynb +390 -0
  49. data/examples/macaque_reaching/plot_vector_fields.ipynb +177 -0
  50. data/examples/macaque_reaching/run_cebra.py +112 -0
data/.pylintrc ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## look at http://docutils.sourceforge.net/sandbox/py-rest-doc/utils/pylintrc
2
+ # for some of the options that are available
3
+
4
+ [MESSAGES CONTROL]
5
+ disable=C0103,R0904,R0903,W0511,R0801,R0401,I0013,W0622,C0325,R0205,W1201,W0621,R0913,R0914,C0415,W0719,R0917
6
+
7
+ [FORMAT]
8
+ # Maximum number of characters on a single line.
9
+ max-line-length=100
10
+
11
+ [DESIGN]
12
+ # Maximum number of arguments for function / method
13
+ max-args=15
14
+ # Argument names that match this expression will be ignored. Default to name
15
+ # with leading underscore
16
+ ignored-argument-names=_.*
17
+ # Maximum number of locals for function / method body
18
+ max-locals=15
19
+ # Maximum number of return / yield for function / method body
20
+ max-returns=6
21
+ # Maximum number of branch for function / method body
22
+ max-branches=30
23
+ # Maximum number of statements in function / method body
24
+ max-statements=65
25
+ # Maximum number of parents for a class (see R0901).
26
+ max-parents=7
27
+ # Maximum number of attributes for a class (see R0902).
28
+ max-attributes=40
29
+ # Minimum number of public methods for a class (see R0903).
30
+ min-public-methods=2
31
+ # Maximum number of public methods for a class (see R0904).
32
+ max-public-methods=60
33
+ # checks for similarities and duplicated code. This computation may be
34
+ # memory / CPU intensive, so you should disable it if you experiments some
35
+ # problems.
36
+ #
37
+
38
+ [SIMILARITIES]
39
+ # Minimum lines number of a similarity.
40
+ min-similarity-lines=25
41
+ # Ignore comments when computing similarities.
42
+ ignore-comments=yes
43
+ # Ignore docstrings when computing similarities.
44
+ ignore-docstrings=yes
45
+
46
+ [TYPECHECK]
47
+ # List of classes names for which member attributes should not be checked
48
+ # (useful for classes with attributes dynamically set).
49
+ #ignored-classes=foo.bar
50
+
51
+ # List of module names for which member attributes should not be checked
52
+ # (useful for modules/projects where namespaces are manipulated during runtime
53
+ # and thus existing member attributes cannot be deduced by static analysis. It
54
+ # supports qualified module names, as well as Unix pattern matching.
55
+ ignored-modules=torch,torch_geometric,torch_scatter,torch_cluster,torch_sparse,ptu_dijkstra,scipy.spatial,matplotlib,Cython
data/LICENSE ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ Copyright 2023 Adam Gosztolai, EPFL
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4
+
5
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
+
7
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/MANIFEST.in ADDED
@@ -0,0 +1 @@
 
 
1
+ include MARBLE/default_params.yaml
data/MARBLE/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ """MARBLE main functions."""
2
+
3
+ from MARBLE.main import net
4
+ from MARBLE.postprocessing import distribution_distances
5
+ from MARBLE.postprocessing import embed_in_2D
6
+ from MARBLE.preprocessing import construct_dataset
data/MARBLE/dataloader.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Data loader module."""
2
+
3
+ import torch
4
+ from torch_cluster import random_walk
5
+ from torch_geometric.loader import NeighborSampler as NS
6
+
7
+
8
def loaders(data, par):
    """Build train/validation/test neighbour-sampling loaders.

    Args:
        data: PyG data object with edge_index, num_nodes and boolean
            train/val/test node masks
        par: parameter dict with 'n_sampled_nb', 'order' and 'batch_size'

    Returns:
        (train_loader, val_loader, test_loader) tuple
    """
    # one sampling size per hop, up to the derivative order
    sizes = [par["n_sampled_nb"]] * par["order"]

    def _make(node_mask, shuffle):
        # all three loaders share everything except the node mask and shuffling
        return NeighborSampler(
            data.edge_index,
            sizes=sizes,
            batch_size=par["batch_size"],
            shuffle=shuffle,
            num_nodes=data.num_nodes,
            node_idx=node_mask,
        )

    return (
        _make(data.train_mask, True),
        _make(data.val_mask, False),
        _make(data.test_mask, False),
    )
40
+
41
+
42
class NeighborSampler(NS):
    """Neighbour sampler that augments each batch for contrastive training."""

    def sample(self, batch):
        """Sample a batch plus one positive and one negative node per anchor.

        For each node in `batch`, a direct neighbour (positive sample) is drawn
        via a one-step random walk, and a uniformly random node serves as the
        negative sample.
        """
        row, col, _ = self.adj_t.coo()

        anchors = torch.tensor(batch)
        # positive: the node reached by a length-1 random walk from each anchor
        positives = random_walk(row, col, anchors, walk_length=1, coalesced=False)[:, 1]
        # negative: a uniformly random node index
        negatives = torch.randint(0, self.adj_t.size(1), (anchors.numel(),))

        return super().sample(torch.cat([anchors, positives, negatives], dim=0))
57
+
58
+
59
+ # =============================================================================
60
+ # below is an alternative implementation, not working yet
61
+ # =============================================================================
62
+
63
+ # from torch_geometric.loader import LinkNeighborLoader
64
+ # from torch_geometric.utils import subgraph
65
+
66
+ # def loaders(data, par):
67
+
68
+ # nb = [par['n_sampled_nb'] for i in range(max(par['order'], par['depth']))]
69
+
70
+ # train_loader = LinkNeighborLoader(
71
+ # data,
72
+ # num_neighbors=nb,
73
+ # shuffle=True,
74
+ # batch_size=par['batch_size'],
75
+ # edge_label_index=subgraph(data.train_mask, data.edge_index)[0],
76
+ # neg_sampling_ratio=1
77
+ # )
78
+
79
+ # val_loader = LinkNeighborLoader(
80
+ # data,
81
+ # num_neighbors=nb,
82
+ # shuffle=False,
83
+ # batch_size=par['batch_size'],
84
+ # edge_label_index=subgraph(data.val_mask, data.edge_index)[0],
85
+ # neg_sampling_ratio=1
86
+ # )
87
+
88
+ # test_loader = LinkNeighborLoader(
89
+ # data,
90
+ # num_neighbors=nb,
91
+ # shuffle=False,
92
+ # batch_size=par['batch_size'],
93
+ # edge_label_index=subgraph(data.test_mask, data.edge_index)[0],
94
+ # neg_sampling_ratio=1
95
+ # )
96
+
97
+ # return train_loader, val_loader, test_loader
data/MARBLE/default_params.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #training parameters
2
+ epochs : 100 # optimisation epochs
3
+ batch_size : 64 # batch size
4
+ lr: 0.01 # learning rate
5
+ momentum: 0.9
6
+ dropout: 0. # dropout in the MLP
7
+ batch_norm: True # batch normalisation
8
+ hidden_channels: [32] # number of hidden channels
9
+ bias: True # learn bias parameters in MLP
10
+
11
+ #manifold/signal parameters
12
+ order: 2 # order to which to compute the directional derivatives
13
+ inner_product_features: False
14
+ diffusion: True
15
+ frac_sampled_nb: -1 # fraction of neighbours to sample for gradient computation (if -1 then all neighbours)
16
+ include_positions: False # include positions as features
17
+ include_self: True # include vector at the center of feature
18
+
19
+ # embedding parameters
20
+ out_channels: 3 # number of output channels (if null, then =hidden_channels)
21
+ vec_norm: False # normalise features at each order of derivatives
22
+ emb_norm: False # spherical output
23
+
24
+ # other params
25
+ seed: 0 # seed for reproducibility
data/MARBLE/dynamics.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Dynamics module, adapted from DE_library.
2
+
3
+ TODO: clean this up
4
+ """
5
+
6
+ import sys
7
+
8
+ import numpy as np
9
+ from scipy.integrate import ode
10
+ from scipy.integrate import odeint
11
+
12
+
13
def fun_vanderpol(par=None):
    """Van der Pol oscillator exhibiting a degenerate Hopf bifurcation.

    Args:
        par: parameter dict with key 'mu'; defaults to {'mu': 1.0}

    Returns:
        f: right-hand side f(t, X)
        jac: Jacobian jac(t, X)
    """
    if par is None:
        par = {"mu": 1.0}

    def f(_, state):
        u, w = state
        # u' = w ; w' = mu (1 - u^2) w - u
        return [w, par["mu"] * (1 - u**2) * w - u]

    def jac(_, state):
        u, w = state
        return [
            [0.0, 1.0],
            [-2.0 * par["mu"] * u * w - 1.0, -par["mu"] * u**2],
        ]

    return f, jac
33
+
34
+
35
def load_ODE(whichmodel, par=None):
    """Look up an ODE system by name.

    Args:
        whichmodel (str): system name; resolves to fun_<whichmodel> in this module
        par (dict, optional): parameters forwarded to the factory. Default None.

    Returns:
        f (Callable): ODE right-hand side
        jac (Callable): Jacobian
    """
    factory = getattr(sys.modules[__name__], f"fun_{whichmodel}")
    return factory() if par is None else factory(par)
52
+
53
+
54
def solve_ODE(f, jac, t, x0, solver="standard"):
    """Integrate an ODE system and return the trajectory and its derivative.

    Args:
        f (Callable): right-hand side f(t, x)
        jac (Callable): Jacobian jac(t, x)
        t (array-like): time points; t[0] is the initial time
        x0 (array-like): initial condition
        solver: 'standard' (scipy.integrate.odeint) or 'zvode'
            (complex-capable BDF integrator; imaginary parts are discarded)

    Returns:
        x (len(t) x dim array): solution
        xprime (len(t) x dim array): time derivative of the solution

    Raises:
        ValueError: for an unrecognised solver (previously this fell through
            and crashed with UnboundLocalError at the return statement)
    """
    if solver == "standard":
        x = odeint(f, x0, t, Dfun=jac, tfirst=True)
        # odeint does not return derivatives; evaluate f along the solution
        xprime = [f(t_, x_) for t_, x_ in zip(t, x)]

    elif solver == "zvode":
        r = ode(f, jac)
        r.set_integrator("zvode", method="bdf")
        # r.set_integrator('dopri5')
        r.set_initial_value(x0, t[0])

        # Run ODE integrator step by step, keeping real parts only
        x = [x0]
        xprime = [f(0.0, x0)]
        for _t in t[1:]:
            r.integrate(_t)
            x.append(np.real(r.y))
            xprime.append(f(r.t, np.real(r.y)))

    else:
        raise ValueError(f"Unknown solver '{solver}'; expected 'standard' or 'zvode'.")

    return np.vstack(x), np.vstack(xprime)
76
+
77
+
78
def addnoise(X, **noise_pars):
    """Add noise to trajectories (in place).

    Args:
        X (numpy array): trajectories; modified in place and also returned
        noise_pars: keyword noise parameters. Currently supported:
            noise='Gaussian' with mu (mean) and sigma (standard deviation)

    Returns:
        X (len(t)xlen(X0) numpy array): trajectory with noise added; returned
        unchanged when the noise type is absent or unrecognised
    """
    # .get avoids a KeyError when no 'noise' key is supplied (previously crashed)
    if noise_pars.get("noise") == "Gaussian":
        mu = noise_pars["mu"]
        sigma = noise_pars["sigma"]
        X += np.random.normal(mu, sigma, size=X.shape)

    return X
94
+
95
+
96
def simulate_ODE(whichmodel, t, X0, par=None, **noise_pars):
    """Load an ODE system by name, integrate it and optionally add noise.

    Args:
        whichmodel (str): ODE system name (resolved by load_ODE)
        t (array or list): time steps to evaluate the system at
        X0 (array or list): initial condition, matching the system's dimension
        par (dict, optional): parameters. Default None.
        noise_pars: keyword noise parameters forwarded to addnoise

    Returns:
        X: solution
        Xprime: time derivative of the solution
    """
    rhs, jacobian = load_ODE(whichmodel, par=par)
    X, Xprime = solve_ODE(rhs, jacobian, t, X0)

    # only perturb the solution when noise parameters were actually given
    return (addnoise(X, **noise_pars), Xprime) if noise_pars else (X, Xprime)
116
+
117
+
118
def simulate_trajectories(whichmodel, X0_range, t=1, par=None, **noise_pars):
    """Integrate the same ODE system from several initial conditions.

    Args:
        whichmodel (str): ODE system name (resolved by load_ODE)
        X0_range (list(list)): list of initial conditions
        t (array or list): time steps to evaluate the system at
        par (dict, optional): parameters. Default None.
        noise_pars: keyword noise parameters forwarded to simulate_ODE

    Returns:
        X_list (list): solutions, one per initial condition
        Xprime_list (list): time derivatives, one per initial condition
    """
    runs = [simulate_ODE(whichmodel, t, X0, par=par, **noise_pars) for X0 in X0_range]
    if not runs:
        return [], []
    X_list, Xprime_list = map(list, zip(*runs))
    return X_list, Xprime_list
140
+
141
+
142
def reject_outliers(*args, min_v=-5, max_v=5):
    """Indices of rows lying strictly inside (min_v, max_v) in every array.

    Args:
        *args: 2D arrays with the same number of rows
        min_v: lower bound (exclusive)
        max_v: upper bound (exclusive)

    Returns:
        list of row indices that are inliers in all input arrays
    """
    inlier_sets = [
        set(np.where((arr > min_v).all(1) & (arr < max_v).all(1))[0]) for arr in args
    ]
    return list(set.intersection(*inlier_sets))
149
+
150
+
151
def parabola(X, Y, alpha=0.05):
    """Lift 2D points onto the paraboloid z = -(alpha*x)^2 - (alpha*y)^2."""
    xs, ys = X.flatten(), Y.flatten()
    zs = -((alpha * xs) ** 2) - (alpha * ys) ** 2
    return np.column_stack([xs, ys, zs])
156
+
157
+
158
def embed_parabola(pos, vel, alpha=0.05):
    """Embed planar positions/velocities on a parabola (lists updated in place)."""
    for idx, (p, v) in enumerate(zip(pos, vel)):
        tip = p + v  # planar endpoint of each vector
        lifted_tip = parabola(tip[:, 0], tip[:, 1], alpha=alpha)
        pos[idx] = parabola(p[:, 0], p[:, 1], alpha=alpha)
        # velocity on the surface = lifted endpoint minus lifted base point
        vel[idx] = lifted_tip - pos[idx]
    return pos, vel
166
+
167
+
168
def sample_2d(N=100, interval=None, method="uniform", seed=0):
    """Sample points in a 2D rectangle.

    Args:
        N: requested number of points. For method='uniform' a
            sqrt(N) x sqrt(N) grid is built, so fewer than N points are
            returned when N is not a perfect square.
        interval: [[xmin, ymin], [xmax, ymax]] bounds; defaults to the unit
            square [[-1, -1], [1, 1]]
        method: 'uniform' (regular grid) or 'random' (seeded uniform sampling)
        seed: RNG seed used by method='random'

    Returns:
        (n x 2) array of points

    Raises:
        ValueError: for an unknown method (previously this fell through and
            crashed with UnboundLocalError at the return statement)
    """
    if interval is None:
        interval = [[-1, -1], [1, 1]]
    (xmin, ymin), (xmax, ymax) = interval

    if method == "uniform":
        side = int(np.sqrt(N))
        gx, gy = np.meshgrid(np.linspace(xmin, xmax, side), np.linspace(ymin, ymax, side))
        x = np.vstack((gx.flatten(), gy.flatten())).T

    elif method == "random":
        np.random.seed(seed)
        x = np.random.uniform((xmin, ymin), (xmax, ymax), (N, 2))

    else:
        raise ValueError(f"Unknown sampling method '{method}'.")

    return x
185
+
186
+
187
def initial_conditions(n, reps, area=None, seed=0):
    """Generate `reps` sets of n random initial conditions inside a 2D area."""
    if area is None:
        area = [[-3, -3], [3, 3]]
    # distinct seed per repetition keeps the sets different but reproducible
    return [sample_2d(n, area, "random", seed=seed + rep) for rep in range(reps)]
194
+
195
+
196
def simulate_vanderpol(mu, X0, t, keep_v=False):
    """Simulate the Van der Pol system and discard diverging samples.

    Args:
        mu: Van der Pol parameter
        X0: list of initial conditions
        t: time steps
        keep_v: if True, outliers are detected on positions only

    Returns:
        pos, vel: filtered positions and velocities, one entry per trajectory
    """
    all_pos, all_vel = simulate_trajectories("vanderpol", X0, t, par={"mu": mu})
    pos, vel = [], []
    for p_, v_ in zip(all_pos, all_vel):
        inliers = reject_outliers(p_) if keep_v else reject_outliers(p_, v_)
        pos.append(p_[inliers])
        vel.append(v_[inliers])
    return pos, vel
data/MARBLE/geometry.py ADDED
@@ -0,0 +1,602 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Geometry module."""
2
+
3
+ import numpy as np
4
+ import ot
5
+ import scipy.sparse as sp
6
+ import torch
7
+ import torch_geometric.utils as PyGu
8
+ import umap
9
+ from sklearn.cluster import KMeans
10
+ from sklearn.cluster import MeanShift
11
+ from sklearn.decomposition import PCA
12
+ from sklearn.manifold import MDS
13
+ from sklearn.manifold import TSNE
14
+ from sklearn.manifold import Isomap
15
+ from sklearn.metrics import pairwise_distances
16
+ from sklearn.preprocessing import StandardScaler
17
+ from torch_geometric.nn import knn_graph
18
+ from torch_geometric.nn import radius_graph
19
+ from torch_scatter import scatter_add
20
+
21
+ from ptu_dijkstra import connections, tangent_frames # isort:skip
22
+
23
+ from MARBLE.lib.cknn import cknneighbors_graph # isort:skip
24
+ from MARBLE import utils # isort:skip
25
+
26
+
27
def furthest_point_sampling(x, N=None, spacing=0.0, start_idx=0):
    """A greedy O(N^2) algorithm to do furthest points sampling

    Args:
        x (nxdim matrix): input data
        N (int): number of sampled points; if None, sampling stops via the
            spacing criterion below
        spacing: when N is None, stop sampling once the next furthest point is
            closer than this fraction of the total manifold diameter
        start_idx: index of starting node

    Returns:
        perm: node indices of the N sampled points
        lambdas: list of distances of furthest points
    """
    if spacing == 0.0:
        # no subsampling requested: keep every point
        return torch.arange(len(x)), None

    D = utils.np2torch(pairwise_distances(x))
    n = D.shape[0] if N is None else N
    diam = D.max()

    perm = torch.zeros(n, dtype=torch.int64)
    perm[0] = start_idx
    lambdas = torch.zeros(n)
    # ds[j] = distance of point j to the current sampled set
    ds = D[start_idx, :].flatten()
    for i in range(1, n):
        idx = torch.argmax(ds)  # furthest remaining point
        perm[i] = idx
        lambdas[i] = ds[idx]
        ds = torch.minimum(ds, D[idx, :])

        if N is None:
            # stop when new points fall within `spacing` x diameter of the set
            if lambdas[i] / diam < spacing:
                perm = perm[:i]
                lambdas = lambdas[:i]
                break

    assert len(perm) == len(np.unique(perm)), "Returned duplicated points"

    return perm, lambdas
66
+
67
+
68
def cluster(x, cluster_typ="kmeans", n_clusters=15, seed=0):
    """Cluster data.

    Args:
        x (nxdim matrix): data
        cluster_typ: 'kmeans' or 'meanshift'. For 'meanshift', n_clusters is
            used as the bandwidth and the cluster count is data-driven.
        n_clusters: number of clusters (kmeans) or bandwidth (meanshift)
        seed: random seed (kmeans only)

    Returns:
        dict with keys 'n_clusters', 'labels' and 'centroids'
    """
    if cluster_typ == "kmeans":
        fitted = KMeans(n_clusters=n_clusters, random_state=seed).fit(x)
        return {
            "n_clusters": n_clusters,
            "labels": fitted.labels_,
            "centroids": fitted.cluster_centers_,
        }

    if cluster_typ == "meanshift":
        fitted = MeanShift(bandwidth=n_clusters).fit(x)
        return {
            "n_clusters": len(set(fitted.labels_)),
            "labels": fitted.labels_,
            "centroids": fitted.cluster_centers_,
        }

    raise NotImplementedError
95
+
96
+
97
def embed(x, embed_typ="umap", dim_emb=2, manifold=None, verbose=True, seed=0, **kwargs):
    """Embed data into a lower-dimensional space.

    Args:
        x (nxdim matrix): data. For 'MDS' this must be a precomputed
            dissimilarity matrix (dissimilarity="precomputed" below).
        embed_typ: embedding method: 'tsne', 'umap', 'MDS', 'PCA' or 'Isomap'
        dim_emb: embedding dimension (t-SNE here always produces 2D output)
        manifold: previously fitted manifold to reuse ('umap', 'PCA', 'Isomap'
            only; 'tsne' and 'MDS' raise if one is supplied)
        verbose: print a summary message on success
        seed: random seed
        **kwargs: forwarded to umap.UMAP

    Returns:
        emb (nxdim_emb matrix): embedded data
        manifold: fitted manifold object, or None when the method cannot be
            reused or no embedding was performed
    """
    if x.shape[1] <= 2:
        # nothing to do: the input is already 2D (or lower)
        print(
            f"\n No {embed_typ} embedding performed. Embedding seems to be \
            already in 2D."
        )
        return x, None

    if embed_typ == "tsne":
        x = StandardScaler().fit_transform(x)
        if manifold is not None:
            raise Exception("t-SNE cannot fit on existing manifold")

        emb = TSNE(init="random", learning_rate="auto", random_state=seed).fit_transform(x)

    elif embed_typ == "umap":
        x = StandardScaler().fit_transform(x)
        if manifold is None:
            manifold = umap.UMAP(n_components=dim_emb, random_state=seed, **kwargs).fit(x)

        emb = manifold.transform(x)

    elif embed_typ == "MDS":
        if manifold is not None:
            raise Exception("MDS cannot fit on existing manifold")

        emb = MDS(
            n_components=dim_emb, n_init=20, dissimilarity="precomputed", random_state=seed
        ).fit_transform(x)

    elif embed_typ == "PCA":
        if manifold is None:
            manifold = PCA(n_components=dim_emb).fit(x)

        emb = manifold.transform(x)

    elif embed_typ == "Isomap":
        # neighbourhood radius heuristic: 10% of the spread of pairwise distances
        radius = pairwise_distances(x)
        radius = 0.1 * (radius.max() - radius.min())
        if manifold is None:
            manifold = Isomap(n_components=dim_emb, n_neighbors=None, radius=radius).fit(x)

        emb = manifold.transform(x)

    else:
        raise NotImplementedError

    if verbose:
        print(f"Performed {embed_typ} embedding on embedded results.")

    return emb, manifold
157
+
158
+
159
def relabel_by_proximity(clusters):
    """Relabel clusters so that nearby centroids get similar label ids.

    Args:
        clusters: dict containing 'centroids', 'n_clusters', 'labels'

    Returns:
        the same dict with 'labels' and 'centroids' permuted accordingly
    """
    n = clusters["n_clusters"]
    dists = pairwise_distances(clusters["centroids"], metric="euclidean")
    dists += np.max(dists) * np.eye(n)  # mask self-distances out of the argmin

    # greedy nearest-neighbour walk over centroids defines the new label order
    mapping = {}
    current = 0
    for new_label in range(n):
        nearest = np.argmin(dists[current, :])
        while nearest in mapping:
            # already assigned: penalise this candidate and look again
            dists[current, nearest] += np.max(dists)
            nearest = np.argmin(dists[current, :])
        mapping[nearest] = new_label
        current = nearest

    old_labels = clusters["labels"]
    clusters["labels"] = np.array([mapping[label] for label in old_labels])
    clusters["centroids"] = clusters["centroids"][list(mapping.keys())]

    return clusters
186
+
187
+
188
def compute_distribution_distances(clusters=None, data=None, slices=None):
    """Compute optimal-transport distances between distributions across datasets.

    Exactly one of `clusters` or `data` must be provided.

    Args:
        clusters: dict with 'centroids', 'n_clusters', 'labels'; measures are
            histograms over cluster centroids
        data: object with an `emb` attribute; measures are uniform empirical
            measures on the embedded points
        slices: index boundaries so that dataset i occupies rows
            slices[i]:slices[i+1]

    Returns:
        dist: (n_datasets x n_datasets) symmetric matrix of transport distances
        gamma: optimal transport plans (only when `clusters` is given, else None)

    Raises:
        Exception: if neither clusters nor data is provided
    """
    s = slices
    pdists, cdists = None, None
    if clusters is not None:
        # compute discrete measures supported on cluster centroids
        labels = clusters["labels"]
        # labels shifted by +1 so the histogram below compares against i + 1
        labels = [labels[s[i] : s[i + 1]] + 1 for i in range(len(s) - 1)]
        nc, nl = clusters["n_clusters"], len(labels)
        bins_dataset = []
        for l_ in labels:  # loop over datasets
            bins = [(l_ == i + 1).sum() for i in range(nc)]  # loop over clusters
            bins = np.array(bins)
            bins_dataset.append(bins / bins.sum())  # normalised histogram

        # transport cost = distances between cluster centroids
        cdists = pairwise_distances(clusters["centroids"])
        gamma = np.zeros([nl, nl, nc, nc])

    elif data is not None:
        # compute empirical measures from datapoints (uniform weights)
        nl = len(s) - 1

        bins_dataset = []
        for i in range(nl):
            mu = np.ones(s[i + 1] - s[i])
            mu /= len(mu)
            bins_dataset.append(mu)

        pdists = pairwise_distances(data.emb)
    else:
        raise Exception("No input provided.")

    # compute distance between measures, pairwise over datasets
    dist = np.zeros([nl, nl])
    for i in range(nl):
        for j in range(i + 1, nl):
            mu, nu = bins_dataset[i], bins_dataset[j]

            if data is not None and pdists is not None:
                # cost block = distances between points of datasets i and j
                cdists = pdists[s[i] : s[i + 1], s[j] : s[j + 1]]

            dist[i, j] = ot.emd2(mu, nu, cdists)
            dist[j, i] = dist[i, j]

            if clusters is not None:
                gamma[i, j, ...] = ot.emd(mu, nu, cdists)
                gamma[j, i, ...] = gamma[i, j, ...]
            else:
                gamma = None

    return dist, gamma
248
+
249
+
250
def neighbour_vectors(pos, edge_index):
    """Local out-going edge vectors around each node.

    Args:
        pos (nxdim matrix): node positions
        edge_index (2xE matrix): edge indices

    Returns:
        (Exdim matrix): edge vectors pos[target] - pos[source]
    """
    source, target = edge_index[0], edge_index[1]
    return pos[target] - pos[source]
265
+
266
+
267
def project_gauge_to_neighbours(nvec, gauges, edge_index):
    """Project edge vectors onto the gauge directions of their source nodes.

    Args:
        nvec (Exdim matrix): neighbourhood (edge) vectors
        gauges (nxdimxdim tensor): frame vectors per node
        edge_index (2xE matrix): edge indices

    Returns:
        list of d sparse (nxn) CSR matrices, one per gauge direction
    """
    n_nodes, _, n_dirs = gauges.shape
    source = edge_index[0]

    # components[b, c] = <nvec[b], c-th gauge vector at the source of edge b>
    components = torch.einsum("bi,bic->bc", nvec, gauges[source])

    return [
        sp.coo_matrix((components[:, k], (edge_index)), [n_nodes, n_nodes]).tocsr()
        for k in range(n_dirs)
    ]
284
+
285
+
286
def gradient_op(pos, edge_index, gauges):
    """Directional derivative kernel from Beaini et al. 2021.

    Args:
        pos (nxdim Matrix): node positions
        edge_index (2x|E| matrix): edge indices
        gauges (nxdimxdim tensor): orthonormal unit vectors per node

    Returns:
        list of d sparse (nxn) anisotropic kernels as torch sparse tensors,
        one per gauge direction
    """
    nvec = neighbour_vectors(pos, edge_index)
    F = project_gauge_to_neighbours(nvec, gauges, edge_index)

    K = []
    for _F in F:
        # per-row L1 norm of the CSR data, expanded back to entry length
        norm = np.repeat(np.add.reduceat(np.abs(_F.data), _F.indptr[:-1]), np.diff(_F.indptr))
        _F.data /= norm  # row-normalise in place
        # subtract row sums on the diagonal so every kernel row sums to zero
        _F -= sp.diags(np.array(_F.sum(1)).flatten())
        _F = _F.tocoo()
        K.append(torch.sparse_coo_tensor(np.vstack([_F.row, _F.col]), _F.data.data))

    return K
309
+
310
+
311
def normalize_sparse_matrix(sp_tensor):
    """Row-normalise a sparse matrix so each non-empty row sums to one."""
    totals = sp_tensor.sum(axis=1)
    # replace zero row sums by one so empty rows stay zero (no divide-by-zero)
    totals[totals == 0] = 1
    return sp_tensor.multiply(1.0 / totals)
318
+
319
+
320
def global_to_local_frame(x, gauges, length_correction=False, reverse=False):
    """Express a vector signal in the local frames (or map it back).

    Args:
        x (nxdim matrix): vector signal
        gauges (nxdimxdim tensor): local frames
        length_correction: rescale the projection to preserve vector norms
        reverse: if True, map local coordinates back to the global frame

    Returns:
        projected signal (nxdim matrix)
    """
    equation = "bji,bi->bj" if reverse else "bij,bi->bj"
    proj = torch.einsum(equation, gauges, x)

    if length_correction:
        norm_x = x.norm(p=2, dim=1, keepdim=True)
        norm_proj = proj.norm(p=2, dim=1, keepdim=True)
        proj = proj / norm_proj * norm_x

    return proj
334
+
335
+
336
def project_to_gauges(x, gauges, dim=2):
    """Project vectors onto the span of the first `dim` gauge directions."""
    coefficients = torch.einsum("bij,bi->bj", gauges, x)[:, :dim]
    return torch.einsum("bj,bij->bi", coefficients, gauges[:, :, :dim])
340
+
341
+
342
def manifold_dimension(Sigma, frac_explained=0.9):
    """Estimate the manifold dimension from singular values.

    Args:
        Sigma (n x dim tensor): singular values per node
        frac_explained: fraction of variance that must be explained

    Returns:
        int: smallest dimension d whose worst-case explained variance
        (mean - std across nodes of the cumulative variance) reaches
        frac_explained
    """
    if frac_explained == 1.0:
        return Sigma.shape[1]

    # work on a copy: the previous implementation squared and normalised
    # Sigma in place, silently mutating the caller's tensor
    Sigma = Sigma.square()
    Sigma = Sigma / Sigma.sum(1, keepdim=True)
    Sigma = Sigma.cumsum(dim=1)
    var_exp = Sigma.mean(0) - Sigma.std(0)
    dim_man = torch.where(var_exp >= frac_explained)[0][0] + 1

    print("\nFraction of variance explained: ", var_exp.tolist())

    return int(dim_man)
357
+
358
+
359
def fit_graph(x, graph_type="cknn", par=1, delta=1.0, metric="euclidean"):
    """Fit a proximity graph to node positions.

    Args:
        x (nxdim tensor): node positions
        graph_type: 'cknn' (continuous kNN), 'knn' or 'radius'
        par: number of neighbours ('cknn'/'knn') or radius ('radius')
        delta: cknn delta parameter
        metric: distance metric used by cknn

    Returns:
        edge_index (2x|E| tensor): undirected edge indices
        edge_weight (|E| tensor): inverse-distance edge weights
    """

    if graph_type == "cknn":
        edge_index = cknneighbors_graph(x, n_neighbors=par, delta=delta, metric=metric).tocoo()
        edge_index = np.vstack([edge_index.row, edge_index.col])
        edge_index = utils.np2torch(edge_index, dtype="double")

    elif graph_type == "knn":
        edge_index = knn_graph(x, k=par)
        edge_index = PyGu.add_self_loops(edge_index)[0]

    elif graph_type == "radius":
        edge_index = radius_graph(x, r=par)
        edge_index = PyGu.add_self_loops(edge_index)[0]

    else:
        raise NotImplementedError

    assert is_connected(edge_index), "Graph is not connected! Try increasing k."

    edge_index = PyGu.to_undirected(edge_index)
    pdist = torch.nn.PairwiseDistance(p=2)
    edge_weight = pdist(x[edge_index[0]], x[edge_index[1]])
    # NOTE(review): self-loops (added above for 'knn'/'radius') have zero
    # length, so 1/edge_weight yields inf for them — confirm downstream handling
    edge_weight = 1 / edge_weight

    return edge_index, edge_weight
386
+
387
+
388
def is_connected(edge_index):
    """Heuristic connectivity check: every referenced node has in-degree > 1.

    NOTE(review): this is a necessary condition only — a graph whose nodes all
    have degree > 1 can still be disconnected.
    """
    ones = torch.ones(edge_index.shape[1])
    degree = torch.sparse.sum(torch.sparse_coo_tensor(edge_index, ones), 0).values()
    return (degree > 1).all()
394
+
395
+
396
def compute_laplacian(data, normalization="rw"):
    """Graph Laplacian of `data` as a coalesced torch sparse tensor.

    Args:
        data: PyG data object with edge_index, edge_weight and num_nodes
        normalization: normalisation mode passed to PyG (e.g. None, 'rw')
    """
    indices, values = PyGu.get_laplacian(
        data.edge_index,
        edge_weight=data.edge_weight,
        normalization=normalization,
        num_nodes=data.num_nodes,
    )
    return torch.sparse_coo_tensor(indices, values).coalesce()
407
+
408
+
409
def compute_connection_laplacian(data, R, normalization="rw"):
    r"""Connection Laplacian

    Args:
        data: Pytorch geometric data object.
        R: connection matrices between all pairs of nodes; the code treats R
            as an (n*d)x(n*d) sparse block tensor of dxd blocks (the earlier
            nxnxdxd description did not match `R.size()[0] // n` below).
        normalization: None, 'rw'
            1. None: No normalization
            :math:`\mathbf{L} = \mathbf{D} - \mathbf{A}`

            2. "rw"`: Random-walk normalization
            :math:`\mathbf{L} = \mathbf{I} - \mathbf{D}^{-1} \mathbf{A}`

    Returns:
        ndxnd normalised connection Laplacian matrix.
    """
    n = data.x.shape[0]
    d = R.size()[0] // n  # block size: dimension of each connection matrix

    # unnormalised (combinatorial) laplacian, to be normalised later
    L = compute_laplacian(data, normalization=None)  # .to_sparse()

    # rearrange into block form (kron(L, ones(d,d)))
    edge_index = utils.expand_edge_index(L.indices(), dim=d)
    L = torch.sparse_coo_tensor(edge_index, L.values().repeat_interleave(d * d))

    # unnormalised connection laplacian
    # Lc(i,j) = L(i,j)*R(i,j) if (i,j)=\in E else 0
    Lc = L * R

    # normalize
    edge_index, edge_weight = PyGu.remove_self_loops(data.edge_index, data.edge_weight)
    if edge_weight is None:
        edge_weight = torch.ones(edge_index.size(1), device=edge_index.device)

    # degree matrix (weighted out-degree per node)
    deg = scatter_add(edge_weight, edge_index[0], dim=0, dim_size=n)

    if normalization == "rw":
        deg_inv = 1.0 / deg
        deg_inv.masked_fill_(deg_inv == float("inf"), 0)  # isolated nodes -> 0
        deg_inv = deg_inv.repeat_interleave(d, dim=0)
        Lc = torch.diag(deg_inv).to_sparse() @ Lc

    return Lc.coalesce()
455
+
456
+
457
def compute_gauges(data, dim_man=None, n_geodesic_nb=10, processes=1):
    """Orthonormal gauges for the tangent space at each node.

    Args:
        data: Pytorch geometric data object.
        dim_man: manifold dimension; defaults to the ambient dimension.
        n_geodesic_nb: number of geodesic neighbours. The default is 10.
        processes: number of CPUs to use.

    Returns:
        gauges (nxdimxdim matrix): Matrix containing dim unit vectors for each node.
        Sigma: Singular values.
    """
    pos = data.pos.numpy().astype(np.float64)
    adj = PyGu.to_scipy_sparse_matrix(data.edge_index).tocsr()

    # split positions and adjacency per sample for parallel processing
    sl = data._slice_dict["x"]  # pylint: disable=protected-access
    n_samples = len(sl) - 1
    bounds = [(sl[i], sl[i + 1]) for i in range(n_samples)]
    pos_chunks = [pos[a:b] for a, b in bounds]
    adj_chunks = [adj[a:b, :][:, a:b] for a, b in bounds]

    if dim_man is None:
        dim_man = pos_chunks[0].shape[1]

    out = utils.parallel_proc(
        _compute_gauges,
        range(n_samples),
        [pos_chunks, adj_chunks, dim_man, n_geodesic_nb],
        processes=processes,
        desc="\n---- Computing tangent spaces...",
    )

    gauges, Sigma = zip(*out)
    return utils.np2torch(np.vstack(gauges)), utils.np2torch(np.vstack(Sigma))
494
+
495
+
496
def _compute_gauges(inputs, i):
    """Per-chunk worker for compute_gauges(): tangent frames for sample i."""
    X_chunks, A_chunks, dim_man, n_geodesic_nb = inputs
    return tangent_frames(X_chunks[i], A_chunks[i], dim_man, n_geodesic_nb)
502
+
503
+
504
def compute_connections(data, gauges, processes=1):
    """Find smallest rotations R between gauges pairs. It is assumed that the first
    row of edge_index is what we want to align to, i.e.,
    gauges(i) = gauges(j)@R[i,j].T

    R[i,j] is optimal rotation that minimises ||X - RY||_F computed by SVD:
        X, Y = gauges[i].T, gauges[j].T
        U, _, Vt = scipy.linalg.svd(X.T@Y)
        R[i,j] = U@Vt

    Args:
        data: Pytorch geometric data object
        gauges (nxdxd matrix): Orthogonal unit vectors for each node
        processes: number of CPUs to use

    Returns:
        (n*dim,n*dim) matrix of rotation matrices
    """
    frames = np.array(gauges, dtype=np.float64)
    adj = PyGu.to_scipy_sparse_matrix(data.edge_index).tocsr()

    # split gauges and adjacency per sample for parallel processing
    sl = data._slice_dict["x"]  # pylint: disable=protected-access
    dim_man = frames.shape[-1]

    n_samples = len(sl) - 1
    bounds = [(sl[i], sl[i + 1]) for i in range(n_samples)]
    frame_chunks = [frames[a:b] for a, b in bounds]
    adj_chunks = [adj[a:b, :][:, a:b] for a, b in bounds]

    out = utils.parallel_proc(
        _compute_connections,
        range(n_samples),
        [frame_chunks, adj_chunks, dim_man],
        processes=processes,
        desc="\n---- Computing connections...",
    )

    return utils.to_block_diag(out)
543
+
544
+
545
def _compute_connections(inputs, i):
    """Per-chunk worker for compute_connections(): edge rotations for sample i."""
    gauges_chunks, A_chunks, dim_man = inputs

    R = connections(gauges_chunks[i], A_chunks[i], dim_man)

    # edge list of this chunk, expanded so each edge carries a d x d block
    coo = A_chunks[i].tocoo()
    edge_index = torch.tensor(np.vstack([coo.row, coo.col]))
    edge_index = utils.expand_edge_index(edge_index, dim=R.shape[-1])

    return torch.sparse_coo_tensor(edge_index, R.flatten(), dtype=torch.float32).coalesce()
555
+
556
+
557
def compute_eigendecomposition(A, k=None, eps=1e-8):
    """Eigendecomposition of a square matrix A.

    Uses a dense torch eigensolver when all eigenpairs are requested
    (k is None), otherwise a sparse scipy solver for the k smallest
    eigenvalues. On solver failure, a small multiple of the identity is
    added and the solve is retried (up to 4 times).

    Args:
        A: square (sparse torch) matrix A; may be None.
        k: number of eigenvectors; None means full decomposition.
        eps: base regularisation added to the diagonal on retry.

    Returns:
        evals (k): eigenvalues of the Laplacian
        evecs (V,k): matrix of eigenvectors of the Laplacian
        (or None when A is None)
    """
    if A is None:
        return None

    if k is None:
        # full decomposition: densify and solve in double precision
        A = A.to_dense().double()
    else:
        # partial decomposition: convert to a scipy sparse matrix
        indices, values, size = A.indices(), A.values(), A.size()
        A = sp.coo_array((values, (indices[0], indices[1])), shape=size)

    failcount = 0
    while True:
        try:
            if k is None:
                evals, evecs = torch.linalg.eigh(A)  # pylint: disable=not-callable
            else:
                # "SM": k eigenpairs with smallest magnitude eigenvalues
                evals, evecs = sp.linalg.eigsh(A, k=k, which="SM")
                evals, evecs = torch.tensor(evals), torch.tensor(evecs)

            # clip tiny negative eigenvalues caused by numerical error
            evals = torch.clamp(evals, min=0.0)
            # rescale eigenvectors by sqrt(n) — presumably to normalise their
            # entries to O(1) magnitude; TODO confirm against downstream use
            evecs *= np.sqrt(len(evecs))

            break
        except Exception as e:  # pylint: disable=broad-exception-caught
            print(e)
            if failcount > 3:
                raise ValueError("failed to compute eigendecomp") from e
            failcount += 1
            print("--- decomp failed; adding eps ===> count: " + str(failcount))
            # regularise the diagonal, increasing strength tenfold each retry
            if k is None:
                A += torch.eye(A.shape[0]) * (eps * 10 ** (failcount - 1))
            else:
                A += sp.eye(A.shape[0]) * (eps * 10 ** (failcount - 1))

    return evals.float(), evecs.float()
data/MARBLE/layers.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Layer module."""
2
+
3
+ import torch
4
+ from torch import nn
5
+ from torch_geometric.nn.conv import MessagePassing
6
+
7
+ from MARBLE import smoothing as s
8
+
9
+
10
class Diffusion(nn.Module):
    """Diffusion layer with a learnable diffusion time."""

    def __init__(self, tau0=0.0):
        """Initialise with starting diffusion time tau0."""
        super().__init__()

        self.diffusion_time = nn.Parameter(torch.tensor(float(tau0)))

    def forward(self, x, L, Lc=None, method="spectral"):
        """Diffuse x for the learned time over L (or Lc for vector fields)."""
        if method == "spectral":
            assert len(L) == 2, "L must be a matrix or a pair of eigenvalues and eigenvectors"

        # keep the learned diffusion time strictly positive
        with torch.no_grad():
            self.diffusion_time.data = torch.clamp(self.diffusion_time, min=1e-8)

        t = self.diffusion_time

        if Lc is None:
            # scalar signals: diffuse each channel independently
            diffused = [s.scalar_diffusion(channel, t, method, L) for channel in x.T]
            return torch.cat(diffused, axis=1)

        # vector signals: diffuse with the connection Laplacian
        return s.vector_diffusion(x, t, Lc, L=L, method=method, normalise=True)
37
+
38
+
39
class AnisoConv(MessagePassing):
    """Anisotropic Convolution via message passing with precomputed kernels."""

    def __init__(self, **kwargs):
        """Initialize with sum aggregation."""
        super().__init__(aggr="add", **kwargs)

    def forward(self, x, kernels):
        """Propagate x through each kernel and interleave the results.

        Args:
            x (n x dim tensor): node features.
            kernels: list of (sparse) kernel matrices.

        Returns:
            (n x len(kernels)*dim) tensor, ordered per channel as
            [dx1/du, dx1/dv, dx2/du, dx2/dv, ...].
        """
        out = [self.propagate(K, x=x) for K in kernels]

        # [[dx1/du, dx2/du], [dx1/dv, dx2/dv]] -> [dx1/du, dx1/dv, dx2/du, dx2/dv]
        out = torch.stack(out, axis=2)
        return out.view(out.shape[0], -1)

    def message_and_aggregate(self, K_t, x):
        """Message passing step. If K_t is a txs matrix (s sources, t targets),
        do matrix multiplication K_t@x, broadcasting over column features.
        If K_t is a t*dimxs*dim matrix, in case of manifold computations,
        then first reshape, assuming that the columns of x are ordered as
        [dx1/du, x1/dv, ..., dx2/du, dx2/dv, ...].
        """
        n, dim = x.shape

        # BUGFIX: the divisibility test must be modulo (n * dim). The original
        # `K_t.size(dim=1) % n * dim == 0` parsed as `(size % n) * dim == 0`
        # due to operator precedence, triggering the reshape for any kernel
        # whose width is a multiple of n.
        if K_t.size(dim=1) % (n * dim) == 0:
            n_ch = torch.div(n * dim, K_t.size(dim=1), rounding_mode="floor")
            x = x.view(-1, n_ch)

        x = K_t.matmul(x, reduce=self.aggr)

        return x.view(-1, dim)
74
+
75
+
76
class InnerProductFeatures(nn.Module):
    r"""Compute scaled inner-products between channel vectors.

    Input: (V x C*D) vector of (V x n_i) list of vectors with \sum_in_i = C*D
    Output: (VxC) dot products
    """

    def __init__(self, C, D):
        super().__init__()

        self.C, self.D = C, D

        # one learnable DxD linear map per channel, initialised to identity
        self.O_mat = nn.ModuleList(nn.Linear(D, D, bias=False) for _ in range(C))

        self.reset_parameters()

    def reset_parameters(self):
        """Reset every channel map to the identity."""
        for lin in self.O_mat:
            lin.weight.data = torch.eye(self.D)

    def forward(self, x):
        """Forward."""
        signals = x if isinstance(x, list) else [x]

        # reshape each signal into (V, channels, D)
        signals = [sig.view(sig.shape[0], -1, self.D) for sig in signals]

        # for scalar signals take magnitude
        if self.D == 1:
            return torch.cat([sig.norm(dim=2) for sig in signals], axis=1)

        # for vector signals take inner products:
        # bring to form where all columns are vectors in the tangent space
        # [ x1 dx1/du ...]
        #   x2 dx1/dv
        #   x3 dx1/dw
        vecs = torch.cat([sig.swapaxes(1, 2) for sig in signals], axis=2)

        assert vecs.shape[2] == self.C, "Number of channels is incorrect!"

        # O_ij@x_j
        mapped = torch.stack(
            [self.O_mat[j](vecs[..., j]) for j in range(self.C)], dim=2
        )

        # \sum_j x_i^T@O_ij@x_j
        prods = torch.einsum("bki,bkj->bi", vecs, mapped)

        return torch.tanh(prods).reshape(vecs.shape[0], -1)
data/MARBLE/lib/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ """Lib module."""
data/MARBLE/lib/cknn.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Module imported and adapted from https://github.com/chlorochrule/cknn."""
2
+
3
+ import numpy as np
4
+ from scipy.sparse import csr_matrix
5
+ from scipy.spatial.distance import pdist
6
+ from scipy.spatial.distance import squareform
7
+
8
+
9
def cknneighbors_graph(
    X,
    n_neighbors,
    delta=1.0,
    metric="euclidean",
    t="inf",
    include_self=False,
    is_sparse=True,
    return_instance=False,
):
    """Main function to call, see CkNearestNeighbors for the doc."""
    estimator = CkNearestNeighbors(
        n_neighbors=n_neighbors,
        delta=delta,
        metric=metric,
        t=t,
        include_self=include_self,
        is_sparse=is_sparse,
    )
    estimator.cknneighbors_graph(X)

    return estimator if return_instance else estimator.ckng
33
+
34
+
35
class CkNearestNeighbors(object):
    """This object provides the all logic of CkNN.

    Args:
        n_neighbors: int, optional, default=5
            Number of neighbors to estimate the density around the point.
            It appeared as a parameter `k` in the paper.

        delta: float, optional, default=1.0
            A parameter to decide the radius for each points. The combination
            radius increases in proportion to this parameter.

        metric: str, optional, default='euclidean'
            The metric of each points. This parameter depends on the parameter
            `metric` of scipy.spatial.distance.pdist.

        t: 'inf' or float or int, optional, default='inf'
            The decay parameter of heat kernel. The weights are calculated as
            follow:

                W_{ij} = exp(-(||x_{i}-x_{j}||^2)/t)

            For more infomation, read the paper 'Laplacian Eigenmaps for
            Dimensionality Reduction and Data Representation', Belkin, et. al.

        include_self: bool, optional, default=False
            All diagonal elements are 1.0 if this parameter is True.

        is_sparse: bool, optional, default=True
            The method `cknneighbors_graph` returns csr_matrix object if this
            parameter is True else returns ndarray object.
    """

    def __init__(
        self,
        n_neighbors=5,
        delta=1.0,
        metric="euclidean",
        t="inf",
        include_self=False,
        is_sparse=True,
    ):
        self.n_neighbors = n_neighbors
        self.delta = delta
        self.metric = metric
        self.t = t
        self.include_self = include_self
        self.is_sparse = is_sparse
        # result graph, populated by cknneighbors_graph()
        self.ckng = None

    def cknneighbors_graph(self, X):
        """A method to calculate the CkNN graph

        Args:
            X: ndarray
                The data matrix.

        return: csr_matrix (if self.is_sparse is True)
            or ndarray(if self.is_sparse is False)
        """

        n_neighbors = self.n_neighbors
        delta = self.delta
        metric = self.metric
        t = self.t
        include_self = self.include_self
        is_sparse = self.is_sparse

        n_samples = X.shape[0]

        if n_neighbors < 1 or n_neighbors > n_samples - 1:
            raise ValueError("`n_neighbors` must be in the range 1 to number of samples")
        if len(X.shape) != 2:
            raise ValueError("`X` must be 2D matrix")
        if n_samples < 2:
            raise ValueError("At least 2 data points are required")

        if metric == "precomputed":
            if X.shape[0] != X.shape[1]:
                raise ValueError("`X` must be square matrix")
            # NOTE(review): dmatrix aliases X here; the finite-t branch below
            # overwrites dmatrix in place, which mutates the caller's X.
            dmatrix = X
        else:
            dmatrix = squareform(pdist(X, metric=metric))

        # distance to the n_neighbors-th nearest neighbour of each point,
        # kept as a column vector so the outer product below is (n, n)
        darray_n_nbrs = np.partition(dmatrix, n_neighbors)[:, [n_neighbors]]
        # CkNN criterion: d(x,y) / sqrt(d_k(x) * d_k(y)) compared to delta
        ratio_matrix = dmatrix / np.sqrt(darray_n_nbrs.dot(darray_n_nbrs.T))
        diag_ptr = np.arange(n_samples)

        if not isinstance(delta, (int, float)):
            raise ValueError("Invalid argument type. Type of `delta` must be float or int")
        adjacency = csr_matrix(ratio_matrix < delta)

        # the diagonal ratio is 0 (< delta), so self-edges must be set explicitly
        if include_self:
            adjacency[diag_ptr, diag_ptr] = True
        else:
            adjacency[diag_ptr, diag_ptr] = False

        if t == "inf":
            # unweighted (0/1) adjacency
            neigh = adjacency.astype(float)
        else:
            # heat-kernel weights on the retained edges; dmatrix is reused
            # in place as the weight matrix
            mask = adjacency.nonzero()
            weights = np.exp(-np.power(dmatrix[mask], 2) / t)
            dmatrix[:] = 0.0
            dmatrix[mask] = weights
            neigh = csr_matrix(dmatrix)

        if is_sparse:
            self.ckng = neigh
        else:
            self.ckng = neigh.toarray()

        return self.ckng
data/MARBLE/lib/ptu_dijkstra.pyx ADDED
@@ -0,0 +1,689 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Contains modified scripts from the ptu_dijkstra algorithm.
3
+
4
+ The Fibbonacci Heap data structure and several components related to Dijkstra's
5
+ standard algorithm were adopted from `scipy.sparse.csgraph._shortest_path.pyx`,
6
+ authored and copywrited by Jake Vanderplas -- <vanderplas@astro.washington.edu>
7
+ under BSD in 2011.
8
+
9
+ Author of all additional code pertaining to the PTU Dijkstra Algorithm is
10
+ Max Budninskiy (C). License: modified (3-clause) BSD, 2020.
11
+ """
12
+ import warnings
13
+
14
+ import numpy as np
15
+ from scipy.sparse.csgraph._validation import validate_graph
16
+
17
+ cimport cython
18
+ cimport numpy as np
19
+ cimport scipy.linalg.cython_lapack as cython_lapack
20
+ from libc.math cimport sqrt
21
+ from libc.stdlib cimport free
22
+ from libc.stdlib cimport malloc
23
+
24
+ DTYPE = np.float64
25
+ ctypedef np.float64_t DTYPE_t
26
+
27
+ ITYPE = np.int32
28
+ ctypedef np.int32_t ITYPE_t
29
+
30
+
31
def tangent_frames(X,
                   csgraph,
                   d,
                   K):
    """
    Algorithm for tangent frame computation.

    Parameters
    ----------
    X: numpy matrix
        (N, D) matrix of N input data points in D dimensional space sampling a
        lower dimensional manifold S
    csgraph : sparse matrix
        Distance weighted proximity graph of pointset X
    d : int
        Dimension of the manifold S
    K : int
        Number of points to include in geodesic neighborhoods. Geodesic
        neighborhood of a point x of size K is K nearest neighbors to x in
        the proximity graph. Notice it's different than simple K nearest
        neighbors in ambient D dimensional space. Geodesic neighborhood of
        point x is used to compute local tangent space to the data manifold at
        x.

    Returns
    -------
    tangents : (N, D, d) array
        Orthonormal basis of the tangent space at each point.
    Sigma : (N, d) array
        Leading singular values of each centered geodesic neighborhood.

    Notes
    -----
    The input csgraph is symmetrized first.

    The original implementation also allocated an (N, N) `ptu_dists` matrix
    filled with inf that was never read or returned; that dead O(N^2)
    allocation has been removed.
    """
    N = X.shape[0]
    D = X.shape[1]
    cdef ITYPE_t N_t = N
    cdef ITYPE_t K_t = K
    cdef ITYPE_t D_t = D
    cdef ITYPE_t d_t = d

    if K >= N:
        raise ValueError(
            "Geodesic neighborhood size must be less than the "
            "total number of samples"
        )
    if K < d:
        raise ValueError(
            "Geodesic neighborhood size must be larger or equal to the "
            "embedding dimension"
        )
    if D < d:
        raise ValueError(
            "Embedding dimension must be less or equal to the ambient "
            "dimension of input data"
        )

    csgraph = validate_graph(csgraph, directed=True, dtype=DTYPE,
                             dense_output=False)

    if np.any(csgraph.data < 0):
        warnings.warn("Graph has negative weights: \
        negative distances are not allowed.")

    # output buffers filled by the C-level worker
    tangents = np.empty((N, D, d), dtype=DTYPE)
    Sigma = np.empty((N, d), dtype=DTYPE)

    # symmetrize the graph
    csgraphT = csgraph.T.tocsr()
    symmetrized_graph = csgraph.maximum(csgraphT)
    graph_data = symmetrized_graph.data
    graph_indices = symmetrized_graph.indices
    graph_indptr = symmetrized_graph.indptr

    tangents_status = _geodesic_neigborhood_tangents(
        X,
        graph_data,
        graph_indices,
        graph_indptr,
        tangents,
        Sigma,
        N_t,
        K_t,
        D_t,
        d_t
    )
    if tangents_status == -1:
        raise RuntimeError(
            'Local tangent space approximation failed, at least one geodesic '
            'neighborhood does not span d-dimensional space'
        )

    return tangents, Sigma
126
+
127
+
128
def connections(tangents,
                csgraph,
                d):
    """
    Compute connection (alignment) matrices between neighboring tangent frames.

    Parameters
    ----------
    tangents : (N, D, d) array
        Orthonormal tangent frame at each point (output of tangent_frames).
    csgraph : sparse matrix
        Distance weighted proximity graph of pointset X
    d : int
        Dimension of the manifold S

    Returns
    -------
    R : (e, d, d) array
        One d x d rotation per (symmetrized) graph edge, in CSR edge order.

    Notes
    -----
    The input csgraph is symmetrized first.

    The original implementation also allocated an (N, N) `ptu_dists` matrix
    filled with inf that was never read or returned; that dead O(N^2)
    allocation has been removed.
    """
    N = tangents.shape[0]
    D = tangents.shape[1]
    cdef ITYPE_t N_t = N
    cdef ITYPE_t D_t = D
    cdef ITYPE_t d_t = d

    if D < d:
        raise ValueError(
            "Embedding dimension must be less or equal to the ambient "
            "dimension of input data"
        )

    csgraph = validate_graph(csgraph, directed=True, dtype=DTYPE,
                             dense_output=False)

    if np.any(csgraph.data < 0):
        warnings.warn("Graph has negative weights: \
        negative distances are not allowed.")

    # symmetrize the graph
    csgraphT = csgraph.T.tocsr()
    symmetrized_graph = csgraph.maximum(csgraphT)
    graph_data = symmetrized_graph.data
    graph_indices = symmetrized_graph.indices
    graph_indptr = symmetrized_graph.indptr

    # one rotation matrix per edge of the symmetrized graph
    e = len(graph_indices)
    R = np.empty(shape=[e, d, d], dtype=DTYPE)

    _parallel_transport_dijkstra(
        graph_indices,
        graph_indptr,
        tangents,
        R,
        N_t,
        D_t,
        d_t
    )

    return R
194
+
195
+
196
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef _parallel_transport_dijkstra(
        int[:] csr_indices,
        int[:] csr_indptr,
        double[:, :, :] tangents,
        double[:, :, :] R,
        int N,
        int D,
        int d
    ):
    """
    For every edge (i, j) of the CSR graph, compute the d x d orthogonal
    matrix closest to tangents[i].T @ tangents[j] via SVD (R = U @ VT),
    writing the result into R in CSR edge order.

    Parameters:
    csr_indices: array (int)
        Indices of sparce csr proximity graph matrix.
    csr_indptr: array (int)
        Index pointers of sparce csr proximity graph matrix.
    tangents: 3 dimensional tensor
        Collection of N local tangent space bases of size (D, d).
    R: 3 dimensional tensor
        [Output] One d x d alignment matrix per edge.
    N: int
        Number of points in dataset X.
    D: int
        Ambient dimension of pointset X.
    d: int
        Dimension of manifold S that is sampled by X.
    """
    cdef:
        int i, k, p, q, j, count
        int info, lwork = 6*d
        double temp

        np.ndarray[DTYPE_t, ndim=1] Work = np.empty(shape=[lwork], dtype=DTYPE)
        np.ndarray[DTYPE_t, ndim=2] TtT = np.empty(shape=[d, d], dtype=DTYPE, order='F')
        np.ndarray[DTYPE_t, ndim=2] U = np.empty(shape=[d, d], dtype=DTYPE, order='F')
        np.ndarray[DTYPE_t, ndim=2] VT = np.empty(shape=[d, d], dtype=DTYPE, order='F')
        np.ndarray[DTYPE_t, ndim=1] S = np.empty(shape=d, dtype=DTYPE)

    count = 0
    for i in range(N):
        for j in csr_indices[csr_indptr[i]:csr_indptr[i+1]]:
            # TtT = tangents[i].T @ tangents[j]  (d x d cross-Gram matrix)
            for p in range(d):
                for q in range(d):
                    temp = 0
                    for k in range(D):
                        temp += tangents[i, k, p] * tangents[j, k, q]
                    TtT[p, q] = temp

            # U, S, VT = SVD(TtT); note dgesvd destroys TtT in place
            # see LAPACK docs for details
            cython_lapack.dgesvd(
                'A',
                'A',
                &d,
                &d,
                &TtT[0, 0],
                &d,
                &S[0],
                &U[0, 0],
                &d,
                &VT[0, 0],
                &d,
                &Work[0],
                &lwork,
                &info
            )

            # R[edge] = U @ VT, the orthogonal polar factor of TtT
            for p in range(d):
                for q in range(d):
                    temp = 0
                    for k in range(d):
                        temp += U[p, k] * VT[k, q]
                    R[count,p,q] = temp
            count += 1
274
+
275
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef int _geodesic_neigborhood_tangents(
        double[:, :] X,
        double[:] csr_weights,
        int[:] csr_indices,
        int[:] csr_indptr,
        double[:, :, :] tangents,
        double[:, :] Sigma,
        int N,
        int K,
        int D,
        int d,
    ):
    """
    Computes a tangent space for every input point using geodesic neighborhoods.

    For each point, the K graph-geodesically nearest points are found with
    Dijkstra's algorithm; the centered neighborhood is then decomposed by SVD
    and its d leading left singular vectors form the tangent basis.

    Parameters:
    X: matrix
        The (N, D) matrix of N input data points in D dimensional space sampling
        a lower dimensional manifold S.
    csr_weights: array
        Values of sparse csr distance weighted adjacency matrix
        representing proximity graph of pointset X.
    csr_indices: array (int)
        Indices of sparce csr proximity graph matrix.
    csr_indptr: array (int)
        Index pointers of sparce csr proximity graph matrix.
    tangents: 3 dimensional tensor
        [Output] Collection of N local tangent space bases of size (D, d).
    Sigma: 2 dimensional tensor
        [Output] Singular values for all vertices
    N: int
        Number of points in dataset X.
    K: int
        Number of points used to define a geodesic neighborhood.
    D: int
        Ambient dimension of pointset X.
    d: int
        Dimension of manifold S that is sampled by X.

    Returns 1 on success, -1 if some neighborhood does not span a
    d-dimensional space.
    """
    cdef:
        unsigned int i, j, k, l, p, q, scanned_cntr, k_current
        int return_pred = 0, Kp1 = K + 1, mn = min(D, Kp1)
        int info, lwork = max(3*min(D, Kp1) + max(D, Kp1), 5*min(D, Kp1))
        double mean, next_val

        np.ndarray[DTYPE_t, ndim=2] geoNbh = np.empty(shape=[D, Kp1], dtype=DTYPE, order='F')
        np.ndarray[DTYPE_t, ndim=2] U = np.empty(shape=(D, mn), dtype=DTYPE, order='F')
        np.ndarray[DTYPE_t, ndim=2] VT = np.empty(shape=(mn, Kp1), dtype=DTYPE, order='F')
        np.ndarray[DTYPE_t, ndim=1] S = np.empty(shape=mn, dtype=DTYPE)
        np.ndarray[DTYPE_t, ndim=1] Work = np.empty(shape=[lwork], dtype=DTYPE)
        np.ndarray[ITYPE_t, ndim=1] geoNbh_indices = np.empty(shape=Kp1, dtype=ITYPE)

        FibonacciHeap heap
        FibonacciNode *v
        FibonacciNode *nodes = <FibonacciNode*> malloc(N *
                                                       sizeof(FibonacciNode))
        FibonacciNode *current_node

    if nodes == NULL:
        raise MemoryError("Failed to allocate memory in _geodesic_neigborhood_tangents")

    for i in range(N):

        # initialize nodes for Dijkstra
        for k in range(N):
            initialize_node(&nodes[k], k)

        # insert node i into heap
        heap.min_node = NULL
        insert_node(&heap, &nodes[i])

        # counter of processed points closest to i
        scanned_cntr = 0

        # perform standard Dijkstra until K closest points are discovered
        # keep track of the indices of these K points
        while (heap.min_node) and (scanned_cntr <= K):
            v = remove_min(&heap)
            v.state = SCANNED
            j = v.index
            geoNbh_indices[scanned_cntr] = j
            scanned_cntr += 1
            if scanned_cntr <= K:
                for k in range(csr_indptr[j], csr_indptr[j + 1]):
                    k_current = csr_indices[k]
                    current_node = &nodes[k_current]
                    if current_node.state != SCANNED:
                        next_val = v.val + csr_weights[k]
                        if current_node.state == NOT_IN_HEAP:
                            current_node.state = IN_HEAP
                            current_node.val = next_val
                            insert_node(&heap, current_node)
                        elif current_node.val > next_val:
                            decrease_val(&heap, current_node,
                                         next_val)

        # construct and center geodesic neighborhood from indices
        for p in range(D):
            mean = 0
            for q in range(Kp1):
                geoNbh[p, q] = X[geoNbh_indices[q], p]
                mean += geoNbh[p, q]
            mean = mean / Kp1
            for q in range(Kp1):
                geoNbh[p, q] -= mean

        # perform SVD of the geodesic neighborhood points
        # see LAPACK docs for details
        cython_lapack.dgesvd(
            'S',
            'N',
            &D,
            &Kp1,
            &geoNbh[0, 0],
            &D,
            &S[0],
            &U[0, 0],
            &D,
            &VT[0, 0],
            &mn,
            &Work[0],
            &lwork,
            &info
        )

        # d left singular vectors form a basis for tangent space at point i
        for q in range(d):
            if S[q] < 1e-10:
                # BUGFIX: free the node array before the error return;
                # the original leaked it on this path
                free(nodes)
                return -1
            for p in range(D):
                tangents[i, p, q] = U[p, q]

        # store the d leading singular values of the neighborhood
        for q in range(d):
            Sigma[i, q] = S[q]

    free(nodes)
    return 1
417
+
418
######################################################################
# FibonacciNode structure
# This structure and the operations on it are the nodes of the
# Fibonacci heap.
#
cdef enum FibonacciState:
    SCANNED
    NOT_IN_HEAP
    IN_HEAP


cdef struct FibonacciNode:
    unsigned int index          # id of the graph vertex this node represents
    unsigned int rank           # number of children (heap rank)
    unsigned int source
    FibonacciState state
    DTYPE_t val                 # current tentative distance (heap key)
    FibonacciNode* parent
    FibonacciNode* left_sibling
    FibonacciNode* right_sibling
    FibonacciNode* children

@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef void initialize_node(FibonacciNode* node,
                          unsigned int index,
                          DTYPE_t val=0):
    # Assumptions: - node is a valid pointer
    #              - node is not currently part of a heap
    node.index = index
    # NOTE(review): -9999 assigned to an unsigned field wraps around;
    # it acts as a "no source" sentinel
    node.source = -9999
    node.val = val
    node.rank = 0
    node.state = NOT_IN_HEAP

    node.parent = NULL
    node.left_sibling = NULL
    node.right_sibling = NULL
    node.children = NULL

@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef FibonacciNode* rightmost_sibling(FibonacciNode* node):
    # Assumptions: - node is a valid pointer
    # Walk right until the end of the sibling list.
    cdef FibonacciNode* temp = node
    while(temp.right_sibling):
        temp = temp.right_sibling
    return temp

@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef FibonacciNode* leftmost_sibling(FibonacciNode* node):
    # Assumptions: - node is a valid pointer
    # Walk left until the start of the sibling list.
    cdef FibonacciNode* temp = node
    while(temp.left_sibling):
        temp = temp.left_sibling
    return temp

@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef void add_child(FibonacciNode* node, FibonacciNode* new_child):
    # Assumptions: - node is a valid pointer
    #              - new_child is a valid pointer
    #              - new_child is not the sibling or child of another node
    new_child.parent = node

    if node.children:
        add_sibling(node.children, new_child)
    else:
        # first child: start a fresh child list
        node.children = new_child
        new_child.right_sibling = NULL
        new_child.left_sibling = NULL
        node.rank = 1

@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef void add_sibling(FibonacciNode* node, FibonacciNode* new_sibling):
    # Assumptions: - node is a valid pointer
    #              - new_sibling is a valid pointer
    #              - new_sibling is not the child or sibling of another node
    # Append at the right end of node's sibling list.
    cdef FibonacciNode* temp = rightmost_sibling(node)
    temp.right_sibling = new_sibling
    new_sibling.left_sibling = temp
    new_sibling.right_sibling = NULL
    new_sibling.parent = node.parent
    if new_sibling.parent:
        new_sibling.parent.rank += 1

@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef void remove(FibonacciNode* node):
    # Assumptions: - node is a valid pointer
    # Detach node from its parent and sibling list, fixing up links.
    if node.parent:
        node.parent.rank -= 1
        if node.left_sibling:
            node.parent.children = node.left_sibling
        elif node.right_sibling:
            node.parent.children = node.right_sibling
        else:
            node.parent.children = NULL

    if node.left_sibling:
        node.left_sibling.right_sibling = node.right_sibling
    if node.right_sibling:
        node.right_sibling.left_sibling = node.left_sibling

    node.left_sibling = NULL
    node.right_sibling = NULL
    node.parent = NULL
540
+
541
+
542
######################################################################
# FibonacciHeap structure
# This structure and operations on it use the FibonacciNode
# routines to implement a Fibonacci heap

ctypedef FibonacciNode* pFibonacciNode


cdef struct FibonacciHeap:
    # pointer to the current minimum root (NULL when the heap is empty)
    FibonacciNode* min_node
    # scratch array used during consolidation in link()/remove_min();
    # rank is bounded by log_phi(n), so 100 slots cover ~2^100 nodes.
    pFibonacciNode[100] roots_by_rank  # maximum number of nodes is ~2^100.
553
+
554
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef void insert_node(FibonacciHeap* heap,
                      FibonacciNode* node):
    # Insert node as a new root; update min_node if node's value is smaller.
    # Assumptions: - heap is a valid pointer
    #              - node is a valid pointer
    #              - node is not the child or sibling of another node
    if heap.min_node:
        add_sibling(heap.min_node, node)
        if node.val < heap.min_node.val:
            heap.min_node = node
    else:
        # empty heap: node becomes the sole root and the minimum
        heap.min_node = node
569
+
570
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef void decrease_val(FibonacciHeap* heap,
                       FibonacciNode* node,
                       DTYPE_t newval):
    # Lower node's value. If the heap order with its parent is now violated,
    # cut the node and re-insert it as a root; otherwise just update min_node
    # if needed. (Simplified cut: no cascading cuts / mark bits.)
    # Assumptions: - heap is a valid pointer
    #              - newval <= node.val
    #              - node is a valid pointer
    #              - node is not the child or sibling of another node
    #              - node is in the heap
    node.val = newval
    if node.parent and (node.parent.val >= newval):
        remove(node)
        insert_node(heap, node)
    elif heap.min_node.val > node.val:
        heap.min_node = node
588
+
589
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef void link(FibonacciHeap* heap, FibonacciNode* node):
    # Consolidation step: merge root nodes of equal rank (the smaller-valued
    # one becomes the parent) and recurse until node's rank slot is free.
    # Assumptions: - heap is a valid pointer
    #              - node is a valid pointer
    #              - node is already within heap

    cdef FibonacciNode *linknode
    cdef FibonacciNode *parent
    cdef FibonacciNode *child

    if heap.roots_by_rank[node.rank] == NULL:
        heap.roots_by_rank[node.rank] = node
    else:
        linknode = heap.roots_by_rank[node.rank]
        heap.roots_by_rank[node.rank] = NULL

        if node.val < linknode.val or node == heap.min_node:
            # node becomes parent (min_node must stay a root, hence the
            # extra `node == heap.min_node` condition); its rank grew, so
            # relink it at the new rank.
            remove(linknode)
            add_child(node, linknode)
            link(heap, node)
        else:
            remove(node)
            add_child(linknode, node)
            link(heap, linknode)
616
+
617
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.nonecheck(False)
@cython.cdivision(True)
cdef FibonacciNode* remove_min(FibonacciHeap* heap):
    # Pop and return the minimum node, then consolidate the root list so
    # that at most one root of each rank remains.
    # Assumptions: - heap is a valid pointer
    #              - heap.min_node is a valid pointer
    cdef:
        FibonacciNode *temp
        FibonacciNode *temp_right
        FibonacciNode *out
        unsigned int i

    # make all min_node children into root nodes
    if heap.min_node.children:
        temp = leftmost_sibling(heap.min_node.children)
        temp_right = NULL

        while temp:
            temp_right = temp.right_sibling  # save: remove() clears links
            remove(temp)
            add_sibling(heap.min_node, temp)
            temp = temp_right

        heap.min_node.children = NULL

    # choose a root node other than min_node
    temp = leftmost_sibling(heap.min_node)
    if temp == heap.min_node:
        if heap.min_node.right_sibling:
            temp = heap.min_node.right_sibling
        else:
            # heap contained a single node: heap is now empty
            out = heap.min_node
            heap.min_node = NULL
            return out

    # remove min_node, and point heap to the new min
    out = heap.min_node
    remove(heap.min_node)
    heap.min_node = temp

    # re-link the heap: reset the rank table, then consolidate every root,
    # tracking the new minimum along the way
    for i in range(100):
        heap.roots_by_rank[i] = NULL

    while temp:
        if temp.val < heap.min_node.val:
            heap.min_node = temp
        temp_right = temp.right_sibling
        link(heap, temp)
        temp = temp_right

    return out
670
+
671
+
672
+ ######################################################################
673
+ # Debugging: Functions for printing the Fibonacci heap
674
+ #
675
+ #cdef void print_node(FibonacciNode* node, int level=0):
676
+ # print '%s(%i,%i) %i' % (level*' ', node.index, node.val, node.rank)
677
+ # if node.children:
678
+ # print_node(leftmost_sibling(node.children), level+1)
679
+ # if node.right_sibling:
680
+ # print_node(node.right_sibling, level)
681
+ #
682
+ #
683
+ #cdef void print_heap(FibonacciHeap* heap):
684
+ # print "---------------------------------"
685
+ # print "min node: (%i, %i)" % (heap.min_node.index, heap.min_node.val)
686
+ # if heap.min_node:
687
+ # print_node(leftmost_sibling(heap.min_node))
688
+ # else:
689
+ # print "[empty heap]"
data/MARBLE/main.py ADDED
@@ -0,0 +1,469 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Main network"""
2
+
3
+ import glob
4
+ import os
5
+ import warnings
6
+ from datetime import datetime
7
+ from pathlib import Path
8
+
9
+ import torch
10
+ import torch.nn.functional as F
11
+ import torch.optim as opt
12
+ import yaml
13
+ from torch import nn
14
+ from torch_geometric.nn import MLP
15
+ from tqdm import tqdm
16
+
17
+ from MARBLE import dataloader
18
+ from MARBLE import geometry
19
+ from MARBLE import layers
20
+ from MARBLE import utils
21
+
22
+
23
class net(nn.Module):
    """MARBLE neural network.

    The possible parameters and their default values are described below,
    and can be accessed via the `params` dictionary in this class constructor.

    Args:
        batch_size: batch size (default=64)
        epochs: optimisation epochs (default=20)
        lr: initial learning rate (default=0.01)
        momentum: momentum (default=0.9)
        diffusion: set to True to use diffusion layer before gradient computation (default=False)
        include_positions: include positions as features (warning: this is untested) (default=False)
        include_self: include vector at the center of feature (default=True)
        order: order to which to compute the directional derivatives (default=2)
        inner_product_features: transform gradient features to inner product features (default=True)
        frac_sampled_nb: fraction of neighbours to sample for gradient computation
            (if -1 then all neighbours) (default=-1)
        dropout: dropout in the MLP (default=0.)
        hidden_channels: number of hidden channels (default=16). If list, then adds multiple layers.
        out_channels: number of output channels (if null, then =hidden_channels) (default=3)
        bias: learn bias parameters in MLP (default=True)
        vec_norm: normalise features at each derivative order to unit length (default=False)
        emb_norm: normalise MLP output to unit length (default=False)
        batch_norm: batch normalisation (default=True)
        seed: seed for reproducibility
    """

    def __init__(self, data, loadpath=None, params=None, verbose=True):
        """Construct the MARBLE net.

        Args:
            data: PyG data
            loadpath: path to a model file, or a directory with models (best model will be used)
            params: dict with parameters to overwrite default params or a path to a yaml file
            verbose: run in verbose mode
        """
        super().__init__()

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        if loadpath is not None:
            if Path(loadpath).is_dir():
                # timestamps in the filenames sort lexicographically, so
                # max() picks the most recent best model
                loadpath = max(glob.glob(f"{loadpath}/best_model*"))
            self.params = torch.load(loadpath, map_location=device)["params"]
        else:
            self.params = params if params is not None else {}

        self._epoch = 0  # to resume optimisation
        self.parse_parameters(data)
        self.check_parameters(data)
        self.setup_layers()
        self.loss = loss_fun()
        self.reset_parameters()
        self.timestamp = None

        if verbose:
            utils.print_settings(self)

        if loadpath is not None:
            self.load_model(loadpath)

    def parse_parameters(self, data):
        """Load default parameters and merge with user specified parameters."""

        file = os.path.dirname(__file__) + "/default_params.yaml"
        with open(file, "rb") as f:
            params = yaml.safe_load(f)

        # dimensions are always inferred from the data, not user-settable
        params["dim_signal"] = data.x.shape[1]
        params["dim_emb"] = data.pos.shape[1]

        if hasattr(data, "dim_man"):
            params["dim_man"] = data.dim_man

        # merge dictionaries without duplications (user values take priority)
        for key in params.keys():
            if key not in self.params.keys():
                self.params[key] = params[key]

        if params["frac_sampled_nb"] != -1:
            self.params["n_sampled_nb"] = int(data.degree * params["frac_sampled_nb"])
        else:
            self.params["n_sampled_nb"] = -1

        # translate the boolean flag into the string/None that PyG's MLP expects
        if self.params["batch_norm"]:
            self.params["batch_norm"] = "batch_norm"
        else:
            self.params["batch_norm"] = None

    def check_parameters(self, data):
        """Check parameter validity."""

        assert self.params["order"] > 0, "Derivative order must be at least 1!"

        if self.params["vec_norm"]:
            assert data.x.shape[1] > 1, "Using vec_norm=True is not permitted for scalar signals"

        if self.params["diffusion"]:
            assert hasattr(data, "L"), "No Laplacian found. Compute it in preprocessing()!"

        pars = [
            "batch_size",
            "epochs",
            "lr",
            "momentum",
            "order",
            "inner_product_features",
            "dim_signal",
            "dim_emb",
            "frac_sampled_nb",
            "dropout",
            "diffusion",
            "hidden_channels",
            "out_channels",
            "bias",
            "batch_norm",
            "vec_norm",
            "emb_norm",
            "seed",
            "include_positions",
            "include_self",
        ]

        for p in pars:
            assert p in list(self.params.keys()), f"Parameter {p} is not specified!"

    def reset_parameters(self):
        """Reset parameters of all child layers that support it."""
        for layer in self.children():
            if hasattr(layer, "reset_parameters"):
                layer.reset_parameters()

    def setup_layers(self):
        """Build the diffusion, gradient, inner-product and MLP encoder layers."""

        s, d, o = self.params["dim_signal"], self.params["dim_emb"], self.params["order"]
        if "dim_man" in self.params.keys():
            # signals live in the tangent spaces of the manifold
            s = d = self.params["dim_man"]

        # diffusion
        self.diffusion = layers.Diffusion()

        # gradient features (one AnisoConv layer per derivative order)
        self.grad = nn.ModuleList(layers.AnisoConv() for i in range(o))

        # cumulated number of channels after gradient features:
        # geometric series s * (1 + d + d^2 + ... + d^o)
        # NOTE(review): integer form assumes d != 1 — d == 1 would divide by zero
        cum_channels = s * (1 - d ** (o + 1)) // (1 - d)
        if not self.params["include_self"]:
            cum_channels -= s

        if self.params["inner_product_features"]:
            cum_channels //= s
            if s == 1:
                cum_channels = o + 1

            self.inner_products = layers.InnerProductFeatures(cum_channels, s)
        else:
            self.inner_products = None

        if self.params["include_positions"]:
            cum_channels += d

        # encoder
        if not isinstance(self.params["hidden_channels"], list):
            self.params["hidden_channels"] = [self.params["hidden_channels"]]

        channel_list = (
            [cum_channels] + self.params["hidden_channels"] + [self.params["out_channels"]]
        )

        self.enc = MLP(
            channel_list=channel_list,
            dropout=self.params["dropout"],
            bias=self.params["bias"],
            norm=self.params["batch_norm"],
        )

    def forward(self, data, n_id, adjs=None):
        """Forward pass.

        Messages are passed to a set target nodes (current batch) from source
        nodes. The source nodes and target nodes form a bipartite graph to
        simplify message passing. By convention, the first size[1] entries of x
        are the target nodes, i.e, x = concat[x_target, x_other].
        """

        x = data.x
        n, d = x.shape[0], data.gauges.shape[2]
        mask = data.mask

        # diffusion
        if self.params["diffusion"]:
            if hasattr(data, "Lc"):
                # vector diffusion: diffuse in the local frames
                x = geometry.global_to_local_frame(x, data.gauges)
                x = self.diffusion(x, data.L, Lc=data.Lc, method="spectral")
                x = geometry.global_to_local_frame(x, data.gauges, reverse=True)
            else:
                x = self.diffusion(x, data.L, method="spectral")

        # local gauges
        if self.params["inner_product_features"]:
            x = geometry.global_to_local_frame(x, data.gauges)

        # restrict to current batch
        x = x[n_id]
        mask = mask[n_id]
        if data.kernels[0].size(0) == n * d:
            # kernels act on vector-valued signals: expand node ids per channel
            n_id = utils.expand_index(n_id, d)
        else:
            d = 1

        if self.params["vec_norm"]:
            x = F.normalize(x, dim=-1, p=2)

        # gradients: one hop of the sampled bipartite graph per derivative order
        if self.params["include_self"]:
            out = [x]
        else:
            out = []
        for i, (_, _, size) in enumerate(adjs):
            kernels = [K[n_id[: size[1] * d], :][:, n_id[: size[0] * d]] for K in data.kernels]

            x = self.grad[i](x, kernels)

            if self.params["vec_norm"]:
                x = F.normalize(x, dim=-1, p=2)

            out.append(x)

        last_size = adjs[-1][2]
        # take target nodes
        out = [o[: last_size[1]] for o in out]

        # inner products
        if self.params["inner_product_features"]:
            out = self.inner_products(out)
        else:
            out = torch.cat(out, axis=1)

        if self.params["include_positions"]:
            out = torch.hstack([data.pos[n_id[: last_size[1]]], out])

        emb = self.enc(out)

        if self.params["emb_norm"]:  # spherical output
            emb = F.normalize(emb)

        return emb, mask[: last_size[1]]

    def evaluate(self, data):
        """Deprecated alias of transform()."""
        warnings.warn("MARBLE.evaluate() is deprecated. Use MARBLE.transform() instead.")
        return self.transform(data)

    def transform(self, data):
        """Forward pass @ evaluation (no minibatches).

        Returns the same data object with the embedding attached as data.emb.
        """
        with torch.no_grad():
            size = (data.x.shape[0], data.x.shape[0])
            adjs = utils.EdgeIndex(data.edge_index, torch.arange(data.edge_index.shape[1]), size)
            adjs = utils.to_list(adjs) * self.params["order"]

            try:
                data.kernels = [
                    utils.to_SparseTensor(K.coalesce().indices(), value=K.coalesce().values()).t()
                    for K in utils.to_list(data.kernels)
                ]
            except Exception:  # pylint: disable=broad-exception-caught
                # kernels may already be SparseTensors; best-effort conversion
                pass

            _, data, adjs = utils.move_to_gpu(self, data, adjs)
            out, _ = self.forward(data, torch.arange(len(data.x)), adjs)
            utils.detach_from_gpu(self, data, adjs)

            data.emb = out.detach().cpu()

            return data

    def batch_loss(self, data, loader, train=False, verbose=False, optimizer=None):
        """Loop over minibatches provided by loader function.

        Args:
            data: PyG data object with an (n x dim) feature matrix data.x
            loader: dataloader object from dataloader.py
            train: enable training mode (activates dropout in the MLP)
            verbose: show progress bar
            optimizer: if given, take an optimisation step per batch

        Returns:
            (mean loss over batches, optimizer)
        """

        if train:  # training mode (enables dropout in MLP)
            self.train()

        if verbose:
            print("\n")

        cum_loss = 0
        for batch in tqdm(loader, disable=not verbose):
            _, n_id, adjs = batch
            adjs = [adj.to(data.x.device) for adj in utils.to_list(adjs)]
            emb, mask = self.forward(data, n_id, adjs)
            loss = self.loss(emb, mask)
            cum_loss += float(loss)

            if optimizer is not None:
                optimizer.zero_grad()  # zero gradients, otherwise accumulates
                loss.backward()  # backprop
                optimizer.step()

        # leave the model in eval mode after the epoch
        self.eval()

        return cum_loss / len(loader), optimizer

    def run_training(self, data, outdir=None, verbose=False):
        """Deprecated alias of fit()."""
        warnings.warn("MARBLE.run_training() is deprecated. Use MARBLE.fit() instead.")

        self.fit(data, outdir=outdir, verbose=verbose)

    def fit(self, data, outdir=None, verbose=False):
        """Network training.

        Args:
            data: PyG data
            outdir: folder to save intermediate models
            verbose: run in verbose mode
        """

        print("\n---- Training network ...")

        self.timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")

        print(f"\n---- Timestamp: {self.timestamp}")

        # load to gpu (if possible)
        # pylint: disable=self-cls-assignment
        self, data, _ = utils.move_to_gpu(self, data)

        # data loader
        train_loader, val_loader, test_loader = dataloader.loaders(data, self.params)
        optimizer = opt.SGD(
            self.parameters(), lr=self.params["lr"], momentum=self.params["momentum"]
        )
        if hasattr(self, "optimizer_state_dict"):
            optimizer.load_state_dict(self.optimizer_state_dict)

        # training scheduler
        scheduler = opt.lr_scheduler.ReduceLROnPlateau(optimizer)

        best_loss = -1  # sentinel: no best model saved yet
        self.losses = {"train_loss": [], "val_loss": [], "test_loss": []}
        # NOTE(review): the start epoch is read from params["epoch"], which is
        # never written here, while checkpoints store it as self._epoch — confirm
        # whether resuming was meant to use self._epoch instead.
        for epoch in range(
            self.params.get("epoch", 0), self.params.get("epoch", 0) + self.params["epochs"]
        ):
            self._epoch = epoch

            train_loss, optimizer = self.batch_loss(
                data, train_loader, train=True, verbose=verbose, optimizer=optimizer
            )
            val_loss, _ = self.batch_loss(data, val_loader, verbose=verbose)
            scheduler.step(train_loss)

            print(
                f"\nEpoch: {self._epoch}, Training loss: {train_loss:4f}, Validation loss: {val_loss:.4f}, lr: {scheduler._last_lr[0]:.4f}",  # noqa, pylint: disable=line-too-long,protected-access
                end="",
            )

            if best_loss == -1 or (val_loss < best_loss):
                outdir = self.save_model(
                    optimizer, self.losses, outdir=outdir, best=True, timestamp=self.timestamp
                )
                best_loss = val_loss
                print(" *", end="")

            self.losses["train_loss"].append(train_loss)
            self.losses["val_loss"].append(val_loss)

        test_loss, _ = self.batch_loss(data, test_loader)
        print(f"\nFinal test loss: {test_loss:.4f}")

        self.losses["test_loss"].append(test_loss)

        self.save_model(optimizer, self.losses, outdir=outdir, best=False, timestamp=self.timestamp)
        # reload the best (lowest validation loss) model found during training
        self.load_model(os.path.join(outdir, f"best_model_{self.timestamp}.pth"))

    def load_model(self, loadpath):
        """Load model.

        Args:
            loadpath: directory with models to load best model, or specific model path
        """
        checkpoint = torch.load(
            loadpath, map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu")
        )
        self._epoch = checkpoint["epoch"]
        self.load_state_dict(checkpoint["model_state_dict"])
        self.optimizer_state_dict = checkpoint["optimizer_state_dict"]
        if hasattr(self, "losses"):
            self.losses = checkpoint["losses"]

    def save_model(self, optimizer, losses, outdir=None, best=False, timestamp=""):
        """Save a training checkpoint (model, optimizer, params, losses).

        Returns the output directory so callers can reuse it.
        """
        if outdir is None:
            outdir = "./outputs/"

        if not os.path.exists(outdir):
            os.makedirs(outdir)

        checkpoint = {
            "epoch": self._epoch,
            "model_state_dict": self.state_dict(),
            "optimizer_state_dict": optimizer.state_dict(),
            "time": timestamp,
            "params": self.params,
            "losses": losses,
        }

        # fix: the original had two identical torch.save branches guarded by
        # `best`; only the filename prefix actually depends on it
        fname = ("best_model_" if best else "last_model_") + timestamp + ".pth"
        torch.save(checkpoint, os.path.join(outdir, fname))

        return outdir
453
+
454
+
455
class loss_fun(nn.Module):
    """Unsupervised contrastive loss.

    The embedding `out` is split into three equal chunks [z, z_pos, z_neg]:
    anchors, positive samples and negative samples. The loss pulls anchors
    towards positives and pushes them away from negatives (negative-sampling
    style), with an optional "coagulation" term that penalises the spread of
    masked points around their mean.
    """

    def forward(self, out, mask=None):
        """Compute the loss.

        Args:
            out: (3n, d) tensor of embeddings, stacked as [anchors, positives, negatives]
            mask: optional boolean mask selecting embeddings whose spread is penalised

        Returns:
            scalar loss tensor
        """
        z, z_pos, z_neg = out.split(out.size(0) // 3, dim=0)
        pos_loss = F.logsigmoid((z * z_pos).sum(-1)).mean()  # pylint: disable=not-callable
        neg_loss = F.logsigmoid(-(z * z_neg).sum(-1)).mean()  # pylint: disable=not-callable

        loss = -pos_loss - neg_loss

        # Fix: previously coagulation_loss was the Python float 0.0 when mask
        # was None, and torch.sigmoid() rejects plain floats (TypeError). The
        # term is now added only when a mask is given; since sigmoid(0) - 0.5
        # == 0, the returned value is unchanged whenever the term is inactive.
        if mask is not None:
            z_mask = out[mask]
            coagulation_loss = (z_mask - z_mask.mean(dim=0)).norm(dim=1).sum()
            loss = loss + torch.sigmoid(coagulation_loss) - 0.5

        return loss
data/MARBLE/plotting.py ADDED
@@ -0,0 +1,751 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Plotting module."""
2
+
3
+ import matplotlib
4
+ import matplotlib.pyplot as plt
5
+ import networkx as nx
6
+ import numpy as np
7
+ import seaborn as sns
8
+ import torch
9
+ from matplotlib import gridspec
10
+ from matplotlib.colors import LinearSegmentedColormap
11
+ from matplotlib.patches import FancyArrowPatch
12
+ from mpl_toolkits.mplot3d import proj3d
13
+ from scipy.spatial import Voronoi
14
+ from scipy.spatial import voronoi_plot_2d
15
+ from torch_geometric.utils.convert import to_networkx
16
+
17
+ from .geometry import embed
18
+
19
+
20
def fields(
    data,
    titles=None,
    col=1,
    figsize=(8, 8),
    axlim=None,
    axes_visible=False,
    color=None,
    alpha=0.5,
    node_size=10,
    plot_gauges=False,
    width=0.005,
    edge_width=1.0,
    scale=5,
    view=None,
):
    """Plot scalar or vector fields

    Args:
        data: PyG Batch data object class created with utils.construct_dataset
        titles: list of titles
        col: int for number of columns to plot
        figsize: tuple of figure dimensions
        axlim: either "same" (share limits across panels) or a list of per-panel limits
        axes_visible: show axes
        color: override node colors (otherwise derived from the signal)
        alpha: edge transparency
        node_size: node marker size
        plot_gauges: also draw the local gauge vectors (if data has them)
        width: arrow width for vector fields
        edge_width: graph edge width
        scale: arrow scale for vector fields
        view: (elev, azim) for 3D views

    Returns:
        list of matplotlib axes, one per dataset
    """
    gauges = data.gauges if hasattr(data, "gauges") else None

    # Fix: number_of_resamples was read outside the isinstance() branch, so a
    # list input raised NameError; the resampling logic now lives entirely in
    # the branch that defines it (behavior unchanged for Batch inputs).
    if not isinstance(data, list):
        number_of_resamples = data.number_of_resamples
        data = data.to_data_list()  # split data batch

        if number_of_resamples > 1:
            print("\nDetected several samples. Taking only first one for visualisation!")
            data = data[::number_of_resamples]

    dim = data[0].pos.shape[1]
    vector = data[0].x.shape[1] > 1
    row = int(np.ceil(len(data) / col))

    fig = plt.figure(figsize=figsize, constrained_layout=True)
    grid = gridspec.GridSpec(row, col, wspace=0.0, hspace=0.0, figure=fig)

    ax_list, lims = [], None
    for i, d in enumerate(data):
        signal = d.x.detach().numpy()
        _, ax = create_axis(dim, grid[i], fig=fig)

        if view is not None:
            ax.view_init(elev=view[0], azim=view[1])

        G = to_networkx(
            d, node_attrs=["pos"], edge_attrs=None, to_undirected=True, remove_self_loops=True
        )

        if color is None:
            # color by signal magnitude (vectors) or value (scalars)
            c = np.linalg.norm(signal, axis=1) if vector else signal
            c, _ = set_colors(c.squeeze())
        else:
            c = color

        graph(
            G,
            labels=None if vector else c,
            ax=ax,
            node_size=node_size,
            edge_width=edge_width,
            edge_alpha=alpha,
            axes_visible=axes_visible,
        )

        if vector:
            pos = d.pos.numpy()
            plot_arrows(pos, signal, ax, c, scale=scale, width=width)

            if plot_gauges and (gauges is not None):
                for j in range(gauges.shape[2]):
                    plot_arrows(pos, gauges[..., j], ax, "k", scale=scale)

        if titles is not None:
            ax.set_title(titles[i])

        fig.add_subplot(ax)

        if axlim is not None:
            if axlim == "same" and (lims is None):
                # freeze the limits of the first panel and reuse them
                lims = get_limits(ax)
            elif len(axlim) == len(data):
                lims = axlim[i]
            else:
                raise NotImplementedError

        set_axes(ax, lims=lims, axes_visible=axes_visible)

        ax_list.append(ax)

    return ax_list
118
+
119
+
120
def histograms(data, titles=None, col=2, figsize=(10, 10)):
    """Plot histograms of cluster distribution across datasets.

    Args:
        data: PyG Batch data object class created with utils.construct_dataset
        titles: list of titles
        col: int for number of columns to plot
        figsize: tuple of figure dimensions
    """
    assert hasattr(data, "clusters"), "No clusters found. First, run postprocessing.cluster(data)!"

    clusters = data.clusters
    all_labels, slices = clusters["labels"], clusters["slices"]
    nc = clusters["n_clusters"]
    n_slices = len(slices) - 1

    # per-dataset label arrays, shifted to 1-based feature numbers
    per_slice = [all_labels[slices[k] : slices[k + 1]] + 1 for k in range(n_slices)]

    n_rows = int(np.ceil(n_slices / col))
    fig = plt.figure(figsize=figsize, constrained_layout=True)
    grid = gridspec.GridSpec(n_rows, col, wspace=0.5, hspace=0.5, figure=fig)

    for idx, lab in enumerate(per_slice):
        ax = plt.Subplot(fig, grid[idx])

        ax.hist(lab, bins=np.arange(nc + 1) + 0.5, rwidth=0.85, density=True)
        ax.set_xticks(np.arange(nc) + 1)  # pylint: disable=not-callable
        ax.set_xlim([0, nc + 1])
        ax.set_xlabel("Feature number")
        ax.set_ylabel("Probability density")

        if titles is not None:
            ax.set_title(titles[idx])

        fig.add_subplot(ax)
155
+
156
+
157
def embedding(
    data,
    labels=None,
    titles=None,
    mask=None,
    ax=None,
    alpha=0.3,
    s=5,
    axes_visible=False,
    cbar_visible=True,
    clusters_visible=False,
    cmap="coolwarm",
    plot_trajectories=False,
    style="o",
    lw=1,
    time_gradient=False,
):
    """Plot embeddings.

    Args:
        data: PyG data object with attribute emb or nxdim matrix of embedded points with dim=2 or 3
        labels: list of increasing integer node labels
        clusters: sklearn cluster object
        titles: list of titles
    """
    # accept either a data object carrying the 2D embedding, or a raw matrix
    if hasattr(data, "emb_2D"):
        emb = data.emb_2D
    elif isinstance(data, np.ndarray) or torch.is_tensor(data):
        emb = data
    else:
        raise TypeError

    dim = emb.shape[1]
    assert dim in [2, 3], f"Embedding dimension is {dim} which cannot be displayed."

    if ax is None:
        _, ax = create_axis(dim)

    if labels is not None:
        assert emb.shape[0] == len(labels)

    if labels is None:
        # single pseudo-label so the loop below runs once over all points
        labels = np.ones(emb.shape[0])

    if mask is None:
        mask = np.ones(len(emb), dtype=bool)
        labels = labels[mask]

    types = sorted(set(labels))

    # one color per label type; cbar may be None depending on set_colors
    color, cbar = set_colors(types, cmap)

    if titles is not None:
        assert len(titles) == len(types)

    for i, typ in enumerate(types):
        title = titles[i] if titles is not None else str(typ)
        c_ = color[i]
        emb_ = emb[mask * (labels == typ)]

        # trajectories need the data object (for data.label); raw arrays can't
        if isinstance(data, np.ndarray) or torch.is_tensor(data):
            print("You need to pass a data object to plot trajectories!")
            plot_trajectories = False

        if plot_trajectories:
            l_ = data.label[mask * (labels == typ)]
            if len(l_) == 0:
                continue
            # a drop in the per-point time label marks the start of a new trajectory
            end = np.where(np.diff(l_) < 0)[0] + 1
            start = np.hstack([0, end])
            end = np.hstack([end, len(emb_)])
            # black-to-type-color gradient along time
            cmap = LinearSegmentedColormap.from_list("Custom", [(0, 0, 0), c_], N=max(l_))

            for i, (s_, e_) in enumerate(zip(start, end)):
                t = range(s_, e_)
                cgrad = cmap(l_[t] / max(l_))
                if style == "-":
                    if time_gradient:
                        trajectories(
                            emb_[t], style="-", ax=ax, ms=s, node_feature=cgrad, alpha=alpha, lw=lw
                        )
                    else:
                        trajectories(
                            emb_[t],
                            style="-",
                            ax=ax,
                            ms=s,
                            node_feature=[c_] * len(t),
                            alpha=alpha,
                            lw=lw,
                        )
                elif style == "o":
                    if dim == 2:
                        ax.scatter(emb_[t, 0], emb_[t, 1], c=cgrad, alpha=alpha, s=s, label=title)
                    elif dim == 3:
                        ax.scatter(
                            emb_[t, 0],
                            emb_[t, 1],
                            emb_[t, 2],
                            c=cgrad,
                            alpha=alpha,
                            s=s,
                            label=title,
                        )
        else:
            # plain scatter of all points of this type
            if dim == 2:
                ax.scatter(emb_[:, 0], emb_[:, 1], color=c_, alpha=alpha, s=s, label=title)
            elif dim == 3:
                ax.scatter(
                    emb_[:, 0], emb_[:, 1], emb_[:, 2], color=c_, alpha=alpha, s=s, label=title
                )

    if dim == 2:
        if hasattr(data, "clusters") and clusters_visible:
            voronoi(data.clusters, ax)

    if titles is not None:
        ax.legend(loc="upper right")

    if not axes_visible:
        ax.set_axis_off()

    if cbar_visible and cbar is not None:
        plt.colorbar(cbar, ax=ax)

    return ax
283
+
284
+
285
def losses(model):
    """Plot a model's training and validation loss curves."""
    curves = (("train_loss", "Training loss"), ("val_loss", "Validation loss"))
    for key, label in curves:
        plt.plot(model.losses[key], label=label)
    plt.xlabel("Epochs")
    plt.ylabel("MSE loss")
    plt.legend()
293
+
294
+
295
def voronoi(clusters, ax):
    """Overlay the Voronoi tessellation of the cluster centroids on ax."""
    centroids = clusters["centroids"]
    voronoi_plot_2d(Voronoi(centroids), ax=ax, show_vertices=False)
    # annotate each cell with its 1-based cluster number
    for idx in range(clusters["n_clusters"]):
        ax.annotate(idx + 1, centroids[idx, :])
301
+
302
+
303
def neighbourhoods(
    data,
    hops=1,
    cols=4,
    norm=False,
    color=None,
    plot_graph=False,
    figsize=(15, 20),
    fontsize=20,
    width=0.025,
    scale=1,
):
    """For each clustered neighbourhood type, draw one sample neighbourhood from each dataset.

    Args:
        data: postprocessed PyG Batch data object class created with utils.construct_dataset
        hops: size of neighbourhood in number of hops
        cols: number of columns in the outer figure grid
        norm: if True, then normalise values to zero mean within clusters
        color: optional fixed colour overriding the signal-derived colours
        plot_graph: if True, then plot the underlying graph.
        figsize: overall figure size
        fontsize: font size of the per-cluster subplot titles
        width: arrow width (vector-valued signals only)
        scale: arrow scale (vector-valued signals only)
    """

    assert hasattr(data, "clusters"), "No clusters found. First, run postprocessing.cluster(data)!"

    # signal is a vector field when the feature dimension is > 1
    vector = data.x.shape[1] > 1
    clusters = data.clusters
    nc = clusters["n_clusters"]
    fig = plt.figure(figsize=figsize, constrained_layout=True)
    outer = gridspec.GridSpec(int(np.ceil(nc / cols)), cols, wspace=0.2, hspace=0.2, figure=fig)

    number_of_resamples = data.number_of_resamples
    data = data.to_data_list()  # split data batch

    if number_of_resamples > 1:
        print(
            "\nDetected several samples of the same data. Taking only first one for visualisation!"
        )
        data = data[::number_of_resamples]

    graphs = []
    for d in data:
        graphs.append(
            to_networkx(
                d, node_attrs=["pos"], edge_attrs=None, to_undirected=True, remove_self_loops=True
            )
        )

    signals = [d.x for d in data]

    for i in range(nc):
        # inner grid: one panel per dataset, two panels per row
        col = 2
        row = int(np.ceil(len(data) / col))
        inner = gridspec.GridSpecFromSubplotSpec(
            row, col, subplot_spec=outer[i], wspace=0.0, hspace=0.0
        )

        # the outer subplot only carries the cluster title
        ax = plt.Subplot(fig, outer[i])
        ax.set_title(f"Type {i+1}", fontsize=fontsize)
        ax.axis("off")
        fig.add_subplot(ax)

        # cumulative node counts map global cluster labels to per-graph node ids
        n_nodes = [0] + [nx.number_of_nodes(g) for g in graphs]
        n_nodes = np.cumsum(n_nodes)

        for j, G in enumerate(graphs):
            label_i = clusters["labels"][n_nodes[j] : n_nodes[j + 1]] == i
            label_i = np.where(label_i)[0]
            if not list(label_i):
                continue
            # pick one random representative node of this cluster in this dataset
            random_node = np.random.choice(label_i)

            signal = signals[j].numpy()
            node_ids = nx.ego_graph(G, random_node, radius=hops).nodes
            node_ids = np.sort(node_ids)  # sort nodes

            # convert node values to colors
            if color is not None:
                c = color
            else:
                c = signal
                if vector:
                    c = np.linalg.norm(signal, axis=1)

            if not norm:  # set colors based on global values
                c, _ = set_colors(c)
                c = [c[i] for i in node_ids] if isinstance(c, (list, np.ndarray)) else c
                signal = signal[node_ids]
            else:  # first extract subgraph, then compute normalized colors
                signal = signal[node_ids]
                signal -= signal.mean()
                c, _ = set_colors(signal.squeeze())

            ax = plt.Subplot(fig, inner[j])

            # extract subgraph with nodes sorted
            subgraph = nx.Graph()
            subgraph.add_nodes_from(sorted(G.subgraph(node_ids).nodes(data=True)))
            subgraph.add_edges_from(G.subgraph(node_ids).edges(data=True))

            ax.set_aspect("equal", "box")
            if plot_graph:
                graph(subgraph, labels=None, ax=ax, node_size=30, edge_width=0.5)

            pos = np.array(list(nx.get_node_attributes(subgraph, name="pos").values()))

            # project >2D positions (and signals) to the plane for display
            if pos.shape[1] > 2:
                pos, manifold = embed(pos, embed_typ="PCA")
                signal = embed(signal, embed_typ="PCA", manifold=manifold)[0]
            if vector:
                plot_arrows(pos, signal, ax, c, width=width, scale=scale)
            else:
                ax.scatter(pos[:, 0], pos[:, 1], c=c)

            ax.set_frame_on(False)
            set_axes(ax, axes_visible=False)
            fig.add_subplot(ax)
418
+
419
+
420
def graph(
    G,
    labels="b",
    edge_width=1,
    edge_alpha=1.0,
    node_size=20,
    layout=None,
    ax=None,
    axes_visible=True,
):
    """Plot scalar values on graph nodes embedded in 2D or 3D.

    Args:
        G: networkx graph, optionally carrying a "pos" node attribute.
        labels: node colours; if None, nodes are not drawn (edges only).
        edge_width: width of the drawn edges.
        edge_alpha: transparency of the drawn edges.
        node_size: marker size of the nodes.
        layout: "spectral" or None (spring layout), used only when "pos" is absent.
        ax: existing matplotlib axes to draw on; created if None.
        axes_visible: whether to display the axes.

    Returns:
        matplotlib axes object.
    """

    G = nx.convert_node_labels_to_integers(G)
    pos = list(nx.get_node_attributes(G, "pos").values())

    # fall back to a computed layout when node positions are not stored on G
    if not pos:
        if layout == "spectral":
            pos = nx.spectral_layout(G)
        else:
            pos = nx.spring_layout(G)

    dim = len(pos[0])
    assert dim in (2, 3), "Dimension must be 2 or 3."

    if ax is None:
        _, ax = create_axis(dim)

    if dim == 2:
        if labels is not None:
            nx.draw_networkx_nodes(
                G, pos=pos, node_size=node_size, node_color=labels, alpha=0.8, ax=ax
            )

        nx.draw_networkx_edges(G, pos=pos, width=edge_width, alpha=edge_alpha, ax=ax)

    elif dim == 3:
        # networkx has no 3D drawing: scatter nodes and plot edges manually
        node_xyz = np.array([pos[v] for v in sorted(G)])
        edge_xyz = np.array([(pos[u], pos[v]) for u, v in G.edges()])

        if labels is not None:
            ax.scatter(*node_xyz.T, s=node_size, c=labels, ec="w")

        for vizedge in edge_xyz:
            ax.plot(*vizedge.T, color="tab:gray", alpha=edge_alpha, linewidth=edge_width)

    set_axes(ax, axes_visible=axes_visible)

    return ax
468
+
469
+
470
+ # def time_series(T, X, style="o", node_feature=None, figsize=(10, 5), lw=1, ms=5):
471
+ # """Plot time series.
472
+
473
+ # Args:
474
+ # X (np array or list[np array]): Trajectories
475
+ # style (string): Plotting style. The default is 'o'
476
+ # color (bool): Color lines. The default is True
477
+ # lw (int): Line width
478
+ # ms (int): Marker size.
479
+
480
+ # Returns:
481
+ # matplotlib axes object
482
+ # """
483
+ # if not isinstance(X, list):
484
+ # X = [X]
485
+
486
+ # fig = plt.figure(figsize=figsize, constrained_layout=True)
487
+ # grid = gridspec.GridSpec(len(X), 1, wspace=0.5, hspace=0, figure=fig)
488
+
489
+ # for sp, X_ in enumerate(X):
490
+ # if sp == 0:
491
+ # ax = plt.Subplot(fig, grid[sp])
492
+ # else:
493
+ # ax = plt.Subplot(fig, grid[sp], sharex=ax)
494
+
495
+ # ax.spines["top"].set_visible(False)
496
+ # ax.spines["right"].set_visible(False)
497
+
498
+ # if sp < len(X) - 1:
499
+ # plt.setp(ax.get_xticklabels(), visible=False) # pylint: disable=not-callable
500
+ # ax.spines["bottom"].set_visible(False)
501
+ # ax.xaxis.set_ticks_position("none")
502
+
503
+ # colors = set_colors(node_feature)[0]
504
+
505
+ # for i in range(len(X_) - 2):
506
+ # if X_[i] is None:
507
+ # continue
508
+
509
+ # c = colors[i] if len(colors) > 1 and not isinstance(colors, str) else colors
510
+
511
+ # ax.plot(T[i : i + 2], X_[i : i + 2], style, c=c, linewidth=lw, markersize=ms)
512
+
513
+ # fig.add_subplot(ax)
514
+
515
+ # return ax
516
+
517
+
518
def trajectories(
    X,
    V=None,
    ax=None,
    style="o",
    node_feature=None,
    lw=1,
    ms=5,
    scale=1,
    arrow_spacing=1,
    axes_visible=True,
    alpha=1.0,
):
    """Plot trajectory in phase space. If multiple trajectories are given, they are plotted with
    different colors.

    Args:
        X (np array): Positions
        V (np array): Velocities (required when style contains '>')
        ax (matplotlib axes object): If specificed, it will plot on existing axes. Default is None
        style (string): Plotting style. 'o' for scatter plot or '-' for line plot
        node_feature: Color lines. The default is None
        lw (int): Line width
        ms (int): Marker size
        scale (float): Scaling of arrows
        arrow_spacing (int): How many timesteps apart are the arrows spaced.
        axes_visible (bool): Whether to display axes
        alpha (float): transparancy of the markers

    Returns:
        matplotlib axes object.
    """
    dim = X.shape[1]
    assert dim in (2, 3), "Dimension must be 2 or 3."

    if ax is None:
        _, ax = create_axis(dim)

    c = set_colors(node_feature)[0]

    if dim == 2:
        if "o" in style:
            ax.scatter(X[:, 0], X[:, 1], c=c, s=ms, alpha=alpha)
        if "-" in style:
            if isinstance(c, (np.ndarray, list, tuple)):
                # per-segment colouring: draw each consecutive pair separately
                # NOTE(review): range(len(X) - 2) skips the final segment —
                # confirm whether this off-by-one is intentional
                for i in range(len(X) - 2):
                    ax.plot(
                        X[i : i + 2, 0],
                        X[i : i + 2, 1],
                        c=c[i],
                        linewidth=lw,
                        markersize=ms,
                        alpha=alpha,
                    )
            else:
                ax.plot(X[:, 0], X[:, 1], c=c, linewidth=lw, markersize=ms, alpha=alpha)
        if ">" in style:
            # subsample every arrow_spacing-th point before drawing arrows
            skip = (slice(None, None, arrow_spacing), slice(None))
            X, V = X[skip], V[skip]
            plot_arrows(X, V, ax, c, width=lw, scale=scale)

    elif dim == 3:
        if "o" in style:
            ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=c, s=ms, alpha=alpha)
        if "-" in style:
            if isinstance(c, (np.ndarray, list, tuple)):
                # NOTE(review): same possible off-by-one as in the 2D branch
                for i in range(len(X) - 2):
                    ax.plot(
                        X[i : i + 2, 0],
                        X[i : i + 2, 1],
                        X[i : i + 2, 2],
                        c=c[i],
                        linewidth=lw,
                        markersize=ms,
                        alpha=alpha,
                        zorder=3,
                    )
            else:
                ax.plot(
                    X[:, 0],
                    X[:, 1],
                    X[:, 2],
                    c=c,
                    linewidth=lw,
                    markersize=ms,
                    alpha=alpha,
                    zorder=3,
                )
        if ">" in style:
            skip = (slice(None, None, arrow_spacing), slice(None))
            X, V = X[skip], V[skip]
            plot_arrows(X, V, ax, c, width=lw, scale=scale)
    else:
        raise Exception(f"Data dimension is: {dim}. It needs to be 2 or 3 to allow plotting.")

    set_axes(ax, axes_visible=axes_visible)

    return ax
616
+
617
+
618
def plot_arrows(pos, signal, ax, c="k", alpha=1.0, width=1.0, scale=1.0):
    """Draw the vector field *signal* anchored at the points *pos* on *ax*.

    Handles both 2D axes (quiver) and 3D axes (one Arrow3D artist per point).
    """
    dim = pos.shape[1]

    if dim == 3:
        # scale arrow lengths relative to the extent of the point cloud
        signal_range = signal.max() - signal.min()
        signal_range = signal_range if signal_range != 0 else 1
        scaling = (pos.max() - pos.min()) / signal_range / scale

        arrow_prop_dict = {
            "alpha": alpha,
            "mutation_scale": width,
            "arrowstyle": "-|>",
            "zorder": 3,
        }
        for j in range(len(pos)):
            tail = pos[j]
            tip = pos[j] + signal[j] * scaling
            arrow = Arrow3D(
                [tail[0], tip[0]],
                [tail[1], tip[1]],
                [tail[2], tip[2]],
                **arrow_prop_dict,
                color=c[j] if len(c) > 1 else c,
            )
            ax.add_artist(arrow)

    if dim == 2:
        arrow_prop_dict = {"alpha": alpha, "zorder": 3, "scale_units": "inches"}
        ax.quiver(
            pos[:, 0],
            pos[:, 1],
            signal[:, 0],
            signal[:, 1],
            color=c,
            scale=scale,
            width=width,
            **arrow_prop_dict,
        )
653
+
654
+
655
class Arrow3D(FancyArrowPatch):
    """FancyArrowPatch whose 3D endpoints are projected into the 2D display."""

    def __init__(self, xs, ys, zs, *args, **kwargs):
        # initialise with dummy 2D endpoints; the real positions are computed
        # from the stored 3D vertices at draw/projection time
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        """Project the stored 3D endpoints and draw the resulting 2D arrow."""
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, _ = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)

    def do_3d_projection(self):
        """Update 2D positions from the current view; return min depth for z-ordering."""
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, self.axes.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))

        return np.min(zs)
676
+
677
+
678
def create_axis(*args, fig=None):
    """Create a 2D or 3D matplotlib axis.

    The first positional argument is the data dimension (2 or 3); any further
    positional arguments are forwarded to fig.add_subplot (default (1, 1, 1)).

    Returns:
        (figure, axes) pair.
    """
    dim, *subplot_args = args
    if not subplot_args:
        subplot_args = (1, 1, 1)

    if fig is None:
        fig = plt.figure()

    if dim == 2:
        ax = fig.add_subplot(*subplot_args)
    elif dim == 3:
        ax = fig.add_subplot(*subplot_args, projection="3d")
    else:
        raise Exception(f"Data dimension is {dim}. We can only plot 2D or 3D data.")

    return fig, ax
697
+
698
+
699
def get_limits(ax):
    """Return the current axis limits as [xlim, ylim] (plus zlim for 3D axes)."""
    limits = [ax.get_xlim(), ax.get_ylim()]
    if ax.name == "3d":
        limits.append(ax.get_zlim())
    return limits
706
+
707
+
708
def set_axes(ax, lims=None, padding=0.1, axes_visible=True):
    """Set axis limits with relative padding and optionally hide the axes.

    Args:
        ax: matplotlib axes (2D or 3D).
        lims: [xlim, ylim(, zlim)] pairs, e.g. as returned by get_limits.
        padding: fraction of the x-range added as margin.
        axes_visible: if False, remove tick labels and the axis frame.
    """
    if lims is not None:
        xlim = lims[0]
        ylim = lims[1]
        # the padding is derived from the x-range and reused for all axes
        pad = padding * (xlim[1] - xlim[0])

        ax.set_xlim([xlim[0] - pad, xlim[1] + pad])
        ax.set_ylim([ylim[0] - pad, ylim[1] + pad])
        if ax.name == "3d":
            zlim = lims[2]
            ax.set_zlim([zlim[0] - pad, zlim[1] + pad])

    if not axes_visible:
        ax.set_yticklabels([])
        ax.set_xticklabels([])
        if ax.name == "3d":
            ax.set_zticklabels([])
        ax.axis("off")
727
+
728
+
729
def set_colors(color, cmap="coolwarm"):
    """Map node values to plotting colors.

    Args:
        color: None (single black color), a sequence of floats (continuous
            colormap), a sequence of ints (categorical colors), or a sequence
            of RGB(A) tuples/arrays (returned unchanged).
        cmap: seaborn colormap name used for float-valued input.

    Returns:
        (colors, colorbar mappable or None) pair.
    """
    if color is None:
        return "k", None

    if isinstance(color[0], (float, np.floating)):
        # continuous values: normalise by the largest absolute value
        cmap = sns.color_palette(cmap, as_cmap=True)
        norm = plt.cm.colors.Normalize(0, np.max(np.abs(color)))
        colors = [cmap(norm(np.array(c).flatten())) for c in color]

    elif isinstance(color[0], (int, np.integer)):
        # categorical values: use the default matplotlib color cycle ("C0", "C1", ...)
        cmap = sns.color_palette()
        colors = [f"C{i}" for i in color]
        colors = [matplotlib.colors.to_rgba(c) for c in colors]
        cmap, norm = matplotlib.colors.from_levels_and_colors(np.arange(1, len(color) + 2), colors)
    elif isinstance(color[0], (np.ndarray, list, tuple)):
        # already RGB(A) values: pass through without a colorbar
        return color, None
    else:
        raise Exception("color must be a list of integers or floats")

    cbar = plt.cm.ScalarMappable(norm=norm, cmap=cmap)

    return colors, cbar
data/MARBLE/postprocessing.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Postprocessing module."""
2
+
3
+ import numpy as np
4
+
5
+ from MARBLE import geometry as g
6
+
7
+
8
def cluster(data, cluster_typ="kmeans", n_clusters=15, seed=0):
    """Cluster the learned embeddings and attach the result to data.

    Args:
        data: postprocessed PyG Batch object with an .emb attribute.
        cluster_typ: clustering algorithm passed to geometry.cluster.
        n_clusters: number of clusters.
        seed: random seed for the clustering.

    Returns:
        data with a new .clusters attribute (dict produced by geometry.cluster,
        augmented with per-dataset "slices").
    """
    clusters = g.cluster(data.emb, cluster_typ, n_clusters, seed)
    # reorder cluster labels so nearby clusters receive nearby indices
    clusters = g.relabel_by_proximity(clusters)

    # per-dataset slice boundaries of the concatenated node features
    clusters["slices"] = data._slice_dict["x"]  # pylint: disable=protected-access

    if data.number_of_resamples > 1:
        # keep one slice entry per dataset when each dataset was resampled
        clusters["slices"] = clusters["slices"][:: data.number_of_resamples]

    data.clusters = clusters

    return data
21
+
22
+
23
def distribution_distances(data, cluster_typ="kmeans", n_clusters=None, seed=0):
    """Return distance between datasets.

    Args:
        data: postprocessed PyG Batch object with an .emb attribute.
        cluster_typ: clustering algorithm, used only when n_clusters is given.
        n_clusters: number of clusters; if None, distances are computed
            directly on the embedded points instead of cluster histograms.
        seed: random seed for the clustering.

    Returns:
        data: PyG data object containing .out attribute, a nx2 matrix of embedded data
        clusters: sklearn cluster object
        dist (cxc matrix): pairwise distances where c is the number of clusters

    """

    emb = data.emb

    if n_clusters is not None:
        # k-means cluster
        data = cluster(data, cluster_typ, n_clusters, seed)

        # compute distances between clusters
        data.dist, data.gamma = g.compute_distribution_distances(
            clusters=data.clusters, slices=data.clusters["slices"]
        )

    else:
        # NOTE(review): this re-assignment looks like a no-op — emb is not
        # modified above in this branch; confirm intent
        data.emb = emb
        data.dist, _ = g.compute_distribution_distances(
            data=data, slices=data._slice_dict["x"]  # pylint: disable=protected-access
        )

    return data
51
+
52
+
53
def embed_in_2D(data, embed_typ="umap", manifold=None, seed=0):
    """Embed into 2D via for visualisation.

    Args:
        data: PyG input data
        embed_typ (string, optional): Embedding algorithm to use (tsne, umap, PCA)
        manifold (sklearn object, optional): Manifold object returned by some embedding algorithms
            (PCA, umap). Useful when trying to compare datasets.
        seed (int, optional): Random seed. The default is 0.

    Returns:
        PyG data object containing emb_2D attribute.
    """
    # NOTE(review): when data is a list, the attribute assignments below would
    # fail on a plain Python list — confirm callers always pass a data object
    if isinstance(data, list):
        emb = np.vstack([d.emb for d in data])
    else:
        emb = data.emb

    if hasattr(data, "clusters"):
        clusters = data.clusters
        # embed points and centroids together so they share the same projection
        emb = np.vstack([emb, clusters["centroids"]])
        emb_2D, data.manifold = g.embed(emb, embed_typ=embed_typ, manifold=manifold, seed=seed)
        # split the joint embedding back into points and centroids
        data.emb_2D, clusters["centroids"] = (
            emb_2D[: -clusters["n_clusters"]],
            emb_2D[-clusters["n_clusters"] :],
        )

    else:
        data.emb_2D, data.manifold = g.embed(emb, embed_typ=embed_typ, manifold=manifold, seed=seed)

    return data
data/MARBLE/preprocessing.py ADDED
@@ -0,0 +1,223 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Preprocessing module."""
2
+
3
+ import torch
4
+ from torch_geometric.data import Batch
5
+ from torch_geometric.data import Data
6
+ from torch_geometric.transforms import RandomNodeSplit
7
+
8
+ from MARBLE import geometry as g
9
+ from MARBLE import utils
10
+
11
+
12
def construct_dataset(
    anchor,
    vector,
    label=None,
    mask=None,
    graph_type="cknn",
    k=20,
    delta=1.0,
    frac_geodesic_nb=1.5,
    spacing=0.0,
    number_of_resamples=1,
    var_explained=0.9,
    local_gauges=False,
    seed=None,
    metric="euclidean",
    number_of_eigenvectors=None,
):
    """Construct PyG dataset from node positions and features.

    Args:
        anchor: matrix (or list of matrices) with position of points
        vector: matrix (or list of matrices) with feature values for each point
        label: any additional data labels used for plotting only
        mask: boolean array, that will be forced to be close (default is None)
        graph_type: type of nearest-neighbours graph: cknn (default), knn or radius
        k: number of nearest-neighbours to construct the graph
        delta: argument for cknn graph construction to decide the radius for each points.
        frac_geodesic_nb: number of geodesic neighbours to fit the gauges to
            to map to tangent space k*frac_geodesic_nb
        spacing: stopping criterion for furthest point sampling (0 disables subsampling)
        number_of_resamples: number of furthest point sampling runs to prevent bias (experimental)
        var_explained: fraction of variance explained by the local gauges
        local_gauges: is True, it will try to compute local gauges if it can (signal dim is > 2,
            embedding dimension is > 2 or dim embedding is not dim of manifold)
        seed: Specify for reproducibility in the furthest point sampling.
            The default is None, which means a random starting vertex.
        metric: metric used to fit proximity graph
        number_of_eigenvectors: int number of eigenvectors to use. Default: None, meaning use all.
    """

    anchor = [torch.tensor(a).float() for a in utils.to_list(anchor)]
    vector = [torch.tensor(v).float() for v in utils.to_list(vector)]
    num_node_features = vector[0].shape[1]

    if label is None:
        # default labels are just the node indices (used for plotting only)
        label = [torch.arange(len(a)) for a in utils.to_list(anchor)]
    else:
        label = [torch.tensor(lab).float() for lab in utils.to_list(label)]

    if mask is None:
        mask = [torch.zeros(len(a), dtype=torch.bool) for a in utils.to_list(anchor)]
    else:
        mask = [torch.tensor(m) for m in utils.to_list(mask)]

    if spacing == 0.0:
        # without subsampling, repeated resamples would be identical
        number_of_resamples = 1

    data_list = []
    for i, (a, v, l, m) in enumerate(zip(anchor, vector, label, mask)):
        for _ in range(number_of_resamples):
            if len(a) != 0:
                # even sampling of points
                if seed is None:
                    start_idx = torch.randint(low=0, high=len(a), size=(1,))
                else:
                    start_idx = 0

                sample_ind, _ = g.furthest_point_sampling(a, spacing=spacing, start_idx=start_idx)
                sample_ind, _ = torch.sort(sample_ind)  # this will make postprocessing easier
                a_, v_, l_, m_ = (
                    a[sample_ind],
                    v[sample_ind],
                    l[sample_ind],
                    m[sample_ind],
                )

                # fit graph to point cloud
                edge_index, edge_weight = g.fit_graph(
                    a_, graph_type=graph_type, par=k, delta=delta, metric=metric
                )

                # define data object
                data_ = Data(
                    pos=a_,
                    x=v_,
                    label=l_,
                    mask=m_,
                    edge_index=edge_index,
                    edge_weight=edge_weight,
                    num_nodes=len(a_),
                    num_node_features=num_node_features,
                    y=torch.ones(len(a_), dtype=int) * i,  # dataset index per node
                    sample_ind=sample_ind,
                )

                data_list.append(data_)

    # collate datasets
    batch = Batch.from_data_list(data_list)
    batch.degree = k
    batch.number_of_resamples = number_of_resamples

    # split into training/validation/test datasets
    split = RandomNodeSplit(split="train_rest", num_val=0.1, num_test=0.1)
    split(batch)

    return _compute_geometric_objects(
        batch,
        local_gauges=local_gauges,
        n_geodesic_nb=k * frac_geodesic_nb,
        var_explained=var_explained,
        number_of_eigenvectors=number_of_eigenvectors,
    )
125
+
126
+
127
def _compute_geometric_objects(
    data,
    n_geodesic_nb=10,
    var_explained=0.9,
    local_gauges=False,
    number_of_eigenvectors=None,
):
    """
    Compute geometric objects used later: local gauges, Levi-Civita connections
    gradient kernels, scalar and connection laplacians.

    Args:
        data: pytorch geometric data object
        n_geodesic_nb: number of geodesic neighbours to fit the tangent spaces to
        var_explained: fraction of variance explained by the local gauges
        local_gauges: whether to use local or global gauges
        number_of_eigenvectors: int number of eigenvectors to use. Default: None, meaning use all.

    Returns:
        data: pytorch geometric data object with the following new attributes
            kernels (list of d (nxn) matrices): directional kernels
            L (nxn matrix): scalar laplacian
            Lc (ndxnd matrix): connection laplacian
            gauges (nxdxd): local gauges at all points
            par (dict): updated dictionary of parameters
            local_gauges: whether to use local gauges

    """
    n, dim_emb = data.pos.shape
    dim_signal = data.x.shape[1]
    print(f"\n---- Embedding dimension: {dim_emb}", end="")
    print(f"\n---- Signal dimension: {dim_signal}", end="")

    # disable vector computations if 1) signal is scalar or 2) embedding dimension
    # is <= 2. In case 2), either M=R^2 (manifold is whole space) or case 1).
    if dim_signal == 1:
        print("\nSignal dimension is 1, so manifold computations are disabled!")
        local_gauges = False
    if dim_emb <= 2:
        print("\nEmbedding dimension <= 2, so manifold computations are disabled!")
        local_gauges = False
    if dim_emb != dim_signal:
        print("\nEmbedding dimension /= signal dimension, so manifold computations are disabled!")

    if local_gauges:
        try:
            gauges, Sigma = g.compute_gauges(data, n_geodesic_nb=n_geodesic_nb)
        except Exception as exc:
            raise Exception(
                "\nCould not compute gauges (possibly data is too sparse or the \
                number of neighbours is too small)"
            ) from exc
    else:
        # global gauges: identity frame at every node
        gauges = torch.eye(dim_emb).repeat(n, 1, 1)

    L = g.compute_laplacian(data)

    if local_gauges:
        data.dim_man = g.manifold_dimension(Sigma, frac_explained=var_explained)
        print(f"---- Manifold dimension: {data.dim_man}")

        # restrict the gauges to the estimated manifold dimension
        gauges = gauges[:, :, : data.dim_man]
        R = g.compute_connections(data, gauges)

        print("\n---- Computing kernels ... ", end="")
        kernels = g.gradient_op(data.pos, data.edge_index, gauges)
        # expand kernels to act blockwise on the tangent spaces, then rotate
        # blocks by the Levi-Civita connections
        kernels = [utils.tile_tensor(K, data.dim_man) for K in kernels]
        kernels = [K * R for K in kernels]

        Lc = g.compute_connection_laplacian(data, R)

    else:
        print("\n---- Computing kernels ... ", end="")
        kernels = g.gradient_op(data.pos, data.edge_index, gauges)
        Lc = None

    if number_of_eigenvectors is None:
        print(
            """\n---- Computing full spectrum ...
            (if this takes too long, then run construct_dataset()
            with number_of_eigenvectors specified) """,
            end="",
        )
    else:
        print(
            f"\n---- Computing spectrum with {number_of_eigenvectors} eigenvectors...",
            end="",
        )
    L = g.compute_eigendecomposition(L, k=number_of_eigenvectors)
    Lc = g.compute_eigendecomposition(Lc, k=number_of_eigenvectors)

    data.kernels = [
        utils.to_SparseTensor(K.coalesce().indices(), value=K.coalesce().values()) for K in kernels
    ]
    data.L, data.Lc, data.gauges, data.local_gauges = L, Lc, gauges, local_gauges

    return data
data/MARBLE/smoothing.py ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Smoothing module."""
2
+
3
+ import torch
4
+
5
+
6
def scalar_diffusion(x, t, method="matrix_exp", par=None):
    """Diffuse a scalar (columnwise) signal for time t.

    Args:
        x (nxk tensor): signal; a 1D tensor is treated as a single column.
        t: diffusion time (scalar for "matrix_exp"; tensor for "spectral").
        method: "matrix_exp" (dense matrix exponential of the Laplacian) or
            "spectral" (filtering in the Laplacian eigenbasis).
        par: Laplacian (sparse or dense) for "matrix_exp"; a pair
            (eigenvalues, eigenvectors) for "spectral".

    Returns:
        nxk tensor of the diffused signal.

    Raises:
        NotImplementedError: for any other method.
    """
    if len(x.shape) == 1:
        x = x.unsqueeze(1)

    if method == "matrix_exp":
        if par.is_sparse:
            par = par.to_dense()
        # fix: previously called par.to_dense() a second time on the already
        # densified tensor; the redundant conversion is removed
        return torch.matrix_exp(-t * par).mm(x)

    if method == "spectral":
        assert (
            isinstance(par, (list, tuple)) and len(par) == 2
        ), "For spectral method, par must be a tuple of \
            eigenvalues, eigenvectors!"
        evals, evecs = par

        # Transform to spectral
        x_spec = torch.mm(evecs.T, x)

        # Diffuse: attenuate each mode by exp(-lambda * t)
        diffusion_coefs = torch.exp(-evals.unsqueeze(-1) * t.unsqueeze(0))
        x_diffuse_spec = diffusion_coefs * x_spec

        # Transform back to per-vertex
        return evecs.mm(x_diffuse_spec)

    raise NotImplementedError
34
+
35
+
36
def vector_diffusion(x, t, Lc, L=None, method="spectral", normalise=True):
    """Diffuse a vector field using the connection Laplacian.

    Args:
        x (nxd tensor): vector signal at each node.
        t: diffusion time.
        Lc: connection Laplacian; (eigenvalues, eigenvectors) pair for
            method="spectral", a matrix otherwise.
        L: scalar Laplacian, required when normalise=True.
        method: diffusion method forwarded to scalar_diffusion.
        normalise: if True, rescale so that vector magnitudes diffuse like scalars.

    Returns:
        nxd tensor of the diffused vector field.
    """
    n, d = x.shape[0], x.shape[1]

    if method == "spectral":
        assert len(Lc) == 2, "Lc must be a tuple of eigenvalues, eigenvectors!"
        nd = Lc[1].shape[0]
    else:
        nd = Lc.shape[0]

    assert (
        n * d % nd
    ) == 0, "Data dimension must be an integer multiple of the dimensions \
        of the connection Laplacian!"

    # vector diffusion with connection Laplacian
    out = x.view(nd, -1)
    out = scalar_diffusion(out, t, method, Lc)
    out = out.view(x.shape)

    if normalise:
        assert L is not None, "Need Laplacian for normalised diffusion!"
        # diffuse the magnitudes separately with the scalar Laplacian ...
        x_abs = x.norm(dim=-1, p=2, keepdim=True)
        out_abs = scalar_diffusion(x_abs, t, method, L)
        # ... and normalise by the diffusion of a constant field
        ind = scalar_diffusion(torch.ones(x.shape[0], 1).to(x.device), t, method, L)
        out = out * out_abs / (ind * out.norm(dim=-1, p=2, keepdim=True))

    return out
data/MARBLE/utils.py ADDED
@@ -0,0 +1,277 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utils module."""
2
+
3
+ import multiprocessing
4
+ from functools import partial
5
+ from typing import NamedTuple
6
+ from typing import Optional
7
+ from typing import Tuple
8
+
9
+ import numpy as np
10
+ import pandas as pd
11
+ import torch
12
+ from torch import Tensor
13
+ from torch_sparse import SparseTensor
14
+ from tqdm import tqdm
15
+
16
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
17
+ torch.manual_seed(0)
18
+
19
+
20
def print_settings(model):
    """Print the model's parameter dictionary and size statistics to screen."""

    print("\n---- Settings: \n")

    for x in model.params:
        print(x, ":", model.params[x])

    # count only trainable parameters
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    # input width of the encoder MLP
    n_features = model.enc.in_channels

    print("\n---- Number of features to pass to the MLP: ", n_features)
    print("---- Total number of parameters: ", n_parameters)
    print(f"\nUsing device {device}")
34
+
35
+
36
def parallel_proc(fun, iterable, inputs, processes=-1, desc=""):
    """Distribute an iterable function between processes.

    Args:
        fun: callable of the form fun(inputs, item).
        iterable: items to map over.
        inputs: constant arguments shared by every call.
        processes: number of worker processes; -1 uses all CPUs.
        desc: tqdm progress-bar description.

    Returns:
        List of results in the order of `iterable`.
    """
    if processes == -1:
        processes = multiprocessing.cpu_count()

    if processes > 1 and len(iterable) > 1:
        with multiprocessing.Pool(processes=processes) as pool:
            # bind the constant inputs so workers only receive the varying item
            fun = partial(fun, inputs)
            result = list(tqdm(pool.imap(fun, iterable), total=len(iterable), desc=desc))
    else:
        # serial fallback for a single process or a single item
        result = [fun(inputs, i) for i in tqdm(iterable, desc=desc)]

    return result
50
+
51
+
52
def move_to_gpu(model, data, adjs=None):
    """Move the model, data tensors and (optionally) sampled adjacencies to the device.

    Returns:
        (model, data, adjs) with all tensors on `device`.
    """

    assert hasattr(data, "kernels"), "It seems that data is not preprocessed. Run preprocess(data)!"

    model = model.to(device)
    data.x = data.x.to(device)
    data.pos = data.pos.to(device)
    data.mask = data.mask.to(device)

    if hasattr(data, "L"):
        # L is either an (eigenvalues, eigenvectors) pair or a single matrix
        # NOTE(review): len(L) == 2 is also true for any 2-row dense tensor —
        # confirm the Laplacian can never have exactly two rows here
        if len(data.L) == 2:
            data.L = [_l.to(device) for _l in data.L]
        else:
            data.L = data.L.to(device)
    else:
        data.L = None

    if hasattr(data, "Lc"):
        # same pair-or-matrix convention as L above
        if len(data.Lc) == 2:
            data.Lc = [_l.to(device) for _l in data.Lc]
        else:
            data.Lc = data.Lc.to(device)
    else:
        data.Lc = None

    data.kernels = [K.to(device) for K in data.kernels]
    data.gauges = data.gauges.to(device)

    if adjs is None:
        return model, data, None

    adjs = [adj.to(device) for adj in adjs]
    return model, data, adjs
86
+
87
+
88
def detach_from_gpu(model, data, adjs=None):
    """Detach data tensors from the computation graph and move them to the CPU.

    Returns:
        (model, data, adjs) with data tensors detached on the CPU.
    """

    assert hasattr(data, "kernels"), "It seems that data is not preprocessed. Run preprocess(data)!"

    model = model.to(device)
    data.x = data.x.detach().cpu()
    data.pos = data.pos.detach().cpu()
    data.mask = data.mask.detach().cpu()

    if hasattr(data, "L"):
        # NOTE(review): unlike move_to_gpu, this assumes L is always iterable
        # (an (evals, evecs) pair) — confirm a single-matrix L never reaches here
        data.L = [_l.detach().cpu() for _l in data.L]
    else:
        data.L = None

    if hasattr(data, "Lc"):
        data.Lc = [_l.detach().cpu() for _l in data.Lc]
    else:
        data.Lc = None

    data.kernels = [K.detach().cpu() for K in data.kernels]
    data.gauges = data.gauges.detach().cpu()

    if adjs is None:
        return model, data, None

    for i, adj in enumerate(adjs):
        # adj is (edge_index, e_id, size); the size tuple needs no detaching
        adjs[i] = [adj[0].detach().cpu(), adj[1].detach().cpu(), adj[2]]
    return model, data, adjs
117
+
118
+
119
def to_SparseTensor(edge_index, size=None, value=None):
    """
    Adjacency matrix as torch_sparse tensor

    Args:
        edge_index (2xE matrix): edge indices
        size: pair (rows,cols) giving the size of the matrix.
            The default is the largest node of the edge_index.
        value: list of weights. The default is unit values.

    Returns:
        adjacency matrix in SparseTensor format
    """
    if value is None:
        # unweighted graph: unit weight per edge
        value = torch.ones(edge_index.shape[1])
    if size is None:
        # infer the matrix size from the largest row/column index
        size = (int(edge_index[0].max()) + 1, int(edge_index[1].max()) + 1)

    adj = SparseTensor(
        row=edge_index[0], col=edge_index[1], value=value, sparse_sizes=(size[0], size[1])
    )

    return adj
142
+
143
+
144
def np2torch(x, dtype=None):
    """Convert numpy to torch.

    Args:
        x: numpy array.
        dtype: None for float32 output, or "double" (see note below).

    Raises:
        NotImplementedError: for any other dtype value.
    """
    if dtype is None:
        return torch.from_numpy(x).float()
    if dtype == "double":
        # NOTE(review): despite the name, "double" returns int64 tensors
        # (index dtype), not float64 — confirm callers rely on this behaviour
        return torch.tensor(x, dtype=torch.int64)
    raise NotImplementedError
151
+
152
+
153
def to_list(x):
    """Wrap *x* in a list unless it already is one."""
    return x if isinstance(x, list) else [x]
159
+
160
+
161
def to_pandas(x, augment_time=True):
    """Convert a 2D numpy array to a pandas DataFrame.

    Args:
        x: 2D array of shape (n_samples, n_features).
        augment_time: if True, prepend a "Time" column holding the row index.

    Returns:
        DataFrame with string column names "0", "1", ... (preceded by "Time"
        when augment_time is True) and a 0..n-1 integer index.
    """
    columns = [str(i) for i in range(x.shape[1])]

    if augment_time:
        # prepend the sample index as an explicit "Time" column
        xaug = np.hstack([np.arange(len(x))[:, None], x])
        df = pd.DataFrame(xaug, columns=["Time"] + columns, index=np.arange(len(x)))
    else:
        # bug fix: this branch previously referenced the undefined name `xaug`,
        # raising NameError whenever augment_time=False
        df = pd.DataFrame(x, columns=columns, index=np.arange(len(x)))

    return df
172
+
173
+
174
class EdgeIndex(NamedTuple):
    """Immutable container bundling an edge index with edge ids and a size."""

    edge_index: Tensor  # 2xE edge indices
    e_id: Optional[Tensor]  # ids of the edges, or None
    size: Tuple[int, int]  # (rows, cols) size associated with the edge index

    def to(self, *args, **kwargs):
        """Move the contained tensors (e.g. to a device), returning a new EdgeIndex."""
        edge_index = self.edge_index.to(*args, **kwargs)
        e_id = self.e_id.to(*args, **kwargs) if self.e_id is not None else None
        return EdgeIndex(edge_index, e_id, self.size)
186
+
187
+
188
def expand_index(ind, dim):
    """Expand each index i into the consecutive run i*dim, ..., i*dim + dim - 1.

    Equivalent to interleaving dim incremented copies of ind.
    """
    offsets = torch.arange(dim)
    # broadcast: one row per original index, one column per offset
    expanded = ind.unsqueeze(1) * dim + offsets.unsqueeze(0)
    return expanded.flatten()
196
+
197
+
198
def to_block_diag(sp_tensors):
    """Assemble a list of sparse COO tensors into one block-diagonal sparse tensor.

    Each block is shifted by the cumulative (rows, cols) size of the blocks
    before it. The result's size is inferred from the largest indices.
    """
    indices = []
    values = []
    offset = torch.zeros(2, 1, dtype=torch.long)

    for t in sp_tensors:
        # shift this block's indices by the sizes of all preceding blocks
        indices.append(t.indices() + offset)
        values.append(t.values())
        offset = offset + torch.tensor(t.size()).unsqueeze(1)

    return torch.sparse_coo_tensor(torch.hstack(indices), torch.hstack(values))
212
+
213
+
214
def expand_edge_index(edge_index, dim=1):
    """When using rotations, we replace nodes by vector spaces so
    need to expand adjacency matrix from nxn -> n*dimxn*dim matrices.

    Args:
        edge_index (2xE tensor): edge indices.
        dim: dimension of the vector space attached to each node.

    Returns:
        2x(E*dim^2) tensor in which every edge (u, v) is replaced by all
        dim*dim edges between row block [u*dim, u*dim+dim) and column block
        [v*dim, v*dim+dim), on the same device as the input.
    """

    if dim == 1:
        return edge_index

    # the expansion is done on the CPU, then moved back to the original device
    dev = edge_index.device
    if dev != "cpu":
        edge_index = edge_index.to("cpu")

    n = edge_index.shape[1]
    ind = [torch.tensor([i, j]) for i in range(dim) for j in range(dim)]
    edge_index = [edge_index * dim + i.unsqueeze(1) for i in ind]
    edge_index = torch.stack(edge_index, dim=2).view(2, n * len(ind))

    if dev != "cpu":
        # bug fix: Tensor.to() is not in-place — the result must be reassigned,
        # otherwise the returned tensor silently stays on the CPU
        edge_index = edge_index.to(dev)

    return edge_index
234
+
235
+
236
def tile_tensor(tensor, dim):
    """Enlarge nxn tensor to n*dim x n*dim block matrix. Effectively
    computing a sparse version of torch.kron(K, torch.ones((dim,dim)))"""

    tensor = tensor.coalesce()
    edge_index = tensor.indices()
    # replace each nonzero (i, j) by the full dim x dim block of index pairs
    edge_index = expand_edge_index(edge_index, dim=dim)
    # each original value is repeated for all dim*dim entries of its block
    return torch.sparse_coo_tensor(edge_index, tensor.values().repeat_interleave(dim * dim))
244
+
245
+
246
def restrict_dimension(sp_tensor, d, m):
    """Restrict a sparse (n*d)x(n*d) tensor to the first m dimensions per node.

    Rows and columns come in blocks of d per node; only the leading m entries
    of every block are kept.

    Args:
        sp_tensor: sparse square tensor whose rows/columns come in blocks of d.
        d: dimension of the block attached to each node.
        m: number of leading dimensions to keep (m <= d).

    Returns:
        Coalesced sparse tensor restricted to the kept rows and columns.
    """
    n = sp_tensor.size(0)
    idx = torch.ones(n)
    # bug fix: drop every dimension i in [m, d); previously the loop repeated
    # `idx[m::d] = 0`, zeroing only dimension m and keeping dimensions m+1..d-1
    for i in range(m, d):
        idx[i::d] = 0
    idx = torch.where(idx)[0]
    sp_tensor = torch.index_select(sp_tensor, 0, idx).coalesce()
    return torch.index_select(sp_tensor, 1, idx).coalesce()
255
+
256
+
257
def restrict_to_batch(sp_tensor, idx):
    """Restrict a sparse tensor to the rows (and optionally columns) of a batch.

    Args:
        sp_tensor: sparse tensor to slice.
        idx: list of one index tensor (rows only) or two (rows, columns).

    Returns:
        Coalesced sparse tensor restricted to the given indices.

    Raises:
        NotImplementedError: if more than two index tensors are given.
    """
    batch_idx = [i.to(sp_tensor.device) for i in idx]

    if len(batch_idx) == 1:
        return torch.index_select(sp_tensor, 0, batch_idx[0]).coalesce()

    if len(batch_idx) == 2:
        rows = torch.index_select(sp_tensor, 0, batch_idx[0])
        return torch.index_select(rows, 1, batch_idx[1]).coalesce()

    raise NotImplementedError
269
+
270
+
271
+ def standardize(X):
272
+ """Standarsise data row-wise"""
273
+
274
+ mean = X.mean(axis=0, keepdims=True)
275
+ std = X.std(axis=0, keepdims=True)
276
+
277
+ return (X - mean) / std
data/doc/Makefile ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Minimal makefile for Sphinx documentation
2
+ #
3
+
4
+ # You can set these variables from the command line, and also
5
+ # from the environment for the first two.
6
+ SPHINXOPTS = #-W # treat warnings as errors
7
+ SPHINXBUILD = sphinx-build
8
+ SPHINXPROJ = MARBLE
9
+ SOURCEDIR = source
10
+ BUILDDIR = ../../MARBLE-docs
11
+ PDFBUILDER = /tmp
12
+ PDF = ../manual.pdf
13
+
14
+ # Put it first so that "make" without argument is like "make help".
15
+ help:
16
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
17
+
18
+ .PHONY: help Makefile
19
+
20
+ full: html
21
+ cd $(BUILDDIR)/html; git add . ; git commit -m "rebuilt docs"; git push origin gh-pages
22
+
23
+ # Catch-all target: route all unknown targets to Sphinx using the new
24
+ # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
25
+ %: Makefile
26
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
data/doc/assets/illustration_for_github.png ADDED

Git LFS Details

  • SHA256: 79e1a5f2d57e0bf6f35de439f9ade433672c1aece4a2173e8562c0ce591162fd
  • Pointer size: 130 Bytes
  • Size of remote file: 54 kB
data/doc/index_readme.md ADDED
@@ -0,0 +1,272 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MARBLE - Manifold Representation Basis Learning
2
+
3
+ MARBLE is a fully unsupervised geometric deep learning method that can
4
+
5
+ 1. intrinsically represent vector fields over manifolds, such as those arising in neural recordings or dissipative dynamical systems, but it is not limited to dynamical systems
6
+ 2. perform unbiased comparisons across dynamical systems or parameter conditions by jointly embedding representations
7
+ 3. can operate in geometry-aware or geometry-agnostic modes to test the contribution of manifold dynamics and geometry.
8
+
9
+ The code is built around [PyG (PyTorch Geometric)](https://pytorch-geometric.readthedocs.io/en/latest/notes/installation.html).
10
+
11
+ ## Cite
12
+
13
+ If you find this package useful or inspirational, please cite our work as follows
14
+
15
+ ```
16
+ @misc{gosztolai2023interpretable,
17
+ title={Interpretable statistical representations of neural population dynamics and geometry},
18
+ author={Adam Gosztolai and Robert L. Peach and Alexis Arnaudon and Mauricio Barahona and Pierre Vandergheynst},
19
+ year={2023},
20
+ eprint={2304.03376},
21
+ archivePrefix={arXiv},
22
+ primaryClass={cs.LG}
23
+ }
24
+ ```
25
+
26
+
27
+ ## Installation
28
+
29
+ The code is tested for both cpu and gpu (CUDA) machines running Linux or OSX. Although smaller examples run fast on cpu, for larger datasets, it is highly recommended that you use a gpu machine.
30
+
31
+ We recommend you install the code in a fresh Anaconda virtual environment, as follows.
32
+
33
+ First, clone this repository,
34
+
35
+ ```
36
+ git clone https://github.com/agosztolai/GeoDySys
37
+ ```
38
+
39
+ Then, create an new anaconda environment using the provided environment file that matches your system.
40
+
41
+ For Linux machines with CUDA:
42
+
43
+ ```
44
+ conda env create -f environment.yml
45
+ ```
46
+
47
+ For Mac without CUDA:
48
+
49
+ ```
50
+ conda env create -f environment_cpu_osx.yml
51
+ ```
52
+
53
+ This will install all the required dependencies. Finally, install by running inside the main folder
54
+
55
+ ```
56
+ pip install .
57
+ ```
58
+
59
+ ## Quick start
60
+
61
+ We suggest you study at least the example of a [simple vector fields over flat surfaces](https://github.com/agosztolai/MARBLE/blob/main/examples/ex_vector_field_flat_surface.py) to understand what behaviour to expect.
62
+
63
+ Briefly, MARBLE takes two inputs
64
+
65
+ 1. `pos` - a list of `nxd` arrays, each defining a point cloud describing the geometry of a manifold
66
+ 2. `x` - a list of `nxD` arrays, defining a signal over the respective manifolds in 1. For dynamical systems, D=d, but our code can also handle signals of other dimensions. Read more about [inputs](#inputs) and [different conditions](#conditions).
67
+
68
+ Using these inputs, you can construct a dataset for MARBLE.
69
+
70
+ ```
71
+ import MARBLE
72
+ data = MARBLE.construct_dataset(pos, features=x)
73
+ ```
74
+
75
+ The main attributes are `data.pos` - manifold positions concatenated, `data.x` - manifold signals concatenated and `data.y` - identifiers that tell you which manifold the point belongs to. Read more about [other useful data attributes](#construct).
76
+
77
+ Now you can initialise and train a MARBLE model. Read more about [training parameters](#training).
78
+
79
+ ```
80
+ from MARBLE import net
81
+ model = MARBLE.net(data)
82
+ model.run_training(data)
83
+ ```
84
+
85
+ By default, MARBLE operates in geometry-aware mode. You can enable the geometry-agnostic mode by changing the initialisation step to
86
+
87
+ ```
88
+ model = MARBLE.net(data, params = {'inner_product_features': True})
89
+ ```
90
+
91
+ Read more about the geometry-aware and geometry-agnostic modes [here](#innerproduct)
92
+
93
+ After you have trained your model, you can evaluate it on your dataset, or another dataset, to obtain an embedding of all manifold points in a joint latent space (3-dimensional by default) based on their local vector field features.
94
+
95
+ ```
96
+ data = model.evaluate(data) #adds an attribute `data.emb`
97
+ ```
98
+
99
+ To recover the embeddings of individual vector fields, use `data.emb[data.y==0]`.
100
+
101
+ You can then compare datasets by running
102
+
103
+ ```
104
+ from MARBLE import postprocessing
105
+ data = postprocessing.distribution_distances(data) #adds an attribute `data.dist` containing a matrix of pairwise distance between vector field representations
106
+ ```
107
+
108
+ Finally, you can perform some visualisation
109
+
110
+ ```
111
+ from MARBLE import plotting
112
+ data = postprocessing.embed_in_2D(data) #adds an attribute `data.emb_2D` containing a 2D embedding of the MARBLE output using UMAP by default
113
+ plotting.fields(data) #visualise the original vector fields over manifolds
114
+ plotting.embedding(data, data.y.numpy()) #visualise embedding
115
+ ```
116
+
117
+ There are loads of parameters to adjust these plots, so have a look at the respective functions.
118
+
119
+ ## Examples
120
+
121
+ The folder [/examples](https://github.com/agosztolai/MARBLE/tree/main/examples) contains scripts for some basic examples and other scripts to reproduce the results in our paper.
122
+
123
+ ## Further details
124
+
125
+ .. _inputs:
126
+
127
+ ### More on inputs
128
+
129
+ If you measure time series observables, such as neural firing rates, you can start with a list of variable length time series under a given condition, e.g., `ts_1`, `ts_2`. We assume these are measurements from the same dynamical system, i.e., the sample points making up these trajectories are drawn from the same manifold, defining its shape `pos = np.vstack([ts_1, ts_2])`.
130
+
131
+ If you do not directly have access to the velocities, you can approximate them as `x = np.vstack([np.diff(ts_1, axis=0), np.diff(ts_2, axis=0)])` and take `pos = np.vstack([ts_1[:-1,:], ts_2[:-1,:]])` to ensure `pos` and `x` have the same length.
132
+
133
+ If you just want to play around with dynamical systems, why not try our (experimental) sister package [DE_library](https://github.com/agosztolai/DE_library).
134
+
135
+ .. _conditions:
136
+
137
+ ### More on different conditions
138
+
139
+ Comparing dynamics in a data-driven way is equivalent to comparing the corresponding vector fields based on their respective sample sets. The dynamics to be compared might correspond to different experimental conditions (stimulation conditions, genetic perturbations etc.), dynamical systems (different task, different brain region).
140
+
141
+ Suppose we have the data pairs `pos1, pos2` and `x1, x2`. Then we may concatenate them as a list to ensure that our pipeline handles them independently (on different manifolds), but embeds them jointly in the same space.
142
+
143
+ ```
144
+ pos_list, x_list = [pos1, pos2], [x1, x2]
145
+ ```
146
+
147
+ Note, it is sometimes useful to consider that two vector fields lie on independent manifolds (providing them as a list) even when we want to *discover* the contrary. However, when we know that two vector fields lie on the same manifold, then it can be advantageous to stack their corresponding samples (stacking them) as this will enforce geometric relationships between them through the proximity graph.
148
+
149
+ .. _construct:
150
+
151
+ ### More on constructing data object
152
+
153
+ Our pipeline is built around a Pytorch Geometric data object, which we can obtain by running the following constructor.
154
+
155
+ ```
156
+ import MARBLE
157
+ data = MARBLE.construct_dataset(pos, features=x, stop_crit=0.03, graph_type='cknn', k=15, local_gauge=False)
158
+ ```
159
+
160
+ This command will do several things.
161
+
162
+ 1. Subsample the point cloud using farthest point sampling to achieve even sampling density. Using `stop_crit=0.03` means the average distance between the subsampled points will equal to 3% of the manifold diameter.
163
+ 2. Fit a nearest neighbour graph to each point cloud, here using the `graph_type=cknn` method using `k=15` nearest neighbours. We implemented other graph algorithms, but cknn typically works. Note, `k` should be large enough to approximate the tangent space, but small enough not to connect (geodesically) distant points of the manifold. The more data you have the higher `k` you can use.
164
+ 3. Perform operations in local (manifold) gauges or global coordinates. Note that `local_gauge=False` should be used whenever the manifold has negligible curvature on the scale of the local feature. Setting `local_gauge=True` means that the code performs tangent space alignments before computing gradients, however, this will increase the cost of the computations $m^2$-fold, where $m$ is the manifold dimension, because points will be treated as vector spaces. See the example of a [simple vector fields over curved surfaces](https://github.com/agosztolai/MARBLE/blob/main/examples/ex_vector_field_curved_surface.py) for illustration.
165
+
166
+
167
+ The final data object contains the following attributes (among others):
168
+
169
+ ```
170
+ data.pos: positions `pos` concatenated across manifolds
171
+ data.x: vectors `x` concatenated across manifolds
172
+ data.y: labels for each points denoting which manifold it belongs to
173
+ data.edge_index: edge list of proximity graph (each manifold gets its own graph, disconnected from others)
174
+ data.gauges: local coordinate bases when `local_gauge=True`
175
+ ```
176
+
177
+ ### How to pick good parameters
178
+
179
+ Choosing good parameters for the description of manifold, in particular `stop_crit` and `k`, can be essential for the success of your analysis. The illustration below shows three different scenarios to give you intuition.
180
+
181
+ 1. (left) **'Optimal' scenario.** Here, the sample spacing along trajectories and between trajectories is comparable and `k` is choosen such that the proximity graph connects to neighbours but no further. At the same time `k` is large enough to have enough neighbours for gradient approximation. Notice the trade-off here.
182
+ 2. (middle) **Suboptimal scenario 1.** Here, the sample spacing is much smaller along the trajectory than between trajectories. This is probably frequently encountered when there are few trials relative to the dimension of the manifold and size of basin of attraction. Fitting a proximity graph to this dataset will lead to either a poorly connected manifold or having too many neighbours pointing to consecutive points on the trajectory, leading to poor gradient approximation. Also, too dense discretisation will mean that second-order features will not pick up on second-order features (curvature)of the trajectories. **Fix:** either increase `stop_crit` and/or subsample your trajectories before using `construct_dataset()`.
183
+ 3. (right) **Suboptimal scenario 2.** Here, there are too few sample points relative to the curvature of the trajectories. As a result, the gradient approximation will be inaccurate. **Fix:** decrease `stop_crit` or collect more data.
184
+
185
+ ![illustration](../assets/illustration_for_github.png)
186
+
187
+
188
+ .. _training:
189
+
190
+ ### Training
191
+
192
+ You are ready to train! This is straightforward.
193
+
194
+ You first specify the hyperparameters. The key ones are the following, which will work for many settings, but see [here](https://github.com/agosztolai/MARBLE/blob/main/MARBLE/default_params.yaml) for a complete list.
195
+
196
+ ```
197
+ params = {'epochs': 50, #optimisation epochs
198
+ 'hidden_channels': 32, #number of internal dimensions in MLP
199
+ 'out_channels': 5,
200
+ 'inner_product_features': True,
201
+ }
202
+
203
+ ```
204
+
205
+ Then proceed by constructing a network object
206
+
207
+ ```
208
+ model = MARBLE.net(data, params=params)
209
+ ```
210
+
211
+ Finally, launch training. The code will continuously save checkpoints during training with timestamps.
212
+
213
+ ```
214
+ model.run_training(data, outdir='./outputs')
215
+ ```
216
+
217
+ If you have previously trained a network, or have interrupted training, you can load the network directly as
218
+
219
+ ```
220
+ model = MARBLE.net(data, loadpath=loadpath)
221
+ ```
222
+
223
+ where loadpath can be either a path to the model (with a specific timestamp), or a directory to automatically load the latest model. By running `MARBLE.run_training()`, training will resume from the last checkpoint.
224
+
225
+ .. _innerproduct:
226
+
227
+ ### Geometry-aware and geometry-agnostic modes
228
+
229
+ One of the main features of our method is the ability to run in two different modes
230
+
231
+ 1. Geometry-aware mode - learn manifold geometry and dynamics
232
+ 2. Geometry-agnostic mode - learn dynamics only
233
+
234
+ To enable geometry-agnostic mode, set `inner_product_features=True` in training `params`. This engages an additional layer in the network after the computation of gradients, which makes them rotation invariant.
235
+
236
+ As a slight cost of expressivity, this feature enables the orientation- and geometry-independent representation of dynamics over the manifolds. Amongst others, this allows one to recognise similar dynamics across different manifolds. See [RNN example](https://github.com/agosztolai/MARBLE/blob/doc/examples/RNN/RNN.ipynb) for an illustration.
237
+
238
+
239
+ ## Troubleshooting guide
240
+
241
+ Training is successful when features are recognised to be similar across distinct vector fields, with their own manifolds and independent proximity graphs. To achieve this, follow these useful pieces of advice (mostly general ML practises):
242
+
243
+ 1. Check that training has converged, i.e., the validation loss is no longer decreasing.
244
+ 2. Check that convergence is smooth, i.e., there are no big jumps in the validation loss.
245
+ 3. Check that there is no big gap between training loss and validation loss (generalisation gap).
246
+
247
+ Seeing problems with the above would be possible signs your solution will be suboptimal and will likely not generalise well. If you see either of these, try the following
248
+ * increase training time (increase `epochs`)
249
+ * increase your data (e.g., decrease `stop_crit` and construct dataset again)
250
+ * decrease number of parameters (decrease `hidden_channels`, or decrease order, try `order=1`)
251
+ * improve the gradient approximation (increase `k`, but see above)
252
+ * disable local gauges (`local_gauge=False`)
253
+
254
+ If you still do not get good convergence, your data may be very noisy.
255
+ * Try enabling diffusion (`diffusion=True` in training `params`)
256
+
257
+ If this still does not work, check if there are very small or very large vector magnitudes in your dataset, filter them out and try again.
258
+
259
+
260
+ ## Stay in touch
261
+
262
+ If all hopes are lost, or if you want to chat about your use case, get in touch or raise an issue! We are happy to help and looking to further develop this package to make it as useful as possible.
263
+
264
+
265
+ ## References
266
+
267
+ The following packages were inspirational during the development of this code:
268
+
269
+ * [DiffusionNet](https://github.com/nmwsharp/diffusion-net)
270
+ * [Directional Graph Networks](https://github.com/Saro00/DGN)
271
+ * [pyEDM](https://github.com/SugiharaLab/pyEDM)
272
+ * [Parallel Transport Unfolding](https://github.com/mbudnins/parallel_transport_unfolding)
data/doc/source/conf.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Sphinx configuration for the MARBLE documentation build.
#
# For the full list of built-in configuration values, see:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information

project = "MARBLE"
copyright = "2023, Adam Gosztolai"
author = "Adam Gosztolai"

# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration

extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.doctest",
    "sphinx.ext.autosummary",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "sphinx.ext.githubpages",
    "sphinx.ext.napoleon",
    "alabaster",
    "sphinx_mdinclude",
    "nbsphinx",
]

templates_path = ["_templates"]
exclude_patterns = ["Thumbs.db", ".DS_Store", "_build", "**.ipynb_checkpoints"]

# Document members in source order and merge class/__init__ docstrings.
autodoc_member_order = "bysource"
autoclass_content = "both"

# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output

html_theme = "furo"
# html_static_path = ["_static"]
data/doc/source/dataloader.rst ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Dataloader module
2
+ =================
3
+ .. automodule:: MARBLE.dataloader
4
+ :members:
data/doc/source/dynamics.rst ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Dynamics module
2
+ ===============
3
+ .. automodule:: MARBLE.dynamics
4
+ :members:
data/doc/source/geometry.rst ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Geometry module
2
+ ===============
3
+ .. automodule:: MARBLE.geometry
4
+ :members:
data/doc/source/index.rst ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. mdinclude:: ../index_readme.md
2
+ :start-line: 0
3
+
4
+ .. toctree::
5
+ :maxdepth: 1
6
+
7
+ main
8
+ postprocessing
9
+ preprocessing
10
+ layers
11
+ geometry
12
+ dataloader
13
+ plotting
14
+ utils
15
+ dynamics
data/doc/source/layers.rst ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Layers module
2
+ =============
3
+ .. automodule:: MARBLE.layers
4
+ :members:
data/doc/source/main.rst ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ MARBLE module
2
+ =============
3
+ .. automodule:: MARBLE.main
4
+ :members:
data/doc/source/plotting.rst ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Plotting module
2
+ ===============
3
+ .. automodule:: MARBLE.plotting
4
+ :members:
data/doc/source/postprocessing.rst ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Postprocessing module
2
+ =====================
3
+ .. automodule:: MARBLE.postprocessing
4
+ :members:
data/doc/source/preprocessing.rst ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Preprocessing module
2
+ ====================
3
+ .. automodule:: MARBLE.preprocessing
4
+ :members:
data/doc/source/utils.rst ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Utils module
2
+ ============
3
+ .. automodule:: MARBLE.utils
4
+ :members:
data/environment.yml ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: MARBLE
2
+ channels:
3
+ - pyg
4
+ - conda-forge
5
+ - pytorch
6
+ - defaults
7
+ dependencies:
8
+ - _libgcc_mutex=0.1=main
9
+ - _openmp_mutex=5.1=1_gnu
10
+ - blas=1.0=mkl
11
+ - bottleneck=1.3.5=py39h7deecbd_0
12
+ - brotli=1.0.9=h5eee18b_7
13
+ - brotli-bin=1.0.9=h5eee18b_7
14
+ - brotlipy=0.7.0=py39h27cfd23_1003
15
+ - bzip2=1.0.8=h7b6447c_0
16
+ - ca-certificates=2022.07.19=h06a4308_0
17
+ - certifi=2022.9.24=py39h06a4308_0
18
+ - cffi=1.15.1=py39h74dc2b5_0
19
+ - charset-normalizer=2.0.4=pyhd3eb1b0_0
20
+ - cryptography=37.0.1=py39h9ce1e76_0
21
+ - cudatoolkit=11.3.1=h2bc3f7f_2
22
+ - cycler=0.11.0=pyhd3eb1b0_0
23
+ - cython=0.29.32=py39h6a678d5_0
24
+ - dbus=1.13.18=hb2f20db_0
25
+ - expat=2.4.9=h6a678d5_0
26
+ - ffmpeg=4.3=hf484d3e_0
27
+ - fftw=3.3.9=h27cfd23_1
28
+ - fontconfig=2.13.1=h6c09931_0
29
+ - fonttools=4.25.0=pyhd3eb1b0_0
30
+ - freetype=2.11.0=h70c0345_0
31
+ - future=0.18.2=py39h06a4308_1
32
+ - giflib=5.2.1=h7b6447c_0
33
+ - glib=2.69.1=h4ff587b_1
34
+ - gmp=6.2.1=h295c915_3
35
+ - gnutls=3.6.15=he1e5248_0
36
+ - gst-plugins-base=1.14.0=h8213a91_2
37
+ - gstreamer=1.14.0=h28cd5cc_2
38
+ - icu=58.2=he6710b0_3
39
+ - idna=3.4=py39h06a4308_0
40
+ - intel-openmp=2021.4.0=h06a4308_3561
41
+ - jinja2=3.0.3=pyhd3eb1b0_0
42
+ - joblib=1.1.0=pyhd3eb1b0_0
43
+ - jpeg=9e=h7f8727e_0
44
+ - kiwisolver=1.4.2=py39h295c915_0
45
+ - krb5=1.19.2=hac12032_0
46
+ - lame=3.100=h7b6447c_0
47
+ - lapack=3.9.0=netlib
48
+ - lcms2=2.12=h3be6417_0
49
+ - ld_impl_linux-64=2.38=h1181459_1
50
+ - lerc=3.0=h295c915_0
51
+ - libblas=3.9.0=12_linux64_mkl
52
+ - libbrotlicommon=1.0.9=h5eee18b_7
53
+ - libbrotlidec=1.0.9=h5eee18b_7
54
+ - libbrotlienc=1.0.9=h5eee18b_7
55
+ - libclang=10.0.1=default_hb85057a_2
56
+ - libdeflate=1.8=h7f8727e_5
57
+ - libedit=3.1.20210910=h7f8727e_0
58
+ - libevent=2.1.12=h8f2d780_0
59
+ - libffi=3.3=he6710b0_2
60
+ - libgcc-ng=11.2.0=h1234567_1
61
+ - libgfortran-ng=11.2.0=h00389a5_1
62
+ - libgfortran5=11.2.0=h1234567_1
63
+ - libgomp=11.2.0=h1234567_1
64
+ - libiconv=1.16=h7f8727e_2
65
+ - libidn2=2.3.2=h7f8727e_0
66
+ - liblapack=3.9.0=12_linux64_mkl
67
+ - libllvm10=10.0.1=hbcb73fb_5
68
+ - libpng=1.6.37=hbc83047_0
69
+ - libpq=12.9=h16c4e8d_3
70
+ - libprotobuf=3.20.1=h4ff587b_0
71
+ - libstdcxx-ng=11.2.0=h1234567_1
72
+ - libtasn1=4.16.0=h27cfd23_0
73
+ - libtiff=4.4.0=hecacb30_0
74
+ - libunistring=0.9.10=h27cfd23_0
75
+ - libuuid=1.0.3=h7f8727e_2
76
+ - libwebp=1.2.4=h11a3e52_0
77
+ - libwebp-base=1.2.4=h5eee18b_0
78
+ - libxcb=1.15=h7f8727e_0
79
+ - libxkbcommon=1.0.1=hfa300c1_0
80
+ - libxml2=2.9.14=h74e7548_0
81
+ - libxslt=1.1.35=h4e12654_0
82
+ - lz4-c=1.9.3=h295c915_1
83
+ - markupsafe=2.1.1=py39h7f8727e_0
84
+ - matplotlib=3.5.2=py39h06a4308_0
85
+ - matplotlib-base=3.5.2=py39hf590b9c_0
86
+ - mkl=2021.4.0=h06a4308_640
87
+ - mkl-service=2.4.0=py39h7f8727e_0
88
+ - mkl_fft=1.3.1=py39hd3c417c_0
89
+ - mkl_random=1.2.2=py39h51133e4_0
90
+ - munkres=1.1.4=py_0
91
+ - ncurses=6.3=h5eee18b_3
92
+ - nettle=3.7.3=hbbd107a_1
93
+ - networkx=2.8.4=py39h06a4308_0
94
+ - ninja=1.10.2=h06a4308_5
95
+ - ninja-base=1.10.2=hd09550d_5
96
+ - nspr=4.33=h295c915_0
97
+ - nss=3.74=h0370c37_0
98
+ - numexpr=2.8.3=py39h807cd23_0
99
+ - numpy=1.23.3=py39h14f4228_0
100
+ - numpy-base=1.23.3=py39h31eccc5_0
101
+ - openh264=2.1.1=h4ff587b_0
102
+ - openssl=1.1.1q=h7f8727e_0
103
+ - packaging=21.3=pyhd3eb1b0_0
104
+ - pandas=1.4.4=py39h6a678d5_0
105
+ - pcre=8.45=h295c915_0
106
+ - pillow=9.2.0=py39hace64e9_1
107
+ - pip=22.2.2=py39h06a4308_0
108
+ - ply=3.11=py39h06a4308_0
109
+ - pot=0.8.2=py39h1832856_0
110
+ - protobuf=3.20.1=py39h295c915_0
111
+ - pybind11=2.9.2=py39h79cecc1_0
112
+ - pycparser=2.21=pyhd3eb1b0_0
113
+ - pyg=2.1.0=py39_torch_1.12.0_cu113
114
+ - pyopenssl=22.0.0=pyhd3eb1b0_0
115
+ - pyparsing=3.0.9=py39h06a4308_0
116
+ - pyqt=5.15.7=py39h6a678d5_1
117
+ - pyqt5-sip=12.11.0=py39h6a678d5_1
118
+ - pysocks=1.7.1=py39h06a4308_0
119
+ - python=3.9.13=haa1d7c7_2
120
+ - python-dateutil=2.8.2=pyhd3eb1b0_0
121
+ - python_abi=3.9=2_cp39
122
+ - pytorch=1.12.1=py3.9_cuda11.3_cudnn8.3.2_0
123
+ - pytorch-cluster=1.6.0=py39_torch_1.12.0_cu113
124
+ - pytorch-mutex=1.0=cuda
125
+ - pytorch-scatter=2.0.9=py39_torch_1.12.0_cu113
126
+ - pytorch-sparse=0.6.15=py39_torch_1.12.0_cu113
127
+ - pytz=2022.1=py39h06a4308_0
128
+ - pyyaml=6.0=py39h7f8727e_1
129
+ - qt-main=5.15.2=h327a75a_7
130
+ - qt-webengine=5.15.9=hd2b0992_4
131
+ - qtwebkit=5.212=h4eab89a_4
132
+ - readline=8.1.2=h7f8727e_1
133
+ - requests=2.28.1=py39h06a4308_0
134
+ - scikit-learn=1.1.2=py39h6a678d5_0
135
+ - scipy=1.9.1=py39h14f4228_0
136
+ - seaborn=0.11.2=pyhd3eb1b0_0
137
+ - setuptools=63.4.1=py39h06a4308_0
138
+ - sip=6.6.2=py39h6a678d5_0
139
+ - six=1.16.0=pyhd3eb1b0_1
140
+ - sqlite=3.39.3=h5082296_0
141
+ - tensorboardx=2.2=pyhd3eb1b0_0
142
+ - threadpoolctl=2.2.0=pyh0d69192_0
143
+ - tk=8.6.12=h1ccaba5_0
144
+ - toml=0.10.2=pyhd3eb1b0_0
145
+ - torchaudio=0.12.1=py39_cu113
146
+ - torchvision=0.13.1=py39_cu113
147
+ - tornado=6.2=py39h5eee18b_0
148
+ - tqdm=4.64.1=py39h06a4308_0
149
+ - typing-extensions=4.3.0=py39h06a4308_0
150
+ - typing_extensions=4.3.0=py39h06a4308_0
151
+ - tzdata=2022e=h04d1e81_0
152
+ - urllib3=1.26.11=py39h06a4308_0
153
+ - wheel=0.37.1=pyhd3eb1b0_0
154
+ - xz=5.2.6=h5eee18b_0
155
+ - yaml=0.2.5=h7b6447c_0
156
+ - zlib=1.2.12=h5eee18b_3
157
+ - zstd=1.5.2=ha4553b6_0
158
+ - pip:
159
+ - teaspoon==1.3.1
data/environment_osx_arm.yml ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: MARBLE
2
+ channels:
3
+ - pytorch
4
+ - defaults
5
+ dependencies:
6
+ - blas=1.0=openblas
7
+ - bottleneck=1.3.5=py39heec5a64_0
8
+ - brotli=1.0.9=h1a28f6b_7
9
+ - brotli-bin=1.0.9=h1a28f6b_7
10
+ - brotlipy=0.7.0=py39h1a28f6b_1002
11
+ - bzip2=1.0.8=h620ffc9_4
12
+ - ca-certificates=2023.08.22=hca03da5_0
13
+ - certifi=2023.7.22=py39hca03da5_0
14
+ - cffi=1.15.1=py39h80987f9_3
15
+ - charset-normalizer=2.0.4=pyhd3eb1b0_0
16
+ - contourpy=1.0.5=py39h525c30c_0
17
+ - cryptography=41.0.3=py39hd4332d6_0
18
+ - cycler=0.11.0=pyhd3eb1b0_0
19
+ - ffmpeg=4.2.2=h04105a8_0
20
+ - fonttools=4.25.0=pyhd3eb1b0_0
21
+ - freetype=2.12.1=h1192e45_0
22
+ - gettext=0.21.0=h13f89a0_1
23
+ - giflib=5.2.1=h80987f9_3
24
+ - gmp=6.2.1=hc377ac9_3
25
+ - gnutls=3.6.15=h887c41c_0
26
+ - icu=73.1=h313beb8_0
27
+ - idna=3.4=py39hca03da5_0
28
+ - importlib_resources=5.2.0=pyhd3eb1b0_1
29
+ - jpeg=9e=h80987f9_1
30
+ - jupyterlab
31
+ - kiwisolver=1.4.4=py39h313beb8_0
32
+ - lame=3.100=h1a28f6b_0
33
+ - lcms2=2.12=hba8e193_0
34
+ - lerc=3.0=hc377ac9_0
35
+ - libbrotlicommon=1.0.9=h1a28f6b_7
36
+ - libbrotlidec=1.0.9=h1a28f6b_7
37
+ - libbrotlienc=1.0.9=h1a28f6b_7
38
+ - libcxx=14.0.6=h848a8c0_0
39
+ - libdeflate=1.17=h80987f9_1
40
+ - libffi=3.4.4=hca03da5_0
41
+ - libgfortran=5.0.0=11_3_0_hca03da5_28
42
+ - libgfortran5=11.3.0=h009349e_28
43
+ - libiconv=1.16=h1a28f6b_2
44
+ - libidn2=2.3.4=h80987f9_0
45
+ - libopenblas=0.3.21=h269037a_0
46
+ - libopus=1.3=h1a28f6b_1
47
+ - libpng=1.6.39=h80987f9_0
48
+ - libtasn1=4.19.0=h80987f9_0
49
+ - libtiff=4.5.1=h313beb8_0
50
+ - libunistring=0.9.10=h1a28f6b_0
51
+ - libvpx=1.10.0=hc377ac9_0
52
+ - libwebp=1.3.2=ha3663a8_0
53
+ - libwebp-base=1.3.2=h80987f9_0
54
+ - libxml2=2.10.4=h0dcf63f_1
55
+ - llvm-openmp=14.0.6=hc6e5704_0
56
+ - lz4-c=1.9.4=h313beb8_0
57
+ - matplotlib=3.7.2=py39hca03da5_0
58
+ - matplotlib-base=3.7.2=py39h46d7db6_0
59
+ - munkres=1.1.4=py_0
60
+ - ncurses=6.4=h313beb8_0
61
+ - nettle=3.7.3=h84b5d62_1
62
+ - networkx=3.1=py39hca03da5_0
63
+ - numexpr=2.8.7=py39hecc3335_0
64
+ - numpy=1.26.0=py39h3b2db8e_0
65
+ - numpy-base=1.26.0=py39ha9811e2_0
66
+ - openh264=1.8.0=h98b2900_0
67
+ - openjpeg=2.3.0=h7a6adac_2
68
+ - openssl=3.0.11=h1a28f6b_2
69
+ - packaging=23.1=py39hca03da5_0
70
+ - pandas=2.1.1=py39h46d7db6_0
71
+ - pillow=10.0.1=py39h3b245a6_0
72
+ - pip=23.3=py39hca03da5_0
73
+ - pybind11=2.10.4=py39h48ca7d4_0
74
+ - pybind11-global=2.10.4=py39h48ca7d4_0
75
+ - pycparser=2.21=pyhd3eb1b0_0
76
+ - pyopenssl=23.2.0=py39hca03da5_0
77
+ - pyparsing=3.0.9=py39hca03da5_0
78
+ - pysocks=1.7.1=py39hca03da5_0
79
+ - python=3.9.18=hb885b13_0
80
+ - python-dateutil=2.8.2=pyhd3eb1b0_0
81
+ - python-tzdata=2023.3=pyhd3eb1b0_0
82
+ - pytorch=1.12.1=py3.9_0
83
+ - pytz=2023.3.post1=py39hca03da5_0
84
+ - pyyaml=6.0=py39h80987f9_1
85
+ - readline=8.2=h1a28f6b_0
86
+ - requests=2.31.0=py39hca03da5_0
87
+ - scipy=1.11.3=py39h20cbe94_0
88
+ - seaborn=0.11
89
+ - setuptools=68.0.0=py39hca03da5_0
90
+ - scikit-learn=1.3.0=py39h46d7db6_0
91
+ - statannotations
92
+ - six=1.16.0=pyhd3eb1b0_1
93
+ - sqlite=3.41.2=h80987f9_0
94
+ - tk=8.6.12=hb8d0fd4_0
95
+ - torchaudio=0.12.1=py39_cpu
96
+ - torchvision=0.13.1=py39_cpu
97
+ - tornado=6.3.3=py39h80987f9_0
98
+ - tqdm=4.65.0=py39h86d0a89_0
99
+ - typing_extensions=4.7.1=py39hca03da5_0
100
+ - tzdata=2023c=h04d1e81_0
101
+ - urllib3=1.26.16=py39hca03da5_0
102
+ - wheel=0.41.2=py39hca03da5_0
103
+ - x264=1!152.20180806=h1a28f6b_0
104
+ - xz=5.4.2=h80987f9_0
105
+ - yaml=0.2.5=h1a28f6b_0
106
+ - zipp=3.11.0=py39hca03da5_0
107
+ - zlib=1.2.13=h5a0b063_0
108
+ - zstd=1.5.5=hd90d995_0
109
+ - pip:
110
+ - ninja==1.11.1.1
111
+ - teaspoon==1.3.1
data/environment_osx_intel.yml ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: MARBLE
2
+ channels:
3
+ - pyg
4
+ - pytorch
5
+ - conda-forge
6
+ - defaults
7
+ dependencies:
8
+ - blas=2.116=openblas
9
+ - blas-devel=3.9.0=16_osx64_openblas
10
+ - bottleneck=1.3.5=py39h67323c0_0
11
+ - brotli=1.0.9=hca72f7f_7
12
+ - brotli-bin=1.0.9=hca72f7f_7
13
+ - brotlipy=0.7.0=py39h9ed2024_1003
14
+ - bzip2=1.0.8=h1de35cc_0
15
+ - ca-certificates=2022.9.24=h033912b_0
16
+ - certifi=2022.9.24=pyhd8ed1ab_0
17
+ - cffi=1.15.1=py39hc55c11b_0
18
+ - charset-normalizer=2.0.4=pyhd3eb1b0_0
19
+ - cryptography=37.0.1=py39hf6deb26_0
20
+ - cycler=0.11.0=pyhd3eb1b0_0
21
+ - cython=0.29.32=py39he9d5cce_0
22
+ - ffmpeg=4.3=h0a44026_0
23
+ - fftw=3.3.9=h9ed2024_1
24
+ - fonttools=4.25.0=pyhd3eb1b0_0
25
+ - freetype=2.11.0=hd8bbffd_0
26
+ - gettext=0.21.0=h7535e17_0
27
+ - giflib=5.2.1=haf1e3a3_0
28
+ - gmp=6.2.1=he9d5cce_3
29
+ - gnutls=3.6.15=hed9c0bf_0
30
+ - icu=58.2=h0a44026_3
31
+ - idna=3.4=py39hecd8cb5_0
32
+ - intel-openmp=2021.4.0=hecd8cb5_3538
33
+ - jinja2=3.0.3=pyhd3eb1b0_0
34
+ - joblib=1.1.1=py39hecd8cb5_0
35
+ - jpeg=9e=hca72f7f_0
36
+ - kiwisolver=1.4.2=py39he9d5cce_0
37
+ - lame=3.100=h1de35cc_0
38
+ - lapack=3.9.0=netlib
39
+ - lcms2=2.12=hf1fd2bf_0
40
+ - lerc=3.0=he9d5cce_0
41
+ - libblas=3.9.0=16_osx64_openblas
42
+ - libbrotlicommon=1.0.9=hca72f7f_7
43
+ - libbrotlidec=1.0.9=hca72f7f_7
44
+ - libbrotlienc=1.0.9=hca72f7f_7
45
+ - libcblas=3.9.0=16_osx64_openblas
46
+ - libcxx=14.0.6=h9765a3e_0
47
+ - libdeflate=1.8=h9ed2024_5
48
+ - libffi=3.4.2=h0d85af4_5
49
+ - libgfortran=5.0.0=11_3_0_hecd8cb5_28
50
+ - libgfortran5=11.3.0=h9dfd629_28
51
+ - libiconv=1.16=hca72f7f_2
52
+ - libidn2=2.3.2=h9ed2024_0
53
+ - liblapack=3.9.0=16_osx64_openblas
54
+ - liblapacke=3.9.0=16_osx64_openblas
55
+ - libopenblas=0.3.21=openmp_h429af6e_3
56
+ - libpng=1.6.37=ha441bb4_0
57
+ - libprotobuf=3.20.1=h8346a28_0
58
+ - libtasn1=4.16.0=h9ed2024_0
59
+ - libtiff=4.4.0=h2ef1027_0
60
+ - libunistring=0.9.10=h9ed2024_0
61
+ - libwebp=1.2.4=h56c3ce4_0
62
+ - libwebp-base=1.2.4=hca72f7f_0
63
+ - libxml2=2.9.14=hbf8cd5e_0
64
+ - libzlib=1.2.12=hfd90126_3
65
+ - llvm-openmp=14.0.6=h0dcd299_0
66
+ - lz4-c=1.9.3=h23ab428_1
67
+ - markupsafe=2.1.1=py39hca72f7f_0
68
+ - matplotlib=3.5.2=py39hecd8cb5_0
69
+ - matplotlib-base=3.5.2=py39hfb0c5b7_0
70
+ - mkl=2022.1.0=h860c996_928
71
+ - mkl-service=2.4.0=py39h9032bd8_0
72
+ - mkl_fft=1.3.1=py39hbc11c22_3
73
+ - mkl_random=1.2.2=py39h1833399_1
74
+ - munkres=1.1.4=py_0
75
+ - ncurses=6.3=hca72f7f_3
76
+ - nettle=3.7.3=h230ac6f_1
77
+ - networkx=2.8.4=py39hecd8cb5_0
78
+ - ninja=1.11.0=h1b54a9f_0
79
+ - nomkl=3.0=0
80
+ - numexpr=2.8.3=py39hf72b562_0
81
+ - numpy=1.23.3=py39h0f1bd0b_0
82
+ - numpy-base=1.23.3=py39hbda7086_0
83
+ - openblas=0.3.21=openmp_hbefa662_3
84
+ - openh264=2.1.1=h8346a28_0
85
+ - openssl=3.0.5=hfd90126_2
86
+ - packaging=21.3=pyhd3eb1b0_0
87
+ - pandas=1.4.4=py39he9d5cce_0
88
+ - pillow=9.2.0=py39hde71d04_1
89
+ - pip=22.2.2=py39hecd8cb5_0
90
+ - pot=0.8.2=py39hca71b8a_1
91
+ - protobuf=3.20.1=py39he9d5cce_0
92
+ - pybind11=2.9.2=py39hc29d2bd_0
93
+ - pycparser=2.21=pyhd3eb1b0_0
94
+ - pyg=2.1.0=py39_torch_1.12.0_cpu
95
+ - pyopenssl=22.0.0=pyhd3eb1b0_0
96
+ - pyparsing=3.0.9=py39hecd8cb5_0
97
+ - pysocks=1.7.1=py39hecd8cb5_0
98
+ - python=3.9.13=hf8d34f4_0_cpython
99
+ - python-dateutil=2.8.2=pyhd3eb1b0_0
100
+ - python_abi=3.9=2_cp39
101
+ - pytorch=1.12.1=cpu_py39h9b0ea23_0
102
+ - pytorch-cluster=1.6.0=py39_torch_1.12.0_cpu
103
+ - pytorch-scatter=2.0.9=py39_torch_1.12.0_cpu
104
+ - pytorch-sparse=0.6.15=py39_torch_1.12.0_cpu
105
+ - pytz=2022.1=py39hecd8cb5_0
106
+ - pyyaml=6.0=py39hca72f7f_1
107
+ - readline=8.1.2=hca72f7f_1
108
+ - requests=2.28.1=py39hecd8cb5_0
109
+ - scikit-learn=1.1.2=py39he9d5cce_0
110
+ - scipy=1.9.3=py39h8a15683_0
111
+ - seaborn=0.12.0=py39hecd8cb5_0
112
+ - setuptools=63.4.1=py39hecd8cb5_0
113
+ - six=1.16.0=pyhd3eb1b0_1
114
+ - sleef=3.5.1=h6db0672_2
115
+ - sqlite=3.39.3=h707629a_0
116
+ - tbb=2021.6.0=hb8565cd_0
117
+ - tensorboardx=2.2=pyhd3eb1b0_0
118
+ - threadpoolctl=2.2.0=pyh0d69192_0
119
+ - tk=8.6.12=h5d9f67b_0
120
+ - torchaudio=0.12.1=py39_cpu
121
+ - torchvision=0.13.1=py39_cpu
122
+ - tornado=6.2=py39hca72f7f_0
123
+ - tqdm=4.64.1=py39hecd8cb5_0
124
+ - typing_extensions=4.3.0=py39hecd8cb5_0
125
+ - tzdata=2022e=h04d1e81_0
126
+ - urllib3=1.26.12=py39hecd8cb5_0
127
+ - wheel=0.37.1=pyhd3eb1b0_0
128
+ - xz=5.2.6=hca72f7f_0
129
+ - yaml=0.2.5=haf1e3a3_0
130
+ - zlib=1.2.12=h4dc903c_3
131
+ - zstd=1.5.2=hcb37349_0
data/environment_windows_native.yml ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: MARBLE
2
+ channels:
3
+ - pytorch
4
+ - pyg
5
+ - defaults
6
+ dependencies:
7
+ - blas=1.0=mkl
8
+ - bottleneck=1.3.5=py39h080aedc_0
9
+ - brotli=1.0.9=h2bbff1b_7
10
+ - brotli-bin=1.0.9=h2bbff1b_7
11
+ - brotlipy=0.7.0=py39h2bbff1b_1003
12
+ - ca-certificates=2023.08.22=haa95532_0
13
+ - certifi=2023.7.22=py39haa95532_0
14
+ - cffi=1.15.1=py39h2bbff1b_3
15
+ - charset-normalizer=2.0.4=pyhd3eb1b0_0
16
+ - colorama=0.4.6=py39haa95532_0
17
+ - contourpy=1.0.5=py39h59b6b97_0
18
+ - cryptography=41.0.3=py39h89fc84f_0
19
+ - cudatoolkit=11.3.1=h59b6b97_2
20
+ - cycler=0.11.0=pyhd3eb1b0_0
21
+ - fonttools=4.25.0=pyhd3eb1b0_0
22
+ - freetype=2.12.1=ha860e81_0
23
+ - giflib=5.2.1=h8cc25b3_3
24
+ - glib=2.69.1=h5dc1a3c_2
25
+ - icc_rt=2022.1.0=h6049295_2
26
+ - icu=58.2=ha925a31_3
27
+ - idna=3.4=py39haa95532_0
28
+ - importlib_resources=5.2.0=pyhd3eb1b0_1
29
+ - intel-openmp=2023.1.0=h59b6b97_46319
30
+ - jinja2=3.1.2=py39haa95532_0
31
+ - joblib=1.2.0=py39haa95532_0
32
+ - jpeg=9e=h2bbff1b_1
33
+ - kiwisolver=1.4.4=py39hd77b12b_0
34
+ - krb5=1.20.1=h5b6d351_0
35
+ - lerc=3.0=hd77b12b_0
36
+ - libbrotlicommon=1.0.9=h2bbff1b_7
37
+ - libbrotlidec=1.0.9=h2bbff1b_7
38
+ - libbrotlienc=1.0.9=h2bbff1b_7
39
+ - libclang=14.0.6=default_hb5a9fac_1
40
+ - libclang13=14.0.6=default_h8e68704_1
41
+ - libdeflate=1.17=h2bbff1b_1
42
+ - libffi=3.4.4=hd77b12b_0
43
+ - libiconv=1.16=h2bbff1b_2
44
+ - libpng=1.6.39=h8cc25b3_0
45
+ - libpq=12.15=h906ac69_1
46
+ - libtiff=4.5.1=hd77b12b_0
47
+ - libuv=1.44.2=h2bbff1b_0
48
+ - libwebp=1.3.2=hbc33d0d_0
49
+ - libwebp-base=1.3.2=h2bbff1b_0
50
+ - libxml2=2.10.4=h0ad7f3c_1
51
+ - libxslt=1.1.37=h2bbff1b_1
52
+ - lz4-c=1.9.4=h2bbff1b_0
53
+ - markupsafe=2.1.1=py39h2bbff1b_0
54
+ - matplotlib=3.7.2=py39haa95532_0
55
+ - matplotlib-base=3.7.2=py39h4ed8f06_0
56
+ - mkl=2023.1.0=h6b88ed4_46357
57
+ - mkl-service=2.4.0=py39h2bbff1b_1
58
+ - mkl_fft=1.3.8=py39h2bbff1b_0
59
+ - mkl_random=1.2.4=py39h59b6b97_0
60
+ - munkres=1.1.4=py_0
61
+ - networkx=3.1=py39haa95532_0
62
+ - numexpr=2.8.7=py39h2cd9be0_0
63
+ - numpy=1.26.0=py39h055cbcc_0
64
+ - numpy-base=1.26.0=py39h65a83cf_0
65
+ - openjpeg=2.4.0=h4fc8c34_0
66
+ - openssl=3.0.11=h2bbff1b_2
67
+ - packaging=23.1=py39haa95532_0
68
+ - pandas=2.1.1=py39h4ed8f06_0
69
+ - pcre=8.45=hd77b12b_0
70
+ - pillow=10.0.1=py39h045eedc_0
71
+ - pip=23.3=py39haa95532_0
72
+ - ply=3.11=py39haa95532_0
73
+ - pycparser=2.21=pyhd3eb1b0_0
74
+ - pyg=2.1.0=py39_torch_1.12.0_cu113
75
+ - pyopenssl=23.2.0=py39haa95532_0
76
+ - pyparsing=3.0.9=py39haa95532_0
77
+ - pyqt=5.15.7=py39hd77b12b_0
78
+ - pyqt5-sip=12.11.0=py39hd77b12b_0
79
+ - pysocks=1.7.1=py39haa95532_0
80
+ - python=3.9.18=h1aa4202_0
81
+ - python-dateutil=2.8.2=pyhd3eb1b0_0
82
+ - python-tzdata=2023.3=pyhd3eb1b0_0
83
+ - pytorch=1.12.1=py3.9_cuda11.3_cudnn8_0
84
+ - pytorch-cluster=1.6.0=py39_torch_1.12.0_cu113
85
+ - pytorch-mutex=1.0=cuda
86
+ - pytorch-scatter=2.0.9=py39_torch_1.12.0_cu113
87
+ - pytorch-sparse=0.6.15=py39_torch_1.12.0_cu113
88
+ - pytz=2023.3.post1=py39haa95532_0
89
+ - qt-main=5.15.2=h879a1e9_9
90
+ - qt-webengine=5.15.9=h5bd16bc_7
91
+ - qtwebkit=5.212=h2bbfb41_5
92
+ - requests=2.31.0=py39haa95532_0
93
+ - scikit-learn=1.3.0=py39h4ed8f06_0
94
+ - scipy=1.11.3=py39h309d312_0
95
+ - setuptools=68.0.0=py39haa95532_0
96
+ - sip=6.6.2=py39hd77b12b_0
97
+ - six=1.16.0=pyhd3eb1b0_1
98
+ - sqlite=3.41.2=h2bbff1b_0
99
+ - tbb=2021.8.0=h59b6b97_0
100
+ - threadpoolctl=2.2.0=pyh0d69192_0
101
+ - tk=8.6.12=h2bbff1b_0
102
+ - toml=0.10.2=pyhd3eb1b0_0
103
+ - torchaudio=0.12.1=py39_cu113
104
+ - torchvision=0.13.1=py39_cu113
105
+ - tornado=6.3.3=py39h2bbff1b_0
106
+ - tqdm=4.65.0=py39hd4e2768_0
107
+ - typing_extensions=4.7.1=py39haa95532_0
108
+ - tzdata=2023c=h04d1e81_0
109
+ - urllib3=1.26.16=py39haa95532_0
110
+ - vc=14.2=h21ff451_1
111
+ - vs2015_runtime=14.27.29016=h5e58377_2
112
+ - wheel=0.41.2=py39haa95532_0
113
+ - win_inet_pton=1.1.0=py39haa95532_0
114
+ - xz=5.4.2=h8cc25b3_0
115
+ - zipp=3.11.0=py39haa95532_0
116
+ - zlib=1.2.13=h8cc25b3_0
117
+ - zstd=1.5.5=hd43e919_0
118
+ - pip:
119
+ - teaspoon==1.3.1
data/examples/RNN/RNN.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
data/examples/RNN/RNN_scripts/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ """Code to generate RNN networks following:
2
+
3
+ A. Dubreuil, A. Valente, M. Beiran, F. Mastrogiuseppe, and S. Ostojic,
4
+ “The role of population structure in computations through neural dynamics,”
5
+ Nat. Neurosci., vol. 25, no. 6, pp. 783–794, 2022, doi: 10.1038/s41593-022-01088-4.
6
+
7
+ with code adapted from available at:
8
+
9
+ https://github.com/adrian-valente/populations_paper_code/tree/master/figures_notebooks
10
+ """
data/examples/RNN/RNN_scripts/clustering.py ADDED
@@ -0,0 +1,467 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy import stats
3
+ from math import sqrt
4
+ import torch
5
+ import multiprocessing as mp
6
+ from itertools import repeat
7
+ from sklearn.mixture import GaussianMixture, BayesianGaussianMixture
8
+ from sklearn.cluster import SpectralClustering
9
+ from sklearn.metrics import adjusted_rand_score
10
+ from sklearn.neighbors import NearestNeighbors
11
+ import matplotlib.pyplot as plt
12
+
13
+ from .modules import SupportLowRankRNN
14
+
15
+
16
def phi_prime(x):
    """Return the derivative of tanh at x: 1 - tanh(x)**2 (the unit's gain)."""
    tanh_x = np.tanh(x)
    return 1 - tanh_x ** 2
18
+
19
+
20
def hard_thresh_linreg(vec1, vec2, inp, thresh=0.5, label1="", label2=""):
    """
    Scatter (vec1, vec2) points split into two populations by thresholding the
    tanh gain: points with phi_prime(inp) < thresh are "saturated", points with
    phi_prime(inp) > thresh are "non saturated".  A linear regression line is
    drawn per population and for the whole cloud.

    (Docstring fix: the split is on phi_prime(inp) vs thresh, not on inp itself
    as previously stated.)

    :param vec1: array of shape n, x coordinates
    :param vec2: array of shape n, y coordinates
    :param inp: array of shape n, inputs at which the tanh gain is evaluated
    :param thresh: float, threshold on phi_prime(inp)
    :param label1: label for x axis
    :param label2: label for y axis
    :return: (idx1, idx2) boolean masks of the saturated / non-saturated points
    """
    gains = phi_prime(inp)
    idx1 = gains < thresh  # saturated units (low gain)
    idx2 = gains > thresh  # non-saturated units (high gain)

    plt.scatter(vec1[idx1], vec2[idx1], c="orange", label="saturated")
    plt.scatter(vec1[idx2], vec2[idx2], c="green", label="non saturated")

    xmin, xmax = vec1.min(), vec1.max()
    xs = np.linspace(xmin, xmax, 100)

    def _regress_and_plot(mask, color):
        # Fit y = slope * x + intercept on the selected points, report and draw it.
        if mask is None:
            slope, intercept, r_value, p_value, _ = stats.linregress(vec1, vec2)
        else:
            slope, intercept, r_value, p_value, _ = stats.linregress(vec1[mask], vec2[mask])
        print("slope: %f intercept: %f" % (slope, intercept))
        print("r-squared: %f" % (r_value**2))
        print("p-value: %f" % p_value)
        plt.plot(xs, slope * xs + intercept, color=color)

    _regress_and_plot(idx2, "green")
    _regress_and_plot(idx1, "orange")
    plt.legend()
    _regress_and_plot(None, "b")  # regression over all points
    plt.xlabel(label1)
    plt.ylabel(label2)
    plt.show()

    return idx1, idx2
66
+
67
+
68
def gmm_fit(
    net,
    n_components,
    algo="bayes",
    n_init=50,
    random_state=None,
    mean_precision_prior=None,
    weight_concentration_prior_type="dirichlet_process",
    weight_concentration_prior=None,
):
    """
    Fit a mixture of Gaussians to the connectivity vectors of a network.

    :param net: a low-rank RNN, or directly a list of vectors / an (n, d)
        array of neuron features (as passed by clustering_stability_task and
        to_support_net_old)
    :param n_components: int, number of mixture components
    :param algo: 'em' for plain EM, anything else for variational Bayes
    :param n_init: number of random restarts of the inference algorithm
    :param random_state: random seed for the rng to eliminate randomness
    :param mean_precision_prior: prior forwarded to BayesianGaussianMixture
    :param weight_concentration_prior_type: prior type for the Bayesian fit
    :param weight_concentration_prior: prior forwarded to BayesianGaussianMixture
    :return: (z, model): population labels (shape n) and the fitted model
    """
    # BUG FIX: make_vecs() was previously called unconditionally, which crashed
    # when callers passed an already-built feature array or vector list
    # (clustering_stability_task passes an ndarray, to_support_net_old a list).
    if isinstance(net, (list, np.ndarray)):
        neurons_fs = net
    else:
        neurons_fs = make_vecs(net)

    if isinstance(neurons_fs, list):
        X = np.vstack(neurons_fs).transpose()
    else:
        X = neurons_fs
    if algo == "em":
        model = GaussianMixture(n_components=n_components, n_init=n_init, random_state=random_state)
    else:
        model = BayesianGaussianMixture(
            n_components=n_components,
            n_init=n_init,
            random_state=random_state,
            init_params="random",
            mean_precision_prior=mean_precision_prior,
            weight_concentration_prior_type=weight_concentration_prior_type,
            weight_concentration_prior=weight_concentration_prior,
        )
    model.fit(X)
    z = model.predict(X)
    return z, model
108
+
109
+
110
def make_vecs(net):
    """Collect a network's connectivity vectors as a flat list of 1-D numpy arrays.

    Order: the rank left vectors m, the rank right vectors n, the input
    vectors wi, then the output vectors wo.
    """
    def to_np(t):
        return t.detach().cpu().numpy()

    vecs = [to_np(net.m[:, r]) for r in range(net.rank)]
    vecs += [to_np(net.n[:, r]) for r in range(net.rank)]
    vecs += [to_np(net.wi[k]) for k in range(net.input_size)]
    vecs += [to_np(net.wo[:, k]) for k in range(net.output_size)]
    return vecs
120
+
121
+
122
def gram_factorization(G):
    """
    Compute basis vectors (as rows) whose Gramian matrix is G, i.e. X @ X.T == G.

    :param G: ndarray representing a symmetric positive semidefinite matrix
    :return: ndarray of the same shape as G
    """
    eigvals, eigvecs = np.linalg.eigh(G)
    return eigvecs * np.sqrt(eigvals)
131
+
132
+
133
def to_support_net(net, z, take_means=False):
    """Build a resampled "support" network from a clustering of net's neurons.

    For each cluster in the labelling z, the empirical mean (optional) and
    covariance of the network's stacked connectivity vectors are computed, and
    a SupportLowRankRNN is constructed whose loadings follow these per-cluster
    statistics.

    :param net: trained low-rank RNN with m, n, wi, wo connectivity
    :param z: int array of shape hidden_size, cluster label of each neuron
    :param take_means: if True keep the per-cluster means, else use zero means
    :return: a SupportLowRankRNN with the same dimensions as net
    """
    # Stack all connectivity vectors into an (hidden_size, basis_dim) matrix.
    X = np.vstack(make_vecs(net)).transpose()
    _, counts = np.unique(z, return_counts=True)
    n_components = counts.shape[0]
    weights = counts / net.hidden_size  # cluster proportions
    if take_means:
        means = np.vstack([X[z == i].mean(axis=0) for i in range(n_components)])
    else:
        means = np.zeros((n_components, X.shape[1]))
    covariances = [np.cov(X[z == i].transpose()) for i in range(n_components)]

    rank = net.rank
    # One basis dimension per m/n/wi/wo vector.
    basis_dim = 2 * rank + net.input_size + net.output_size
    m_init = torch.zeros(rank, n_components, basis_dim)
    n_init = torch.zeros(rank, n_components, basis_dim)
    wi_init = torch.zeros(net.input_size, n_components, basis_dim)
    wo_init = torch.zeros(net.output_size, n_components, basis_dim)

    # if new_size is None:
    new_size = net.hidden_size
    old_size = net.hidden_size
    # if scaling:
    # old_size = net.hidden_size
    # else:
    # old_size = 1
    # m/n loadings are rescaled by sqrt(old_size / new_size); a no-op here
    # since old_size == new_size, kept for parity with to_support_net_old.
    m_means = torch.from_numpy(means[:, :rank]).t() * sqrt(old_size) / sqrt(new_size)
    n_means = torch.from_numpy(means[:, rank : 2 * rank]).t() * sqrt(old_size) / sqrt(new_size)
    wi_means = torch.from_numpy(means[:, 2 * rank : 2 * rank + net.input_size]).t()

    for i in range(n_components):
        # Compute Gramian matrix of the basis we have to build
        G = covariances[i]
        X_reduced = gram_factorization(G)
        for k in range(rank):
            m_init[k, i] = torch.from_numpy(X_reduced[k]) * sqrt(old_size) / sqrt(new_size)
            n_init[k, i] = torch.from_numpy(X_reduced[rank + k]) * sqrt(old_size) / sqrt(new_size)
        for k in range(net.input_size):
            wi_init[k, i] = torch.from_numpy(X_reduced[2 * rank + k])
        for k in range(net.output_size):
            # wo scales with 1/hidden_size, hence the non-sqrt ratio.
            wo_init[k, i] = (
                torch.from_numpy(X_reduced[2 * rank + net.input_size + k]) * old_size / new_size
            )

    net2 = SupportLowRankRNN(
        net.input_size,
        net.hidden_size,
        net.output_size,
        net.noise_std,
        net.alpha,
        rank,
        n_components,
        weights,
        basis_dim,
        m_init,
        n_init,
        wi_init,
        wo_init,
        m_means,
        n_means,
        wi_means,
    )
    return net2
195
+
196
+
197
def to_support_net_old(net, n_components=1, new_size=None):
    """Older variant of to_support_net that fits the clustering itself.

    Fits a Gaussian mixture with n_components to the connectivity vectors of
    net and builds a (possibly resized) SupportLowRankRNN from the fitted
    means, covariances and weights.

    NOTE(review): this passes the raw list of vectors to gmm_fit, whose first
    parameter is documented as a network object — confirm gmm_fit accepts a
    vector list before relying on this function.

    :param net: trained low-rank RNN
    :param n_components: number of mixture components
    :param new_size: hidden size of the rebuilt network (defaults to net's)
    :return: a SupportLowRankRNN
    """
    vecs = make_vecs(net)
    z, model = gmm_fit(vecs, n_components)
    weights = model.weights_

    rank = net.rank
    # One basis dimension per m/n/wi/wo vector.
    basis_dim = 2 * rank + net.input_size + net.output_size
    m_init = torch.zeros(rank, n_components, basis_dim)
    n_init = torch.zeros(rank, n_components, basis_dim)
    wi_init = torch.zeros(net.input_size, n_components, basis_dim)
    wo_init = torch.zeros(net.output_size, n_components, basis_dim)

    if new_size is None:
        new_size = net.hidden_size
    old_size = net.hidden_size
    # m/n loadings are rescaled by sqrt(old_size / new_size) so the effective
    # connectivity strength is preserved when resizing the network.
    m_means = torch.from_numpy(model.means_[:, :rank]).t() * sqrt(old_size) / sqrt(new_size)
    n_means = (
        torch.from_numpy(model.means_[:, rank : 2 * rank]).t() * sqrt(old_size) / sqrt(new_size)
    )
    wi_means = torch.from_numpy(model.means_[:, 2 * rank : 2 * rank + net.input_size]).t()

    for i in range(n_components):
        # Compute Gramian matrix of the basis we have to build
        G = model.covariances_[i]
        X_reduced = gram_factorization(G)
        for k in range(rank):
            m_init[k, i] = torch.from_numpy(X_reduced[k]) * sqrt(old_size) / sqrt(new_size)
            n_init[k, i] = torch.from_numpy(X_reduced[rank + k]) * sqrt(old_size) / sqrt(new_size)
        for k in range(net.input_size):
            wi_init[k, i] = torch.from_numpy(X_reduced[2 * rank + k])
        for k in range(net.output_size):
            # wo scales with 1/hidden_size, hence the non-sqrt ratio.
            wo_init[k, i] = (
                torch.from_numpy(X_reduced[2 * rank + net.input_size + k]) * old_size / new_size
            )

    net2 = SupportLowRankRNN(
        net.input_size,
        new_size,
        net.output_size,
        net.noise_std,
        net.alpha,
        rank,
        n_components,
        weights,
        basis_dim,
        m_init,
        n_init,
        wi_init,
        wo_init,
        m_means,
        n_means,
        wi_means,
    )
    return net2
251
+
252
+
253
def center_axes(ax):
    """Move the bottom/left spines of ax to the origin, hide top/right, drop ticks."""
    for side in ("top", "right"):
        ax.spines[side].set_visible(False)
    for side in ("bottom", "left"):
        ax.spines[side].set_position("zero")
    ax.set(xticks=[], yticks=[])
259
+
260
+
261
def pop_scatter_linreg(
    vec1,
    vec2,
    pops,
    n_pops=None,
    linreg=True,
    colors=("blue", "green", "red", "violet", "gray"),
    figsize=(5, 5),
    size=10.0,
    ax=None,
):
    """
    Scatter plot of (vec1, vec2) points separated in populations according to
    the integer labels in pops, with one linear regression per population.

    :param vec1: array of shape n, x coordinates
    :param vec2: array of shape n, y coordinates
    :param pops: int array of shape n, population label of each point
    :param n_pops: number of populations (inferred from pops if None)
    :param linreg: if True, fit and draw a regression line per population
    :param colors: one color per population
    :param figsize: figure size used when ax is None
    :param size: marker size
    :param ax: existing axes to draw on (a new centered figure is made if None)
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    center_axes(ax)

    # Symmetric axes limits around zero with a 10% margin.
    xmax = max(abs(vec1.min()), vec1.max())
    xmin = -xmax
    ax.set_xlim(xmin - 0.1 * (xmax - xmin), xmax + 0.1 * (xmax - xmin))
    # BUG FIX: was max(abs(vec2.min()), vec2.min()), which ignored the true
    # maximum of vec2 and could collapse the y limits.
    ymax = max(abs(vec2.min()), vec2.max())
    ymin = -ymax
    ax.set_ylim(ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin))
    xs = np.linspace(xmin - 0.1 * (xmax - xmin), xmax + 0.1 * (xmax - xmin), 100)

    if n_pops is None:
        n_pops = np.unique(pops).shape[0]
    for i in range(n_pops):
        ax.scatter(vec1[pops == i], vec2[pops == i], color=colors[i], s=size)
        if linreg:
            slope, intercept, r_value, p_value, std_err = stats.linregress(
                vec1[pops == i], vec2[pops == i]
            )
            print(f"pop {i}: slope={slope:.2f}, intercept={intercept:.2f}")
            ax.plot(xs, slope * xs + intercept, color=colors[i], zorder=-1)
    ax.set_xticks([])
    ax.set_yticks([])
301
+
302
+
303
def all_scatter_linreg(
    vecs,
    pops,
    xlabel="",
    ylabel="",
    n_pops=None,
    linreg=False,
    colors=("blue", "green", "red", "violet", "gray"),
):
    """
    Grid of pairwise scatter plots of the vectors in vecs, colored by population.

    :param vecs: list of arrays of shape n
    :param pops: int array of shape n with population labels
    :param xlabel: unused, kept for interface compatibility
    :param ylabel: unused, kept for interface compatibility
    :param n_pops: number of populations (inferred from pops if None)
    :param linreg: if True, fit and draw a regression per population
    :param colors: one color per population
    :return: the 2-D array of axes
    """
    fig, ax = plt.subplots(len(vecs), len(vecs), figsize=(6, 8))
    if n_pops is None:
        n_pops = np.unique(pops).shape[0]
    for k in range(len(vecs)):
        for j in range(len(vecs)):
            # BUG FIX: was `k is not j`, comparing int identity instead of
            # equality (only reliable for small interned ints).
            if k != j:
                for i in range(n_pops):
                    ax[k, j].scatter(vecs[k][pops == i], vecs[j][pops == i], color=colors[i])
                    if linreg:
                        slope, intercept, r_value, p_value, std_err = stats.linregress(
                            vecs[k][pops == i], vecs[j][pops == i]
                        )
                        print(f"pop {i}: slope={slope:.2f}, intercept={intercept:.2f}")
                        xs = vecs[k][pops == i]
                        # NOTE(review): this draws on the *current* axes via plt,
                        # not ax[k, j] -- looks unintended but kept as-is.
                        plt.plot(xs, slope * xs + intercept, color=colors[i])

    return ax
329
+
330
+
331
+ ### Spectral clustering and stability analysis
332
+
333
+
334
def generate_subsamples(neurons_fs, fraction=0.8):
    """Draw a random subsample of the rows of neurons_fs, without replacement.

    :param neurons_fs: array of shape (n, d)
    :param fraction: fraction of rows to keep
    :return: (subsample, sorted indices of the kept rows)
    """
    n_total = neurons_fs.shape[0]
    n_keep = int(fraction * n_total)
    indexes = np.sort(np.random.choice(n_total, n_keep, replace=False))
    return neurons_fs[indexes], indexes
340
+
341
+
342
def spectral_clustering(neurons_fs, n_clusters, metric="euclidean", n_neighbors=10):
    """
    Spectral clustering of neurons_fs with a k-nearest-neighbor affinity.

    :param neurons_fs: array of shape (n, d)
    :param n_clusters: number of clusters
    :param metric: 'euclidean' (sklearn's own kNN affinity) or 'cosine'
        (precomputed symmetrized cosine kNN graph)
    :param n_neighbors: number of neighbors for the cosine kNN graph
    :return: the fitted SpectralClustering model
    """
    if metric == "euclidean":
        model = SpectralClustering(n_clusters, affinity="nearest_neighbors")
        model.fit(neurons_fs)
    elif metric == "cosine":
        model = SpectralClustering(n_clusters, affinity="precomputed")
        nn = NearestNeighbors(n_neighbors=n_neighbors, algorithm="brute", metric="cosine")
        nn.fit(neurons_fs)
        knn_graph = nn.kneighbors_graph()
        # Symmetrize the directed kNN graph before spectral clustering.
        knn_graph = 0.5 * (knn_graph + knn_graph.transpose())
        model.fit(knn_graph)
    else:
        # BUG FIX: an unknown metric previously fell through and raised a
        # confusing NameError at the return statement.
        raise ValueError(f"unknown metric: {metric!r}")
    return model
354
+
355
+
356
def clustering_stability_task(
    neurons_fs, algo, n_clusters, metric, n_neighbors, mean_precision_prior=1e5
):
    """Cluster one random subsample of neurons_fs; used as a pool worker task.

    :return: (labels of the subsampled points, indices of the subsample)
    """
    sample, indexes = generate_subsamples(neurons_fs)
    if algo == "spectral":
        labels = spectral_clustering(sample, n_clusters, metric, n_neighbors).labels_
    else:
        labels, _ = gmm_fit(
            sample,
            n_clusters,
            mean_precision_prior=mean_precision_prior,
            weight_concentration_prior_type="dirichlet_distribution",
        )
    return labels, indexes
371
+
372
+
373
def clustering_stability(
    neurons_fs,
    n_clusters,
    n_bootstrap,
    algo="gmm",
    metric="cosine",
    n_neighbors=10,
    mean_precision_prior=1e5,
    normalize=None,
):
    """
    :param neurons_fs: numpy array of shape Nxd (neurons embedded in some feature space)
    :param n_clusters: int
    :param n_bootstrap: int
    :param algo: 'spectral' or 'gmm'
    :param metric: 'euclidean' or 'cosine' for spectral clustering
    :param n_neighbors: int, for spectral clustering
    :param mean_precision_prior: prior forwarded to the gmm fit
    :param normalize: None, 'normal' or 'uniform'
    :return: list of n_bootstrap x (n_bootstrap - 1) / 2 ARI values for bootstrapped
        clusterings (possibly normalized against a random baseline)
    """
    # Cluster n_bootstrap random subsamples in parallel.
    with mp.Pool(mp.cpu_count()) as pool:
        args = repeat(
            (neurons_fs, algo, n_clusters, metric, n_neighbors, mean_precision_prior), n_bootstrap
        )
        res = pool.starmap(clustering_stability_task, args)
    labels_list, indexings = zip(*res)

    aris = []
    # Align each pair of bootstrap subsamples on their common neurons
    # (two-pointer walk over the sorted index lists) and compute the ARI.
    for i in range(n_bootstrap):
        for j in range(i + 1, n_bootstrap):
            indexes_i = indexings[i]
            indexes_j = indexings[j]
            labels_i = []
            labels_j = []
            pos_j = 0
            for k in range(len(indexes_i)):
                if pos_j > len(indexes_j):
                    break
                while pos_j < len(indexes_j) and indexes_j[pos_j] < indexes_i[k]:
                    pos_j += 1
                if pos_j < len(indexes_j) and indexes_j[pos_j] == indexes_i[k]:
                    labels_i.append(labels_list[i][k])
                    labels_j.append(labels_list[j][pos_j])
                    pos_j += 1
            aris.append(adjusted_rand_score(labels_i, labels_j))

    if normalize is not None:
        # Baseline stability on structureless data of the same shape.
        if normalize == "normal":
            X_base = np.random.randn(neurons_fs.shape[0], neurons_fs.shape[1])
        elif normalize == "uniform":
            X_base = (np.random.rand(neurons_fs.shape[0], neurons_fs.shape[1]) - 0.5) * 2
        # BUG FIX: `metric` was previously passed positionally into the `algo`
        # parameter of this recursive call; pass keywords explicitly.
        base_aris = clustering_stability(
            X_base,
            n_clusters,
            n_bootstrap,
            algo=algo,
            metric=metric,
            n_neighbors=n_neighbors,
            mean_precision_prior=mean_precision_prior,
            normalize=None,
        )
        base_mean, base_std = np.mean(base_aris), np.std(base_aris)
        aris = [(ari - base_mean) / base_std for ari in aris]
    return aris
432
+
433
+
434
def boxplot_clustering_stability(
    neurons_fs,
    clusters_nums,
    aris=None,
    algo="gmm",
    n_bootstrap=20,
    metric="cosine",
    n_neighbors=10,
    ax=None,
):
    """Boxplot of clustering stability (ARI) as a function of the cluster number.

    If aris is None, the stability values are computed via clustering_stability
    for each entry of clusters_nums.
    """
    if aris is None:
        aris = [
            clustering_stability(neurons_fs, k, n_bootstrap, algo, metric, n_neighbors)
            for k in clusters_nums
        ]
    aris = np.array(aris)
    if ax is None:
        fig, ax = plt.subplots()
    col_lines = "indianred"
    bp = ax.boxplot(aris.T, patch_artist=True)
    for box in bp["boxes"]:
        box.set(color="steelblue", facecolor="steelblue")
    for med in bp["medians"]:
        med.set(color=col_lines)
    # Whiskers and caps share the accent color.
    for part in ("whiskers", "caps"):
        for artist in bp[part]:
            artist.set(color=col_lines)
    ax.set_xticks(list(range(1, aris.shape[0] + 1)))
    ax.set_xticklabels(clusters_nums)
    ax.set(xlabel="number of clusters", ylabel="stability", ylim=(-0.1, 1.1))
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    return ax
data/examples/RNN/RNN_scripts/dms.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """RNN dms (delayed-match-to-sample)."""
2
+ from math import floor
3
+ import numpy as np
4
+ import torch
5
+ import matplotlib.pyplot as plt
6
+
7
+ from .modules import loss_mse
8
+ from .ranktwo import plot_field, plot_field_noscalings
9
+
10
# Task constants: simulation timestep and neuronal time constant, in ms.
deltaT = 20.0
tau = 100
alpha = deltaT / tau  # Euler integration factor deltaT / tau
std_default = 3e-2  # default std of the white input noise

# Epoch durations in ms; min == max means a fixed-length epoch.
fixation_duration_min = 100
fixation_duration_max = 500
stimulus1_duration_min = 500
stimulus1_duration_max = 500
delay_duration_min = 500
delay_duration_max = 3000
stimulus2_duration_min = 500
stimulus2_duration_max = 500
decision_duration = 1000

# Discrete (timestep) counterparts of the durations above; these module-level
# globals can be recomputed by calling setup() after changing the durations.
min_fixation_duration_discrete = floor(fixation_duration_min / deltaT)
max_fixation_duration_discrete = floor(fixation_duration_max / deltaT)
min_stimulus1_duration_discrete = floor(stimulus1_duration_min / deltaT)
max_stimulus1_duration_discrete = floor(stimulus1_duration_max / deltaT)
min_stimulus2_duration_discrete = floor(stimulus2_duration_min / deltaT)
max_stimulus2_duration_discrete = floor(stimulus2_duration_max / deltaT)
decision_duration_discrete = floor(decision_duration / deltaT)
min_delay_duration_discrete = floor(delay_duration_min / deltaT)
max_delay_duration_discrete = floor(delay_duration_max / deltaT)
# Maximum possible trial length, in timesteps.
total_duration = (
    max_fixation_duration_discrete
    + max_stimulus1_duration_discrete
    + max_delay_duration_discrete
    + max_stimulus2_duration_discrete
    + decision_duration_discrete
)
43
+
44
+
45
def setup():
    """Recompute the discretized duration globals from the ms-level constants.

    Call this after changing any of the module-level *_duration_* values so
    that the *_discrete globals and total_duration stay consistent.
    """
    global min_fixation_duration_discrete
    global max_fixation_duration_discrete
    global min_stimulus1_duration_discrete
    global max_stimulus1_duration_discrete
    global min_stimulus2_duration_discrete
    global max_stimulus2_duration_discrete
    global decision_duration_discrete
    global min_delay_duration_discrete
    global max_delay_duration_discrete
    global total_duration
    min_fixation_duration_discrete = floor(fixation_duration_min / deltaT)
    max_fixation_duration_discrete = floor(fixation_duration_max / deltaT)
    min_stimulus1_duration_discrete = floor(stimulus1_duration_min / deltaT)
    max_stimulus1_duration_discrete = floor(stimulus1_duration_max / deltaT)
    min_stimulus2_duration_discrete = floor(stimulus2_duration_min / deltaT)
    max_stimulus2_duration_discrete = floor(stimulus2_duration_max / deltaT)
    decision_duration_discrete = floor(decision_duration / deltaT)
    min_delay_duration_discrete = floor(delay_duration_min / deltaT)
    max_delay_duration_discrete = floor(delay_duration_max / deltaT)

    total_duration = (
        max_fixation_duration_discrete
        + max_stimulus1_duration_discrete
        + max_delay_duration_discrete
        + max_stimulus2_duration_discrete
        + decision_duration_discrete
    )
73
+
74
+
75
def generate_dms_data(
    num_trials,
    type=None,
    gain=1.0,
    fraction_validation_trials=0.2,
    fraction_catch_trials=0.0,
    std=std_default,
):
    """
    Generate input/target/mask tensors for the delayed-match-to-sample task.

    :param num_trials: total number of trials (train + validation)
    :param type: one of 'A-A', 'A-B', 'B-A', 'B-B', or None to sample uniformly
    :param gain: stimulus amplitude on channel 0 (channel 1 receives 1 - gain)
    :param fraction_validation_trials: fraction of trials held out for validation
    :param fraction_catch_trials: fraction of trials with no stimulus/target
    :param std: std of the white input noise
    :return: x_train, y_train, mask_train, x_val, y_val, mask_val
    """
    x = std * torch.randn(num_trials, total_duration, 2)
    y = torch.zeros(num_trials, total_duration, 1)
    mask = torch.zeros(num_trials, total_duration, 1)

    types = ["A-A", "A-B", "B-A", "B-B"]
    for i in range(num_trials):
        if np.random.rand() > fraction_catch_trials:
            if type is None:
                cur_type = types[int(np.random.rand() * 4)]
            else:
                cur_type = type

            # Stimulus identities and target decision (+1 = same, -1 = different).
            if cur_type == "A-A":
                input1, input2, choice = gain, gain, 1
            elif cur_type == "A-B":
                input1, input2, choice = gain, 0, -1
            elif cur_type == "B-A":
                input1, input2, choice = 0, gain, -1
            elif cur_type == "B-B":
                input1, input2, choice = 0, 0, 1

            # Sample epoch durations in ms and discretize into timesteps.
            delay_duration = np.random.uniform(delay_duration_min, delay_duration_max)
            delay_duration_discrete = floor(delay_duration / deltaT)
            # BUG FIX: the fixation duration was previously sampled from the
            # *discrete* bounds and then divided by deltaT a second time,
            # yielding near-zero fixation epochs; sample from the ms bounds
            # like every other epoch.
            fixation_duration = np.random.uniform(fixation_duration_min, fixation_duration_max)
            fixation_duration_discrete = floor(fixation_duration / deltaT)
            stimulus1_duration = np.random.uniform(stimulus1_duration_min, stimulus1_duration_max)
            stimulus1_duration_discrete = floor(stimulus1_duration / deltaT)
            stimulus2_duration = np.random.uniform(stimulus2_duration_min, stimulus2_duration_max)
            stimulus2_duration_discrete = floor(stimulus2_duration / deltaT)
            decision_time_discrete = (
                fixation_duration_discrete
                + stimulus1_duration_discrete
                + delay_duration_discrete
                + stimulus2_duration_discrete
            )
            stim1_begin = fixation_duration_discrete
            stim1_end = stim1_begin + stimulus1_duration_discrete
            stim2_begin = stim1_end + delay_duration_discrete
            stim2_end = stim2_begin + stimulus2_duration_discrete

            # Stimuli are encoded on two antagonistic input channels.
            x[i, stim1_begin:stim1_end, 0] += input1
            x[i, stim1_begin:stim1_end, 1] += 1 - input1
            x[i, stim2_begin:stim2_end, 0] += input2
            x[i, stim2_begin:stim2_end, 1] += 1 - input2
            y[
                i, decision_time_discrete : decision_time_discrete + decision_duration_discrete
            ] = choice
            # Only the decision window contributes to the loss.
            mask[
                i, decision_time_discrete : decision_time_discrete + decision_duration_discrete
            ] = 1

    # Train / validation split.
    split_at = x.shape[0] - floor(x.shape[0] * fraction_validation_trials)
    (x_train, x_val) = x[:split_at], x[split_at:]
    (y_train, y_val) = y[:split_at], y[split_at:]
    (mask_train, mask_val) = mask[:split_at], mask[split_at:]

    return x_train, y_train, mask_train, x_val, y_val, mask_val
152
+
153
+
154
def accuracy_dms(output, targets, mask):
    """Fraction of non-catch trials where the sign of the mean masked output
    matches the sign of the mean masked target."""
    good_trials = (targets != 0).any(dim=1).squeeze()  # drop catch trials
    mask_bool = mask[good_trials, :, 0] == 1
    n_good = good_trials.sum()
    targets_filtered = torch.stack(targets[good_trials].squeeze()[mask_bool].chunk(n_good))
    target_decisions = torch.sign(targets_filtered.mean(dim=1))
    decisions_filtered = torch.stack(output[good_trials].squeeze()[mask_bool].chunk(n_good))
    decisions = torch.sign(decisions_filtered.mean(dim=1))
    return (target_decisions == decisions).type(torch.float32).mean()
166
+
167
+
168
def map_device(tensors, net):
    """
    Move a list of tensors onto the device used by the network net.

    :param tensors: list of tensors
    :param net: nn.Module whose wi parameter defines the target device
    :return: list of tensors (the input list unchanged when net is on the CPU)
    """
    device = net.wi.device
    if device == torch.device("cpu"):
        return tensors
    return [tensor.to(device=device) for tensor in tensors]
182
+
183
+
184
def test_dms(net, x, y, mask):
    """Evaluate a network on DMS data without gradients.

    :return: (MSE loss, accuracy) as Python floats
    """
    x, y, mask = map_device([x, y, mask], net)
    with torch.no_grad():
        output, _ = net(x)
        loss = loss_mse(output, y, mask).item()
        acc = accuracy_dms(output, y, mask).item()
    return loss, acc
191
+
192
+
193
def confusion_matrix(net):
    """Print a 4x2 confusion matrix of a network's decisions on the four DMS
    trial types (rows) versus the 'different'/'same' outputs (columns)."""
    matrix = np.zeros((4, 2))
    rows = ["A-A", "B-B", "A-B", "B-A"]
    for i, trial_type in enumerate(rows):
        x, y, mask, _, _, _ = generate_dms_data(
            100, type=trial_type, fraction_validation_trials=0.0
        )
        x, y, mask = map_device([x, y, mask], net)
        output, _ = net(x)
        mask_bool = mask[:, :, 0] == 1
        decisions_filtered = torch.stack(output.squeeze()[mask_bool].chunk(output.shape[0]))
        decisions = torch.sign(decisions_filtered.mean(dim=1))
        matrix[i, 0] = (decisions < 0).sum().type(torch.float) / decisions.shape[0]
        matrix[i, 1] = (decisions >= 0).sum().type(torch.float) / decisions.shape[0]
    cols = ["different", "same"]
    print("{:^12s}|{:^12s}|{:^12s}".format(" ", cols[0], cols[1]))
    print("-" * 40)
    for i, row in enumerate(rows):
        print("{:^12s}|{:^12.2f}|{:^12.2f}".format(row, matrix[i, 0], matrix[i, 1]))
    print("-" * 40)
211
+
212
+
213
def plot_trajectories(net, trajectories, ax, n_traj=2, style="-", c="C0", interval=(None, None)):
    """
    Plot trajectories projected on the network's first two connectivity
    vectors m1, m2.

    :param net: low-rank RNN with attributes m and hidden_size
    :param trajectories: sequence of (time, hidden_size) arrays
    :param ax: matplotlib axes to draw on
    :param n_traj: number of trajectories to plot
    :param style: line style
    :param c: line color
    :param interval: (end, flag); trajectories are truncated at interval[0],
        and nothing is drawn when interval[1] == 0
    """
    # BUG FIX: the default was a mutable list ([None, None]) shared across
    # calls; an immutable tuple avoids accidental cross-call mutation.
    m1 = net.m[:, 0].detach().numpy()
    m2 = net.m[:, 1].detach().numpy()

    for j in range(n_traj):
        proj1 = trajectories[j] @ m1 / net.hidden_size
        proj2 = trajectories[j] @ m2 / net.hidden_size

        if interval[1] != 0:
            ax.plot(proj1[: interval[0]], proj2[: interval[0]], c=c, lw=4, linestyle=style)
224
+
225
+
226
def remove_axes(ax):
    """Hide all four spines of ax and remove its ticks."""
    for side in ("top", "right", "bottom", "left"):
        ax.spines[side].set_visible(False)
    ax.set(xticks=[], yticks=[])
232
+
233
+
234
def _plot_field(net, input, ax, sizes=1.0, rect=(-5, 5, -4, 4), scalings=False):
    """Plot the flow field of net in the plane spanned by its connectivity
    vectors m1, m2 over the rectangle rect, then strip the axes decorations."""
    m1 = net.m[:, 0].detach().numpy()
    m2 = net.m[:, 1].detach().numpy()
    xmin, xmax, ymin, ymax = rect

    field_fn = plot_field if scalings else plot_field_noscalings
    field_fn(net, m1, m2, xmin, xmax, ymin, ymax, input=input, ax=ax, sizes=sizes)

    remove_axes(ax)
247
+
248
+
249
def psychometric_matrix(net, n_trials=10, ax=None):
    """Show the network's mean decision output for the four stimulus pairings
    as a 2x2 grayscale image.

    :param net: trained DMS network
    :param n_trials: number of noise-free trials averaged per pairing
    :param ax: matplotlib axes (a new figure is created if None)
    :return: the matplotlib image returned by matshow
    """
    if ax is None:
        fig, ax = plt.subplots()
    # Use the maximal epoch lengths so every trial has the same layout.
    stim1_begin = max_fixation_duration_discrete
    stim1_end = stim1_begin + max_stimulus1_duration_discrete
    stim2_begin = stim1_end + max_delay_duration_discrete
    stim2_end = stim2_begin + max_stimulus2_duration_discrete
    decision_end = stim2_end + decision_duration_discrete
    mean_outputs = np.zeros((2, 2))
    for inp1 in range(2):
        for inp2 in range(2):
            # Renamed local from `input` to avoid shadowing the builtin.
            inp = torch.zeros(n_trials, decision_end, 2)
            inp[:, stim1_begin:stim1_end, inp1] = 1
            inp[:, stim2_begin:stim2_end, inp2] = 1
            output = net(inp)
            output = output.squeeze().detach().numpy()
            mean_outputs[inp1, inp2] = output[:, stim2_end:decision_end].mean()
    image = ax.matshow(mean_outputs, cmap="gray", vmin=-1, vmax=1)
    ax.set_xticks([])
    ax.set_yticks([])
    return image
data/examples/RNN/RNN_scripts/helpers.py ADDED
@@ -0,0 +1,507 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import pickle
3
+ import os
4
+ import numpy as np
5
+ from sklearn.decomposition import PCA
6
+ import matplotlib
7
+ import matplotlib.pyplot as plt
8
+ import matplotlib.ticker as ticker
9
+ from matplotlib.patches import Ellipse
10
+ import seaborn as sns
11
+
12
+ from . import clustering
13
+ from . import modules
14
+ from . import dms
15
+
16
+
17
def load_network(f):
    """Load RNN network.

    Accepts checkpoints saved either as a (labels, state_dict) pair or as a
    bare state_dict; returns ``(z, net)`` where ``z`` is None in the latter
    case.
    """
    noise_std = 5e-2
    alpha = 0.2
    hidden_size = 500

    checkpoint = torch.load(f, map_location="cpu")

    if len(checkpoint) == 2:
        z, state = checkpoint
    else:
        z = None
        state = checkpoint

    net = modules.LowRankRNN(2, hidden_size, 1, noise_std, alpha, rank=2)
    net.load_state_dict(state)
    net.svd_reparametrization()

    return z, net
36
+
37
+
38
def sample_network(net, f, seed=0):
    """Build and train a resampled 2-population support network for *net*.

    Fits a Bayesian GMM over the connectivity loadings, resamples a support
    network from it, and fine-tunes it on DMS data. Results are cached at
    path *f*; if the file already exists it is loaded instead of retraining.

    :param net: trained LowRankRNN to resample
    :param f: cache path for the (z, net_sampled) pair
    :param seed: random state for the GMM fit
    :return: (z, net_sampled) population labels and the resampled network
    """
    if os.path.exists(f):
        print('Network found with same name. Loading...')
        # Close the file handle deterministically (the original leaked it).
        with open(f, "rb") as fh:
            return torch.load(fh)

    n_pops = 2
    z, _ = clustering.gmm_fit(net, n_pops, algo="bayes", random_state=seed)
    net_sampled = clustering.to_support_net(net, z)

    # NOTE: the original re-checked os.path.exists(f) here; that branch was
    # unreachable (the early return above already handled it) and was removed.
    x_train, y_train, mask_train, x_val, y_val, mask_val = dms.generate_dms_data(1000)
    modules.train(
        net_sampled,
        x_train,
        y_train,
        mask_train,
        20,
        lr=1e-6,
        resample=True,
        keep_best=True,
        clip_gradient=1,
    )
    torch.save([z, net_sampled], f)

    return z, net_sampled
67
+
68
+
69
def generate_trajectories(
    net, input=None, epochs=None, n_traj=None, fname="./data/RNN_trajectories.pkl"
):
    """Simulate *n_traj* noisy trajectories of *net* for each input condition.

    :param net: RNN whose forward pass returns (output, trajectories)
    :param input: list of per-condition input tensors
    :param epochs: time indices delimiting task epochs; each trajectory is
        split at these boundaries
    :param n_traj: number of stochastic repetitions per condition
    :param fname: cache path; if the file exists the cached result is
        returned, and if None nothing is read or written to disk
    :return: nested list traj[condition][repetition][epoch] of numpy arrays
    """
    if fname is not None and os.path.exists(fname):
        print('Trajectory file found with same name. Loading...')
        with open(fname, "rb") as fh:
            return pickle.load(fh)

    traj = []
    for i in range(len(input)):
        conds = []
        for _ in range(n_traj):
            _, traj_ = net(input[i].unsqueeze(0))
            traj_ = traj_.squeeze().detach().numpy()
            # Split the full trajectory at the epoch boundaries.
            traj_epoch = [traj_[e : epochs[j + 1]] for j, e in enumerate(epochs[:-1])]
            conds.append(traj_epoch)

        traj.append(conds)

    # BUGFIX: the original unconditionally called open(fname, "wb"), which
    # crashed when fname=None; also use a context manager to close the file.
    if fname is not None:
        with open(fname, "wb") as fh:
            pickle.dump(traj, fh)

    return traj
92
+
93
+
94
def load_trajectories(fname):
    """Load trajectories cached by :func:`generate_trajectories`.

    :param fname: path to the pickle file
    :return: the unpickled nested trajectory list
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `pickle.load(open(...))` leaked the handle).
    with open(fname, "rb") as fh:
        return pickle.load(fh)
96
+
97
+
98
def plot_ellipse(ax, w, color="silver", std_factor=1):
    """Overlay a covariance ellipse of the 2D point cloud *w* on *ax*.

    The ellipse is centred at the origin, oriented along the principal
    eigenvector of the (uncentred) covariance, with semi-axes scaled by
    *std_factor* standard deviations.
    """
    pts = np.array([w[:, 0], w[:, 1]]).T
    cov = pts.T @ pts / pts.shape[0]
    eigvals, eigvecs = np.linalg.eig(cov)
    # Orientation of the first eigenvector, converted to degrees.
    principal = eigvecs[:, 0]
    angle_deg = np.arctan(principal[1] / principal[0]) * 180 / np.pi
    ellipse = Ellipse(
        xy=[0, 0],
        angle=angle_deg,
        width=np.sqrt(eigvals[0]) * 2 * std_factor,
        height=np.sqrt(eigvals[1]) * 2 * std_factor,
        fill=True,
        fc=color,
        ec=color,
        lw=1,
        alpha=0.4,
    )
    ax.add_artist(ellipse)
    return ax
120
+
121
+
122
def plot_coefficients(net, z=None):
    """Scatter the connectivity and input loadings of *net*, colored by population.

    If *z* (binary population labels) is None, a 2-population Bayesian GMM is
    fitted first. Produces a 1x4 panel: input-weight plane (with 3-sigma
    covariance ellipses per population), m1-m2, n1-n2 and m1-n1 planes.
    """
    if z is None:
        n_pops = 2
        z, _ = clustering.gmm_fit(net, n_pops, algo="bayes", random_state=0)

    # Rank-2 connectivity vectors and the two input-weight rows.
    m1 = net.m[:, 0].detach().numpy()
    n1 = net.n[:, 0].detach().numpy()
    m2 = net.m[:, 1].detach().numpy()
    n2 = net.n[:, 1].detach().numpy()
    wi1 = net.wi[0].detach().numpy()
    wi2 = net.wi[1].detach().numpy()

    fig, ax = plt.subplots(1, 4, figsize=(12, 2))

    colors = ["#364285", "#E5BA52"]
    n_pops = 2
    # Panel 0: input-weight plane with one ellipse per population
    # (z == 1 population first, then z == 0).
    clustering.pop_scatter_linreg(wi1, wi2, z, n_pops, colors=colors, ax=ax[0])
    plot_ellipse(
        ax[0], np.array([wi1[z.astype(bool)], wi2[z.astype(bool)]]).T, std_factor=3, color=colors[1]
    )
    plot_ellipse(
        ax[0],
        np.array([wi1[~z.astype(bool)], wi2[~z.astype(bool)]]).T,
        std_factor=3,
        color=colors[0],
    )

    # Panels 1-3: pairwise planes of the connectivity loadings.
    clustering.pop_scatter_linreg(m1, m2, z, n_pops, colors=colors, ax=ax[1])
    clustering.pop_scatter_linreg(n1, n2, z, n_pops, colors=colors, ax=ax[2])
    clustering.pop_scatter_linreg(m1, n1, z, n_pops, colors=colors, ax=ax[3])
152
+
153
+
154
def setup_matplotlib():
    """Apply the project's shared matplotlib rcParams (fonts, sizes, spines)."""
    plt.rcParams.update(
        {
            "axes.titlesize": 24,
            "axes.labelsize": 19,
            "xtick.labelsize": 16,
            "ytick.labelsize": 16,
            "figure.figsize": (6, 4),
            "axes.titlepad": 24,
            "axes.labelpad": 10,
            "axes.spines.top": False,
            "axes.spines.right": False,
            "font.size": 14,
        }
    )
165
+
166
+
167
def get_lower_tri_heatmap(
    ov, bounds=None, figsize=None, cbar=False, cbar_shrink=0.9, cbar_pad=0.3, ax=None
):
    """Draw the lower triangle of the square overlap matrix *ov* as a heatmap.

    The diagonal is zeroed (in place) and the upper triangle is masked out.

    :param ov: square ndarray; its diagonal is set to 0 in place
    :param bounds: (vmin, vmax) color limits; symmetric around 0 if None
    :param figsize: figure size used when a new figure is created
    :param cbar: whether to draw a colorbar
    :param cbar_shrink: colorbar shrink factor (used only if cbar)
    :param cbar_pad: colorbar padding (used only if cbar)
    :param ax: axes to draw on; a new figure is created if None
    :return: (ax, mesh) the axes and the seaborn heatmap axes/mesh
    """
    # BUGFIX: np.bool was deprecated and removed in NumPy 1.24; the builtin
    # bool dtype is the supported spelling.
    mask = np.zeros_like(ov, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    ov[np.diag_indices_from(ov)] = 0
    mask = mask.T

    if figsize is None:
        figsize = matplotlib.rcParams["figure.figsize"]

    if bounds is None:
        # Symmetric limits so the diverging colormap is centred on zero.
        bound = np.max((np.abs(np.min(ov)), np.abs(np.max(ov))))
        bounds = [-bound, bound]

    if ax is None:
        f, ax = plt.subplots(figsize=figsize)

    cmap = sns.diverging_palette(220, 10, sep=10, as_cmap=True)

    # Single heatmap call (the original duplicated it for the cbar branch);
    # debug print() calls were removed.
    cbar_kws = (
        {"shrink": cbar_shrink, "ticks": ticker.MaxNLocator(3), "pad": cbar_pad}
        if cbar
        else None
    )
    mesh = sns.heatmap(
        ov[:-1, 1:],
        mask=mask[:-1, 1:],
        cmap=cmap,
        center=0,
        square=True,
        linewidths=0.5,
        cbar=cbar,
        vmin=bounds[0],
        vmax=bounds[1],
        ax=ax,
        cbar_kws=cbar_kws,
    )
    ax.xaxis.tick_top()
    ax.yaxis.tick_right()
    return ax, mesh
221
+
222
+
223
def set_size(size, ax=None):
    """to force the size of the plot, not of the overall figure, from
    https://stackoverflow.com/questions/44970010/axes-class-set-explicitly-size-width-height-of-axes-in-given-units"""
    if not ax:
        ax = plt.gca()
    pars = ax.figure.subplotpars
    w, h = size
    # Scale the figure so the axes region itself ends up w x h inches.
    fig_w = float(w) / (pars.right - pars.left)
    fig_h = float(h) / (pars.top - pars.bottom)
    ax.figure.set_size_inches(fig_w, fig_h)
236
+
237
+
238
def center_limits(ax):
    """Make the x and y limits of *ax* symmetric about zero."""
    for get_lim, set_lim in (
        (ax.get_xlim, ax.set_xlim),
        (ax.get_ylim, ax.set_ylim),
    ):
        lo, hi = get_lim()
        bound = max(-lo, hi)
        set_lim(-bound, bound)
245
+
246
+
247
def plot_all_scatters(vectors):
    """Scatter every vector against every other in a len(vectors)^2 grid.

    :param vectors: sequence of 1D arrays of equal length
    :return: the grid of axes
    """
    n = len(vectors)
    fig, ax = plt.subplots(n, n, figsize=(6, 8))
    for i in range(n):
        for j in range(n):
            # BUGFIX: use value inequality. `i is not j` compares object
            # identity and only works by accident of CPython's small-int cache.
            if i != j:
                ax[i, j].scatter(vectors[i], vectors[j])
    return ax
254
+
255
+
256
def plot_rates_single_neurons(rates, offset=1, colors=None, deltaT=1.0, figsize=(6, 8), ax=None):
    """Plot each neuron's rate trace, vertically stacked with a fixed offset.

    :param rates: array of shape (time, neurons)
    :param offset: vertical gap between consecutive traces
    :param colors: optional per-neuron colors (defaults to red)
    :param deltaT: timestep in ms; the x axis is converted to seconds
    :return: the axes
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
    t = np.arange(rates.shape[0]) * deltaT / 1000
    baseline = 0.0
    for i in range(rates.shape[1]):
        trace = rates[:, i]
        color = "red" if colors is None else colors[i]
        # Shift each trace so it sits `offset` above the previous one's peak.
        ax.plot(t, trace + baseline + offset - np.min(trace), color=color)
        baseline = baseline + offset - np.min(trace) + np.max(trace)
    return ax
269
+
270
+
271
def bar_plots_vectors(n, wi, wi_ctx1, wi_ctx2, title, xticks):
    """Bar-plot the overlap of vector *n* with input *wi*, raw and with *n*
    rescaled neuron-wise by the gain phi'(wi_ctx) in two contexts.

    :param n: connectivity vector, shape (N,)
    :param wi: input vector, shape (N,)
    :param wi_ctx1: contextual input in context 1, shape (N,)
    :param wi_ctx2: contextual input in context 2, shape (N,)
    :param title: y-axis label
    :param xticks: the three bar labels
    :return: the axes
    """
    fig, ax = plt.subplots()
    x = np.arange(3)
    # Gain of the tanh nonlinearity at each contextual operating point.
    ctx_derivative1 = phi_prime(wi_ctx1)
    ctx_derivative2 = phi_prime(wi_ctx2)
    win = wi
    Nfull = n.shape[0]
    # Effective vectors: n rescaled elementwise by the contextual gain.
    neff_ctx1 = n.reshape(Nfull, 1) * ctx_derivative1.reshape(Nfull, 1)
    neff_ctx2 = n.reshape(Nfull, 1) * ctx_derivative2.reshape(Nfull, 1)
    # Unnormalized overlaps (sums of elementwise products).
    ov1 = np.sum(n.reshape(Nfull, 1) * win.reshape(Nfull, 1))
    ov2 = np.sum(neff_ctx1.reshape(Nfull, 1) * win.reshape(Nfull, 1))
    ov3 = np.sum(neff_ctx2.reshape(Nfull, 1) * win.reshape(Nfull, 1))
    y = [ov1, ov2, ov3]
    ax.bar(x, y)
    plt.ylabel(title, fontsize=30)
    plt.xticks(x, xticks, fontsize=25)
    return ax
288
+
289
+
290
def radial_distribution_plot(x, N=80, bottom=0.1, cmap_scale=0.05, points=True):
    """
    Plot a radial histogram of angles
    :param x: if points=True, an array of shape nx2 of points in 2d space. if points=False a series of angles
    :param N: num bins
    :param bottom: radius of base circle
    :param cmap_scale: to adjust the colormap
    :param points: see x
    :return:
    """
    if points:
        assert len(x.shape) == 2 and x.shape[1] == 2
        # Interpret each 2D point as a complex number to get its angle.
        x_cplx = np.array(x[:, 0], dtype=np.complex64)
        x_cplx.imag = x[:, 1]
        angles = np.angle(x_cplx)
    else:
        angles = x
    # Wrap all angles into [0, 2*pi).
    angles = angles % (2 * np.pi)
    theta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)
    # Fraction of angles falling in each of the N angular bins.
    radii = [
        np.mean(np.logical_and(angles > theta[i], angles < theta[i + 1]))
        for i in range(len(theta) - 1)
    ]
    radii.append(np.mean(angles > theta[-1]))
    width = (2 * np.pi) / N
    offset = np.pi / N  # center each bar on its bin
    ax = plt.subplot(111, polar=True)
    bars = ax.bar(theta + offset, radii, width=width, bottom=bottom)
    # Color each bar by its height relative to cmap_scale.
    for r, bar in zip(radii, bars):
        bar.set_facecolor(plt.cm.jet(r / cmap_scale))
        bar.set_alpha(0.8)
    plt.yticks([])
322
+
323
+
324
def dimensionality_plot(trajectories, vecs, labels, figsize=None):
    """
    plot cumulative percentage of variance explained by vectors vecs, while ordering with most explicative vectors first
    :param trajectories: numpy array of shape #time_points x #neurons (with trials already flattened)
    :param vecs: list of numpy arrays of shape #neurons
    :param labels: labels associated with each vector
    :param figsize:
    :return: axes
    """
    total_var = np.sum(np.var(trajectories, axis=0))

    # Rank the vectors by the variance each explains on its own,
    # most explicative first.
    vars_nonorth = []
    for v in vecs:
        proj = trajectories @ v / np.linalg.norm(v)
        vars_nonorth.append(np.var(proj))
    indices = np.argsort(vars_nonorth)

    vecs_ordered = [vecs[i] for i in indices[::-1]]
    labels_ordered = [labels[i] for i in indices[::-1]]
    # Orthogonalize so later vectors only contribute their residual variance.
    vecs_orth = gram_schmidt(vecs_ordered)
    variances = []
    for v in vecs_orth:
        proj = trajectories @ v / np.linalg.norm(v)
        variances.append(np.var(proj))
    # BUGFIX: keep the cumulative sum as an ndarray. The original converted
    # it with .tolist(), making `cumvar / total_var * 100` a TypeError
    # (a Python list cannot be divided by a float). Debug prints removed.
    cumvar = np.cumsum(variances)

    fig, ax = plt.subplots(figsize=figsize)
    ax.bar(
        range(1, len(variances) + 1), cumvar / total_var * 100, color="lightslategray", alpha=0.5
    )
    ax.axhline(100, c="r")
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.set(xticks=range(1, len(variances) + 1), xticklabels=labels_ordered, yticks=[0, 100])
    return ax
362
+
363
+
364
def phi_prime(x):
    """Derivative of the tanh nonlinearity: 1 - tanh(x)^2."""
    tanh_x = np.tanh(x)
    return 1 - tanh_x * tanh_x
366
+
367
+
368
def overlap_matrix(vectors):
    """Upper-triangular matrix of normalized overlaps v_i . v_j / N.

    Only entries with j >= i are filled; the lower triangle stays zero.
    """
    n_vecs = len(vectors)
    hidden_size = len(vectors[0])
    ov = np.zeros((n_vecs, n_vecs))
    for i in range(n_vecs):
        for j in range(i, n_vecs):
            ov[i, j] = np.sum(vectors[i] * vectors[j]) / hidden_size
    return ov
375
+
376
+
377
def boxplot_accuracies(accs, figsize=None, labels=None):
    """Box-plot a sequence of accuracy samples; scalar entries become stars.

    :param accs: sequence whose items are either lists of accuracies (drawn
        as box plots with underlying points) or scalars (drawn as stars)
    :param figsize: figure size
    :param labels: x tick labels
    :return: the axes
    """
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    for pos, acc in enumerate(accs):
        if isinstance(acc, list):
            bp = ax.boxplot(acc, positions=[pos], widths=0.5)
            ax.scatter([pos] * len(acc), acc, c="gray", alpha=0.5, s=5)
            for median in bp["medians"]:
                median.set_linewidth(2)
        else:
            plt.scatter(pos, acc, marker="*", s=90, c="k")
    ax.set_xlim(-0.5, len(accs) - 0.5)
    ax.set_ylim(0, 1.1)
    ax.set_xticks(list(range(len(accs))))
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.set_xticklabels(labels, rotation=45)
    ax.set_yticks([0.0, 0.25, 0.5, 0.75, 1.0])
    ax.set_ylabel("accuracy")
    ax.axhline(1, c="k", zorder=-10, lw=1, ls="--")
    return ax
396
+
397
+
398
def gram_schmidt(vecs):
    """Orthonormalize *vecs* with (modified) Gram-Schmidt.

    :param vecs: non-empty list of 1D numpy arrays
    :return: list of orthonormal vectors spanning the same subspace
    """
    basis = [vecs[0] / np.linalg.norm(vecs[0])]
    for vec in vecs[1:]:
        residual = vec
        # Subtract the projection onto each already-built basis vector.
        for q in basis:
            residual = residual - (residual @ q) * q
        basis.append(residual / np.linalg.norm(residual))
    return basis
408
+
409
+
410
def gram_factorization(G):
    """
    The rows of the returned matrix are the basis vectors whose Gramian matrix is G
    :param G: ndarray representing a symmetric semidefinite positive matrix
    :return: ndarray
    """
    eigvals, eigvecs = np.linalg.eigh(G)
    # Scale each eigenvector column by sqrt of its eigenvalue, so that
    # X @ X.T reconstructs G.
    return eigvecs * np.sqrt(eigvals)
419
+
420
+
421
def angle(v, w, deg=True):
    """Angle between vectors *v* and *w*, in degrees by default.

    :param deg: if False, return radians instead
    """
    cos_theta = (v @ w) / (np.linalg.norm(v) * np.linalg.norm(w))
    theta = np.arccos(cos_theta)
    return theta * 180 / np.pi if deg else theta
427
+
428
+
429
def plot_experiment(net, input, traj, epochs, rect=(-8, 8, -6, 6), traj_to_show=1):
    """Grid of flow fields and trajectories for 4 conditions x 5 task epochs.

    Rows are four conditions sampled evenly from *input*; columns are the
    task epochs (Fix, Stim 1, Delay, Stim 2, Decision). Each panel shows the
    flow field at that epoch plus the current-epoch trajectories (solid) and,
    from the second epoch on, the previous epoch's trajectories (dashed).

    :param net: the low-rank RNN
    :param input: list of condition input tensors
    :param traj: nested trajectories traj[condition][repetition][epoch]
    :param epochs: epoch boundary indices (len(epochs) - 1 epochs)
    :param rect: (xmin, xmax, ymin, ymax) extent for the flow field
    :param traj_to_show: number of trajectories drawn per panel
    """
    fig, ax = plt.subplots(4, 5, figsize=(25, 20))
    # Four conditions spread evenly over the available inputs.
    idx = np.floor(np.linspace(0, len(input) - 1, 4))
    for i in range(4):
        for j, e in enumerate(epochs[:-1]):
            # Flow field evaluated at the input active at epoch start e.
            dms._plot_field(net, input[int(idx[i]), e], ax[i][j], rect=rect, sizes=1.3)
            epoch = [c[j] for c in traj[i]]
            dms.plot_trajectories(net, epoch, ax[i][j], c="#C30021", n_traj=traj_to_show)
            if j > 0:
                # Overlay the previous epoch's trajectories, dashed.
                epoch = [c[j - 1] for c in traj[i]]
                dms.plot_trajectories(
                    net, epoch, ax[i][j], c="#C30021", style="--", n_traj=traj_to_show
                )

    ax[0][0].set_title("Fix")
    ax[0][1].set_title("Stim 1")
    ax[0][2].set_title("Delay")
    ax[0][3].set_title("Stim 2")
    ax[0][4].set_title("Decision")

    for i in range(4):
        ax[i][0].set_ylabel(r"$\kappa_2$")
    for i in range(5):
        ax[3][i].set_xlabel(r"$\kappa_1$")

    fig.subplots_adjust(hspace=0.1, wspace=0.1)
455
+
456
+
457
def aggregate_data(traj, epochs, transient=10, only_stim=False, pca=True, n_pca=3):
    """Flatten nested trajectories into (position, velocity) sample pairs.

    :param traj: nested list traj[condition][repetition][epoch] of arrays
        (time x neurons), as produced by generate_trajectories
    :param epochs: epoch boundary indices; len(epochs) - 1 epochs exist
    :param transient: initial timesteps discarded from each epoch
    :param only_stim: if True, skip the unstimulated epochs
    :param pca: if truthy, fit PCA on all data and project samples onto it
    :param n_pca: number of principal components
    :return: (pos, vel) lists with one stacked array per
        condition x input-regime; vel holds one-step differences of pos

    NOTE(review): the epoch indices [0, 2, 4] ("no input") and [1, 3]
    ("stimulated") assume the 5-epoch DMS task layout — confirm if reused.
    """

    n_conds = len(traj)
    n_epochs = len(epochs) - 1
    n_traj = len(traj[0])

    # fit PCA to all data
    pos = []
    for i in range(n_conds):  # conditions
        for k in range(n_epochs):
            for j in range(n_traj):  # trajectories
                pos.append(traj[i][j][k][transient:])

    if pca:
        # Rebinds `pca` from flag to fitted PCA object; later truthiness
        # checks then test the fitted object.
        pca = PCA(n_components=n_pca)
        pca.fit(np.vstack(pos))
        print("Explained variance: ", pca.explained_variance_ratio_)

    # aggregate data under baseline condition (no input)
    pos, vel = [], []
    if not only_stim:
        for i in range(n_conds):  # conditions
            pos_, vel_ = [], []
            for k in [0, 2, 4]:
                for j in range(n_traj):  # trajectories
                    pos_proj = traj[i][j][k][transient:]
                    if pca:
                        pos_proj = pca.transform(pos_proj)
                    pos_.append(pos_proj[:-1])  # stack trajectories
                    vel_.append(np.diff(pos_proj, axis=0))  # compute differences

            pos_, vel_ = np.vstack(pos_), np.vstack(vel_)  # stack trajectories
            pos.append(pos_)
            vel.append(vel_)

    # aggregate data under stimulated condition
    for i in range(n_conds):  # conditions
        pos_, vel_ = [], []
        for k in [1, 3]:
            for j in range(n_traj):  # trajectories
                pos_proj = traj[i][j][k][transient:]
                if pca:
                    pos_proj = pca.transform(pos_proj)
                pos_.append(pos_proj[:-1])
                vel_.append(np.diff(pos_proj, axis=0))

        pos_, vel_ = np.vstack(pos_), np.vstack(vel_)  # stack trajectories
        pos.append(pos_)
        vel.append(vel_)

    return pos, vel
data/examples/RNN/RNN_scripts/modules.py ADDED
@@ -0,0 +1,1273 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """RNN definition of network classes, training functionality."""
2
+ import torch.nn as nn
3
+ from math import sqrt, floor
4
+ import random
5
+ import time
6
+ import torch
7
+ import numpy as np
8
+ import matplotlib.pyplot as plt
9
+
10
+
11
def gram_schmidt_pt(mat):
    """Orthonormalize the rows of *mat* IN PLACE with Gram-Schmidt.

    :param mat: 2D tensor; after the call its rows are orthonormal
    """
    mat[0] = mat[0] / torch.norm(mat[0])
    for i in range(1, mat.shape[0]):
        prev = mat[:i]
        # Remove the projection onto the span of the previous orthonormal rows.
        mat[i] = mat[i] - prev.t() @ (prev @ mat[i])
        mat[i] = mat[i] / torch.norm(mat[i])
21
+
22
+
23
def loss_mse(output, target, mask):
    """
    Mean squared error loss
    :param output: torch tensor of shape (num_trials, num_timesteps, output_dim)
    :param target: torch tensor of shape (num_trials, num_timesteps, output_dim)
    :param mask: torch tensor of shape (num_trials, num_timesteps, 1)
    :return: float
    """
    # Squared error per (trial, timestep), averaged across output dimensions.
    per_step = ((mask * (target - output)) ** 2).mean(dim=-1)
    # Normalize each trial by its own number of masked-in timesteps.
    per_trial = per_step.sum(dim=-1) / mask[:, :, 0].sum(dim=-1)
    return per_trial.mean()
36
+
37
+
38
def train(
    net,
    _input,
    _target,
    _mask,
    n_epochs,
    random_ic=False,
    lr=1e-2,
    batch_size=32,
    plot_learning_curve=False,
    plot_gradient=False,
    mask_gradients=False,
    clip_gradient=None,
    early_stop=None,
    keep_best=False,
    cuda=False,
    resample=False,
):
    """
    Train a network
    :param net: nn.Module
    :param _input: torch tensor of shape (num_trials, num_timesteps, input_dim)
    :param _target: torch tensor of shape (num_trials, num_timesteps, output_dim)
    :param _mask: torch tensor of shape (num_trials, num_timesteps, 1)
    :param n_epochs: int
    :param random_ic: bool, if True a fresh random initial state h0 is drawn per batch
    :param lr: float, learning rate
    :param batch_size: int
    :param plot_learning_curve: bool
    :param plot_gradient: bool
    :param mask_gradients: bool, set to True if training the SupportLowRankRNN_withMask for reduced models
    :param clip_gradient: None or float, if not None the value at which gradient norm is clipped
    :param early_stop: None or float, set to target loss value after which to immediately stop if attained
    :param keep_best: bool, if True, model with lower loss from training process will be kept (for this option, the
    network has to implement a method clone())
    :param cuda: bool, run on GPU if available (falls back to CPU with a warning)
    :param resample: for SupportLowRankRNNs, set True
    :return: nothing
    """
    print("Training...")
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    num_examples = _input.shape[0]
    all_losses = []
    if plot_gradient:
        gradient_norms = []

    # CUDA management
    if cuda:
        if not torch.cuda.is_available():
            print("Warning: CUDA not available on this machine, switching to CPU")
            device = torch.device("cpu")
        else:
            device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    net.to(device=device)
    input = _input.to(device=device)
    target = _target.to(device=device)
    mask = _mask.to(device=device)

    # Initialize setup to keep best network
    with torch.no_grad():
        output, _ = net(input)
        initial_loss = loss_mse(output, target, mask)
        print("initial loss: %.3f" % (initial_loss.item()))
        if keep_best:
            best = net.clone()
            best_loss = initial_loss.item()

    if random_ic:
        print("training with random initial conditions")

    # Training loop
    for epoch in range(n_epochs):
        begin = time.time()
        losses = []  # losses over the whole epoch
        for i in range(num_examples // batch_size):
            optimizer.zero_grad()
            # Sample a batch without replacement within this step.
            random_batch_idx = random.sample(range(num_examples), batch_size)
            batch = input[random_batch_idx]

            # set initial condition
            if random_ic:
                # norm = net.m.norm(dim=0)
                h0 = torch.rand(net.hidden_size).to(device) * net.hidden_size / 300
                net.h0.data = h0

            output, _ = net(batch)
            loss = loss_mse(output, target[random_batch_idx], mask[random_batch_idx])
            losses.append(loss.item())
            all_losses.append(loss.item())
            loss.backward()
            if mask_gradients:
                # Zero out gradients for frozen support entries of each parameter.
                net.m.grad = net.m.grad * net.m_mask
                net.n.grad = net.n.grad * net.n_mask
                net.wi.grad = net.wi.grad * net.wi_mask
                net.wo.grad = net.wo.grad * net.wo_mask
                net.unitn.grad = net.unitn.grad * net.unitn_mask
                net.unitm.grad = net.unitm.grad * net.unitm_mask
                net.unitwi.grad = net.unitwi.grad * net.unitwi_mask
            if clip_gradient is not None:
                torch.nn.utils.clip_grad_norm_(net.parameters(), clip_gradient)
            if plot_gradient:
                # Track the global gradient norm across all trainable params.
                tot = 0
                for param in [p for p in net.parameters() if p.requires_grad]:
                    tot += (param.grad**2).sum()
                gradient_norms.append(sqrt(tot))
            optimizer.step()
            # These 2 lines important to prevent memory leaks
            loss.detach_()
            output.detach_()
            if resample:
                net.resample_basis()
        if keep_best and np.mean(losses) < best_loss:
            best = net.clone()
            best_loss = np.mean(losses)
            print(
                "epoch %d: loss=%.3f (took %.2f s) *"
                % (epoch, np.mean(losses), time.time() - begin)
            )
        else:
            print(
                "epoch %d: loss=%.3f (took %.2f s)"
                % (epoch, np.mean(losses), time.time() - begin)
            )
        if early_stop is not None and np.mean(losses) < early_stop:
            break

    if plot_learning_curve:
        plt.plot(all_losses)
        plt.title("Learning curve")
        plt.show()

    if plot_gradient:
        plt.plot(gradient_norms)
        plt.title("Gradient norm")
        plt.show()

    if keep_best:
        # Restore the best-scoring snapshot seen during training.
        net.load_state_dict(best.state_dict())
176
+
177
+
178
class FullRankRNN(nn.Module):
    """Vanilla rate RNN with a full NxN recurrent matrix and tanh nonlinearity.

    Dynamics (Euler step): h <- h + noise + alpha * (-h + W_rec r + W_in u),
    with r = tanh(h) and linear readout r @ W_out.
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        output_size,
        noise_std,
        alpha=0.2,
        rho=1,
        train_wi=False,
        train_wo=False,
        train_wrec=True,
        train_h0=False,
        wi_init=None,
        wo_init=None,
        wrec_init=None,
        si_init=None,
        so_init=None,
    ):
        """
        :param input_size: int
        :param hidden_size: int
        :param output_size: int
        :param noise_std: float
        :param alpha: float, value of dt/tau
        :param rho: float, std of gaussian distribution for initialization
        :param train_wi: bool
        :param train_wo: bool
        :param train_wrec: bool
        :param train_h0: bool
        :param wi_init: torch tensor of shape (input_dim, hidden_size)
        :param wo_init: torch tensor of shape (hidden_size, output_dim)
        :param wrec_init: torch tensor of shape (hidden_size, hidden_size)
        :param si_init: input scaling, torch tensor of shape (input_dim)
        :param so_init: output scaling, torch tensor of shape (output_dim)
        """
        super(FullRankRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.noise_std = noise_std
        self.alpha = alpha
        self.rho = rho
        self.train_wi = train_wi
        self.train_wo = train_wo
        self.train_wrec = train_wrec
        self.train_h0 = train_h0
        self.non_linearity = torch.tanh

        # Define parameters
        # Either the raw weights or their scalar scalings are trained, not both.
        self.wi = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.si = nn.Parameter(torch.Tensor(input_size))
        if train_wi:
            self.si.requires_grad = False
        else:
            self.wi.requires_grad = False
        self.wrec = nn.Parameter(torch.Tensor(hidden_size, hidden_size))
        if not train_wrec:
            self.wrec.requires_grad = False
        self.wo = nn.Parameter(torch.Tensor(hidden_size, output_size))
        self.so = nn.Parameter(torch.Tensor(output_size))
        if train_wo:
            self.so.requires_grad = False
        if not train_wo:
            self.wo.requires_grad = False
        self.h0 = nn.Parameter(torch.Tensor(hidden_size))
        if not train_h0:
            self.h0.requires_grad = False

        # Initialize parameters
        with torch.no_grad():
            if wi_init is None:
                self.wi.normal_()
            else:
                self.wi.copy_(wi_init)
            if si_init is None:
                self.si.set_(torch.ones_like(self.si))
            else:
                self.si.copy_(si_init)
            if wrec_init is None:
                # Standard 1/sqrt(N) scaling so the spectral radius is ~rho.
                self.wrec.normal_(std=rho / sqrt(hidden_size))
            else:
                self.wrec.copy_(wrec_init)
            if wo_init is None:
                self.wo.normal_(std=1 / hidden_size)
            else:
                self.wo.copy_(wo_init)
            if so_init is None:
                self.so.set_(torch.ones_like(self.so))
            else:
                self.so.copy_(so_init)
        self.h0.zero_()
        self.wi_full, self.wo_full = [None] * 2
        self._define_proxy_parameters()

    def _define_proxy_parameters(self):
        # Effective weights: raw weights modulated by their scalings.
        self.wi_full = (self.wi.t() * self.si).t()
        self.wo_full = self.wo * self.so

    def forward(self, input):
        """
        :param input: tensor of shape (batch_size, #timesteps, input_dimension)
        Important: the 3 dimensions need to be present, even if they are of size 1.
        :return: (output tensor, trajectories tensor of shape (batch_size, #timesteps, #hidden_units))
        """
        batch_size = input.shape[0]
        seq_len = input.shape[1]
        h = self.h0
        r = self.non_linearity(h)
        self._define_proxy_parameters()
        noise = torch.randn(batch_size, seq_len, self.hidden_size, device=self.wrec.device)
        output = torch.zeros(batch_size, seq_len, self.output_size, device=self.wrec.device)
        trajectories = torch.zeros(batch_size, seq_len, self.hidden_size, device=self.wrec.device)

        # simulation loop: Euler integration with additive Gaussian noise
        for i in range(seq_len):
            h = (
                h
                + self.noise_std * noise[:, i, :]
                + self.alpha * (-h + r.matmul(self.wrec.t()) + input[:, i, :].matmul(self.wi_full))
            )

            r = self.non_linearity(h)
            output[:, i, :] = r.matmul(self.wo_full)
            trajectories[:, i, :] = h

        return output, trajectories

    def clone(self):
        # Fresh instance initialized from the current parameter tensors.
        new_net = FullRankRNN(
            self.input_size,
            self.hidden_size,
            self.output_size,
            self.noise_std,
            self.alpha,
            self.rho,
            self.train_wi,
            self.train_wo,
            self.train_wrec,
            self.train_h0,
            self.wi,
            self.wo,
            self.wrec,
            self.si,
            self.so,
        )
        return new_net
325
+
326
+
327
def simulation_loop(model, input):
    """Simulate a low-rank RNN forward pass.

    Euler dynamics: h <- h + noise + alpha * (-h + r n m^T / N + u W_in),
    with r = tanh(h); the readout is r @ W_out / N.

    :param model: low-rank RNN exposing h0, non_linearity, noise_std, alpha,
        m, n (hidden_size x rank), wi_full, wo_full, hidden_size, output_size
    :param input: tensor of shape (batch_size, #timesteps, input_dim)
    :return: (output, trajectories) of shapes (batch, T, output_size) and
        (batch, T, hidden_size)
    """
    batch_size = input.shape[0]
    seq_len = input.shape[1]
    h = model.h0
    r = model.non_linearity(h)

    noise = torch.randn(batch_size, seq_len, model.hidden_size, device=model.m.device)
    output = torch.zeros(batch_size, seq_len, model.output_size, device=model.m.device)
    trajectories = torch.zeros(batch_size, seq_len, model.hidden_size, device=model.m.device)

    for i in range(seq_len):
        h = (
            h
            + model.noise_std * noise[:, i, :]
            + model.alpha
            * (
                -h
                # Low-rank recurrence: (r n) m^T, normalized by network size.
                + r.matmul(model.n).matmul(model.m.t()) / model.hidden_size
                + input[:, i, :].matmul(model.wi_full)
            )
        )

        r = model.non_linearity(h)
        output[:, i, :] = r.matmul(model.wo_full) / model.hidden_size
        trajectories[:, i, :] = h

    return output, trajectories
354
+
355
+
356
class LowRankRNN(nn.Module):
    """
    This class implements the low-rank RNN. Instead of being parametrized by an NxN connectivity matrix, it is
    parametrized by two Nxr matrices m and n such that the connectivity is m * n^T
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        output_size,
        noise_std,
        alpha,
        rank=1,
        train_wi=False,
        train_wo=False,
        train_wrec=True,
        train_h0=False,
        train_si=True,
        train_so=True,
        wi_init=None,
        wo_init=None,
        m_init=None,
        n_init=None,
        si_init=None,
        so_init=None,
        h0_init=None,
    ):
        """
        :param input_size: int
        :param hidden_size: int
        :param output_size: int
        :param noise_std: float
        :param alpha: float, value of dt/tau
        :param rank: int
        :param train_wi: bool
        :param train_wo: bool
        :param train_wrec: bool
        :param train_h0: bool
        :param train_si: bool
        :param train_so: bool
        :param wi_init: torch tensor of shape (input_dim, hidden_size)
        :param wo_init: torch tensor of shape (hidden_size, output_dim)
        :param m_init: torch tensor of shape (hidden_size, rank)
        :param n_init: torch tensor of shape (hidden_size, rank)
        :param si_init: torch tensor of shape (input_size)
        :param so_init: torch tensor of shape (output_size)
        :param h0_init: torch tensor of shape (hidden_size)
        """
        super(LowRankRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.noise_std = noise_std
        self.alpha = alpha
        self.rank = rank
        self.train_wi = train_wi
        self.train_wo = train_wo
        self.train_wrec = train_wrec
        self.train_h0 = train_h0
        self.train_si = train_si
        self.train_so = train_so
        self.non_linearity = torch.tanh

        # Define parameters. For the input (and output) side, either the raw
        # vectors (wi/wo) or their per-channel scalings (si/so) are trained,
        # never both at once.
        self.wi = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.si = nn.Parameter(torch.Tensor(input_size))
        if train_wi:
            self.si.requires_grad = False
        else:
            self.wi.requires_grad = False
        if not train_si:
            self.si.requires_grad = False
        self.m = nn.Parameter(torch.Tensor(hidden_size, rank))
        self.n = nn.Parameter(torch.Tensor(hidden_size, rank))
        if not train_wrec:
            self.m.requires_grad = False
            self.n.requires_grad = False
        self.wo = nn.Parameter(torch.Tensor(hidden_size, output_size))
        self.so = nn.Parameter(torch.Tensor(output_size))
        if train_wo:
            self.so.requires_grad = False
        if not train_wo:
            self.wo.requires_grad = False
        if not train_so:
            self.so.requires_grad = False
        self.h0 = nn.Parameter(torch.Tensor(hidden_size))
        if not train_h0:
            self.h0.requires_grad = False

        # Initialize parameters: copy the provided tensors, otherwise use the
        # default random / constant initializations.
        with torch.no_grad():
            if wi_init is None:
                self.wi.normal_()
            else:
                self.wi.copy_(wi_init)
            if si_init is None:
                self.si.set_(torch.ones_like(self.si))
            else:
                self.si.copy_(si_init)
            if m_init is None:
                self.m.normal_()
            else:
                self.m.copy_(m_init)
            if n_init is None:
                self.n.normal_()
            else:
                self.n.copy_(n_init)
            if wo_init is None:
                self.wo.normal_(std=4.0)
            else:
                self.wo.copy_(wo_init)
            if so_init is None:
                self.so.set_(torch.ones_like(self.so))
            else:
                self.so.copy_(so_init)
            if h0_init is None:
                self.h0.zero_()
            else:
                self.h0.copy_(h0_init)
        self.wrec, self.wi_full, self.wo_full = [None] * 3
        self._define_proxy_parameters()

    def _define_proxy_parameters(self):
        """Recompute the effective (scaled) input and output weights."""
        self.wrec = None
        self.wi_full = (self.wi.t() * self.si).t()
        self.wo_full = self.wo * self.so

    def forward(self, input):
        """
        :param input: tensor of shape (batch_size, #timesteps, input_dimension)
        Important: the 3 dimensions need to be present, even if they are of size 1.
        :return: (output tensor, trajectories tensor of shape (batch_size, #timesteps, #hidden_units))
        """
        return simulation_loop(self, input)

    def clone(self):
        """Return a new LowRankRNN with the same hyper-parameters and weights.

        Fix: h0 is now passed to the constructor as well — previously it was
        silently dropped, so clones always restarted from a zero initial state
        even when the source network had a non-zero h0.
        """
        new_net = LowRankRNN(
            self.input_size,
            self.hidden_size,
            self.output_size,
            self.noise_std,
            self.alpha,
            self.rank,
            self.train_wi,
            self.train_wo,
            self.train_wrec,
            self.train_h0,
            self.train_si,
            self.train_so,
            self.wi,
            self.wo,
            self.m,
            self.n,
            self.si,
            self.so,
            self.h0,
        )
        new_net._define_proxy_parameters()
        return new_net

    def load_state_dict(self, state_dict, strict=True):
        """
        Override: drop the legacy "rec_noise" entry and recompute the proxy
        parameters after loading.

        Fix: filter into a fresh dict instead of `del state_dict[...]` so the
        caller's mapping is not mutated as a side effect.
        """
        if "rec_noise" in state_dict:
            state_dict = {k: v for k, v in state_dict.items() if k != "rec_noise"}
        super().load_state_dict(state_dict, strict)
        self._define_proxy_parameters()

    def svd_reparametrization(self):
        """
        Orthogonalize m and n via SVD
        """
        with torch.no_grad():
            structure = (self.m @ self.n.t()).numpy()
            m, s, n = np.linalg.svd(structure, full_matrices=False)
            m, s, n = m[:, : self.rank], s[: self.rank], n[: self.rank, :]
            # Split the singular values evenly between the two factors.
            self.m.set_(torch.from_numpy(m * np.sqrt(s)))
            self.n.set_(torch.from_numpy(n.transpose() * np.sqrt(s)))
            self._define_proxy_parameters()
536
+
537
+
538
class SupportLowRankRNN(nn.Module):
    """
    This class implements the mixture-of-gaussians, low-rank RNN. The difference with the low-rank RNN is that
    all vectors are defined as transformation of a gaussian basis of dimensionality b for each population.

    For example the matrix m, instead of having Nxr free parameters, is parametrized by a tensor
    m_weights of shape (r, p, b) (where r is the rank, p is the number of populations). A gaussian basis of
    shape Nxb is sampled, and m is then computed from the basis and the weights, by assigning each neuron to a
    population monotonically.

    The weights defined above correspond to a linear transformation of the gaussian basis (ie the expectancy
    of the final distribution obtained is always zero). Affine transforms can be defined by setting biases.
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        output_size,
        noise_std,
        alpha,
        rank=1,
        n_supports=1,
        weights=None,
        gaussian_basis_dim=None,
        m_weights_init=None,
        n_weights_init=None,
        wi_weights_init=None,
        wo_weights_init=None,
        m_biases_init=None,
        n_biases_init=None,
        wi_biases_init=None,
        train_biases=False,
    ):
        """
        :param input_size: int
        :param hidden_size: int
        :param output_size: int
        :param noise_std: float
        :param alpha: float
        :param rank: int
        :param n_supports: int, number of cell classes used
        :param weights: list, proportion of total population for each cell class (GMM components weights)
        :param gaussian_basis_dim: dimensionality of the gaussian basis on which weights are learned
        :param m_weights_init: torch tensor of shape (rank, n_supports, gaussian_basis_dim)
        :param n_weights_init: torch tensor of shape (rank, n_supports, gaussian_basis_dim)
        :param wi_weights_init: torch tensor of shape (input_size, n_supports, self.gaussian_basis_dim)
        :param wo_weights_init: torch tensor of shape (output_size, n_supports, self.gaussian_basis_dim)
        :param m_biases_init: torch tensor of shape (rank, n_supports)
        :param n_biases_init: torch tensor of shape (rank, n_supports)
        :param wi_biases_init: torch tensor of shape (input_size, n_supports)
        :param train_biases: bool
        """
        super(SupportLowRankRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.noise_std = noise_std
        self.alpha = alpha
        self.rank = rank
        self.n_supports = n_supports
        # Default basis: enough directions for m, n and the input vectors.
        self.gaussian_basis_dim = (
            2 * rank + input_size if gaussian_basis_dim is None else gaussian_basis_dim
        )
        self.non_linearity = torch.tanh

        # Fixed random gaussian basis (b x N) and population indicator (p x N).
        self.gaussian_basis = nn.Parameter(
            torch.randn((self.gaussian_basis_dim, hidden_size)), requires_grad=False
        )
        self.supports = nn.Parameter(torch.zeros((n_supports, hidden_size)), requires_grad=False)
        if weights is None:
            # Equal-sized contiguous populations.
            # Fix: keep self.weights a (non-trainable) Parameter. The previous
            # code registered a Parameter and then reassigned a plain Python
            # list to the same attribute, which raises a TypeError inside
            # nn.Module.__setattr__ and also broke clone()'s
            # self.weights.tolist() call.
            l_support = hidden_size // n_supports
            for i in range(n_supports):
                self.supports[i, l_support * i : l_support * (i + 1)] = 1
            self.weights = nn.Parameter(
                torch.tensor([l_support / hidden_size] * n_supports), requires_grad=False
            )
        else:
            # Populations sized proportionally to the requested weights.
            k = 0
            self.weights = nn.Parameter(torch.tensor(weights), requires_grad=False)
            for i in range(n_supports):
                self.supports[i, k : k + floor(weights[i] * hidden_size)] = 1
                k += floor(weights[i] * hidden_size)

        # Define parameters: per-population loadings on the gaussian basis.
        self.wi_weights = nn.Parameter(
            torch.Tensor(input_size, n_supports, self.gaussian_basis_dim)
        )
        self.m_weights = nn.Parameter(torch.Tensor(rank, n_supports, self.gaussian_basis_dim))
        self.n_weights = nn.Parameter(torch.Tensor(rank, n_supports, self.gaussian_basis_dim))
        self.wo_weights = nn.Parameter(
            torch.Tensor(output_size, n_supports, self.gaussian_basis_dim)
        )
        self.wi_biases = nn.Parameter(
            torch.Tensor(input_size, n_supports), requires_grad=train_biases
        )
        self.m_biases = nn.Parameter(torch.Tensor(rank, n_supports), requires_grad=train_biases)
        self.n_biases = nn.Parameter(torch.Tensor(rank, n_supports), requires_grad=train_biases)
        self.h0_weights = nn.Parameter(torch.Tensor(n_supports, self.gaussian_basis_dim))
        self.h0_weights.requires_grad = False

        # Initialize parameters
        with torch.no_grad():
            if wi_weights_init is not None:
                self.wi_weights.copy_(wi_weights_init)
            else:
                self.wi_weights.normal_()
            if m_weights_init is not None:
                self.m_weights.copy_(m_weights_init)
            else:
                self.m_weights.normal_(std=1 / sqrt(hidden_size))
            if n_weights_init is not None:
                self.n_weights.copy_(n_weights_init)
            else:
                self.n_weights.normal_(std=1 / sqrt(hidden_size))
            if wo_weights_init is not None:
                self.wo_weights.copy_(wo_weights_init)
            else:
                self.wo_weights.normal_(std=1 / hidden_size)
            if wi_biases_init is not None:
                self.wi_biases.copy_(wi_biases_init)
            else:
                self.wi_biases.zero_()
            if m_biases_init is not None:
                self.m_biases.copy_(m_biases_init)
            else:
                self.m_biases.zero_()
            if n_biases_init is not None:
                self.n_biases.copy_(n_biases_init)
            else:
                self.n_biases.zero_()
            self.h0_weights.zero_()
        self.wi, self.m, self.n, self.wo, self.h0, self.wi_full, self.wo_full = [None] * 7
        self._define_proxy_parameters()

    def _define_proxy_parameters(self):
        """Materialize wi, m, n, wo and h0 from the per-population loadings."""
        self.wi = (
            torch.sum((self.wi_weights @ self.gaussian_basis) * self.supports, dim=(1,))
            + self.wi_biases @ self.supports
        )
        self.wi_full = self.wi
        self.m = (
            torch.sum((self.m_weights @ self.gaussian_basis) * self.supports, dim=(1,)).t()
            + (self.m_biases @ self.supports).t()
        )
        self.n = (
            torch.sum((self.n_weights @ self.gaussian_basis) * self.supports, dim=(1,)).t()
            + (self.n_biases @ self.supports).t()
        )
        self.wo = torch.sum((self.wo_weights @ self.gaussian_basis) * self.supports, dim=(1,)).t()
        self.wo_full = self.wo
        self.h0 = torch.sum((self.h0_weights @ self.gaussian_basis) * self.supports, dim=(0,))

    def forward(self, input):
        """Delegate to the module-level simulation loop (same contract as LowRankRNN)."""
        return simulation_loop(self, input)

    def clone(self):
        """Return a new network with the same hyper-parameters, loadings and basis."""
        new_net = SupportLowRankRNN(
            self.input_size,
            self.hidden_size,
            self.output_size,
            self.noise_std,
            self.alpha,
            self.rank,
            self.n_supports,
            self.weights.tolist(),
            self.gaussian_basis_dim,
            self.m_weights,
            self.n_weights,
            self.wi_weights,
            self.wo_weights,
            self.m_biases,
            self.n_biases,
            self.wi_biases,
        )
        new_net.gaussian_basis.copy_(self.gaussian_basis)
        new_net._define_proxy_parameters()
        return new_net

    def load_state_dict(self, state_dict, strict=True):
        """
        override to recompute w_rec on loading
        """
        super().load_state_dict(state_dict, strict)
        self._define_proxy_parameters()

    def resample_basis(self):
        """Draw a fresh gaussian basis and rebuild the proxy vectors."""
        self.gaussian_basis.normal_()
        self._define_proxy_parameters()
726
+
727
+
728
class SupportLowRankRNN_withMask(nn.Module):
    """
    This network has been defined to train an arbitrary subset of the parameters offered by the SupportLowRankRNN
    by adding a mask.

    Each quantity (wi, m, n, wo, h0, bias and the "unit" variants that load on a
    constant all-ones vector) has a companion ``*_mask`` parameter of the same
    shape, set to 1 wherever the initial value is nonzero (or copied from an
    explicit ``initial_*_mask`` argument) and 0 elsewhere.
    NOTE(review): the masks are stored but never applied inside this class;
    presumably the training loop uses them to zero out gradients — confirm
    against the caller.
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        output_size,
        noise_std,
        alpha,
        rank=1,
        n_supports=1,
        gaussian_basis_dim=None,
        initial_m=None,
        initial_n=None,
        initial_unitm=None,
        initial_unitn=None,
        initial_wi=None,
        initial_unitwi=None,
        initial_wo=None,
        initial_h0=None,
        initial_unith0=None,
        initial_bias=None,
        train_h0=False,
        train_bias=False,
        initial_wi_mask=None,
        initial_wo_mask=None,
        initial_m_mask=None,
        initial_n_mask=None,
    ):
        # Hyper-parameters; gaussian_basis_dim defaults to enough directions
        # for m, n and the input vectors.
        super(SupportLowRankRNN_withMask, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.noise_std = noise_std
        self.alpha = alpha
        self.rank = rank
        self.n_supports = n_supports
        self.gaussian_basis_dim = (
            2 * rank + input_size if gaussian_basis_dim is None else gaussian_basis_dim
        )
        self.non_linearity = torch.tanh

        # Fixed random gaussian basis (b x N), constant all-ones row used to
        # broadcast the "unit" components, and population indicator (p x N)
        # with equal-sized contiguous populations.
        self.gaussian_basis = nn.Parameter(
            torch.randn((self.gaussian_basis_dim, hidden_size)), requires_grad=False
        )
        self.unit_vector = nn.Parameter(torch.ones((1, hidden_size)), requires_grad=False)
        self.supports = nn.Parameter(torch.zeros((n_supports, hidden_size)), requires_grad=False)
        l_support = hidden_size // n_supports
        for i in range(n_supports):
            self.supports[i, l_support * i : l_support * (i + 1)] = 1

        # Define parameters: per-population loadings on the gaussian basis
        # (``*``) and on the constant vector (``unit*``).
        self.wi = nn.Parameter(torch.Tensor(input_size, n_supports, self.gaussian_basis_dim))
        self.unitwi = nn.Parameter(torch.Tensor(input_size, n_supports, 1))
        self.m = nn.Parameter(torch.Tensor(rank, n_supports, self.gaussian_basis_dim))
        self.n = nn.Parameter(torch.Tensor(rank, n_supports, self.gaussian_basis_dim))
        self.unitm = nn.Parameter(torch.Tensor(rank, n_supports, 1))
        self.unitn = nn.Parameter(torch.Tensor(rank, n_supports, 1))
        self.wo = nn.Parameter(torch.Tensor(output_size, n_supports, self.gaussian_basis_dim))
        self.h0 = nn.Parameter(torch.Tensor(n_supports, self.gaussian_basis_dim))
        self.unith0 = nn.Parameter(torch.Tensor(n_supports, 1))
        self.bias = nn.Parameter(torch.Tensor(n_supports, 1))

        # Non-trainable masks, one per trainable quantity above.
        self.wi_mask = nn.Parameter(
            torch.Tensor(input_size, n_supports, self.gaussian_basis_dim), requires_grad=False
        )
        self.unitwi_mask = nn.Parameter(
            torch.Tensor(input_size, n_supports, 1), requires_grad=False
        )
        self.m_mask = nn.Parameter(
            torch.Tensor(rank, n_supports, self.gaussian_basis_dim), requires_grad=False
        )
        self.n_mask = nn.Parameter(
            torch.Tensor(rank, n_supports, self.gaussian_basis_dim), requires_grad=False
        )
        self.unitm_mask = nn.Parameter(torch.Tensor(rank, n_supports, 1), requires_grad=False)
        self.unitn_mask = nn.Parameter(torch.Tensor(rank, n_supports, 1), requires_grad=False)
        self.wo_mask = nn.Parameter(
            torch.Tensor(output_size, n_supports, self.gaussian_basis_dim), requires_grad=False
        )
        self.h0_mask = nn.Parameter(
            torch.Tensor(n_supports, self.gaussian_basis_dim), requires_grad=False
        )
        self.unith0_mask = nn.Parameter(torch.Tensor(n_supports, 1), requires_grad=False)
        self.bias_mask = nn.Parameter(torch.Tensor(n_supports, 1), requires_grad=False)

        if not train_h0:
            self.h0.requires_grad = False
            self.unith0.requires_grad = False
        if not train_bias:
            self.bias.requires_grad = False

        # Initialize parameters. For each quantity: copy the provided initial
        # value and derive its mask (1 where the value is nonzero, or the
        # explicit initial_*_mask when given); otherwise zero both value and
        # mask.
        with torch.no_grad():
            if initial_wi is not None:
                self.wi.copy_(initial_wi)
                if initial_wi_mask is not None:
                    maskc = initial_wi_mask
                else:
                    maskc = torch.where(
                        initial_wi != 0, torch.ones_like(initial_wi), torch.zeros_like(initial_wi)
                    )
                self.wi_mask.copy_(maskc)
            else:
                self.wi.zero_()
                self.wi_mask.zero_()
            if initial_unitwi is not None:
                self.unitwi.copy_(initial_unitwi)
                maskc = torch.where(
                    initial_unitwi != 0,
                    torch.ones_like(initial_unitwi),
                    torch.zeros_like(initial_unitwi),
                )
                self.unitwi_mask.copy_(maskc)
            else:
                self.unitwi.zero_()
                self.unitwi_mask.zero_()
            if initial_m is not None:
                self.m.copy_(initial_m)
                if initial_m_mask is not None:
                    maskc = initial_m_mask
                else:
                    maskc = torch.where(
                        initial_m != 0, torch.ones_like(initial_m), torch.zeros_like(initial_m)
                    )
                self.m_mask.copy_(maskc)
            else:
                self.m.zero_()
                self.m_mask.zero_()
            if initial_n is not None:
                self.n.copy_(initial_n)
                if initial_n_mask is not None:
                    maskc = initial_n_mask
                else:
                    maskc = torch.where(
                        initial_n != 0, torch.ones_like(initial_n), torch.zeros_like(initial_n)
                    )
                self.n_mask.copy_(maskc)
            else:
                self.n.zero_()
                self.n_mask.zero_()
            if initial_unitm is not None:
                self.unitm.copy_(initial_unitm)
                maskc = torch.where(
                    initial_unitm != 0,
                    torch.ones_like(initial_unitm),
                    torch.zeros_like(initial_unitm),
                )
                self.unitm_mask.copy_(maskc)
            else:
                self.unitm.zero_()
                self.unitm_mask.zero_()
            if initial_unitn is not None:
                self.unitn.copy_(initial_unitn)
                maskc = torch.where(
                    initial_unitn != 0,
                    torch.ones_like(initial_unitn),
                    torch.zeros_like(initial_unitn),
                )
                self.unitn_mask.copy_(maskc)
            else:
                self.unitn.zero_()
                self.unitn_mask.zero_()
            if initial_wo is not None:
                self.wo.copy_(initial_wo)
                if initial_wo_mask is not None:
                    maskc = initial_wo_mask
                else:
                    maskc = torch.where(
                        initial_wo != 0, torch.ones_like(initial_wo), torch.zeros_like(initial_wo)
                    )
                self.wo_mask.copy_(maskc)
            else:
                self.wo.zero_()
                self.wo_mask.zero_()
            if initial_h0 is not None:
                self.h0.copy_(initial_h0)
                maskc = torch.where(
                    initial_h0 != 0, torch.ones_like(initial_h0), torch.zeros_like(initial_h0)
                )
                self.h0_mask.copy_(maskc)
            else:
                self.h0.zero_()
                self.h0_mask.zero_()
            if initial_unith0 is not None:
                self.unith0.copy_(initial_unith0)
                maskc = torch.where(
                    initial_unith0 != 0,
                    torch.ones_like(initial_unith0),
                    torch.zeros_like(initial_unith0),
                )
                self.unith0_mask.copy_(maskc)
            else:
                self.unith0.zero_()
                self.unith0_mask.zero_()
            if initial_bias is not None:
                self.bias.copy_(initial_bias)
                maskc = torch.where(
                    initial_bias != 0, torch.ones_like(initial_bias), torch.zeros_like(initial_bias)
                )
                self.bias_mask.copy_(maskc)
            else:
                self.bias.zero_()
                self.bias_mask.zero_()

        (
            self.wi_full,
            self.m_rec,
            self.n_rec,
            self.wo_full,
            self.w_rec,
            self.h0_full,
            self.bias_full,
        ) = [None] * 7
        self.define_proxy_parameters()

    def define_proxy_parameters(self):
        # Materialize the effective per-neuron vectors: each is the
        # population-wise (basis-loading + unit-loading) combination.
        self.wi_full = torch.sum(
            (self.wi @ self.gaussian_basis) * self.supports, dim=(1,)
        ) + torch.sum((self.unitwi @ self.unit_vector) * self.supports, dim=(1,))
        self.m_rec = (
            torch.sum((self.m @ self.gaussian_basis) * self.supports, dim=(1,)).t()
            + torch.sum((self.unitm @ self.unit_vector) * self.supports, dim=(1,)).t()
        )
        self.n_rec = (
            torch.sum((self.n @ self.gaussian_basis) * self.supports, dim=(1,)).t()
            + torch.sum((self.unitn @ self.unit_vector) * self.supports, dim=(1,)).t()
        )
        self.wo_full = torch.sum((self.wo @ self.gaussian_basis) * self.supports, dim=(1,)).t()
        # Full recurrent matrix m n^T (note: no 1/N normalization here).
        self.w_rec = self.m_rec.matmul(self.n_rec.t())
        self.h0_full = torch.sum(
            (self.h0 @ self.gaussian_basis) * self.supports, dim=(0,)
        ) + torch.sum((self.unith0 @ self.unit_vector) * self.supports, dim=(0,))
        self.bias_full = torch.sum((self.bias @ self.unit_vector) * self.supports, dim=(0,))

    def forward(self, input):
        """Simulate the network on ``input`` of shape (batch, seq_len, input_size).

        Returns (output, trajectories) with shapes
        (batch, seq_len, output_size) and (batch, seq_len, hidden_size).
        """
        batch_size = input.shape[0]
        seq_len = input.shape[1]
        self.define_proxy_parameters()
        h = self.h0_full
        r = self.non_linearity(h)
        noise = torch.randn(batch_size, seq_len, self.hidden_size, device=self.m_rec.device)
        output = torch.zeros(batch_size, seq_len, self.output_size, device=self.m_rec.device)
        trajectories = torch.zeros(batch_size, seq_len, self.hidden_size, device=self.m_rec.device)

        # simulation loop (Euler update; r w_rec^T == r n_rec m_rec^T)
        for i in range(seq_len):
            h = (
                h
                + self.bias_full
                + self.noise_std * noise[:, i, :]
                + self.alpha * (-h + r.matmul(self.w_rec.t()) + input[:, i, :].matmul(self.wi_full))
            )

            r = self.non_linearity(h)
            output[:, i, :] = r.matmul(self.wo_full)
            trajectories[:, i, :] = h

        return output, trajectories

    def clone(self):
        # NOTE(review): the explicit initial_* values seed the clone's masks
        # from the current nonzero pattern, not from this net's stored masks.
        new_net = SupportLowRankRNN_withMask(
            self.input_size,
            self.hidden_size,
            self.output_size,
            self.noise_std,
            self.alpha,
            self.rank,
            self.n_supports,
            self.gaussian_basis_dim,
            self.m,
            self.n,
            self.unitm,
            self.unitn,
            self.wi,
            self.unitwi,
            self.wo,
            self.h0,
            self.unith0,
            self.bias,
        )
        new_net.gaussian_basis.copy_(self.gaussian_basis)
        new_net.define_proxy_parameters()
        return new_net

    def load_state_dict(self, state_dict, strict=True):
        """
        override to recompute w_rec on loading
        """
        super().load_state_dict(state_dict, strict)
        self.define_proxy_parameters()

    def resample_basis(self):
        # Draw a fresh gaussian basis and rebuild the proxy vectors.
        self.gaussian_basis.normal_()
        self.define_proxy_parameters()

    def orthogonalize_basis(self):
        # Gram-Schmidt the basis restricted to each population, then rescale
        # so basis vectors keep norm ~ sqrt(population size).
        for i in range(self.n_supports):
            gaussian_chunk = self.gaussian_basis[:, self.supports[i] == 1].view(
                self.gaussian_basis_dim, -1
            )
            gram_schmidt_pt(gaussian_chunk)
            self.gaussian_basis[:, self.supports[i] == 1] = gaussian_chunk
        self.gaussian_basis *= sqrt(self.hidden_size // self.n_supports)
        self.define_proxy_parameters()
1037
+
1038
+
1039
def simulation_loop(model, input):
    """Simulate the rank-normalized low-rank dynamics of `model` on `input`.

    NOTE(review): this re-defines the `simulation_loop` declared earlier in the
    module with identical semantics; the later definition wins at import time.

    :param input: tensor of shape (batch_size, seq_len, input_size)
    :return: (output (batch, seq_len, output_size),
              trajectories (batch, seq_len, hidden_size))
    """
    n_batch = input.shape[0]
    n_steps = input.shape[1]
    dev = model.m.device

    h = model.h0
    r = model.non_linearity(h)

    # Private noise and pre-allocated recordings.
    noise = torch.randn(n_batch, n_steps, model.hidden_size, device=dev)
    output = torch.zeros(n_batch, n_steps, model.output_size, device=dev)
    trajectories = torch.zeros(n_batch, n_steps, model.hidden_size, device=dev)

    for t in range(n_steps):
        # Euler step: dh = alpha * (-h + r n m^T / N + u wi_full) + noise.
        drive = (
            -h
            + r.matmul(model.n).matmul(model.m.t()) / model.hidden_size
            + input[:, t, :].matmul(model.wi_full)
        )
        h = h + model.noise_std * noise[:, t, :] + model.alpha * drive
        r = model.non_linearity(h)
        output[:, t, :] = r.matmul(model.wo_full) / model.hidden_size
        trajectories[:, t, :] = h

    return output, trajectories
1066
+
1067
+
1068
class OptimizedLowRankRNN(nn.Module):
    """
    LowRankRNN class with a different definition of scalings (see caption of SI Fig. about the 3-population Ctx net)

    Unlike LowRankRNN, the recurrent term is NOT divided by hidden_size: m and n
    are instead initialized with std 1/sqrt(hidden_size).
    NOTE(review): rho is stored but no quenched-noise matrix is added to the
    dynamics in this class — confirm against the training scripts.
    """

    def __init__(
        self,
        input_size,
        hidden_size,
        output_size,
        noise_std,
        alpha,
        rho=0.0,
        rank=1,
        train_wi=False,
        train_wo=False,
        train_wrec=True,
        train_h0=False,
        train_si=True,
        train_so=True,
        wi_init=None,
        wo_init=None,
        m_init=None,
        n_init=None,
        si_init=None,
        so_init=None,
        h0_init=None,
    ):
        """
        :param input_size: int
        :param hidden_size: int
        :param output_size: int
        :param noise_std: float
        :param alpha: float
        :param rho: float, std of quenched noise matrix
        :param rank: int
        :param train_wi: bool
        :param train_wo: bool
        :param train_wrec: bool
        :param train_h0: bool
        :param train_si: bool (can't be True if train_wi is already True)
        :param train_so: bool (can't be True if train_wo is already True)
        :param wi_init: torch tensor of shape (input_dim, hidden_size)
        :param wo_init: torch tensor of shape (hidden_size, output_dim)
        :param m_init: torch tensor of shape (hidden_size, rank)
        :param n_init: torch tensor of shape (hidden_size, rank)
        :param si_init: input scaling, torch tensor of shape (input_dim)
        :param so_init: output scaling, torch tensor of shape (output_dim)
        :param h0_init: torch tensor of shape (hidden_size)
        """
        super(OptimizedLowRankRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.noise_std = noise_std
        self.alpha = alpha
        self.rho = rho
        self.rank = rank
        self.train_wi = train_wi
        self.train_wo = train_wo
        self.train_wrec = train_wrec
        self.train_h0 = train_h0
        self.train_si = train_si
        self.train_so = train_so
        self.non_linearity = torch.tanh

        # Define parameters. Either the raw vectors (wi/wo) or their
        # per-channel scalings (si/so) are trained, never both.
        self.wi = nn.Parameter(torch.Tensor(input_size, hidden_size))
        self.si = nn.Parameter(torch.Tensor(input_size))
        if train_wi:
            self.si.requires_grad = False
        else:
            self.wi.requires_grad = False
        if not train_si:
            self.si.requires_grad = False
        self.m = nn.Parameter(torch.Tensor(hidden_size, rank))
        self.n = nn.Parameter(torch.Tensor(hidden_size, rank))
        if not train_wrec:
            self.m.requires_grad = False
            self.n.requires_grad = False
        self.wo = nn.Parameter(torch.Tensor(hidden_size, output_size))
        self.so = nn.Parameter(torch.Tensor(output_size))
        if train_wo:
            self.so.requires_grad = False
        else:
            self.wo.requires_grad = False
        if not train_so:
            self.so.requires_grad = False
        self.h0 = nn.Parameter(torch.Tensor(hidden_size))
        if not train_h0:
            self.h0.requires_grad = False

        # Initialize parameters (note the 1/sqrt(N) and 2/N scalings).
        with torch.no_grad():
            if wi_init is None:
                self.wi.normal_()
            else:
                self.wi.copy_(wi_init)
            if si_init is None:
                self.si.set_(torch.ones_like(self.si))
            else:
                self.si.copy_(si_init)
            if m_init is None:
                self.m.normal_(std=1 / sqrt(hidden_size))
            else:
                self.m.copy_(m_init)
            if n_init is None:
                self.n.normal_(std=1 / sqrt(hidden_size))
            else:
                self.n.copy_(n_init)
            if wo_init is None:
                self.wo.normal_(std=2 / hidden_size)
            else:
                self.wo.copy_(wo_init)
            if so_init is None:
                self.so.set_(torch.ones_like(self.so))
            else:
                self.so.copy_(so_init)
            if h0_init is None:
                self.h0.zero_()
            else:
                self.h0.copy_(h0_init)
        self.wrec, self.wi_full, self.wo_full = [None] * 3
        self.define_proxy_parameters()

    def define_proxy_parameters(self):
        """Recompute the effective (scaled) input and output weights."""
        self.wi_full = (self.wi.t() * self.si).t()
        self.wo_full = self.wo * self.so

    def forward(self, input):
        """
        :param input: tensor of shape (batch_size, #timesteps, input_dimension)
        Important: the 3 dimensions need to be present, even if they are of size 1.
        :return: (output tensor, trajectories tensor of shape (batch_size, #timesteps, #hidden_units))

        Note: trajectories has seq_len + 1 entries along dim 1; index 0 holds
        the initial state h0.
        """
        batch_size = input.shape[0]
        seq_len = input.shape[1]
        h = self.h0
        r = self.non_linearity(h)
        self.define_proxy_parameters()
        noise = torch.randn(batch_size, seq_len, self.hidden_size, device=self.m.device)
        output = torch.zeros(batch_size, seq_len, self.output_size, device=self.m.device)
        trajectories = torch.zeros(batch_size, seq_len + 1, self.hidden_size, device=self.m.device)
        trajectories[:, 0, :] = h

        # simulation loop (no 1/N normalization: absorbed in the m, n scalings)
        for i in range(seq_len):
            h = (
                h
                + self.noise_std * noise[:, i, :]
                + self.alpha
                * (-h + r.matmul(self.n).matmul(self.m.t()) + input[:, i, :].matmul(self.wi_full))
            )

            r = self.non_linearity(h)
            output[:, i, :] = r.matmul(self.wo_full)
            trajectories[:, i + 1, :] = h

        return output, trajectories

    def clone(self):
        """Return a new network with the same hyper-parameters and weights.

        Fix: h0 is now passed to the constructor as well — previously it was
        silently dropped, so clones always restarted from a zero initial state
        even when the source network had a non-zero h0.
        """
        new_net = OptimizedLowRankRNN(
            self.input_size,
            self.hidden_size,
            self.output_size,
            self.noise_std,
            self.alpha,
            self.rho,
            self.rank,
            self.train_wi,
            self.train_wo,
            self.train_wrec,
            self.train_h0,
            self.train_si,
            self.train_so,
            self.wi,
            self.wo,
            self.m,
            self.n,
            self.si,
            self.so,
            self.h0,
        )
        new_net.define_proxy_parameters()
        return new_net

    def resample_connectivity_noise(self):
        # NOTE(review): only recomputes the proxies; no quenched noise matrix
        # is resampled here despite the name.
        self.define_proxy_parameters()

    def load_state_dict(self, state_dict, strict=True):
        """
        override to recompute w_rec on loading
        """
        super().load_state_dict(state_dict, strict)
        self.define_proxy_parameters()

    def svd_reparametrization(self):
        """
        Orthogonalize m and n via SVD
        """
        with torch.no_grad():
            structure = (self.m @ self.n.t()).numpy()
            m, s, n = np.linalg.svd(structure, full_matrices=False)
            m, s, n = m[:, : self.rank], s[: self.rank], n[: self.rank, :]
            # Split the singular values evenly between the two factors.
            self.m.set_(torch.from_numpy(m * np.sqrt(s)))
            self.n.set_(torch.from_numpy(n.transpose() * np.sqrt(s)))
            self.define_proxy_parameters()
data/examples/RNN/RNN_scripts/ranktwo.py ADDED
@@ -0,0 +1,538 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from math import sqrt
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+ import multiprocessing as mp
5
+ from scipy.optimize import root
6
+ from .helpers import phi_prime
7
+
8
+
9
def adjust_plot(ax, xmin, xmax, ymin, ymax):
    """Set the axis limits to [min, max] expanded by a 5% margin on each side."""
    pad_x = 0.05 * (xmax - xmin)
    pad_y = 0.05 * (ymax - ymin)
    ax.set_xlim(xmin - pad_x, xmax + pad_x)
    ax.set_ylim(ymin - pad_y, ymax + pad_y)
12
+
13
+
14
def plot_field(
    net,
    vec1=None,
    vec2=None,
    xmin=-3,
    xmax=3,
    ymin=-3,
    ymax=3,
    input=None,
    res=50,
    ax=None,
    add_fixed_points=False,
    fixed_points_trials=10,
    fp_save=None,
    fp_load=None,
    nojac=False,
    orth=False,
    alt_naming=True,
    sizes=1,
):
    """
    Plot the flow field of a rank 2 network in its (m1, m2) plane (eventually affine if there is an input)

    Note: assumes the net uses tanh non-linearity
    Note 2: if plotting fixed points, stability calculations only make sense in the case of a rank2 net in plane
    (m1, m2)

    :param net: low-rank RNN module (exposes m, n, hidden_size, optionally wrec / m_rec, n_rec / wi_full)
    :param vec1: numpy array, if None, the m1 vector of the network is selected
    :param vec2: numpy array, if None, the m2 vector of the net is selected
    :param xmin: float
    :param xmax: float
    :param ymin: float
    :param ymax: float
    :param input: tensor of shape (dim_input) that is used to compute input vector
    :param res: int, resolution of flow field
    :param ax: matplotlib.Axes, plot on it if given
    :param add_fixed_points: bool
    :param fixed_points_trials: int, number of trials for fixed points search
    :param fp_save: None or filename, save found fixed points (npz) instead of plotting them
    :param fp_load: None or filename, load fixed points from an npz file instead of recomputing them
    :param nojac: if True, do not use the jacobian for the fixed points finder
    :param orth: bool, True to orthogonalize vec2 with vec1
    :param alt_naming: set True for SupportLowRankRNN_withMask
    :param sizes: float, global scaling factor for markers, arrows and line widths
    :return: (matplotlib.Axes, mappable returned by pcolor)
    """
    if ax is None:
        fig, ax = plt.subplots()
    adjust_plot(ax, xmin, xmax, ymin, ymax)
    if vec1 is None:
        vec1 = net.m[:, 0].squeeze().detach().numpy()
    if vec2 is None:
        vec2 = net.m[:, 1].squeeze().detach().numpy()

    # BUGFIX: the recurrent weights are needed to evaluate the flow field itself,
    # so they must be extracted unconditionally. Previously this was done only
    # inside `if add_fixed_points:`, raising NameError for w_rec / m / n when
    # plotting without fixed points.
    if hasattr(net, "wrec") and net.wrec is not None:
        w_rec = net.wrec.detach().numpy()
    else:
        w_rec = None
    m = n = None
    if w_rec is None:
        # Low-rank parametrization; pick the attribute names of the net class.
        if alt_naming:
            m = net.m_rec.detach().numpy()
            n = net.n_rec.detach().numpy().T
        else:
            m = net.m.detach().numpy()
            n = net.n.detach().numpy().T
    if add_fixed_points:
        # Left connectivity vectors, used only for the stability pseudo-Jacobian.
        n1 = net.n[:, 0].squeeze().detach().numpy()
        n2 = net.n[:, 1].squeeze().detach().numpy()

    # Plotting constants
    marker_size = 50 * sizes
    nx, ny = res, res

    # Orthogonalization of the basis vec1, vec2, I
    if orth:
        vec2 = vec2 - (vec2 @ vec1) * vec1 / (vec1 @ vec1)
    if input is not None:
        I = (input @ net.wi_full).detach().numpy()
        I_orth = I - (I @ vec1) * vec1 / (vec1 @ vec1) - (I @ vec2) * vec2 / (vec2 @ vec2)
    else:
        I = np.zeros(net.hidden_size)
        I_orth = np.zeros(net.hidden_size)

    # rescaling factors (for transformation euclidean space / overlap space)
    # here, if one wants x s.t. overlap(x, vec1) = alpha, x should be r1 * alpha * vec1
    # with the overlap being defined as overlap(u, v) = u.dot(v) / sqrt(hidden_size)
    r1 = sqrt(net.hidden_size) / (vec1 @ vec1)
    r2 = sqrt(net.hidden_size) / (vec2 @ vec2)

    # Defining the grid (flow is evaluated at cell centers of a res x res mesh)
    xs_grid = np.linspace(xmin, xmax, nx + 1)
    ys_grid = np.linspace(ymin, ymax, ny + 1)
    xs = (xs_grid[1:] + xs_grid[:-1]) / 2
    ys = (ys_grid[1:] + ys_grid[:-1]) / 2
    field = np.zeros((nx, ny, 2))
    X, Y = np.meshgrid(xs, ys)

    # Recurrent function of dx/dt = F(x, I)
    if w_rec is not None:

        def F(x, I):
            return -x + w_rec @ np.tanh(x) + I

    else:

        def F(x, I):
            return -x + m @ (n @ np.tanh(x)) + I

    # Derivative of tanh
    def phiPrime(x):
        return 1 - np.tanh(x) ** 2

    # Jacobian of F, assuming F is rank 2. Only called by the fixed-point
    # solver below, so n1 / n2 are guaranteed to be defined when it runs.
    def FJac(x, I=None):
        phiPr = phiPrime(x)
        n1_eff = n1 * phiPr
        n2_eff = n2 * phiPr
        return np.outer(vec1, n1_eff) + np.outer(vec2, n2_eff) - np.identity(net.hidden_size)

    # Compute flow in each point of the grid
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            h = r1 * x * vec1 + r2 * y * vec2 + I_orth
            delta = F(h, I)
            field[j, i, 0] = delta @ vec1 / sqrt(net.hidden_size)
            field[j, i, 1] = delta @ vec2 / sqrt(net.hidden_size)

    ax.streamplot(
        xs,
        ys,
        field[:, :, 0],
        field[:, :, 1],
        color="white",
        density=0.5,
        arrowsize=sizes,
        linewidth=sizes * 0.8,
    )

    norm_field = np.sqrt(field[:, :, 0] ** 2 + field[:, :, 1] ** 2)
    mappable = ax.pcolor(X, Y, norm_field)

    # Look for fixed points
    if add_fixed_points:
        if fp_load is None:
            stable_sols = []
            saddles = []
            sources = []

            # initial conditions are dispersed over a grid
            X_grid, Y_grid = np.meshgrid(
                np.linspace(xmin, xmax, int(sqrt(fixed_points_trials))),
                np.linspace(ymin, ymax, int(sqrt(fixed_points_trials))),
            )

            for i in range(X_grid.size):
                xy = X_grid.ravel()[i], Y_grid.ravel()[i]
                x0 = r1 * xy[0] * vec1 + r2 * xy[1] * vec2 + I_orth
                sol = root(F, x0, args=I, jac=None if nojac else FJac)

                # if solution found
                if sol.success == 1:
                    kappa_sol = [
                        (sol.x @ vec1) / sqrt(net.hidden_size),
                        (sol.x @ vec2) / sqrt(net.hidden_size),
                    ]
                    # Computing stability from the 2x2 projection of the Jacobian
                    pseudoJac = np.zeros((2, 2))
                    phiPr = phiPrime(sol.x)
                    n1_eff = n1 * phiPr
                    n2_eff = n2 * phiPr
                    pseudoJac[0, 0] = vec1 @ n1_eff
                    pseudoJac[0, 1] = vec2 @ n1_eff
                    pseudoJac[1, 0] = vec1 @ n2_eff
                    pseudoJac[1, 1] = vec2 @ n2_eff
                    eigvals = np.linalg.eigvals(pseudoJac)
                    if np.all(np.real(eigvals) <= 1):
                        stable_sols.append(kappa_sol)
                    elif np.any(np.real(eigvals) <= 1):
                        saddles.append(kappa_sol)
                    else:
                        sources.append(kappa_sol)
        # Load fixed points stored in a file
        else:
            arrays = np.load(fp_load)
            arr = arrays["arr_0"]
            stable_sols = [arr[i] for i in range(arr.shape[0])]
            arr = arrays["arr_1"]
            saddles = [arr[i] for i in range(arr.shape[0])]
            arr = arrays["arr_2"]
            sources = [arr[i] for i in range(arr.shape[0])]
        if fp_save is not None:
            np.savez(fp_save, np.array(stable_sols), np.array(saddles), np.array(sources))
        else:
            # stable fixed points: filled white markers
            ax.scatter(
                [x[0] for x in stable_sols],
                [x[1] for x in stable_sols],
                facecolors="white",
                edgecolors="white",
                s=marker_size,
                zorder=1000,
            )
            # saddles and sources: black markers with white edge
            ax.scatter(
                [x[0] for x in saddles],
                [x[1] for x in saddles],
                facecolors="black",
                edgecolors="white",
                s=marker_size,
                zorder=1000,
            )
            ax.scatter(
                [x[0] for x in sources],
                [x[1] for x in sources],
                facecolors="black",
                edgecolors="white",
                s=marker_size,
                zorder=1000,
            )
    return ax, mappable
226
+
227
+
228
+ #
229
+ # def plot_field2(net, vec1=None, vec2=None, xmin=-3, xmax=3, ymin=-3, ymax=3, input=None, res=50,
230
+ # ax=None, add_fixed_points=False, fixed_points_trials=10, nojac=False, orth=False):
231
+ # if ax is None:
232
+ # fig, ax = plt.subplots()
233
+ # adjust_plot(ax, xmin, xmax, ymin, ymax)
234
+ # if net.wrec is not None:
235
+ # w_rec = net.wrec.detach().numpy()
236
+ # else:
237
+ # w_rec = None
238
+ # m = net.m.detach().numpy()
239
+ # n = net.n.detach().numpy().T
240
+ #
241
+ # # Plotting constants
242
+ # marker_size = 90
243
+ # nx, ny = res, res
244
+ #
245
+ # # Orthogonalization of the basis vec1, vec2, I
246
+ # if orth:
247
+ # vec2 = vec2 - (vec2 @ vec1) * vec1 / (vec1 @ vec1)
248
+ # if input is not None:
249
+ # I = (input @ net.wi_full).detach().numpy()
250
+ # I_orth = I - (I @ vec1) * vec1 / (vec1 @ vec1) - (I @ vec2) * vec2 / (vec2 @ vec2)
251
+ # else:
252
+ # I = np.zeros(net.hidden_size)
253
+ # I_orth = np.zeros(net.hidden_size)
254
+ #
255
+ # # rescaling factors (for transformation euclidean space / overlap space)
256
+ # # here, if one wants x s.t. overlap(x, vec1) = alpha, x should be r1 * alpha * vec1
257
+ # # with the overlap being defined as overlap(u, v) = u.dot(v) / sqrt(hidden_size)
258
+ # r1 = 1. / (vec1 @ vec1)
259
+ # r2 = 1. / (vec2 @ vec2)
260
+ #
261
+ # # Defining the grid
262
+ # xs_grid = np.linspace(xmin, xmax, nx + 1)
263
+ # ys_grid = np.linspace(ymin, ymax, ny + 1)
264
+ # xs = (xs_grid[1:] + xs_grid[:-1]) / 2
265
+ # ys = (ys_grid[1:] + ys_grid[:-1]) / 2
266
+ # field = np.zeros((nx, ny, 2))
267
+ # X, Y = np.meshgrid(xs, ys)
268
+ #
269
+ # # Recurrent function of dx/dt = F(x, I)
270
+ # if w_rec is not None:
271
+ # def F(x, I):
272
+ # return -x + w_rec @ np.tanh(x) + I
273
+ # else:
274
+ # def F(x, I):
275
+ # return -x + m @ (n @ np.tanh(x)) + I
276
+ #
277
+ # # Compute flow in each point of the grid
278
+ # for i, x in enumerate(xs):
279
+ # for j, y in enumerate(ys):
280
+ # h = r1 * x * vec1 + r2 * y * vec2 + I_orth
281
+ # delta = F(h, I)
282
+ # field[j, i, 0] = delta @ vec1
283
+ # field[j, i, 1] = delta @ vec2
284
+ # ax.streamplot(xs, ys, field[:, :, 0], field[:, :, 1], color='white', density=0.5, arrowsize=1.8, linewidth=1.5)
285
+ # norm_field = np.sqrt(field[:, :, 0] ** 2 + field[:, :, 1] ** 2)
286
+ # ax.pcolor(X, Y, norm_field)
287
+ # return ax
288
+
289
+
290
def fixedpoint_task(x0, m, n, hidden_size, I, nojac):
    """
    Task for the root solver to find fixed points, for parallelization.

    The dynamics and its Jacobian are defined locally (rather than shared with
    the caller) to avoid pickling issues with multiprocessing.

    :param x0: numpy array, initial guess in neural state space
    :param m: numpy array of right connectivity vectors (columns m1, m2)
    :param n: numpy array of left connectivity vectors (columns n1, n2)
    :param hidden_size: int, network size N
    :param I: numpy array, constant input in neural state space
    :param nojac: bool, if True run the solver without the analytic Jacobian
    :return: scipy.optimize.OptimizeResult
    """

    def dynamics(x, inp):
        # dx/dt of the rank-2 network with 1/N connectivity scaling
        return -x + m @ (n.T @ np.tanh(x)) / hidden_size + inp

    def jacobian(x, inp=None):
        # Jacobian of `dynamics`, assuming the recurrent weights are rank 2
        gain = phi_prime(x)
        low_rank = np.outer(m[:, 0], n[:, 0] * gain) + np.outer(m[:, 1], n[:, 1] * gain)
        return low_rank / hidden_size - np.identity(hidden_size)

    return root(dynamics, x0, args=I, jac=None if nojac else jacobian)
312
+
313
+
314
def plot_field_noscalings(
    net,
    vec1=None,
    vec2=None,
    xmin=-3,
    xmax=3,
    ymin=-3,
    ymax=3,
    input=None,
    res=50,
    ax=None,
    add_fixed_points=False,
    fixed_points_trials=10,
    fp_save=None,
    fp_load=None,
    nojac=False,
    orth=False,
    sizes=1.0,
):
    """
    Plot 2d flow field and eventually fixed points for a rank 2 network without scaled vectors (ie defined as in
    Francesca's paper). Can plot the affine flow field in presence of a constant input with argument input.

    :param net: a LowRankRNN
    :param vec1: None or a numpy array of shape (hidden_size). If None, will be taken as vector m1 of the network
    :param vec2: same with m2
    :param xmin: float
    :param xmax: float
    :param ymin: float
    :param ymax: float
    :param input: None or torch tensor of shape (n_inputs), provides constant input for plotting affine flow field
    :param res: int, grid resolution
    :param ax: None or matplotlib axes
    :param add_fixed_points: bool
    :param fixed_points_trials: int, number of simulations to launch to find fixed points
    :param fp_save: None or filename, to save found fixed points instead of plotting them
    :param fp_load: None or filename, to load fixed points instead of recomputing them
    :param nojac: bool, if True, use root solver without jacobian matrix
    :param orth: bool, if True, start by orthogonalizing (vec1, vec2)
    :param sizes: float, global scaling factor for markers, arrows and line widths
    :return: (axes, mappable returned by pcolor)
    """
    if ax is None:
        fig, ax = plt.subplots()
    adjust_plot(ax, xmin, xmax, ymin, ymax)
    if vec1 is None:
        vec1 = net.m[:, 0].squeeze().detach().numpy()
    if vec2 is None:
        vec2 = net.m[:, 1].squeeze().detach().numpy()

    # BUGFIX: m and n are used by F to compute the flow field itself, so they
    # must be extracted unconditionally. Previously this happened only inside
    # `if add_fixed_points:`, raising NameError when plotting without fixed points.
    m = net.m.detach().numpy()
    n = net.n.detach().numpy()
    if add_fixed_points:
        # Left connectivity vectors, used only for the stability pseudo-Jacobian.
        n1 = net.n[:, 0].squeeze().detach().numpy()
        n2 = net.n[:, 1].squeeze().detach().numpy()

    # Plotting constants
    nx, ny = res, res
    marker_size = 50 * sizes

    # Orthogonalization of the basis vec1, vec2, I
    if orth:
        vec2 = vec2 - (vec2 @ vec1) * vec1 / (vec1 @ vec1)
    if input is not None:
        I = (input @ net.wi_full).detach().numpy()
        I_orth = I - (I @ vec1) * vec1 / (vec1 @ vec1) - (I @ vec2) * vec2 / (vec2 @ vec2)
    else:
        I = np.zeros(net.hidden_size)
        I_orth = np.zeros(net.hidden_size)

    # rescaling factors (for transformation euclidean space / overlap space)
    # here, if one wants x s.t. overlap(x, vec1) = alpha, x should be r1 * alpha * vec1
    # with the overlap being defined as overlap(u, v) = u.dot(v) / sqrt(hidden_size)
    r1 = net.hidden_size / (vec1 @ vec1)
    r2 = net.hidden_size / (vec2 @ vec2)

    # Defining the grid (flow is evaluated at cell centers of a res x res mesh)
    xs_grid = np.linspace(xmin, xmax, nx + 1)
    ys_grid = np.linspace(ymin, ymax, ny + 1)
    xs = (xs_grid[1:] + xs_grid[:-1]) / 2
    ys = (ys_grid[1:] + ys_grid[:-1]) / 2
    field = np.zeros((nx, ny, 2))
    X, Y = np.meshgrid(xs, ys)

    # Recurrent function of dx/dt = F(x, I)
    def F(x, I):
        return -x + m @ (n.T @ np.tanh(x)) / net.hidden_size + I

    # Compute flow in each point of the grid
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            h = r1 * x * vec1 + r2 * y * vec2 + I_orth
            delta = F(h, I)
            field[j, i, 0] = delta @ vec1 / (vec1 @ vec1)
            field[j, i, 1] = delta @ vec2 / (vec2 @ vec2)
    ax.streamplot(
        xs,
        ys,
        field[:, :, 0],
        field[:, :, 1],
        color="white",
        density=0.5,
        arrowsize=sizes,
        linewidth=sizes * 0.8,
    )
    norm_field = np.sqrt(field[:, :, 0] ** 2 + field[:, :, 1] ** 2)
    mappable = ax.pcolor(X, Y, norm_field)

    # Look for fixed points
    if add_fixed_points:
        if fp_load is None:
            stable_sols = []
            saddles = []
            sources = []

            # initial conditions are dispersed over a grid
            X_grid, Y_grid = np.meshgrid(
                np.linspace(xmin, xmax, int(sqrt(fixed_points_trials))),
                np.linspace(ymin, ymax, int(sqrt(fixed_points_trials))),
            )

            # Parallelized root solver
            x0s = [
                r1 * X_grid.ravel()[i] * vec1 + r2 * Y_grid.ravel()[i] * vec2 + I_orth
                for i in range(X_grid.size)
            ]
            with mp.Pool(mp.cpu_count()) as pool:
                args = [(x0, m, n, net.hidden_size, I, nojac) for x0 in x0s]
                sols = pool.starmap(fixedpoint_task, args)

            for sol in sols:
                # if solution found
                if sol.success == 1:
                    kappa_sol = [(sol.x @ vec1) / net.hidden_size, (sol.x @ vec2) / net.hidden_size]
                    # Computing stability from the 2x2 projection of the Jacobian
                    pseudoJac = np.zeros((2, 2))
                    phiPr = phi_prime(sol.x)
                    n1_eff = n1 * phiPr
                    n2_eff = n2 * phiPr
                    pseudoJac[0, 0] = vec1 @ n1_eff / net.hidden_size
                    pseudoJac[0, 1] = vec2 @ n1_eff / net.hidden_size
                    pseudoJac[1, 0] = vec1 @ n2_eff / net.hidden_size
                    pseudoJac[1, 1] = vec2 @ n2_eff / net.hidden_size
                    eigvals = np.linalg.eigvals(pseudoJac)
                    if np.all(np.real(eigvals) <= 1):
                        stable_sols.append(kappa_sol)
                    elif np.any(np.real(eigvals) <= 1):
                        saddles.append(kappa_sol)
                    else:
                        sources.append(kappa_sol)
        # Load fixed points stored in a file
        else:
            arrays = np.load(fp_load)
            arr = arrays["arr_0"]
            stable_sols = [arr[i] for i in range(arr.shape[0])]
            arr = arrays["arr_1"]
            saddles = [arr[i] for i in range(arr.shape[0])]
            arr = arrays["arr_2"]
            sources = [arr[i] for i in range(arr.shape[0])]
        if fp_save is not None:
            np.savez(fp_save, np.array(stable_sols), np.array(saddles), np.array(sources))
        else:
            # stable fixed points: filled white markers
            ax.scatter(
                [x[0] for x in stable_sols],
                [x[1] for x in stable_sols],
                facecolors="white",
                edgecolors="white",
                s=marker_size,
                zorder=1000,
            )
            # saddles and sources: black markers with white edge
            ax.scatter(
                [x[0] for x in saddles],
                [x[1] for x in saddles],
                facecolors="black",
                edgecolors="white",
                s=marker_size,
                zorder=1000,
            )
            ax.scatter(
                [x[0] for x in sources],
                [x[1] for x in sources],
                facecolors="black",
                edgecolors="white",
                s=marker_size,
                zorder=1000,
            )
    return ax, mappable
498
+
499
+
500
def plot_readout_map(vec1, vec2, wo, rect=(-10, 10, -10, 10), scale=0.5, scalings=True, cmap="jet"):
    """
    Plot the map from the 2D space spanned by (vec1, vec2) to a scalar defined by wo.T @ phi(x)

    :param vec1: numpy array of shape (hidden_size)
    :param vec2: numpy array of shape (hidden_size)
    :param wo: numpy array of shape (hidden_size), readout vector
    :param rect: 4-tuple, x and y axis limits
    :param scale: scale of vector readout for plotting
    :param scalings: bool, False for no scalings networks
    :param cmap: matplotlib colormap name
    :return: None (draws on the current matplotlib figure)
    """
    xmin, xmax, ymin, ymax = rect
    hidden_size = vec1.shape[0]
    xs = np.linspace(xmin, xmax, 100)
    ys = np.linspace(ymin, ymax, 100)
    # rescaling factors mapping overlap coordinates back to euclidean space,
    # matching the conventions of plot_field / plot_field_noscalings
    if scalings:
        r1 = 1.0 / (vec1 @ vec1)
        r2 = 1.0 / (vec2 @ vec2)
    else:
        r1 = hidden_size / (vec1 @ vec1)
        r2 = hidden_size / (vec2 @ vec2)
    X, Y = np.meshgrid(xs, ys)
    z = np.zeros((100, 100))
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            h = r1 * x * vec1 + r2 * y * vec2
            # BUGFIX: pcolor expects z[row, col] = value at (xs[col], ys[row]);
            # the previous z[i, j] indexing transposed the map relative to the
            # field[j, i] convention used by the other plotting functions.
            z[j, i] = wo @ np.tanh(h)
    plt.pcolor(X, Y, z, cmap=cmap)
    plt.colorbar()
    if scalings:
        readout1, readout2 = wo @ vec1, wo @ vec2
    else:
        readout1, readout2 = wo @ vec1 / hidden_size, wo @ vec2 / hidden_size
    plt.quiver(0, 0, readout1, readout2, color="k", scale=scale)
    plt.xticks([])
    plt.yticks([])
    # raw strings so "\k" is not treated as a (deprecated) string escape
    plt.xlabel(r"$\kappa_1$")
    plt.ylabel(r"$\kappa_2$")
data/examples/macaque_reaching/convert_spikes_to_firing_rates.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Convert spiking and kinematics data into instantaneous rates for MARBLE analysis.
2
+
3
+ It needs neo and elephant packages in addition to MARBLE.
4
+ """
5
+ import os
6
+ import sys
7
+ from collections import defaultdict
8
+ from pathlib import Path
9
+ import numpy as np
10
+ from scipy.io import loadmat
11
+ from elephant.statistics import instantaneous_rate
12
+ import neo
13
+ from elephant.kernels import GaussianKernel
14
+ from quantities import ms
15
+ from MARBLE import utils
16
+ import mat73
17
+ import pickle
18
+
19
+
20
def spikes_to_rates(data, d, sampling_period=20):
    """
    Convert one day's matlab spiking data into instantaneous firing rates.

    :param data: sequence of daily sessions; each session holds one entry per
        condition, each a sequence of trials (a trial is a one-element list
        wrapping a channels-by-time binary spike matrix, or None/empty)
    :param d: int, index of the daily session to convert
    :param sampling_period: int, rate sampling period in milliseconds
    :return: dict mapping condition name -> array of shape (trials, channels, time)
    """
    # condition names, in the ordering used by the original matlab export
    conditions = ["DownLeft", "Left", "UpLeft", "Up", "UpRight", "Right", "DownRight"]

    day_data = data[d]

    # smoothing kernel; widen for smoother signals (previously used auto)
    kernel = GaussianKernel(100 * ms)

    rates = {}
    for cond_idx, cond in enumerate(conditions):
        per_trial = []

        # each condition holds a list of trials; some entries can be empty/None
        for trial in day_data[cond_idx]:
            if not trial:
                continue
            spike_matrix = trial[0]  # unwrap the one-element list

            channel_rates = []
            for ch in range(spike_matrix.shape[0]):
                # indices of the nonzero bins = spike times in ms for this channel
                spike_times = np.where(spike_matrix[ch, :])[0]

                # trials span 1200 ms
                train = neo.SpikeTrain(spike_times, units="ms", t_stop=1200)

                # smoothed instantaneous rate, one value per sampling period
                rate = instantaneous_rate(
                    train, kernel=kernel, sampling_period=sampling_period * ms
                ).magnitude
                channel_rates.append(rate.flatten())

            # (time, channels) matrix for this trial
            per_trial.append(np.stack(channel_rates, axis=1))

        # stack trials into an array of (trials, channels, time)
        rates[cond] = np.dstack(per_trial).transpose(2, 1, 0)

    return rates
77
+
78
+
79
def convert_spiking_rates(sampling_period=20):
    """Download the raw spiking dataset and convert every session to firing rates.

    The result (dict of day index -> per-condition rate arrays) is pickled to
    ``data/rate_data_{sampling_period}ms.pkl``.

    NOTE(review): ``sampling_period`` is only used in the output filename; it is
    not forwarded to ``spikes_to_rates`` via ``utils.parallel_proc``, which will
    therefore use that function's own default — confirm this is intended.

    :param sampling_period: int, sampling period in ms (filename tag only, see note)
    """
    data_file = "data/conditions_spiking_data.mat"
    Path("data").mkdir(exist_ok=True)
    # -nc: skip the download if the file already exists locally
    os.system(f"wget -nc https://dataverse.harvard.edu/api/access/datafile/6963157 -O {data_file}")

    # load data compiled into matlab cell array (mat73 handles v7.3 files)
    data = mat73.loadmat(data_file)["all_results"]

    # convert each daily session in parallel
    rates = utils.parallel_proc(
        spikes_to_rates, range(len(data)), data, processes=-1, desc="Converting spikes to rates..."
    )

    # re-key the per-day results by day index
    all_rates = {}
    for i, rates_day in enumerate(rates):
        all_rates[i] = rates_day

    with open(f"data/rate_data_{sampling_period}ms.pkl", "wb") as handle:
        pickle.dump(all_rates, handle, protocol=pickle.HIGHEST_PROTOCOL)
98
+
99
+
100
def find_condition(val, trials, conditions):
    """Return the first condition whose trial list contains *val*, or None if absent."""
    return next((cond for cond in conditions if val in trials[cond]), None)
105
+
106
def convert_kinematics():
    """Extracting kinematic data from matlab into format for decoding.

    Downloads the LFADS kinematics .mat file and the trial-id pickle, then
    pickles a dict of day index -> {trial index -> {kinematics, lfads_factors,
    time, condition}} to ``data/kinematics.pkl``.
    """
    data_file = "data/kinematics_lfadsSingleFromFactors.mat"
    # -nc: skip the download if the file already exists locally
    os.system(f"wget -nc https://dataverse.harvard.edu/api/access/datafile/7062085 -O {data_file}")
    kinematic_data = loadmat(data_file)["Tin_single"]

    data_file = "data/trial_ids.pkl"
    os.system(f"wget -nc https://dataverse.harvard.edu/api/access/datafile/6963200 -O {data_file}")
    trial_ids = pickle.load(open("./data/trial_ids.pkl", "rb"))

    kinematics = {}
    conditions = ["DownLeft", "Left", "UpLeft", "Up", "UpRight", "Right", "DownRight"]

    for d, day in enumerate(kinematic_data):
        day = day[0]  # unwrap the nested matlab cell
        kinematics_conds = defaultdict(list)

        for i in range(day.shape[0]):
            # matlab struct fields by positional index
            # NOTE(review): field order (1=kinematics, 2=factors, 3=time) is
            # assumed from the original export — confirm against the .mat schema
            X = day[i].tolist()[0][1]  # kinematics x,y position and x,y velocity
            Z = day[i].tolist()[0][2]  # lfads factors
            T = day[i].tolist()[0][3]  # time

            # map this trial index back to its reach condition
            cond = find_condition(i, trial_ids[d], conditions)

            kinematics_conds[i] = {
                "kinematics": X,
                "lfads_factors": Z,
                "time": T,
                "condition": cond,
            }

        kinematics[d] = kinematics_conds

    with open("data/kinematics.pkl", "wb") as handle:
        pickle.dump(kinematics, handle, protocol=pickle.HIGHEST_PROTOCOL)
142
+
143
+
144
def main():
    """Script entry point: convert spiking data to firing rates.

    Kinematics conversion is currently disabled; uncomment the call to enable it.
    """
    convert_spiking_rates()
    #convert_kinematics()


if __name__ == "__main__":
    sys.exit(main())
data/examples/macaque_reaching/iframe_figures/figure_54.html ADDED
The diff for this file is too large to render. See raw diff
 
data/examples/macaque_reaching/kinematic_decoding.ipynb ADDED
@@ -0,0 +1,657 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "1c2478f9",
6
+ "metadata": {},
7
+ "source": [
8
+ "# Kinematic decoding\n",
9
+ "\n",
10
+ "This notebook compares the decoding performance of MARBLE with CEBRA and TDR on a macaque centre-out reaching task. "
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "code",
15
+ "execution_count": null,
16
+ "id": "cae68224-84f6-43fd-82f4-bba80b806780",
17
+ "metadata": {
18
+ "tags": []
19
+ },
20
+ "outputs": [],
21
+ "source": [
22
+ "%load_ext autoreload\n",
23
+ "%autoreload 2\n",
24
+ "%matplotlib widget\n",
25
+ "\n",
26
+ "!pip install statannotations ipympl\n",
27
+ "\n",
28
+ "import matplotlib.pyplot as plt\n",
29
+ "import numpy as np\n",
30
+ "import pandas as pd\n",
31
+ "import pickle\n",
32
+ "import seaborn as sns\n",
33
+ "from statannotations.Annotator import Annotator\n",
34
+ "from sklearn.model_selection import KFold\n",
35
+ "from macaque_reaching_helpers import *\n",
36
+ "from tqdm import tqdm"
37
+ ]
38
+ },
39
+ {
40
+ "cell_type": "markdown",
41
+ "id": "a1d50682-1117-4481-854d-76d2563a200b",
42
+ "metadata": {
43
+ "tags": []
44
+ },
45
+ "source": [
46
+ "Load firing rate and kinematics data"
47
+ ]
48
+ },
49
+ {
50
+ "cell_type": "code",
51
+ "execution_count": null,
52
+ "id": "c2883f5a-4789-49e2-8d43-1b3946c3a4fd",
53
+ "metadata": {
54
+ "tags": []
55
+ },
56
+ "outputs": [],
57
+ "source": [
58
+ "!mkdir data\n",
59
+ "!wget -nc https://dataverse.harvard.edu/api/access/datafile/6969885 -O data/kinematics.pkl\n",
60
+ "\n",
61
+ "with open('data/kinematics.pkl', 'rb') as handle:\n",
62
+ " data = pickle.load(handle)"
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "markdown",
67
+ "id": "94051207",
68
+ "metadata": {},
69
+ "source": [
70
+ "# Load MARBLE and CEBRA embeddings"
71
+ ]
72
+ },
73
+ {
74
+ "cell_type": "code",
75
+ "execution_count": null,
76
+ "id": "e9bc422a",
77
+ "metadata": {
78
+ "tags": []
79
+ },
80
+ "outputs": [],
81
+ "source": [
82
+ "!wget -nc https://dataverse.harvard.edu/api/access/datafile/7062022 -O data/marble_embeddings_out20_pca5_100ms.pkl\n",
83
+ "!wget -nc https://dataverse.harvard.edu/api/access/datafile/7509031 -O data/cebra_embeddings_out20_pca5_100ms.pkl\n",
84
+ "\n",
85
+ "with open('data/marble_embeddings_out20_pca5_100ms.pkl', 'rb') as handle:\n",
86
+ " _, marble_embeddings, _, _, trial_ids, _ = pickle.load(handle)\n",
87
+ "\n",
88
+ "with open('data/cebra_embeddings_out20_pca5_100ms.pkl', 'rb') as handle:\n",
89
+ " _, cebra_embeddings, _, _, _, _ = pickle.load(handle)"
90
+ ]
91
+ },
92
+ {
93
+ "cell_type": "markdown",
94
+ "id": "a66b1c78-ebdc-44d7-a249-ba39659538e3",
95
+ "metadata": {},
96
+ "source": [
97
+ "# Load raw firing rates"
98
+ ]
99
+ },
100
+ {
101
+ "cell_type": "code",
102
+ "execution_count": null,
103
+ "id": "1d25686c-08c0-40d1-8528-61056a3bc9c6",
104
+ "metadata": {
105
+ "scrolled": true
106
+ },
107
+ "outputs": [],
108
+ "source": [
109
+ "pca_n = 5\n",
110
+ "filter_data = True\n",
111
+ "conditions=['DownLeft','Left','UpLeft','Up','UpRight','Right','DownRight'] \n",
112
+ "\n",
113
+ "!wget -nc https://dataverse.harvard.edu/api/access/datafile/6969883 -O data/rate_data_20ms_100ms.pkl\n",
114
+ "\n",
115
+ "with open('data/rate_data_20ms_100ms.pkl', 'rb') as handle:\n",
116
+ " rates = pickle.load(handle)\n",
117
+ "\n",
118
+ "!wget -nc https://dataverse.harvard.edu/api/access/datafile/6963200 -O data/trial_ids.pkl\n",
119
+ "\n",
120
+ "with open('data/trial_ids.pkl', 'rb') as handle:\n",
121
+ " trials = pickle.load(handle)\n",
122
+ " \n",
123
+ "pos, pos_raw = [], []\n",
124
+ "for day in rates.keys():\n",
125
+ " #preprocess by PCA dimensionality reduction and smoothing\n",
126
+ " pca = fit_pca(rates, day, conditions, filter_data=filter_data, pca_n=pca_n)\n",
127
+ " pos_, _, _, _, _ = format_data(rates, \n",
128
+ " trials,\n",
129
+ " day, \n",
130
+ " conditions, \n",
131
+ " pca=pca,\n",
132
+ " filter_data=filter_data,\n",
133
+ " )\n",
134
+ "\n",
135
+ " #no preprocessing for comparison\n",
136
+ " pos_raw_, _, _, _, _ = format_data(rates, \n",
137
+ " trials,\n",
138
+ " day, \n",
139
+ " conditions,\n",
140
+ " )\n",
141
+ " \n",
142
+ " pos.append(np.vstack(pos_))\n",
143
+ " pos_raw.append(np.vstack(pos_raw_))"
144
+ ]
145
+ },
146
+ {
147
+ "cell_type": "markdown",
148
+ "id": "d2c473e3-48e8-403a-8384-b98541a6535c",
149
+ "metadata": {},
150
+ "source": [
151
+ "### Targeted Dimensionality Reduction (TDR)"
152
+ ]
153
+ },
154
+ {
155
+ "cell_type": "code",
156
+ "execution_count": null,
157
+ "id": "d41142bd-2b2e-4d74-8808-e7aac0b5b66b",
158
+ "metadata": {},
159
+ "outputs": [],
160
+ "source": [
161
+ "condition_labels = {'DownLeft': [1,[-1, -1]], \n",
162
+ " 'Left': [2, [-1, 0]], \n",
163
+ " 'UpLeft': [3,[-1, 1]], \n",
164
+ " 'Up': [4,[0, 1]], \n",
165
+ " 'UpRight': [5,[1, 1]], \n",
166
+ " 'Right': [6,[1, 0]], \n",
167
+ " 'DownRight': [7,[1, -1]]} \n",
168
+ "\n",
169
+ "TDR_embeddings = {}\n",
170
+ "for day in tqdm(rates.keys()):\n",
171
+ " unique_trial_ids = np.unique(trial_ids[day])\n",
172
+ " Z, X, cond = [], [], []\n",
173
+ " for t in unique_trial_ids:\n",
174
+ " c_l = data[day][t]['condition']\n",
175
+ " firing_rates = pos_raw[day][trial_ids[day]==t,:].T\n",
176
+ " c = np.tile(condition_labels[c_l][1], (firing_rates.shape[1],1))\n",
177
+ " regressors = np.hstack([c, np.ones([firing_rates.shape[1], 1])])\n",
178
+ " \n",
179
+ " Z.append(firing_rates)\n",
180
+ " X.append(regressors)\n",
181
+ " cond.append(condition_labels[c_l][0])\n",
182
+ " \n",
183
+ " Z = np.stack(Z, axis=2)\n",
184
+ " X = np.stack(X, axis=2)\n",
185
+ " cond = np.hstack(cond)\n",
186
+ " \n",
187
+ " #standardise per neuron\n",
188
+ " Z -= Z.mean(axis=(1,2), keepdims=True)\n",
189
+ " Z /= Z.std(axis=(1,2), keepdims=True)\n",
190
+ " \n",
191
+ " [n, T, tr] = Z.shape\n",
192
+ " n_reg = X.shape[1]\n",
193
+ " \n",
194
+ " #compute TDR regression coefficients\n",
195
+ " betaBehav2Neural = np.zeros([T,n,n_reg-1])\n",
196
+ " for i in range(T):\n",
197
+ " Ztrain = Z[:,i,:].T\n",
198
+ " Xtrain = X[i,:,:].T\n",
199
+ " \n",
200
+ " reg = np.linalg.lstsq(Xtrain, Ztrain, rcond=None)[0]\n",
201
+ " reg = np.linalg.pinv(reg) # Compute the TDR axes.\n",
202
+ " reg = reg[:,:-1] # remove last regressor (bias)\n",
203
+ " \n",
204
+ " betaBehav2Neural[i,:,:] = reg\n",
205
+ " \n",
206
+ " #project data to TDR subspace\n",
207
+ " Zproj = np.zeros([n_reg-1,T,tr])\n",
208
+ " for i in range(T):\n",
209
+ " Zt = Z[:,i,:].T \n",
210
+ " regt = betaBehav2Neural[i,:,:]\n",
211
+ " Zproj[:,i,:] = (Zt @ regt).T\n",
212
+ "\n",
213
+ " TDR_embeddings[day] = Zproj"
214
+ ]
215
+ },
216
+ {
217
+ "cell_type": "code",
218
+ "execution_count": null,
219
+ "id": "5f9c6226-2c82-4177-bf85-4a6ed75ee47a",
220
+ "metadata": {},
221
+ "outputs": [],
222
+ "source": [
223
+ "#plot\n",
224
+ "plt.figure()\n",
225
+ "colors = plt.cm.viridis(np.linspace(0,1,7))\n",
226
+ "for j in range(tr):\n",
227
+ " c = cond[j]-1\n",
228
+ " plt.plot(Zproj[0,:,j], Zproj[1,:,j], c = colors[c])"
229
+ ]
230
+ },
231
+ {
232
+ "cell_type": "code",
233
+ "execution_count": null,
234
+ "id": "a1638c8f-f4b7-450b-a16c-981a74353417",
235
+ "metadata": {
236
+ "tags": []
237
+ },
238
+ "outputs": [],
239
+ "source": [
240
+ "# match the neural representations to the kinematics\n",
241
+ "for day in data.keys():\n",
242
+ " unique_trial_ids = np.unique(trial_ids[day])\n",
243
+ " for i, t in enumerate(unique_trial_ids):\n",
244
+ " data[day][t]['kinematics'] = data[day][t]['kinematics'][:,:-1] #remove last point because\n",
245
+ " data[day][t]['lfads_factors'] = data[day][t]['lfads_factors'][:,:-1] \n",
246
+ " data[day][t]['marble_emb'] = marble_embeddings[day][trial_ids[day]==t,:].T\n",
247
+ " data[day][t]['firing_rates'] = pos[day][trial_ids[day]==t,:].T\n",
248
+ " data[day][t]['cebra_emb'] = cebra_embeddings[day][trial_ids[day]==t,:].T\n",
249
+ " data[day][t]['raw_firing_rates'] = pos_raw[day][trial_ids[day]==t,:].T\n",
250
+ " data[day][t]['TDR_emb'] = TDR_embeddings[day][:,:,i]"
251
+ ]
252
+ },
253
+ {
254
+ "cell_type": "markdown",
255
+ "id": "49b9822e-5817-4022-a5f9-3d5962877b60",
256
+ "metadata": {},
257
+ "source": [
258
+ "# Visualise kinematics for a single session"
259
+ ]
260
+ },
261
+ {
262
+ "cell_type": "code",
263
+ "execution_count": null,
264
+ "id": "22176228-1e45-445f-acd7-180c4b5c22b7",
265
+ "metadata": {},
266
+ "outputs": [],
267
+ "source": [
268
+ "session = 0\n",
269
+ "\n",
270
+ "colors = plt.cm.viridis(np.linspace(0,1,7))\n",
271
+ "\n",
272
+ "# plot average kinematic position across trials for a given session\n",
273
+ "plt.figure()\n",
274
+ "for c,cond in enumerate(conditions): \n",
275
+ " meh = np.dstack([data[session][t]['kinematics'] for t in data[session].keys() if data[session][t]['condition']==cond]).mean(2) \n",
276
+ " plt.plot(meh[0,:],meh[1,:],c=colors[c])\n",
277
+ "plt.title('average kinematic hand position across trials')\n",
278
+ "\n",
279
+ "# plot kinematic position for each trials in a given session\n",
280
+ "plt.figure()\n",
281
+ "for c,cond in enumerate(conditions): \n",
282
+ " for t in data[session].keys():\n",
283
+ " if data[session][t]['condition']==cond:\n",
284
+ " meh = data[session][t]['kinematics']\n",
285
+ " plt.plot(meh[0,:],meh[1,:],c=colors[c])\n",
286
+ "plt.title('per trial kinematic hand position')"
287
+ ]
288
+ },
289
+ {
290
+ "cell_type": "markdown",
291
+ "id": "76a52b47",
292
+ "metadata": {},
293
+ "source": [
294
+ "# Decoding single session"
295
+ ]
296
+ },
297
+ {
298
+ "cell_type": "markdown",
299
+ "id": "9e70f5fe-6cd8-46dd-ae95-269f5b449bd8",
300
+ "metadata": {},
301
+ "source": [
302
+ "### Optimal linear decoding via LFADS, MARBLE and CEBRA"
303
+ ]
304
+ },
305
+ {
306
+ "cell_type": "code",
307
+ "execution_count": null,
308
+ "id": "1cb13479",
309
+ "metadata": {
310
+ "tags": []
311
+ },
312
+ "outputs": [],
313
+ "source": [
314
+ "session = 0\n",
315
+ "\n",
316
+ "unique_trial_ids = np.unique(trial_ids[session])\n",
317
+ "\n",
318
+ "L_lfads = train_OLE(data[session], trial_ids[session], representation='lfads_factors')\n",
319
+ "\n",
320
+ "# loop over test trials\n",
321
+ "for tr in unique_trial_ids:\n",
322
+ " trial_pred = decode_kinematics(data[session][tr], L_lfads, dt=20, representation='lfads_factors')\n",
323
+ " data[session][tr]['lfads_decoded'] = trial_pred\n",
324
+ "\n",
325
+ "L_firing_rates = train_OLE(data[session], trial_ids[session], representation='firing_rates')\n",
326
+ "\n",
327
+ "# loop over test trials\n",
328
+ "for tr in unique_trial_ids:\n",
329
+ " trial_pred = decode_kinematics(data[session][tr], L_firing_rates, dt=20, representation='firing_rates')\n",
330
+ " data[session][tr]['firing_rates_decoded'] = trial_pred\n",
331
+ "\n",
332
+ "L_marble = train_OLE(data[session], trial_ids[session], representation='marble_emb')\n",
333
+ "\n",
334
+ "# loop over test trials\n",
335
+ "for tr in unique_trial_ids:\n",
336
+ " trial_pred = decode_kinematics(data[session][tr], L_marble, dt=20, representation='marble_emb')\n",
337
+ " data[session][tr]['marble_decoded'] = trial_pred\n",
338
+ " \n",
339
+ "L_cebra = train_OLE(data[session], trial_ids[session], representation='cebra_emb')\n",
340
+ "\n",
341
+ "# loop over test trials\n",
342
+ "for tr in unique_trial_ids:\n",
343
+ " trial_pred = decode_kinematics(data[session][tr], L_cebra, dt=20, representation='cebra_emb')\n",
344
+ " data[session][tr]['cebra_decoded'] = trial_pred\n",
345
+ "\n",
346
+ "L_TDR = train_OLE(data[session], trial_ids[session], representation='TDR_emb')\n",
347
+ "\n",
348
+ "# loop over test trials\n",
349
+ "for tr in unique_trial_ids:\n",
350
+ " trial_pred = decode_kinematics(data[session][tr], L_TDR, dt=20, representation='TDR_emb')\n",
351
+ " data[session][tr]['TDR_decoded'] = trial_pred"
352
+ ]
353
+ },
354
+ {
355
+ "cell_type": "markdown",
356
+ "id": "6a0a408b",
357
+ "metadata": {},
358
+ "source": [
359
+ "### Comparison of decoding with ground truth"
360
+ ]
361
+ },
362
+ {
363
+ "cell_type": "code",
364
+ "execution_count": null,
365
+ "id": "a4bfea1d",
366
+ "metadata": {
367
+ "tags": []
368
+ },
369
+ "outputs": [],
370
+ "source": [
371
+ "fig, ax = plt.subplots(2,3,figsize=(10,4))\n",
372
+ "\n",
373
+ "plot_kinematics(data, session, unique_trial_ids, representation='kinematics', ax=ax[0,0])\n",
374
+ "plot_kinematics(data, session, unique_trial_ids, representation='firing_rates_decoded', ax=ax[0,1])\n",
375
+ "plot_kinematics(data, session, unique_trial_ids, representation='marble_decoded', ax=ax[0,2])\n",
376
+ "plot_kinematics(data, session, unique_trial_ids, representation='lfads_decoded', ax=ax[1,0])\n",
377
+ "plot_kinematics(data, session, unique_trial_ids, representation='cebra_decoded', ax=ax[1,1])\n",
378
+ "plot_kinematics(data, session, unique_trial_ids, representation='TDR_decoded', ax=ax[1,2])"
379
+ ]
380
+ },
381
+ {
382
+ "cell_type": "markdown",
383
+ "id": "ea829c14",
384
+ "metadata": {},
385
+ "source": [
386
+ "# Decode across all sessions\n",
387
+ "\n",
388
+ "Above we decoded for a single session. Lets now loop over every session and compute some quantitative comparisons with the ground truth kinematics."
389
+ ]
390
+ },
391
+ {
392
+ "cell_type": "code",
393
+ "execution_count": null,
394
+ "id": "b77a49a6",
395
+ "metadata": {
396
+ "tags": []
397
+ },
398
+ "outputs": [],
399
+ "source": [
400
+ "kf = KFold(n_splits=5, shuffle=True) # use 5-fold split of the data \n",
401
+ "\n",
402
+ "r2_lfads_vel = []; r2_lfads_pos = []\n",
403
+ "r2_cebra_vel = []; r2_cebra_pos = []\n",
404
+ "r2_marble_vel = []; r2_marble_pos = []\n",
405
+ "r2_TDR_vel = []; r2_TDR_pos = []\n",
406
+ "r2_firing_rates_vel = []; r2_firing_rates_pos = []\n",
407
+ "\n",
408
+ "# loop over sessions\n",
409
+ "for d in tqdm(data.keys()):\n",
410
+ " unique_trial_ids = np.unique(trial_ids[d])\n",
411
+ " \n",
412
+ " # cross validation\n",
413
+ " for i, (train_index, test_index) in enumerate(kf.split(unique_trial_ids)):\n",
414
+ "\n",
415
+ " train_data = {key: data[d][key] for key in train_index if key in data[d]}\n",
416
+ "\n",
417
+ " #LFADS\n",
418
+ " Lw = train_OLE(data[d], unique_trial_ids[train_index], representation='lfads_factors')\n",
419
+ " \n",
420
+ " for tr in unique_trial_ids[test_index]:\n",
421
+ " trial_pred = decode_kinematics(data[d][tr], Lw, dt=20, representation='lfads_factors')\n",
422
+ " data[d][tr]['lfads_decoded'] = trial_pred\n",
423
+ " \n",
424
+ " #CEBRA\n",
425
+ " Lw = train_OLE(data[d], unique_trial_ids[train_index], representation='cebra_emb')\n",
426
+ " \n",
427
+ " for tr in unique_trial_ids[test_index]:\n",
428
+ " trial_pred = decode_kinematics(data[d][tr], Lw, dt=20, representation='cebra_emb')\n",
429
+ " data[d][tr]['cebra_decoded'] = trial_pred\n",
430
+ " \n",
431
+ " #MARBLE\n",
432
+ " Lw = train_OLE(data[d], unique_trial_ids[train_index], representation='marble_emb')\n",
433
+ " \n",
434
+ " for tr in unique_trial_ids[test_index]:\n",
435
+ " trial_pred = decode_kinematics(data[d][tr], Lw, dt=20, representation='marble_emb')\n",
436
+ " data[d][tr]['marble_decoded'] = trial_pred\n",
437
+ "\n",
438
+ " #TDR\n",
439
+ " Lw = train_OLE(data[d], unique_trial_ids[train_index], representation='TDR_emb')\n",
440
+ " \n",
441
+ " for tr in unique_trial_ids[test_index]:\n",
442
+ " trial_pred = decode_kinematics(data[d][tr], Lw, dt=20, representation='TDR_emb')\n",
443
+ " data[d][tr]['TDR_decoded'] = trial_pred\n",
444
+ "\n",
445
+ " #Firing rates\n",
446
+ " Lw = train_OLE(data[d], unique_trial_ids[train_index], representation='firing_rates')\n",
447
+ " \n",
448
+ " for tr in unique_trial_ids[test_index]:\n",
449
+ " trial_pred = decode_kinematics(data[d][tr], Lw, dt=20, representation='firing_rates')\n",
450
+ " data[d][tr]['firing_rates_decoded'] = trial_pred\n",
451
+ " \n",
452
+ " # r-squared velocity\n",
453
+ " r2_pos, r2_vel = correlation(data[d], unique_trial_ids, representation='lfads_decoded') \n",
454
+ " r2_lfads_pos.append(r2_pos)\n",
455
+ " r2_lfads_vel.append(r2_vel)\n",
456
+ " \n",
457
+ " r2_pos, r2_vel = correlation(data[d], unique_trial_ids, representation='cebra_decoded') \n",
458
+ " r2_cebra_pos.append(r2_pos)\n",
459
+ " r2_cebra_vel.append(r2_vel)\n",
460
+ " \n",
461
+ " r2_pos, r2_vel = correlation(data[d], unique_trial_ids, representation='marble_decoded') \n",
462
+ " r2_marble_pos.append(r2_pos)\n",
463
+ " r2_marble_vel.append(r2_vel)\n",
464
+ "\n",
465
+ " r2_pos, r2_vel = correlation(data[d], unique_trial_ids, representation='TDR_decoded') \n",
466
+ " r2_TDR_pos.append(r2_pos)\n",
467
+ " r2_TDR_vel.append(r2_vel)\n",
468
+ "\n",
469
+ " r2_pos, r2_vel = correlation(data[d], unique_trial_ids, representation='firing_rates_decoded') \n",
470
+ " r2_firing_rates_pos.append(r2_pos)\n",
471
+ " r2_firing_rates_vel.append(r2_vel)"
472
+ ]
473
+ },
474
+ {
475
+ "cell_type": "markdown",
476
+ "id": "f4aaba62",
477
+ "metadata": {},
478
+ "source": [
479
+ "Lets now visualise the decoded kinematics for the same set of example sessions (Fig S7)."
480
+ ]
481
+ },
482
+ {
483
+ "cell_type": "code",
484
+ "execution_count": null,
485
+ "id": "e2ba8691",
486
+ "metadata": {},
487
+ "outputs": [],
488
+ "source": [
489
+ "# chosen example sessions\n",
490
+ "examples = [5,6,8,11,14,15,18,23,26,32]\n",
491
+ "\n",
492
+ "fig, ax = plt.subplots(4,len(examples),figsize=(15,5))\n",
493
+ "\n",
494
+ "for i,d in enumerate(examples):\n",
495
+ " \n",
496
+ " unique_trial_ids = np.unique(trial_ids[d])\n",
497
+ "\n",
498
+ " ax[0,i] = plot_kinematics(data, d, unique_trial_ids, representation='kinematics', ax=ax[0,i])\n",
499
+ " ax[1,i] = plot_kinematics(data, d, unique_trial_ids, representation='marble_decoded', ax=ax[1,i])\n",
500
+ " ax[2,i] = plot_kinematics(data, d, unique_trial_ids, representation='cebra_decoded', ax=ax[2,i])\n",
501
+ " ax[3,i] = plot_kinematics(data, d, unique_trial_ids, representation='lfads_decoded', ax=ax[3,i])\n",
502
+ " ax[3,i] = plot_kinematics(data, d, unique_trial_ids, representation='firing_rates_decoded', ax=ax[3,i])"
503
+ ]
504
+ },
505
+ {
506
+ "cell_type": "markdown",
507
+ "id": "a20a9cb0",
508
+ "metadata": {},
509
+ "source": [
510
+ "## Instantaneous velocity decoding"
511
+ ]
512
+ },
513
+ {
514
+ "cell_type": "code",
515
+ "execution_count": null,
516
+ "id": "75da5f8b",
517
+ "metadata": {
518
+ "tags": []
519
+ },
520
+ "outputs": [],
521
+ "source": [
522
+ "results = pd.DataFrame(data=np.vstack([ r2_marble_vel, r2_cebra_vel, r2_lfads_vel, r2_TDR_vel, r2_firing_rates_vel]).T,columns=['marble', 'CEBRA', 'LFADS', 'TDR', 'firing_rates'])\n",
523
+ "results = results.melt()\n",
524
+ "results.columns = ['model','accuracy']\n",
525
+ "\n",
526
+ "f, ax = plt.subplots(figsize=(4,5))\n",
527
+ "sns.despine(bottom=True, left=True)\n",
528
+ "\n",
529
+ "sns.stripplot(\n",
530
+ " data=results, x=\"model\", y=\"accuracy\",\n",
531
+ " dodge=True, alpha=.5, zorder=1,\n",
532
+ ")\n",
533
+ "\n",
534
+ "sns.pointplot(\n",
535
+ " data=results, x=\"model\", y=\"accuracy\",\n",
536
+ " join=False, dodge=.8 - .8 / 3, palette=\"dark\",\n",
537
+ " markers=\"d\", scale=.75, errorbar=None\n",
538
+ ")\n",
539
+ "\n",
540
+ "pairs=[(\"LFADS\", \"marble\"), (\"CEBRA\",\"marble\"), (\"firing_rates\",\"marble\"), (\"TDR\",\"marble\")]\n",
541
+ "\n",
542
+ "annotator = Annotator(ax, pairs, data=results, x=\"model\", y=\"accuracy\",)\n",
543
+ "annotator.configure(test='Wilcoxon', text_format='star', loc='outside')\n",
544
+ "annotator.apply_and_annotate()"
545
+ ]
546
+ },
547
+ {
548
+ "cell_type": "markdown",
549
+ "id": "379d1077",
550
+ "metadata": {},
551
+ "source": [
552
+ "## Decoding final reach direction"
553
+ ]
554
+ },
555
+ {
556
+ "cell_type": "code",
557
+ "execution_count": null,
558
+ "id": "91f2c2a7",
559
+ "metadata": {
560
+ "tags": []
561
+ },
562
+ "outputs": [],
563
+ "source": [
564
+ "marble_model_acc = []\n",
565
+ "cebra_model_acc = []\n",
566
+ "lfads_model_acc = []\n",
567
+ "TDR_model_acc = []\n",
568
+ "firing_rates_model_acc = []\n",
569
+ "\n",
570
+ "for d in tqdm(data.keys()): \n",
571
+ "\n",
572
+ " unique_trial_ids = np.unique(trial_ids[d])\n",
573
+ " \n",
574
+ " # fit classifier to kinematics\n",
575
+ " clf = fit_classifier(data[d], conditions, unique_trial_ids, representation='kinematics')\n",
576
+ " \n",
577
+ " # evaluate classifier on marble decoded\n",
578
+ " score = transform_classifier(clf, data[d], conditions, unique_trial_ids, representation='marble_decoded')\n",
579
+ " marble_model_acc.append(score)\n",
580
+ " \n",
581
+ " # evaluate classifier on cebra decoded\n",
582
+ " score = transform_classifier(clf, data[d], conditions, unique_trial_ids, representation='cebra_decoded')\n",
583
+ " cebra_model_acc.append(score)\n",
584
+ " \n",
585
+ " # evaluate classifier on lfads decoded\n",
586
+ " score = transform_classifier(clf, data[d], conditions, unique_trial_ids, representation='lfads_decoded')\n",
587
+ " lfads_model_acc.append(score)\n",
588
+ "\n",
589
+ " # evaluate classifier on TDR decoded\n",
590
+ " score = transform_classifier(clf, data[d], conditions, unique_trial_ids, representation='TDR_decoded')\n",
591
+ " TDR_model_acc.append(score)\n",
592
+ "\n",
593
+ " # evaluate classifier on firing_rates\n",
594
+ " score = transform_classifier(clf, data[d], conditions, unique_trial_ids, representation='firing_rates_decoded')\n",
595
+ " firing_rates_model_acc.append(score)\n",
596
+ "\n",
597
+ "results = pd.DataFrame(data=np.vstack([ marble_model_acc, cebra_model_acc, lfads_model_acc, TDR_model_acc, firing_rates_model_acc]).T,columns=['marble', 'cebra', 'LFADS', 'TDR', 'firing_rates'])\n",
598
+ "\n",
599
+ "results = results.melt()\n",
600
+ "results.columns = ['model','accuracy']"
601
+ ]
602
+ },
603
+ {
604
+ "cell_type": "code",
605
+ "execution_count": null,
606
+ "id": "90c62030",
607
+ "metadata": {
608
+ "tags": []
609
+ },
610
+ "outputs": [],
611
+ "source": [
612
+ "f, ax = plt.subplots(figsize=(4,4))\n",
613
+ "sns.despine(bottom=True, left=True)\n",
614
+ "\n",
615
+ "sns.stripplot(\n",
616
+ " data=results, x=\"model\", y=\"accuracy\",\n",
617
+ " dodge=True, alpha=.5, zorder=1,\n",
618
+ ")\n",
619
+ "\n",
620
+ "sns.pointplot(\n",
621
+ " data=results, x=\"model\", y=\"accuracy\",\n",
622
+ " join=False, dodge=.8 - .8 / 3, palette=\"dark\",\n",
623
+ " markers=\"d\", scale=.75, errorbar=None\n",
624
+ ")"
625
+ ]
626
+ },
627
+ {
628
+ "cell_type": "code",
629
+ "execution_count": null,
630
+ "id": "78d726e8-3a27-4e5e-b959-199d2de8199a",
631
+ "metadata": {},
632
+ "outputs": [],
633
+ "source": []
634
+ }
635
+ ],
636
+ "metadata": {
637
+ "kernelspec": {
638
+ "display_name": "Python 3 (ipykernel)",
639
+ "language": "python",
640
+ "name": "python3"
641
+ },
642
+ "language_info": {
643
+ "codemirror_mode": {
644
+ "name": "ipython",
645
+ "version": 3
646
+ },
647
+ "file_extension": ".py",
648
+ "mimetype": "text/x-python",
649
+ "name": "python",
650
+ "nbconvert_exporter": "python",
651
+ "pygments_lexer": "ipython3",
652
+ "version": "3.10.14"
653
+ }
654
+ },
655
+ "nbformat": 4,
656
+ "nbformat_minor": 5
657
+ }
data/examples/macaque_reaching/macaque_reaching_helpers.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from scipy.signal import savgol_filter
3
+ from sklearn.decomposition import PCA
4
+ from sklearn.svm import SVC
5
+ import pickle
6
+ import numpy as np
7
+ import matplotlib.pyplot as plt
8
+
9
+ conditions=['DownLeft','Left','UpLeft','Up','UpRight','Right','DownRight']
10
+
11
def get_vector_array(coords):
    """Return the displacement vectors between consecutive rows of *coords*."""
    return np.diff(coords, axis=0)
15
+
16
+
17
def fit_pca(rates, day, conditions, filter_data=True, pca_n=5):
    """Fit a PCA model to one session's firing rates, pooled over all conditions.

    Trials from every condition of `day` are stacked (time x channels) and a
    single PCA is fitted to the pooled data, so all conditions share one basis.
    """
    trials = []
    for cond in conditions:
        # drop pre-go-cue bins: go cue at 500ms with 20ms bins -> start at bin 25
        session_data = rates[day][cond][:, :, 25:]
        for idx in range(session_data.shape[0]):
            trial = session_data[idx]
            if filter_data:
                # smooth each channel along the time axis
                trial = savgol_filter(trial, 9, 2)
            trials.append(trial.T)  # store as time x channels

    model = PCA(n_components=pca_n)
    model.fit(np.vstack(trials))

    return model
47
+
48
+
49
def format_data(rates, trial_ids, day, conditions, pca=None, filter_data=True, go_cue=25, stack=True):
    """Build per-condition position/velocity features from one session's rates.

    For every trial: optionally smooth, optionally project with a fitted `pca`,
    then take all samples but the last as positions and consecutive differences
    as velocities, together with matching timepoint, condition and trial-id
    labels. If `stack`, the per-trial arrays of each condition are concatenated.
    """
    n_cond = len(conditions)
    pos = [[] for _ in range(n_cond)]
    vel = [[] for _ in range(n_cond)]
    timepoints = [[] for _ in range(n_cond)]
    condition_labels = [[] for _ in range(n_cond)]
    trial_indexes = [[] for _ in range(n_cond)]

    for c, cond in enumerate(conditions):
        # keep only bins after the go cue (default 25 bins; 500ms at 20ms bins)
        session_data = rates[day][cond][:, :, go_cue:]

        for t in range(session_data.shape[0]):
            trial = session_data[t]

            if filter_data:
                # smooth each channel along time
                trial = savgol_filter(trial, 9, 2)

            # project to the PCA subspace, or just transpose to time x channels
            trial = trial.T if pca is None else pca.transform(trial.T)

            n_steps = trial.shape[0] - 1  # one fewer velocity than positions

            pos[c].append(trial[:-1, :])
            vel[c].append(get_vector_array(trial))
            timepoints[c].append(np.linspace(0, n_steps - 1, n_steps))
            condition_labels[c].append(np.repeat(c, n_steps))
            # trial id per sample, to match with kinematics decoding later
            trial_indexes[c].append(np.repeat(trial_ids[day][cond][t], n_steps))

    if stack:
        pos = [np.vstack(u) for u in pos]
        vel = [np.vstack(u) for u in vel]
        timepoints = [np.hstack(u) for u in timepoints]
        condition_labels = [np.hstack(u) for u in condition_labels]
        trial_indexes = [np.hstack(u) for u in trial_indexes]

    return pos, vel, timepoints, condition_labels, trial_indexes
100
+
101
+
102
def train_OLE(data, trial_ids, representation='lfads_factors'):
    """Fit an optimal linear estimator from a neural representation to kinematics.

    Concatenates all trials, augments the representation with a constant bias
    row, and solves the least-squares problem mapping it onto the first four
    kinematic rows (pos_x, pos_y, vel_x, vel_y). Returns the 4 x (d+1) weights.
    """
    kin_blocks, emb_blocks = [], []
    for tr in np.unique(trial_ids):
        kin_blocks.append(data[tr]['kinematics'])
        emb_blocks.append(data[tr][representation])

    kin = np.hstack(kin_blocks)[:4, :]  # pos_x, pos_y, vel_x, vel_y
    emb = np.hstack(emb_blocks)
    emb = np.vstack([emb, np.ones(emb.shape[1])])  # append bias row

    solution, *_ = np.linalg.lstsq(emb.T, kin.T, rcond=None)

    return solution.T
119
+
120
+
121
def decode_kinematics(data, L, dt=20, representation='lfads_factors'):
    """Decode hand kinematics for one trial from its neural representation.

    Velocities are read out linearly at every time step; positions are obtained
    by integrating the decoded velocity forward from the true starting point.
    Returns a 4 x T array (pos_x, pos_y, vel_x, vel_y).
    """
    emb = data[representation]          # trial embedding (d x T)
    kin = data['kinematics']            # ground-truth kinematics for this trial
    n_steps = kin.shape[1]

    emb = np.vstack([emb, np.ones(emb.shape[1])])  # bias row

    pred = np.full((4, n_steps), np.nan)

    # seed position with the true starting coordinates
    pred[:2, 0] = kin[:2, 0]
    # decode the initial velocity
    pred[2:4, 0] = (L @ emb[:, 0])[2:4]

    for step in range(1, n_steps):
        decoded = L @ emb[:, step]
        velocity = decoded[2:4]
        # integrate velocity (dt in ms) to advance the position estimate
        pred[:2, step] = pred[:2, step - 1] + velocity * dt / 1000
        pred[2:4, step] = velocity

    return pred
149
+
150
def correlation(data, trial_ids, representation='lfads_factors'):
    """Return mean R-squared of decoded position and velocity vs true kinematics.

    Concatenates the given trials and averages R^2 over the x/y components:
    rows 0-1 are position, rows 2-3 are velocity.
    """
    true_blocks = [data[tr]['kinematics'] for tr in trial_ids]
    pred_blocks = [data[tr][representation][:, :] for tr in trial_ids]

    true = np.hstack(true_blocks)
    pred = np.hstack(pred_blocks)

    r2_pos = np.mean([calcR2(true[0, :], pred[0, :]), calcR2(true[1, :], pred[1, :])])
    r2_vel = np.mean([calcR2(true[2, :], pred[2, :]), calcR2(true[3, :], pred[3, :])])

    return r2_pos, r2_vel
164
+
165
def calcR2(data, model):
    """Coefficient of determination (R^2) of *model* predictions against *data*."""
    error_ss = sum((model - data) ** 2)
    total_ss = sum((data - np.mean(data)) ** 2)
    return 1 - error_ss / total_ss
172
+
173
def plot_kinematics(data, session, trial_ids, representation='lfads_factors', ax=None, sz=140):
    """Plot x/y trajectories of *representation* for the given trials.

    One line per trial, coloured by reach condition; axes are hidden and
    clipped to [-sz, sz]. Returns the matplotlib axis.
    """
    palette = plt.cm.viridis(np.linspace(0, 1, 7))

    if ax is None:
        _, ax = plt.subplots(1, 1)

    for c, cond in enumerate(conditions):
        for t in trial_ids:
            if data[session][t]['condition'] == cond:
                traj = data[session][t][representation]
                ax.plot(traj[0, :], traj[1, :], c=palette[c])

    ax.set_title(representation)
    ax.set_xlim([-sz, sz])
    ax.set_ylim([-sz, sz])
    ax.set_axis_off()

    return ax
191
+
192
def fit_classifier(data, conditions, trials, representation):
    """Train an SVM predicting reach condition from flattened x/y trajectories."""
    samples, labels = [], []
    for c, cond in enumerate(conditions):
        for t in trials:
            if data[t]['condition'] == cond:
                # use only the x/y rows, flattened into one feature vector
                samples.append(data[t][representation][:2, :].flatten())
                labels.append(c)

    return SVC().fit(np.vstack(samples), np.array(labels))
206
+
207
def transform_classifier(clf, data, conditions, trials, representation):
    """Score a fitted classifier on flattened x/y trajectories of the trials."""
    samples, labels = [], []
    for c, cond in enumerate(conditions):
        for t in trials:
            if data[t]['condition'] == cond:
                # same featurisation as fit_classifier: flattened x/y rows
                samples.append(data[t][representation][:2, :].flatten())
                labels.append(c)

    return clf.score(np.vstack(samples), np.array(labels))
data/examples/macaque_reaching/plot_MARBLE_representations.ipynb ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "713d155f",
6
+ "metadata": {},
7
+ "source": [
8
+ "# Visualisations of MARBLE embeddings\n",
9
+ "\n",
10
+ "This notebook visualises the MARBLE latent representations of the macaque arm-reaching data obtained from binned spike counts with 20ms bin size.\n",
11
+ "\n",
12
+ "We would like to thank the authors of LFADS for making this data accessible and answering our questions about the data!\n",
13
+ "\n",
14
+ "### Note: the notebook relies on plotly, which may not work on all browsers. If you encounter issues on one browser (e.g., Chrome), just change to another (e.g., Firefox)."
15
+ ]
16
+ },
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "id": "9f439545",
21
+ "metadata": {},
22
+ "outputs": [],
23
+ "source": [
24
+ "%load_ext autoreload\n",
25
+ "%autoreload 2\n",
26
+ " \n",
27
+ "!pip install plotly\n",
28
+ "\n",
29
+ "import numpy as np\n",
30
+ "import pandas as pd\n",
31
+ "\n",
32
+ "import matplotlib.pylab as pl\n",
33
+ "import matplotlib.pyplot as plt\n",
34
+ "import plotly\n",
35
+ "import plotly.graph_objs as go\n",
36
+ "from sklearn.decomposition import PCA\n",
37
+ "\n",
38
+ "import pickle\n",
39
+ "\n",
40
+ "from sklearn.metrics import pairwise_distances\n",
41
+ "import torch.nn as nn\n",
42
+ "import torch\n",
43
+ "\n",
44
+ "from MARBLE import geometry "
45
+ ]
46
+ },
47
+ {
48
+ "cell_type": "markdown",
49
+ "id": "78f3d637",
50
+ "metadata": {},
51
+ "source": [
52
+ "## Load data"
53
+ ]
54
+ },
55
+ {
56
+ "cell_type": "code",
57
+ "execution_count": null,
58
+ "id": "ed5d3773",
59
+ "metadata": {},
60
+ "outputs": [],
61
+ "source": [
62
+ "# insert the pickle file of results that you want to visualise\n",
63
+ "!mkdir data\n",
64
+ "!wget -nc https://dataverse.harvard.edu/api/access/datafile/7062022 -O data/marble_embeddings_out3_pca10_100ms.pkl\n",
65
+ "\n",
66
+ "with open('./data/marble_embeddings_out3_pca10_100ms.pkl', 'rb') as handle:\n",
67
+ " data = pickle.load(handle)\n",
68
+ " \n",
69
+ "distance_matrices = data[0]\n",
70
+ "embeddings = data[1]\n",
71
+ "timepoints = data[2]\n",
72
+ "labels = data[3]\n",
73
+ "sample_inds = data[4]\n",
74
+ "trial_ids = data[5]\n",
75
+ "\n",
76
+ "# condition labels\n",
77
+ "conditions=['DownLeft','Left','UpLeft','Up','UpRight','Right','DownRight']\n",
78
+ "\n",
79
+ "# load kinematics\n",
80
+ "!wget -nc https://dataverse.harvard.edu/api/access/datafile/6969885 -O data/kinematics.pkl\n",
81
+ "\n",
82
+ "with open('data/kinematics.pkl', 'rb') as handle:\n",
83
+ " kinematic_data = pickle.load(handle)"
84
+ ]
85
+ },
86
+ {
87
+ "cell_type": "markdown",
88
+ "id": "2e1ed943",
89
+ "metadata": {},
90
+ "source": [
91
+ "# Generate 3D plots for a selection of sessions\n",
92
+ "\n",
93
+ "Lets first do this for the MARBLE data."
94
+ ]
95
+ },
96
+ {
97
+ "cell_type": "code",
98
+ "execution_count": null,
99
+ "id": "8ce5b649",
100
+ "metadata": {},
101
+ "outputs": [],
102
+ "source": [
103
+ "colors = pl.cm.viridis(np.linspace(0,1,7))\n",
104
+ "\n",
105
+ "# Configure Plotly to be rendered inline in the notebook.\n",
106
+ "plotly.offline.init_notebook_mode()\n",
107
+ "\n",
108
+ "# looping over 10 different sessions\n",
109
+ "examples = [5,6,8,11,14,15,18,23,26,32] # these sessions were used in Figure S7\n",
110
+ "for d, i in enumerate(examples):\n",
111
+ " emb = embeddings[d]\n",
112
+ " label = labels[d]\n",
113
+ " time = np.hstack(timepoints[d])\n",
114
+ " # Configure the trace.\n",
115
+ " data = []\n",
116
+ "\n",
117
+ " for i in range(7):\n",
118
+ " trace = go.Scatter3d(\n",
119
+ " x=emb[label==i,0], \n",
120
+ " y=emb[label==i,1], \n",
121
+ " z=emb[label==i,2], \n",
122
+ " mode='markers',\n",
123
+ " marker={\n",
124
+ " 'size': 1,\n",
125
+ " 'opacity': 1,\n",
126
+ " 'color':'rgb({},{},{})'.format(colors[i,0],colors[i,1],colors[i,2]), # set color to an array/list of desired values\n",
127
+ " },\n",
128
+ " )\n",
129
+ " data.append(trace)\n",
130
+ "\n",
131
+ " # Configure the layout.\n",
132
+ " layout = go.Layout(\n",
133
+ " paper_bgcolor='rgba(0,0,0,0)',\n",
134
+ " plot_bgcolor='rgba(0,0,0,0)',\n",
135
+ " xaxis=dict(showgrid=False,showline=False),\n",
136
+ " yaxis=dict(showgrid=False,showline=False)\n",
137
+ " )\n",
138
+ "\n",
139
+ " plot_figure = go.Figure(data=data, layout=layout)\n",
140
+ " plot_figure.update_scenes(xaxis_visible=False, yaxis_visible=False,zaxis_visible=False )\n",
141
+ "\n",
142
+ " # Render the plot.\n",
143
+ " plot_figure.show()"
144
+ ]
145
+ },
146
+ {
147
+ "cell_type": "markdown",
148
+ "id": "aded605d",
149
+ "metadata": {},
150
+ "source": [
151
+ "Lets now compare this with the LFADS embeddings."
152
+ ]
153
+ },
154
+ {
155
+ "cell_type": "code",
156
+ "execution_count": null,
157
+ "id": "633c32e0",
158
+ "metadata": {},
159
+ "outputs": [],
160
+ "source": [
161
+ "colors = pl.cm.viridis(np.linspace(0,1,7))\n",
162
+ "\n",
163
+ "# Configure Plotly to be rendered inline in the notebook.\n",
164
+ "plotly.offline.init_notebook_mode()\n",
165
+ "\n",
166
+ "for i in range(len(examples)):\n",
167
+ " d = examples[i]\n",
168
+ " \n",
169
+ " \n",
170
+ " lfads_data = [[] for cond in conditions]\n",
171
+ " all_data = []\n",
172
+ " for c,cond in enumerate(conditions): \n",
173
+ " for t in kinematic_data[d].keys():\n",
174
+ " if kinematic_data[d][t]['condition']==cond:\n",
175
+ " meh = kinematic_data[d][t]['lfads_factors']\n",
176
+ " lfads_data[c].append(meh)\n",
177
+ " all_data.append(meh)\n",
178
+ "\n",
179
+ " lfads_data = [np.hstack(u) for u in lfads_data]\n",
180
+ " all_data = np.hstack(all_data) \n",
181
+ "\n",
182
+ " # need to PCA high dimension lfads data\n",
183
+ " pca = PCA(n_components=3)\n",
184
+ " pca.fit(all_data.T) \n",
185
+ " \n",
186
+ " \n",
187
+ " # Configure the trace.\n",
188
+ " data = []\n",
189
+ "\n",
190
+ " for i in range(7):\n",
191
+ " emb = pca.transform(lfads_data[i].T)\n",
192
+ " trace = go.Scatter3d(\n",
193
+ " x=emb[:,0], \n",
194
+ " y=emb[:,1], \n",
195
+ " z=emb[:,2], \n",
196
+ " mode='markers',\n",
197
+ " marker={\n",
198
+ " 'size': 1,\n",
199
+ " 'opacity': 1,\n",
200
+ " 'color':'rgb({},{},{})'.format(colors[i,0],colors[i,1],colors[i,2]), # set color to an array/list of desired values\n",
201
+ " },\n",
202
+ " )\n",
203
+ " data.append(trace)\n",
204
+ "\n",
205
+ " # Configure the layout.\n",
206
+ " layout = go.Layout(\n",
207
+ " paper_bgcolor='rgba(0,0,0,0)',\n",
208
+ " plot_bgcolor='rgba(0,0,0,0)',\n",
209
+ " xaxis=dict(showgrid=False,showline=False),\n",
210
+ " yaxis=dict(showgrid=False,showline=False)\n",
211
+ " )\n",
212
+ "\n",
213
+ " plot_figure = go.Figure(data=data, layout=layout)\n",
214
+ " plot_figure.update_scenes(xaxis_visible=False, yaxis_visible=False,zaxis_visible=False )\n",
215
+ " \n",
216
+ " # Render the plot.\n",
217
+ " plot_figure.show()\n",
218
+ " #plotly.offline.iplot(plot_figure)"
219
+ ]
220
+ },
221
+ {
222
+ "cell_type": "markdown",
223
+ "id": "3a6bd5db",
224
+ "metadata": {},
225
+ "source": [
226
+ "# Average distance matrix across sessions "
227
+ ]
228
+ },
229
+ {
230
+ "cell_type": "markdown",
231
+ "id": "92385113",
232
+ "metadata": {},
233
+ "source": [
234
+ "Lets see what the average distance matrix looks like across sessions for MARBLE."
235
+ ]
236
+ },
237
+ {
238
+ "cell_type": "code",
239
+ "execution_count": null,
240
+ "id": "7901f815",
241
+ "metadata": {},
242
+ "outputs": [],
243
+ "source": [
244
+ "# plot average distance matrix based on clustering\n",
245
+ "plt.figure()\n",
246
+ "plt.imshow(np.mean(np.dstack(distance_matrices),2)); plt.colorbar() \n",
247
+ "\n",
248
+ "emb_MDS, _ = geometry.embed(np.mean(np.dstack(distance_matrices),2), embed_typ = 'MDS')\n",
249
+ "plt.figure()\n",
250
+ "plt.scatter(emb_MDS[:,0],emb_MDS[:,1],c=np.linspace(0,6,7))"
251
+ ]
252
+ },
253
+ {
254
+ "cell_type": "markdown",
255
+ "id": "24ee2248",
256
+ "metadata": {},
257
+ "source": [
258
+ "how does this compare with LFADS?"
259
+ ]
260
+ },
261
+ {
262
+ "cell_type": "code",
263
+ "execution_count": null,
264
+ "id": "506ec87d",
265
+ "metadata": {},
266
+ "outputs": [],
267
+ "source": [
268
+ "# we first need to compute distance matrices for lfads \n",
269
+ "\n",
270
+ "distance_matrices_lfads = []\n",
271
+ "\n",
272
+ "# loop over sessions and compute distance matrices\n",
273
+ "for d in range(len(embeddings)):\n",
274
+ " \n",
275
+ " lfads_data = [[] for cond in conditions]\n",
276
+ " for t in kinematic_data[d].keys():\n",
277
+ " for c,cond in enumerate(conditions): \n",
278
+ " if kinematic_data[d][t]['condition'] == cond:\n",
279
+ " meh = kinematic_data[d][t]['lfads_factors']\n",
280
+ " lfads_data[c].append(meh)\n",
281
+ " \n",
282
+ " lfads_data = [np.hstack(u).T for u in lfads_data]\n",
283
+ " \n",
284
+ " distances = np.zeros([len(conditions), len(conditions)])\n",
285
+ " for i in range(len(conditions)):\n",
286
+ " for j in range(len(conditions)):\n",
287
+ " if i == j:\n",
288
+ " distances[i,j] = 0\n",
289
+ " else:\n",
290
+ " distances[i,j] = pairwise_distances(lfads_data[i], lfads_data[j]).mean()\n",
291
+ " \n",
292
+ " distances = distances/np.std(distances)\n",
293
+ " distance_matrices_lfads.append(distances)"
294
+ ]
295
+ },
296
+ {
297
+ "cell_type": "code",
298
+ "execution_count": null,
299
+ "id": "d8afa280",
300
+ "metadata": {},
301
+ "outputs": [],
302
+ "source": [
303
+ "# plot average distance matrix based on clustering\n",
304
+ "plt.figure()\n",
305
+ "plt.imshow(np.mean(np.dstack(distance_matrices_lfads),2))\n",
306
+ "plt.colorbar() \n",
307
+ "\n",
308
+ "emb_MDS, _ = geometry.embed(np.mean(np.dstack(distance_matrices_lfads),2), embed_typ='MDS')\n",
309
+ "plt.figure()\n",
310
+ "plt.scatter(emb_MDS[:,0], emb_MDS[:,1], c=np.linspace(0,6,7))"
311
+ ]
312
+ },
313
+ {
314
+ "cell_type": "markdown",
315
+ "id": "fc8ab5d4",
316
+ "metadata": {},
317
+ "source": [
318
+ "Both are pretty good in terms of their average embeddings!"
319
+ ]
320
+ },
321
+ {
322
+ "cell_type": "markdown",
323
+ "id": "e4bd33cf",
324
+ "metadata": {},
325
+ "source": [
326
+ "# Plotting individual session embeddings\n",
327
+ "\n",
328
+ "Here we just want to plot the distance matrix for individual sessions (Fig S7)."
329
+ ]
330
+ },
331
+ {
332
+ "cell_type": "code",
333
+ "execution_count": null,
334
+ "id": "ad42d324",
335
+ "metadata": {
336
+ "scrolled": true
337
+ },
338
+ "outputs": [],
339
+ "source": [
340
+ "fig, axs = plt.subplots(4,len(examples),figsize=(15,5))\n",
341
+ "\n",
342
+ "# loop over example sessions\n",
343
+ "for i,idx in enumerate(examples):\n",
344
+ " \n",
345
+ " # plot distance matrix for marble\n",
346
+ " axs[0, i].imshow(distance_matrices[idx])\n",
347
+ " \n",
348
+ " # plot distance matrix for LFADS\n",
349
+ " axs[1, i].imshow(distance_matrices_lfads[idx]) \n",
350
+ "\n",
351
+ " # plot MDS embedding of MARBLE distance matrix\n",
352
+ " emb_MDS, _ = geometry.embed(distance_matrices[idx], embed_typ = 'MDS')\n",
353
+ " axs[2, i].scatter(emb_MDS[:,0],emb_MDS[:,1],c=np.linspace(0,6,7))\n",
354
+ " \n",
355
+ " # plot MDS embedding of LFADS distance matrix\n",
356
+ " emb_MDS, _ = geometry.embed(distance_matrices_lfads[idx], embed_typ = 'MDS')\n",
357
+ " axs[3, i].scatter(emb_MDS[:,0],emb_MDS[:,1],c=np.linspace(0,6,7))"
358
+ ]
359
+ },
360
+ {
361
+ "cell_type": "code",
362
+ "execution_count": null,
363
+ "id": "1bf65406-eeef-4c64-9786-1b54993caf45",
364
+ "metadata": {},
365
+ "outputs": [],
366
+ "source": []
367
+ }
368
+ ],
369
+ "metadata": {
370
+ "kernelspec": {
371
+ "display_name": "Python 3 (ipykernel)",
372
+ "language": "python",
373
+ "name": "python3"
374
+ },
375
+ "language_info": {
376
+ "codemirror_mode": {
377
+ "name": "ipython",
378
+ "version": 3
379
+ },
380
+ "file_extension": ".py",
381
+ "mimetype": "text/x-python",
382
+ "name": "python",
383
+ "nbconvert_exporter": "python",
384
+ "pygments_lexer": "ipython3",
385
+ "version": "3.10.14"
386
+ }
387
+ },
388
+ "nbformat": 4,
389
+ "nbformat_minor": 5
390
+ }
data/examples/macaque_reaching/plot_vector_fields.ipynb ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "c9dbe513",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "%load_ext autoreload\n",
11
+ "%autoreload 2\n",
12
+ "%matplotlib widget\n",
13
+ "import numpy as np\n",
14
+ "import matplotlib.pyplot as plt\n",
15
+ "import matplotlib.cm as cm\n",
16
+ "import mat73\n",
17
+ "import pickle\n",
18
+ "\n",
19
+ "import MARBLE\n",
20
+ "from MARBLE import plotting\n",
21
+ "\n",
22
+ "from sklearn.decomposition import PCA\n",
23
+ "\n",
24
+ "import neo\n",
25
+ "from elephant.statistics import instantaneous_rate\n",
26
+ "from elephant.kernels import GaussianKernel\n",
27
+ "from quantities import ms"
28
+ ]
29
+ },
30
+ {
31
+ "cell_type": "code",
32
+ "execution_count": null,
33
+ "id": "5fd6f954",
34
+ "metadata": {},
35
+ "outputs": [],
36
+ "source": [
37
+ "# load data compiled into matlab cell array\n",
38
+ "!wget -nc https://dataverse.harvard.edu/api/access/datafile/6963157 -O data/conditions_spiking_data.mat\n",
39
+ "spiking_data = mat73.loadmat('data/conditions_spiking_data.mat')['all_results']"
40
+ ]
41
+ },
42
+ {
43
+ "cell_type": "code",
44
+ "execution_count": null,
45
+ "id": "c578b742",
46
+ "metadata": {},
47
+ "outputs": [],
48
+ "source": [
49
+ "trial = 1\n",
50
+ "session = 9\n",
51
+ "\n",
52
+ "spikes, colors = [], []\n",
53
+ "for cond in range(7):\n",
54
+ " st = spiking_data[session][cond][trial][0][:,:]\n",
55
+ " spikes += [np.where(st[ch,:])[0] for ch in range(24)]\n",
56
+ " colors += [cm.viridis(cond / 6) for _ in range(24)]\n",
57
+ "\n",
58
+ "_, ax = plt.subplots(figsize=(5,4))\n",
59
+ "ax.eventplot(spikes, color=colors)\n",
60
+ "\n",
61
+ "_, ax = plt.subplots(figsize=(5,4))\n",
62
+ "gk = GaussianKernel(100 * ms) # increase this for smoother signals (previously used auto)\n",
63
+ "\n",
64
+ "for sp in spikes[:24]:\n",
65
+ " st = neo.SpikeTrain(sp, units='ms', t_stop=1200)\n",
66
+ " \n",
67
+ " inst_rate = instantaneous_rate(st, kernel=gk, sampling_period=1 * ms).magnitude\n",
68
+ " ax.plot(inst_rate, 'C0')"
69
+ ]
70
+ },
71
+ {
72
+ "cell_type": "code",
73
+ "execution_count": null,
74
+ "id": "4424eb17",
75
+ "metadata": {},
76
+ "outputs": [],
77
+ "source": [
78
+ "!wget -nc https://dataverse.harvard.edu/api/access/datafile/7062086 -O data/raw_data_session_9_3D.pkl\n",
79
+ "pos, vel, time, _ = pickle.load(open('data/raw_data_session_9_3D.pkl','rb'))"
80
+ ]
81
+ },
82
+ {
83
+ "cell_type": "code",
84
+ "execution_count": null,
85
+ "id": "8d8a958a",
86
+ "metadata": {},
87
+ "outputs": [],
88
+ "source": [
89
+ "fig = plt.figure(figsize=(10,5))\n",
90
+ "n_traj=10\n",
91
+ "from matplotlib.colors import LinearSegmentedColormap\n",
92
+ "from mpl_toolkits.mplot3d.art3d import Line3DCollection\n",
93
+ "\n",
94
+ "for i,cond in enumerate([1,4,6]):\n",
95
+ " ax = fig.add_subplot(int('13{}'.format(i+1)), projection='3d')\n",
96
+ " ax.view_init(elev=10., azim=90)\n",
97
+ " starts = np.where(time[cond]==0)[0]\n",
98
+ " for j in range(n_traj):\n",
99
+ " t = range(starts[j], starts[j+1]-1)\n",
100
+ " p = pos[cond][t]\n",
101
+ " segments = np.stack([p[:-1], p[1:]], axis=1)\n",
102
+ " \n",
103
+ " colors = [(0, 0, 0), cm.viridis(cond/6)] # first color is black, last is red\n",
104
+ " cmap = LinearSegmentedColormap.from_list(\"Custom\", colors, N=len(time[cond][t]))\n",
105
+ " r = cmap(np.linspace(0,1,len(time[cond][t])))\n",
106
+ " \n",
107
+ " ax.add_collection(Line3DCollection(segments,colors=list(r)))\n",
108
+ " ax.set_xlim([min(pos[cond][:,0]), max(pos[cond][:,0])])\n",
109
+ " ax.set_ylim([min(pos[cond][:,1]), max(pos[cond][:,1])])\n",
110
+ " ax.set_zlim([min(pos[cond][:,2]), max(pos[cond][:,2])])\n",
111
+ " \n",
112
+ " ax.scatter(pos[cond][starts[j],0],pos[cond][starts[j],1],pos[cond][starts[j],2],color=colors[0])\n",
113
+ " ax.scatter(pos[cond][starts[j+1]-1,0],pos[cond][starts[j+1]-1,1],pos[cond][starts[j+1]-1,2],color=colors[1])\n",
114
+ " ax.get_xaxis().set_ticks([])\n",
115
+ " ax.get_yaxis().set_ticks([])\n",
116
+ " ax.get_zaxis().set_ticks([])"
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "code",
121
+ "execution_count": null,
122
+ "id": "e3b0b713",
123
+ "metadata": {},
124
+ "outputs": [],
125
+ "source": [
126
+ "data = MARBLE.construct_dataset(pos, features=vel, graph_type='cknn', k=10, stop_crit=0.05, local_gauges=False)"
127
+ ]
128
+ },
129
+ {
130
+ "cell_type": "code",
131
+ "execution_count": null,
132
+ "id": "fb29a0b4",
133
+ "metadata": {},
134
+ "outputs": [],
135
+ "source": [
136
+ "data_plot = data.to_data_list()\n",
137
+ "for i in [1,4,6]:\n",
138
+ " d = data_plot[i]\n",
139
+ " c = [(0, 0, 0), cm.viridis(i/6)] # first color is black, last is Ci\n",
140
+ " cmap = LinearSegmentedColormap.from_list(\"Custom\", c, N=140)\n",
141
+ " ind = np.linspace(0,1,140)\n",
142
+ " colors = cmap(ind[time[i][d.sample_ind].astype(int)])\n",
143
+ " plotting.fields([d], view=(10,90), figsize=(3,3), scale=2, width=7., color=colors, axes_visible=False)\n",
144
+ " plt.axis('on')"
145
+ ]
146
+ },
147
+ {
148
+ "cell_type": "code",
149
+ "execution_count": null,
150
+ "id": "43d0816b",
151
+ "metadata": {},
152
+ "outputs": [],
153
+ "source": []
154
+ }
155
+ ],
156
+ "metadata": {
157
+ "kernelspec": {
158
+ "display_name": "Python 3 (ipykernel)",
159
+ "language": "python",
160
+ "name": "python3"
161
+ },
162
+ "language_info": {
163
+ "codemirror_mode": {
164
+ "name": "ipython",
165
+ "version": 3
166
+ },
167
+ "file_extension": ".py",
168
+ "mimetype": "text/x-python",
169
+ "name": "python",
170
+ "nbconvert_exporter": "python",
171
+ "pygments_lexer": "ipython3",
172
+ "version": "3.10.14"
173
+ }
174
+ },
175
+ "nbformat": 4,
176
+ "nbformat_minor": 5
177
+ }
data/examples/macaque_reaching/run_cebra.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pickle
2
+ import os
3
+ import sys
4
+ import numpy as np
5
+ from scipy.signal import savgol_filter
6
+ from sklearn.decomposition import PCA
7
+ from sklearn.neighbors import LocalOutlierFactor
8
+ from macaque_reaching_helpers import fit_pca, format_data
9
+ from tqdm import tqdm
10
+ from cebra import CEBRA
11
+
12
+
13
+ def main():
14
+
15
+ """fitting model for each day + pca embedding"""
16
+
17
+ data_file = "data/rate_data_20ms.pkl"
18
+ metadata_file = "data/trial_ids.pkl"
19
+
20
+ rates = pickle.load(open(data_file, "rb"))
21
+ trial_ids = pickle.load(open(metadata_file, "rb"))
22
+
23
+ # defining the set of conditions
24
+ conditions = ["DownLeft", "Left", "UpLeft", "Up", "UpRight", "Right", "DownRight"]
25
+
26
+ # list of days
27
+ days = rates.keys()
28
+
29
+ # define some parameters
30
+ pca_n = 5
31
+ filter_data = True
32
+
33
+ # storing all distance matrices
34
+ embeddings = []
35
+ distance_matrices = []
36
+ times = [] # to store the time point of each node in the trajectory
37
+ all_condition_labels = [] # to store the condition label for each node
38
+ all_trial_ids = [] # trial ids for each node
39
+ all_sampled_ids = [] # to store all the nodes sampled by marble
40
+
41
+ # loop over each day
42
+ for day in tqdm(days):
43
+
44
+ # first stack all trials from that day together and fit pca
45
+ print(day)
46
+ pca = fit_pca(rates, day, conditions, filter_data=filter_data, pca_n=pca_n)
47
+ pos, vel, timepoints, condition_labels, trial_indexes = format_data(rates,
48
+ trial_ids,
49
+ day,
50
+ conditions,
51
+ pca=pca,
52
+ filter_data=filter_data)
53
+
54
+
55
+ cebra_model = CEBRA(model_architecture='offset10-model',
56
+ batch_size=512,
57
+ learning_rate=0.0001,
58
+ temperature=1,
59
+ output_dimension=20,
60
+ max_iterations=5000,
61
+ distance='euclidean',
62
+ conditional='time_delta',
63
+ device='cuda_if_available',
64
+ verbose=True,
65
+ time_offsets=10)
66
+
67
+ pos_all = np.vstack(pos)
68
+ condition_labels = np.hstack(condition_labels)
69
+ cebra_model.fit(pos_all, condition_labels)
70
+ cebra_pos = cebra_model.transform(pos_all)
71
+
72
+ cebra_model.save("data/session_{}_20ms.pt".format(day))
73
+
74
+ embeddings.append(cebra_pos)
75
+ distance_matrices.append([])
76
+ times.append(np.hstack(timepoints))
77
+ all_condition_labels.append(np.hstack(condition_labels))
78
+ all_trial_ids.append(np.hstack(trial_indexes))
79
+ all_sampled_ids.append([])
80
+
81
+ # save over after each session (incase computations crash)
82
+ with open("data/cebra_embeddings_20ms_out20.pkl", "wb") as handle:
83
+ pickle.dump(
84
+ [
85
+ distance_matrices,
86
+ embeddings,
87
+ times,
88
+ all_condition_labels,
89
+ all_trial_ids,
90
+ all_sampled_ids,
91
+ ],
92
+ handle,
93
+ protocol=pickle.HIGHEST_PROTOCOL,
94
+ )
95
+
96
+ # final save
97
+ with open("data/cebra_embeddings_20ms_out20.pkl", "wb") as handle:
98
+ pickle.dump(
99
+ [
100
+ distance_matrices,
101
+ embeddings,
102
+ times,
103
+ all_condition_labels,
104
+ all_trial_ids,
105
+ all_sampled_ids,
106
+ ],
107
+ handle,
108
+ protocol=pickle.HIGHEST_PROTOCOL,
109
+ )
110
+
111
+ if __name__ == "__main__":
112
+ sys.exit(main())