ToughStone committed on
Commit
bbcee8d
·
verified ·
1 Parent(s): fc385a9

Upload 14 files

Browse files
feature/V1_C3D.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e98a5cd9b1575ed631e26c1c51accbc8ef70b2606608f851cbc9dd8e60590ae
3
+ size 65538224
feature/V1_resnet_avg.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec2421d0c3c6f3bff0743dd89513fc3e173c24a66ad75d4d3b94d9ce9ce6db25
3
+ size 32770224
feature/V2_C3D.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad577dd1252470a5dd3bdc14f412d25506581cfaf52af71311e97970c712a2df
3
+ size 65538224
feature/V2_resnet_avg.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d93ad8846b84a42a436677d4aeecededa1b036c26430091ae6fc86ddb655752b
3
+ size 32770224
feature/V3_C3D.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:647c720520d37c0e65ae937c0079b3fd27c00a4795847867be001d530759e8f1
3
+ size 65538224
feature/V3_resnet_avg.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd61f91f7b13b00a4ccfeaacb27d2105c41c9e4f554c41827a3500cb3280b3ea
3
+ size 32770224
feature/V4_C3D.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d65cba6253635ec5926400260af3741c4e2eb3778ef05838579787147d89e2e
3
+ size 65538224
feature/V4_resnet_avg.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:416d9cbbdcf9b236b977f72075496fe571659d53f67b46d14cff9efaff8dae2b
3
+ size 32770224
segment/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .cpd_auto import cpd_auto
segment/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (174 Bytes). View file
 
segment/__pycache__/cpd_auto.cpython-310.pyc ADDED
Binary file (2.92 kB). View file
 
segment/__pycache__/cpd_nonlin.cpython-310.pyc ADDED
Binary file (2.2 kB). View file
 
segment/cpd_auto.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from .cpd_nonlin import cpd_nonlin
3
+
4
def cpd_auto(K, ncp, vmax, desc_rate=1, **kwargs):
    """Detect change points, automatically selecting their number.

    Runs cpd_nonlin once with the maximum number of change points to
    obtain the objective score for every candidate count, picks the
    count minimizing score + penalty, then re-runs cpd_nonlin with
    backtracking to recover the actual change points for that count.

    Parameters
    ----------
    K : (N, N) ndarray
        Kernel (similarity) matrix between each pair of frames.
    ncp : int
        Maximum number of change points to consider.
    vmax : float
        Penalty scale parameter.
    desc_rate : int, optional
        Rate of descriptor sub-sampling (vmax always corresponds to 1x).
    **kwargs
        Forwarded to cpd_nonlin (e.g. lmin, lmax, verbose).
        Note: lmin and the selected number of change points should be
        in agreement.

    Returns
    -------
    cps : ndarray
        Best selected change points. Change points are always expressed
        in sub-sampled coordinates, irrespective of desc_rate.
    costs : (ncp + 1,) ndarray
        Penalized costs for 0, 1, ..., ncp change points.

    Notes
    -----
    Memory requirement: ~(3*N*N + N*ncp)*4 bytes ~= 16 * N^2 bytes,
    i.e. about 1.6 GB for N = 10000.
    """
    m = ncp
    # First pass (no backtracking, saves memory): scores[i] is the
    # un-penalized objective value for i change points.
    (_, scores) = cpd_nonlin(K, m, backtrack=False, **kwargs)

    N = K.shape[0]
    N2 = N * desc_rate  # length of the video before sub-sampling

    # Penalty for k = 1..m change points; index 0 stays 0, which also
    # prevents a division by zero for the "no changes" case.
    penalties = np.zeros(m + 1)
    k = np.arange(1, m + 1)
    penalties[1:] = (vmax * k / (2.0 * N2)) * (np.log(float(N2) / k) + 1)

    costs = scores / float(N) + penalties
    m_best = int(np.argmin(costs))

    # Second pass: backtrack to get the change points for the best count.
    (cps, _) = cpd_nonlin(K, m_best, **kwargs)

    return (cps, costs)
47
+
48
+
49
+ # ------------------------------------------------------------------------------
50
+ # Extra functions (currently not used)
51
+
52
def estimate_vmax(K_stable):
    """Estimate the vmax penalty parameter from a stable segment.

    K_stable is the kernel between all frames of a stable segment;
    the estimate is the trace of the centered kernel divided by the
    segment length.
    """
    n_frames = K_stable.shape[0]
    return np.trace(centering(K_stable) / n_frames)
57
+
58
+
59
def centering(K):
    """Apply double centering to the kernel matrix K.

    Subtracts the row means and (transposed) column means and adds back
    the grand mean; for a symmetric K the result has zero-mean rows and
    columns.
    """
    row_means = np.mean(K, 1)[:, np.newaxis]
    grand_mean = np.mean(row_means)
    return K - row_means - row_means.T + grand_mean
63
+
64
+
65
def eval_score(K, cps):
    """Evaluate the unnormalized empirical score for given change points.

    The score is the sum of kernelized scatters over all segments:
    trace of each diagonal block minus the block sum divided by the
    segment length.
    """
    n = K.shape[0]
    boundaries = [0] + list(cps) + [n]
    diag_total = 0.0
    norm_total = 0.0
    for start, stop in zip(boundaries[:-1], boundaries[1:]):
        block = K[start:stop, start:stop]
        diag_total += np.sum(np.diag(block))
        norm_total += np.sum(block) / float(stop - start)
    return diag_total - norm_total
77
+
78
+
79
def eval_cost(K, cps, score, vmax):
    """Evaluate the cost for automatic change-point-count selection.

    K - kernel between all frames
    cps - selected change points
    score - unnormalized empirical score (sum of kernelized scatters)
    vmax - penalty scale parameter
    """
    n = K.shape[0]
    n_cps = len(cps)
    penalty = (vmax * n_cps / (2.0 * n)) * (np.log(float(n) / n_cps) + 1)
    return score / float(n) + penalty
89
+
segment/cpd_nonlin.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ # from scipy import weave
3
+
4
def calc_scatters(K):
    """Compute the scatter matrix of the kernel K.

    scatters[i, j] (for j >= i) is the within-segment scatter of frames
    i..j inclusive: the trace of that diagonal block of K minus the
    block sum divided by the segment length. Entries with j < i are 0.

    Parameters
    ----------
    K : (n, n) ndarray
        Kernel matrix between each pair of frames.

    Returns
    -------
    scatters : (n, n) ndarray
    """
    n = K.shape[0]
    # K1[i] = sum of the first i diagonal entries of K.
    K1 = np.cumsum([0] + list(np.diag(K)))
    # K2[i, j] = sum of the top-left i-by-j block of K.
    K2 = np.zeros((n + 1, n + 1))
    K2[1:, 1:] = np.cumsum(np.cumsum(K, 0), 1)  # TODO: exploit symmetry of K

    # Vectorized form of:
    #   scatters[i, j] = K1[j+1] - K1[i]
    #       - (K2[j+1, j+1] + K2[i, i] - K2[j+1, i] - K2[i, j+1]) / (j - i + 1)
    diag_K2 = np.diag(K2)
    i = np.arange(n)[:, np.newaxis]
    j = np.arange(n)[np.newaxis, :]
    # Clamp the segment length to 1 below the diagonal to avoid a
    # divide-by-zero; those entries are overwritten with 0 afterwards.
    seg_len = np.maximum(j - i + 1, 1)
    scatters = (K1[np.newaxis, 1:] - K1[:n, np.newaxis]
                - (diag_K2[np.newaxis, 1:] + diag_K2[:n, np.newaxis]
                   - K2[1:, :n].T - K2[:n, 1:]) / seg_len)
    # Only j >= i is defined; match the original's zeros below the diagonal.
    scatters[j < i] = 0.0
    return scatters


def cpd_nonlin(K, ncp, lmin=1, lmax=100000, backtrack=True, verbose=True,
               out_scatters=None):
    """Change point detection with dynamic programming.

    Parameters
    ----------
    K : (n, n) ndarray
        Square kernel matrix between each pair of frames.
    ncp : int
        Number of change points to detect (ncp >= 0).
    lmin : int
        Minimal length of a segment.
    lmax : int
        Maximal length of a segment.
    backtrack : bool
        When False, only evaluate objective scores (to save memory).
    verbose : bool
        Print progress messages.
    out_scatters : list or None
        When given, the precomputed scatter matrix is stored in
        out_scatters[0].

    Returns
    -------
    cps : (ncp,) ndarray of int
        Detected change points: the mean is thought to be constant on
        [cps[i], cps[i+1]). All zeros when backtrack is False.
    scores : (ncp + 1,) ndarray
        Objective values for 0..ncp change points; np.inf where no
        feasible segmentation exists.
    """
    m = int(ncp)  # prevent numpy.int64

    (n, n1) = K.shape
    assert(n == n1), "Kernel matrix awaited."

    assert(n >= (m + 1)*lmin)
    assert(n <= (m + 1)*lmax)
    assert(lmax >= lmin >= 1)

    if verbose:
        print("Precomputing scatters...")
    J = calc_scatters(K)

    # was `out_scatters != None`, which breaks if an ndarray is passed
    # (elementwise comparison is ambiguous in a boolean context)
    if out_scatters is not None:
        out_scatters[0] = J

    if verbose:
        print("Inferring best change points...")
    # I[k, l]: minimal objective using k change points on the first l frames.
    # 1e101 marks "not computed / infeasible".
    I = 1e101*np.ones((m+1, n+1))
    # NOTE(review): the slice end `lmax` (not lmax+1) is kept from the
    # original code to preserve behavior exactly.
    I[0, lmin:lmax] = J[0, lmin-1:lmax-1]

    if backtrack:
        p = np.zeros((m+1, n+1), dtype=int)  # p[k, l]: argmin position t
    else:
        p = np.zeros((1, 1), dtype=int)

    for k in range(1, m+1):
        for l in range((k+1)*lmin, n+1):
            # Candidate previous boundaries t; the slice is never empty
            # because l >= (k+1)*lmin and lmax >= lmin.
            tmin = max(k*lmin, l - lmax)
            tmax = l - lmin + 1
            # Vectorized replacement of the innermost loop; np.argmin
            # returns the FIRST minimum, matching the original strict-`<`
            # running-minimum update.
            c = I[k-1, tmin:tmax] + J[tmin:tmax, l-1]
            best = int(np.argmin(c))
            if c[best] < 1e100:
                I[k, l] = c[best]
                if backtrack:
                    p[k, l] = tmin + best
            else:
                I[k, l] = 1e100  # nearly infinity, as in the original

    # Collect change points by walking the backpointers from the end.
    cps = np.zeros(m, dtype=int)

    if backtrack:
        cur = n
        for k in range(m, 0, -1):
            cps[k-1] = p[k, cur]
            cur = cps[k-1]

    scores = I[:, n].copy()
    scores[scores > 1e99] = np.inf
    return cps, scores
114
+
115
+