repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
ChromaStarPy
|
ChromaStarPy-master/HjertingComponents.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 29 13:21:01 2017
@author: Ian
"""
def hjertingComponents():
    """Return the tabulated Hjerting-function expansion components H_0..H_4.

    Source: "Observation and Analysis of Stellar Photospheres", 3rd Ed.,
    David F. Gray, Table 11.5, p. 256.  The Voigt function is approximated
    by the power expansion in the damping parameter "a":
        H(a, v) ~ H_0(v) + a*H_1(v) + a^2*H_2(v) + a^3*H_3(v) + a^4*H_4(v)

    Returns a 6 x 81 nested list:
      row 0  -- abscissae (the Voigt "v" parameter): 0.0 to 4.0 in steps
                of 0.1 (indices 0-40), then 4.2 to 12.0 in steps of 0.2
                (indices 41-80);
      rows 1-5 -- H_0(v) through H_4(v) evaluated at those abscissae.
    """
    #//Note: "u" is the Voigt fn "v" parameter
    numV = 81
    #//Row 0 containt abscissae (Voigt fn "v" parameter)
    hjertComp = [ [ 0.0 for i in range(numV) ] for j in range(6) ]
    #// Tabulated values, transcribed verbatim from Gray Tab. 11.5:
    #//u    H_0(u)    H_1(u)    H_2(u)    H_3(u)    H_4(u)
    hjertComp[0][0] = 0.0; hjertComp[1][0] = 1.000000 ; hjertComp[2][0] = -1.12838 ; hjertComp[3][0] = 1.0000 ; hjertComp[4][0] = -0.752 ; hjertComp[5][0] = 0.50;
    hjertComp[0][1] = 0.1; hjertComp[1][1] = 0.990050 ; hjertComp[2][1] = -1.10596 ; hjertComp[3][1] = 0.9702 ; hjertComp[4][1] = -0.722 ; hjertComp[5][1] = 0.48;
    hjertComp[0][2] = 0.2; hjertComp[1][2] = 0.960789 ; hjertComp[2][2] = -1.04048 ; hjertComp[3][2] = 0.8839 ; hjertComp[4][2] = -0.637 ; hjertComp[5][2] = 0.40;
    hjertComp[0][3] = 0.3; hjertComp[1][3] = 0.913931 ; hjertComp[2][3] = -0.93703 ; hjertComp[3][3] = 0.7494 ; hjertComp[4][3] = -0.505 ; hjertComp[5][3] = 0.30;
    hjertComp[0][4] = 0.4; hjertComp[1][4] = 0.852144 ; hjertComp[2][4] = -0.80346 ; hjertComp[3][4] = 0.5795 ; hjertComp[4][4] = -0.342 ; hjertComp[5][4] = 0.17;
    hjertComp[0][5] = 0.5; hjertComp[1][5] = 0.778801 ; hjertComp[2][5] = -0.64945 ; hjertComp[3][5] = 0.3894 ; hjertComp[4][5] = -0.165 ; hjertComp[5][5] = 0.03;
    hjertComp[0][6] = 0.6; hjertComp[1][6] = 0.697676 ; hjertComp[2][6] = -0.48582 ; hjertComp[3][6] = 0.1953 ; hjertComp[4][6] = 0.007 ; hjertComp[5][6] = -0.09;
    hjertComp[0][7] = 0.7; hjertComp[1][7] = 0.612626 ; hjertComp[2][7] = -0.32192 ; hjertComp[3][7] = 0.0123 ; hjertComp[4][7] = 0.159 ; hjertComp[5][7] = -0.20;
    hjertComp[0][8] = 0.8; hjertComp[1][8] = 0.527292 ; hjertComp[2][8] = -0.16772 ; hjertComp[3][8] = -0.1476 ; hjertComp[4][8] = 0.280 ; hjertComp[5][8] = -0.27;
    hjertComp[0][9] = 0.9; hjertComp[1][9] = 0.444858 ; hjertComp[2][9] = -0.03012 ; hjertComp[3][9] = -0.2758 ; hjertComp[4][9] = 0.362 ; hjertComp[5][9] = -0.30;
    hjertComp[0][10] = 1.0; hjertComp[1][10] = 0.367879 ; hjertComp[2][10] = 0.08594 ; hjertComp[3][10] = -0.3679 ; hjertComp[4][10] = 0.405 ; hjertComp[5][10] = -0.31;
    hjertComp[0][11] = 1.1; hjertComp[1][11] = 0.298197 ; hjertComp[2][11] = 0.17789 ; hjertComp[3][11] = -0.4234 ; hjertComp[4][11] = 0.411 ; hjertComp[5][11] = -0.28;
    hjertComp[0][12] = 1.2; hjertComp[1][12] = 0.236928 ; hjertComp[2][12] = 0.24537 ; hjertComp[3][12] = -0.4454 ; hjertComp[4][12] = 0.386 ; hjertComp[5][12] = -0.24;
    hjertComp[0][13] = 1.3; hjertComp[1][13] = 0.184520 ; hjertComp[2][13] = 0.28981 ; hjertComp[3][13] = -0.4392 ; hjertComp[4][13] = 0.339 ; hjertComp[5][13] = -0.18;
    hjertComp[0][14] = 1.4; hjertComp[1][14] = 0.140858 ; hjertComp[2][14] = 0.31394 ; hjertComp[3][14] = -0.4113 ; hjertComp[4][14] = 0.280 ; hjertComp[5][14] = -0.12;
    hjertComp[0][15] = 1.5; hjertComp[1][15] = 0.105399 ; hjertComp[2][15] = 0.32130 ; hjertComp[3][15] = -0.3689 ; hjertComp[4][15] = 0.215 ; hjertComp[5][15] = -0.07;
    hjertComp[0][16] = 1.6; hjertComp[1][16] = 0.077305 ; hjertComp[2][16] = 0.31573 ; hjertComp[3][16] = -0.3185 ; hjertComp[4][16] = 0.153 ; hjertComp[5][16] = -0.02;
    hjertComp[0][17] = 1.7; hjertComp[1][17] = 0.055576 ; hjertComp[2][17] = 0.30094 ; hjertComp[3][17] = -0.2657 ; hjertComp[4][17] = 0.097 ; hjertComp[5][17] = 0.02;
    hjertComp[0][18] = 1.8; hjertComp[1][18] = 0.039164 ; hjertComp[2][18] = 0.28027 ; hjertComp[3][18] = -0.2146 ; hjertComp[4][18] = 0.051 ; hjertComp[5][18] = 0.04;
    hjertComp[0][19] = 1.9; hjertComp[1][19] = 0.027052 ; hjertComp[2][19] = 0.25648 ; hjertComp[3][19] = -0.1683 ; hjertComp[4][19] = 0.015 ; hjertComp[5][19] = 0.05;
    hjertComp[0][20] = 2.0; hjertComp[1][20] = 0.0183156; hjertComp[2][20] = 0.231726; hjertComp[3][20] = -0.12821; hjertComp[4][20] = -0.0101; hjertComp[5][20] = 0.058;
    hjertComp[0][21] = 2.1; hjertComp[1][21] = 0.0121552; hjertComp[2][21] = 0.207528; hjertComp[3][21] = -0.09505; hjertComp[4][21] = -0.0265; hjertComp[5][21] = 0.056;
    hjertComp[0][22] = 2.2; hjertComp[1][22] = 0.0079071; hjertComp[2][22] = 0.184882; hjertComp[3][22] = -0.06863; hjertComp[4][22] = -0.0355; hjertComp[5][22] = 0.051;
    hjertComp[0][23] = 2.3; hjertComp[1][23] = 0.0050418; hjertComp[2][23] = 0.164341; hjertComp[3][23] = -0.04830; hjertComp[4][23] = -0.0391; hjertComp[5][23] = 0.043;
    hjertComp[0][24] = 2.4; hjertComp[1][24] = 0.0031511; hjertComp[2][24] = 0.146128; hjertComp[3][24] = -0.03315; hjertComp[4][24] = -0.0389; hjertComp[5][24] = 0.035;
    hjertComp[0][25] = 2.5; hjertComp[1][25] = 0.0019305; hjertComp[2][25] = 0.130236; hjertComp[3][25] = -0.02220; hjertComp[4][25] = -0.0363; hjertComp[5][25] = 0.027;
    hjertComp[0][26] = 2.6; hjertComp[1][26] = 0.0011592; hjertComp[2][26] = 0.116515; hjertComp[3][26] = -0.01451; hjertComp[4][26] = -0.0325; hjertComp[5][26] = 0.020;
    hjertComp[0][27] = 2.7; hjertComp[1][27] = 0.0006823; hjertComp[2][27] = 0.104739; hjertComp[3][27] = -0.00927; hjertComp[4][27] = -0.0282; hjertComp[5][27] = 0.015;
    hjertComp[0][28] = 2.8; hjertComp[1][28] = 0.0003937; hjertComp[2][28] = 0.094653; hjertComp[3][28] = -0.00578; hjertComp[4][28] = -0.0239; hjertComp[5][28] = 0.010;
    hjertComp[0][29] = 2.9; hjertComp[1][29] = 0.0002226; hjertComp[2][29] = 0.086005; hjertComp[3][29] = -0.00352; hjertComp[4][29] = -0.0201; hjertComp[5][29] = 0.007;
    hjertComp[0][30] = 3.0; hjertComp[1][30] = 0.0001234; hjertComp[2][30] = 0.078565; hjertComp[3][30] = -0.00210; hjertComp[4][30] = -0.0167; hjertComp[5][30] = 0.005;
    hjertComp[0][31] = 3.1; hjertComp[1][31] = 0.0000671; hjertComp[2][31] = 0.072129; hjertComp[3][31] = -0.00122; hjertComp[4][31] = -0.0138; hjertComp[5][31] = 0.003;
    hjertComp[0][32] = 3.2; hjertComp[1][32] = 0.0000357; hjertComp[2][32] = 0.066526; hjertComp[3][32] = -0.00070; hjertComp[4][32] = -0.0115; hjertComp[5][32] = 0.002;
    hjertComp[0][33] = 3.3; hjertComp[1][33] = 0.0000186; hjertComp[2][33] = 0.061615; hjertComp[3][33] = -0.00039; hjertComp[4][33] = -0.0096; hjertComp[5][33] = 0.001;
    hjertComp[0][34] = 3.4; hjertComp[1][34] = 0.0000095; hjertComp[2][34] = 0.057281; hjertComp[3][34] = -0.00021; hjertComp[4][34] = -0.0080; hjertComp[5][34] = 0.001;
    hjertComp[0][35] = 3.5; hjertComp[1][35] = 0.0000048; hjertComp[2][35] = 0.053430; hjertComp[3][35] = -0.00011; hjertComp[4][35] = -0.0068; hjertComp[5][35] = 0.000;
    hjertComp[0][36] = 3.6; hjertComp[1][36] = 0.0000024; hjertComp[2][36] = 0.049988; hjertComp[3][36] = -0.00006; hjertComp[4][36] = -0.0058; hjertComp[5][36] = 0.000;
    hjertComp[0][37] = 3.7; hjertComp[1][37] = 0.0000011; hjertComp[2][37] = 0.046894; hjertComp[3][37] = -0.00003; hjertComp[4][37] = -0.0050; hjertComp[5][37] = 0.000;
    hjertComp[0][38] = 3.8; hjertComp[1][38] = 0.0000005; hjertComp[2][38] = 0.044098; hjertComp[3][38] = -0.00001; hjertComp[4][38] = -0.0043; hjertComp[5][38] = 0.000;
    hjertComp[0][39] = 3.9; hjertComp[1][39] = 0.0000002; hjertComp[2][39] = 0.041561; hjertComp[3][39] = -0.00001; hjertComp[4][39] = -0.0037; hjertComp[5][39] = 0.000;
    #// Abscissa spacing widens from 0.1 to 0.2 beyond v = 4.0:
    hjertComp[0][40] = 4.0; hjertComp[1][40] = 0.0000000; hjertComp[2][40] = 0.039250; hjertComp[3][40] = 0.00000; hjertComp[4][40] = -0.00329; hjertComp[5][40] = 0.000;
    hjertComp[0][41] = 4.2; hjertComp[1][41] = 0.0000000; hjertComp[2][41] = 0.035195; hjertComp[3][41] = 0.00000; hjertComp[4][41] = -0.00257; hjertComp[5][41] = 0.000;
    hjertComp[0][42] = 4.4; hjertComp[1][42] = 0.0000000; hjertComp[2][42] = 0.031762; hjertComp[3][42] = 0.00000; hjertComp[4][42] = -0.00205; hjertComp[5][42] = 0.000;
    hjertComp[0][43] = 4.6; hjertComp[1][43] = 0.0000000; hjertComp[2][43] = 0.028824; hjertComp[3][43] = 0.00000; hjertComp[4][43] = -0.00166; hjertComp[5][43] = 0.000;
    hjertComp[0][44] = 4.8; hjertComp[1][44] = 0.0000000; hjertComp[2][44] = 0.026288; hjertComp[3][44] = 0.00000; hjertComp[4][44] = -0.00137; hjertComp[5][44] = 0.000;
    hjertComp[0][45] = 5.0; hjertComp[1][45] = 0.0000000; hjertComp[2][45] = 0.024081; hjertComp[3][45] = 0.00000; hjertComp[4][45] = -0.00113; hjertComp[5][45] = 0.000;
    hjertComp[0][46] = 5.2; hjertComp[1][46] = 0.0000000; hjertComp[2][46] = 0.022146; hjertComp[3][46] = 0.00000; hjertComp[4][46] = -0.00095; hjertComp[5][46] = 0.000;
    hjertComp[0][47] = 5.4; hjertComp[1][47] = 0.0000000; hjertComp[2][47] = 0.020441; hjertComp[3][47] = 0.00000; hjertComp[4][47] = -0.00080; hjertComp[5][47] = 0.000;
    hjertComp[0][48] = 5.6; hjertComp[1][48] = 0.0000000; hjertComp[2][48] = 0.018929; hjertComp[3][48] = 0.00000; hjertComp[4][48] = -0.00068; hjertComp[5][48] = 0.000;
    hjertComp[0][49] = 5.8; hjertComp[1][49] = 0.0000000; hjertComp[2][49] = 0.017582; hjertComp[3][49] = 0.00000; hjertComp[4][49] = -0.00059; hjertComp[5][49] = 0.000;
    hjertComp[0][50] = 6.0; hjertComp[1][50] = 0.0000000; hjertComp[2][50] = 0.016375; hjertComp[3][50] = 0.00000; hjertComp[4][50] = -0.00051; hjertComp[5][50] = 0.000;
    hjertComp[0][51] = 6.2; hjertComp[1][51] = 0.0000000; hjertComp[2][51] = 0.015291; hjertComp[3][51] = 0.00000; hjertComp[4][51] = -0.00044; hjertComp[5][51] = 0.000;
    hjertComp[0][52] = 6.4; hjertComp[1][52] = 0.0000000; hjertComp[2][52] = 0.014312; hjertComp[3][52] = 0.00000; hjertComp[4][52] = -0.00038; hjertComp[5][52] = 0.000;
    hjertComp[0][53] = 6.6; hjertComp[1][53] = 0.0000000; hjertComp[2][53] = 0.013426; hjertComp[3][53] = 0.00000; hjertComp[4][53] = -0.00034; hjertComp[5][53] = 0.000;
    hjertComp[0][54] = 6.8; hjertComp[1][54] = 0.0000000; hjertComp[2][54] = 0.012620; hjertComp[3][54] = 0.00000; hjertComp[4][54] = -0.00030; hjertComp[5][54] = 0.000;
    hjertComp[0][55] = 7.0; hjertComp[1][55] = 0.0000000; hjertComp[2][55] = 0.0118860; hjertComp[3][55] = 0.00000; hjertComp[4][55] = -0.00026; hjertComp[5][55] = 0.000;
    hjertComp[0][56] = 7.2; hjertComp[1][56] = 0.0000000; hjertComp[2][56] = 0.0112145; hjertComp[3][56] = 0.00000; hjertComp[4][56] = -0.00023; hjertComp[5][56] = 0.000;
    hjertComp[0][57] = 7.4; hjertComp[1][57] = 0.0000000; hjertComp[2][57] = 0.0105990; hjertComp[3][57] = 0.00000; hjertComp[4][57] = -0.00021; hjertComp[5][57] = 0.000;
    hjertComp[0][58] = 7.6; hjertComp[1][58] = 0.0000000; hjertComp[2][58] = 0.0100332; hjertComp[3][58] = 0.00000; hjertComp[4][58] = -0.00019; hjertComp[5][58] = 0.000;
    hjertComp[0][59] = 7.8; hjertComp[1][59] = 0.0000000; hjertComp[2][59] = 0.0095119; hjertComp[3][59] = 0.00000; hjertComp[4][59] = -0.00017; hjertComp[5][59] = 0.000;
    hjertComp[0][60] = 8.0; hjertComp[1][60] = 0.0000000; hjertComp[2][60] = 0.0090306; hjertComp[3][60] = 0.00000; hjertComp[4][60] = -0.00015; hjertComp[5][60] = 0.000;
    hjertComp[0][61] = 8.2; hjertComp[1][61] = 0.0000000; hjertComp[2][61] = 0.0085852; hjertComp[3][61] = 0.00000; hjertComp[4][61] = -0.00013; hjertComp[5][61] = 0.000;
    hjertComp[0][62] = 8.4; hjertComp[1][62] = 0.0000000; hjertComp[2][62] = 0.0081722; hjertComp[3][62] = 0.00000; hjertComp[4][62] = -0.00012; hjertComp[5][62] = 0.000;
    hjertComp[0][63] = 8.6; hjertComp[1][63] = 0.0000000; hjertComp[2][63] = 0.0077885; hjertComp[3][63] = 0.00000; hjertComp[4][63] = -0.00011; hjertComp[5][63] = 0.000;
    hjertComp[0][64] = 8.8; hjertComp[1][64] = 0.0000000; hjertComp[2][64] = 0.0074314; hjertComp[3][64] = 0.00000; hjertComp[4][64] = -0.00010; hjertComp[5][64] = 0.000;
    hjertComp[0][65] = 9.0; hjertComp[1][65] = 0.0000000; hjertComp[2][65] = 0.0070985; hjertComp[3][65] = 0.00000; hjertComp[4][65] = -0.00009; hjertComp[5][65] = 0.000;
    hjertComp[0][66] = 9.2; hjertComp[1][66] = 0.0000000; hjertComp[2][66] = 0.0067875; hjertComp[3][66] = 0.00000; hjertComp[4][66] = -0.00008; hjertComp[5][66] = 0.000;
    hjertComp[0][67] = 9.4; hjertComp[1][67] = 0.0000000; hjertComp[2][67] = 0.0064967; hjertComp[3][67] = 0.00000; hjertComp[4][67] = -0.00008; hjertComp[5][67] = 0.000;
    hjertComp[0][68] = 9.6; hjertComp[1][68] = 0.0000000; hjertComp[2][68] = 0.0062243; hjertComp[3][68] = 0.00000; hjertComp[4][68] = -0.00007; hjertComp[5][68] = 0.000;
    hjertComp[0][69] = 9.8; hjertComp[1][69] = 0.0000000; hjertComp[2][69] = 0.0059688; hjertComp[3][69] = 0.00000; hjertComp[4][69] = -0.00007; hjertComp[5][69] = 0.000;
    hjertComp[0][70] = 10.0; hjertComp[1][70] = 0.000000 ; hjertComp[2][70] = 0.0057287; hjertComp[3][70] = 0.00000; hjertComp[4][70] = -0.00006; hjertComp[5][70] = 0.000;
    hjertComp[0][71] = 10.2; hjertComp[1][71] = 0.000000 ; hjertComp[2][71] = 0.0055030; hjertComp[3][71] = 0.00000; hjertComp[4][71] = -0.00006; hjertComp[5][71] = 0.000;
    hjertComp[0][72] = 10.4; hjertComp[1][72] = 0.000000 ; hjertComp[2][72] = 0.0052903; hjertComp[3][72] = 0.00000; hjertComp[4][72] = -0.00005; hjertComp[5][72] = 0.000;
    hjertComp[0][73] = 10.6; hjertComp[1][73] = 0.000000 ; hjertComp[2][73] = 0.0050898; hjertComp[3][73] = 0.00000; hjertComp[4][73] = -0.00005; hjertComp[5][73] = 0.000;
    hjertComp[0][74] = 10.8; hjertComp[1][74] = 0.000000 ; hjertComp[2][74] = 0.0049006; hjertComp[3][74] = 0.00000; hjertComp[4][74] = -0.00004; hjertComp[5][74] = 0.000;
    hjertComp[0][75] = 11.0; hjertComp[1][75] = 0.000000 ; hjertComp[2][75] = 0.0047217; hjertComp[3][75] = 0.00000; hjertComp[4][75] = -0.00004; hjertComp[5][75] = 0.000;
    hjertComp[0][76] = 11.2; hjertComp[1][76] = 0.000000 ; hjertComp[2][76] = 0.0045526; hjertComp[3][76] = 0.00000; hjertComp[4][76] = -0.00004; hjertComp[5][76] = 0.000;
    hjertComp[0][77] = 11.4; hjertComp[1][77] = 0.000000 ; hjertComp[2][77] = 0.0043924; hjertComp[3][77] = 0.00000; hjertComp[4][77] = -0.00003; hjertComp[5][77] = 0.000;
    hjertComp[0][78] = 11.6; hjertComp[1][78] = 0.000000 ; hjertComp[2][78] = 0.0042405; hjertComp[3][78] = 0.00000; hjertComp[4][78] = -0.00003; hjertComp[5][78] = 0.000;
    hjertComp[0][79] = 11.8; hjertComp[1][79] = 0.000000 ; hjertComp[2][79] = 0.0040964; hjertComp[3][79] = 0.00000; hjertComp[4][79] = -0.00003; hjertComp[5][79] = 0.000;
    hjertComp[0][80] = 12.0; hjertComp[1][80] = 0.000000 ; hjertComp[2][80] = 0.0039595; hjertComp[3][80] = 0.00000; hjertComp[4][80] = -0.00003; hjertComp[5][80] = 0.000;
    return hjertComp
| 14,931
| 131.141593
| 177
|
py
|
ChromaStarPy
|
ChromaStarPy-master/LineProf.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 29 14:26:42 2017
@author: Ian
"""
import math
import Useful
import ToolBox
"""/**
* Line profile, phi_lambda(lambda): Assume Voigt function profile - need H(a,v)
* Assumes CRD, LTE, ??? Input parameters: lam0 - line center wavelength in nm
* mass - mass of absorbing particle (amu) logGammaCol - log_10(gamma) - base 10
* logarithmic collisional (pressure) damping co-efficient (s^-1) epsilon -
* convective microturbulence- non-thermal broadening parameter (km/s) Also
* needs atmospheric structure information: numDeps WON'T WORK - need observer's
* frame fixed lambda at all depths: temp structure for depth-dependent thermal
* line broadening Teff as typical temp instead of above pressure structure,
* pGas, if scaling gamma
*/"""
def delta(linePoints, lam0In, numDeps, tauRos, massIn, xiTIn, teff):
    """Delta-function line profile, used for an initial check of line strength.

    Parameters
    ----------
    linePoints : unused here; kept for signature parity with gauss()/voigt()
    lam0In : line-centre wavelength (cm)
    numDeps : number of atmospheric depth points
    tauRos : unused here; kept for signature parity
    massIn : mass of the absorbing particle (amu)
    xiTIn : microturbulent velocity (km/s)
    teff : effective temperature (K), used as the thermal-broadening temperature

    Returns a 1 x numDeps list of normalized profile values phi; for a delta
    profile the value is the same at every depth.
    """
    lam0 = lam0In  # already in cm
    logLam0 = math.log(lam0)
    # Physical constants (cgs) from the project's Useful module
    # (calls retained even where the value is unused, for parity with siblings):
    c = Useful.c()
    logC = Useful.logC()
    logK = Useful.logK()
    amu = Useful.amu()
    ln2 = math.log(2.0)
    lnSqRtPi = 0.5 * math.log(math.pi)
    logTeff = math.log(teff)
    xiT = xiTIn * 1.0E5          # km/s -> cm/s
    logMass = math.log(massIn * amu)  # amu -> g
    # Depth-independent Doppler width Delta_lambda_D:
    logHelp = ln2 + logK + logTeff - logMass  # square of the M-B modal speed
    vModeSq = math.exp(logHelp) + xiT * xiT   # thermal and turbulent speeds in quadrature
    logDopp = 0.5 * math.log(vModeSq) + logLam0 - logC
    # Delta profile: H = 1.0 at every depth, so log H = 0 and the physical
    # phi(Delta lambda) value is identical everywhere.  Compute once:
    phi = math.exp(2.0 * logLam0 - lnSqRtPi - logDopp - logC)
    return [[phi for _ in range(numDeps)]]
def gauss(linePoints, lam0In, numDeps, teff, tauRos, temp, tempSun):
    """Pure-Gaussian (Doppler core only) line profile phi(Delta lambda).

    Parameters
    ----------
    linePoints : 2 x numPoints grid; row 0 is Delta-lambda (cm), row 1 the
                 same points in Doppler-width units ("v")
    lam0In : line-centre wavelength (cm)
    numDeps : number of atmospheric depth points
    teff, tauRos, temp, tempSun : unused here; kept for signature parity

    Returns a numPoints x numDeps list of normalized profile values.
    """
    # Constants from the project's Useful module (cgs):
    c = Useful.c()
    logC = Useful.logC()
    logK = Useful.logK()
    lam0 = lam0In  # already in cm
    logLam0 = math.log(lam0)
    lnSqRtPi = 0.5 * math.log(math.pi)
    # Recover the Doppler width (cm) from the tabulated line-point grids:
    doppler = linePoints[0][1] / linePoints[1][1]
    logDopp = math.log(doppler)
    tiny = 1.0e-19  # floor so math.log() below never sees zero
    numPoints = len(linePoints[0])
    lineProf = [[0.0 for _ in range(numDeps)] for _ in range(numPoints)]
    v = [0.0 for _ in range(numPoints)]
    # NOTE(review): gaussVal is deliberately carried across loop iterations;
    # points with |v| > 3.5 reuse the most recently computed core value
    # (exp(-v^2) at |v|=3.5 is already negligible) — confirm this matches
    # the intent for unordered v grids.
    gaussVal = tiny
    for id in range(numDeps):
        for il in range(numPoints):
            v[il] = linePoints[1][il]
            if -3.5 <= v[il] <= 3.5:
                # Gaussian core only — a Lorentzian would diverge at centre.
                gaussVal = math.exp(-1.0 * (v[il] * v[il]))
            # Convert dimensionless H(v) to physical phi(Delta lambda):
            logGauss = math.log(gaussVal) + 2.0 * logLam0 - lnSqRtPi - logDopp - logC
            lineProf[il][id] = math.exp(logGauss)
    return lineProf
def voigt(linePoints, lam0In, logAij, logGammaCol,
          numDeps, teff, tauRos, temp, pGas,
          tempSun, pGasSun, hjertComp, dbgHandle):
    """Voigt line profile phi(Delta lambda) via the Hjerting-function expansion.

    The Voigt function is approximated as a power series in the damping
    parameter "a" (Gray, "Observation & Analysis of Stellar Photospheres",
    3rd Ed., p. 258):
        H(a, v) ~ H_0 + a*H_1 + a^2*H_2 + a^3*H_3 + a^4*H_4
    with H_k(v) interpolated from the tabulated components for |v| <= 12
    and taken from the asymptotic far-wing expansion beyond that.

    Parameters
    ----------
    linePoints : 2 x numPoints grid; row 0 is Delta-lambda (cm), row 1 the
                 same points in Doppler-width units ("v")
    lam0In : line-centre wavelength (cm)
    logAij : log10 of the Einstein A coefficient (radiative damping, s^-1)
    logGammaCol : log (base e) collisional damping enhancement
    numDeps, tauRos, temp, pGas : atmospheric structure (depth grids)
    tempSun, pGasSun : solar reference structure for Gamma scaling
    hjertComp : 6 x 81 Hjerting component table (see hjertingComponents())
    teff, dbgHandle : unused here (dbgHandle only for commented-out debug)

    Returns a numPoints x numDeps list of normalized profile values,
    floored at 1.0e-49.
    """
    c = Useful.c()
    logC = Useful.logC()
    logK = Useful.logK()
    lam0 = lam0In  # already in cm
    logLam0 = math.log(lam0)
    ln10 = math.log(10.0)
    ln4pi = math.log(4.0 * math.pi)
    sqRtPi = math.sqrt(math.pi)
    # Doppler width (cm) recovered from the line-point grids:
    doppler = linePoints[0][1] / linePoints[1][1]
    logDopp = math.log(doppler)
    # log_10 Gamma_6 ~ 9 for van der Waals damping near tau_cont = 1 in the
    # Sun (Rutten, "Radiative Transfer in Stellar Atmospheres", p. 57);
    # converted to base e:
    logGammaSun = 9.0 * ln10
    # Depth index where tauRos crosses unity (solar reference point):
    tau1 = ToolBox.tauPoint(numDeps, tauRos, 1.0)
    numPoints = len(linePoints[0])
    lineProf = [[0.0 for _ in range(numDeps)] for _ in range(numPoints)]
    v = [0.0 for _ in range(numPoints)]
    Aij = math.pow(10.0, logAij)  # radiative (natural) damping rate
    for id in range(numDeps):
        # Scale Gamma logarithmically relative to the solar tau=1 value
        # (Rutten p. 56), then add the collisional enhancement:
        logGamma = pGas[1][id] - pGasSun[1][tau1] + 0.7 * (tempSun[1][tau1] - temp[1][id]) + logGammaSun
        logGamma = logGamma + logGammaCol
        # Add radiative (natural) broadening linearly:
        logGamma = math.log(math.exp(logGamma) + Aij)
        # Voigt "a" damping parameter at line centre, and its powers 1..4:
        logA = 2.0 * logLam0 + logGamma - ln4pi - logC - logDopp
        aPow = (1.0,
                math.exp(logA),
                math.exp(2.0 * logA),
                math.exp(3.0 * logA),
                math.exp(4.0 * logA))
        for il in range(numPoints):
            v[il] = linePoints[1][il]
            vAbs = abs(v[il])
            vSquare = vAbs * vAbs
            vFourth = vSquare * vSquare
            if vAbs <= 12.0:
                # Within the tabulated abscissa domain: interpolate each
                # expansion coefficient H_1..H_4 (table rows 1..5) to this v.
                hjert = [ToolBox.interpol(hjertComp[0], hjertComp[k], vAbs)
                         for k in range(1, 6)]
            else:
                # Far wing: analytic asymptotic expansion.
                hjert = [0.0,
                         (0.56419 / vSquare) + (0.846 / vFourth),
                         0.0,
                         -0.56 / vFourth,
                         0.0]
            # H(a, v) as the power series in "a":
            hjertFn = 0.0
            for ap, hk in zip(aPow, hjert):
                hjertFn = hjertFn + ap * hk
            # Convert dimensionless H(a,v) to physical phi(Delta lambda):
            phi = hjertFn * math.pow(lam0, 2) / sqRtPi / doppler / c
            if phi <= 0.0:
                phi = 1.0e-49  # numerical floor
            lineProf[il][id] = phi
    return lineProf
def stark(linePoints, lam0In, logAij, logGammaCol,
numDeps, teff, tauRos, temp, pGas, Ne,
tempSun, pGasSun, hjertComp, lineName):
c = Useful.c()
logC = Useful.logC()
#//double k = Useful.k;
logK = Useful.logK()
#//double e = Useful.e;
#//double mE = Useful.mE;
lam0 = lam0In #// * 1.0E-7; //nm to cm
logLam0 = math.log(lam0)
logLam0A = math.log(lam0) + 8.0*math.log(10.0) #//cm to A
ln10 = math.log(10.0)
ln2 = math.log(2.0)
ln4pi = math.log(4.0 * math.pi)
lnSqRtPi = 0.5 * math.log(math.pi)
sqRtPi = math.sqrt(math.pi)
sqPi = math.sqrt(math.pi)
#//double ln100 = 2.0*Math.log(10.0);
logE = math.log10(math.e) #// for debug output
doppler = linePoints[0][1] / linePoints[1][1]
logDopp = math.log(doppler)
#//System.out.println("LineProf: doppler, logDopp: " + doppler + " " + logE*logDopp);
#//Put input parameters into linear cgs units:
#//double gammaCol = Math.pow(10.0, logGammaCol);
#// Lorentzian broadening:
#// Assumes Van der Waals dominates radiative damping
#// log_10 Gamma_6 for van der Waals damping around Tau_Cont = 1 in Sun
#// - p. 57 of Radiative Transfer in Stellar Atmospheres (Rutten)
logGammaSun = 9.0 * ln10 #// Convert to base e
#//double logFudge = Math.log(2.5); // Van der Waals enhancement factor
tau1 = ToolBox.tauPoint(numDeps, tauRos, 1.0)
#//System.out.println("LINEGRID: Tau1: " + tau1);
#//logA = 2.0 * logLam0 + logGamma - ln4pi - logC - logDopp;
#//a = Math.exp(logA);
#//System.out.println("LINEGRID: logA: " + logE * logA);
#//Set up a half-profile Delta_lambda grid in Doppler width units
#//from line centre to wing
numPoints = len(linePoints[0])
#//System.out.println("LineProf: numPoints: " + numPoints);
#// Return a 2D numPoints X numDeps array of normalized line profile points (phi)
lineProf = [ [ 0.0 for i in range(numDeps) ] for j in range(numPoints) ]
#// Line profiel points in Doppler widths - needed for Voigt function, H(a,v):
v = [0.0 for i in range(numPoints)]
#double logV, ii;
#// lineProf[0][0] = 0.0; v[0] = 0.0; //Line centre - cannot do logaritmically!
#double gamma, logGamma, a, logA, voigt, core, wing, logWing, logVoigt;
Aij = math.pow(10.0, logAij)
il0 = 36
#// For Hjerting function approximation:
#double vSquare, vFourth, vAbs, a2, a3, a4, Hjert0, Hjert1, Hjert2, Hjert3, Hjert4, hjertFn;
#//Parameters for linear Stark broadening:
#//Assymptotic ("far wing") "K" parameters
#//Stehle & Hutcheon, 1999, A&A Supp Ser, 140, 93 and CDS data table
#//Assume K has something to do with "S" and proceed as in Observation and Analysis of
#// Stellar Photosphere, 3rd Ed. (D. Gray), Eq. 11.50,
#//
logTuneStark = math.log(3.16e7) #//convert DeltaI K parameters to deltaS STark profile parameters
logKStark = [0.0 for i in range(11)]
logKStark[0] = math.log(2.56e-03) + logTuneStark #//Halpha
logKStark[1] = math.log(7.06e-03) + logTuneStark #//Hbeta
logKStark[2] = math.log(1.19e-02) + logTuneStark #//Hgamma
logKStark[3] = math.log(1.94e-02) + logTuneStark #//Hdelta
logKStark[4] = math.log(2.95e-02) + logTuneStark #//Hepsilon
logKStark[5] = math.log(4.62e-02) + logTuneStark #//H8 JB
logKStark[6] = math.log(6.38e-02) + logTuneStark #//H9 JB
logKStark[7] = math.log(8.52e-02) + logTuneStark #//H10 JB
logKStark[8] = math.log(1.12e-01) + logTuneStark #//H11 JB
logKStark[9] = math.log(1.43e-01) + logTuneStark #//H12 JB
logKStark[10] = math.log(1.80e-01) + logTuneStark #//H13 JB
#//logKStark[11] = Math.log(2.11) + logTuneStark; //H30 JB
thisLogK = [0.0 for i in range(4)] #//default initialization
#//double thisLogK = logKStark[10]; //default initialization
#//which Balmer line are we? crude but effective:
if (lam0In > 650.0e-7):
thisLogK = logKStark[0] #//Halpha
#//System.out.println("Halpha")
#}
if ( (lam0In > 480.0e-7) and (lam0In < 650.0e-7) ):
#//System.out.println("Hbeta");
thisLogK = logKStark[1] #//Hbeta
#}
if ( (lam0In > 420.0e-7) and (lam0In < 470.0e-7) ):
#//System.out.println("Hgamma");
thisLogK = logKStark[2] #//Hgamma
#}
if ( (lam0In > 400.0e-7) and (lam0In < 450.0e-7) ):
#//System.out.println("Hdelta");
thisLogK = logKStark[3] #//Hdelta
if ( (lam0In < 400.0e-7) ):
#//System.out.println("Hepsilon");
thisLogK = logKStark[4] #//Hepsilon
#}
#// if ((lam0In < 390.0e-7)){
#//
#////This won't work here - "species" is always just "HI":
#// int numberInName = (int) lineName.substring("HI".length());
#// //console.log(numberInName);
#// thisLogK = logKStark[numberInName-3];
#// }
#//
#double F0, logF0, lamOverF0, logLamOverF0; //electrostatic field strength (e.s.u.)
#double deltaAlpha, logDeltaAlpha, logStark, logStarkTerm; //reduced wavelength de-tuning parameter (Angstroms/e.s.u.)
logF0Fac = math.log(1.249e-9)
#// log wavelength de-tunings in A:
#double logThisPoint, thisPoint;
#//System.out.println("il0 " + il0 + " temp[il] " + temp[0][il0] + " press[il] " + logE*press[1][il0]);
for id in range(numDeps):
#//linear Stark broadening stuff:
logF0 = logF0Fac + (0.666667)*Ne[1][id]
logLamOverF0 = logLam0A - logF0
lamOverF0 = math.exp(logLamOverF0)
#//System.out.println("id " + id + " logF0 " + logE*logF0 + " logLamOverF0 " + logE*logLamOverF0 + " lamOverF0 " + lamOverF0);
#//Formula from p. 56 of Radiative Transfer in Stellar Atmospheres (Rutten),
#// logarithmically with respect to solar value:
logGamma = pGas[1][id] - pGasSun[1][tau1] + 0.7 * (tempSun[1][tau1] - temp[1][id]) + logGammaSun
#//logGamma = logGamma + logFudge + logGammaCol
logGamma = logGamma + logGammaCol
#//Add radiation (natural) broadning:
gamma = math.exp(logGamma) + Aij
logGamma = math.log(gamma)
#//
#//if (id == 12){
#//System.out.println("LineGrid: logGamma: " + id + " " + logE * logGamma + " press[1][id] " + press[1][id] + " pressSun[1][tau1] "
#// + pressSun[1][tau1] + " temp[1][id] " + temp[1][id] + " tempSun[1][tau1] " + tempSun[1][tau1]);
#// }
#//Voigt "a" parameter with line centre wavelength:
logA = 2.0 * logLam0 + logGamma - ln4pi - logC - logDopp
a = math.exp(logA)
a2 = math.exp(2.0*logA)
a3 = math.exp(3.0*logA)
a4 = math.exp(4.0*logA)
#// if (id == 12) {
#//System.out.println("LineGrid: lam0: " + lam0 + " logGam " + logE * logGamma + " logA " + logE * logA);
#// }
#//if (id == 30) {
#// //System.out.println("il v[il] iy y logNumerator logDenominator logInteg ");
#// System.out.println("voigt: v logVoigt: ");
#//}
for il in range(numPoints):
v[il] = linePoints[1][il]
vAbs = abs(v[il])
vSquare = vAbs * vAbs
vFourth = vSquare * vSquare
#//System.out.println("LineProf: il, v[il]: " + il + " " + v[il]);
#//Approximate Hjerting fn from tabulated expansion coefficients:
#// Interpolate in Hjerting table to exact "v" value for each expanstion coefficient:
#// Row 0 of Hjerting component table used for tabulated abscissae, Voigt "v" parameter
if (vAbs <= 12.0):
#//we are within abscissa domain of table
Hjert0 = ToolBox.interpol(hjertComp[0], hjertComp[1], vAbs)
Hjert1 = ToolBox.interpol(hjertComp[0], hjertComp[2], vAbs)
Hjert2 = ToolBox.interpol(hjertComp[0], hjertComp[3], vAbs)
Hjert3 = ToolBox.interpol(hjertComp[0], hjertComp[4], vAbs)
Hjert4 = ToolBox.interpol(hjertComp[0], hjertComp[5], vAbs)
else:
#// We use the analytic expansion
Hjert0 = 0.0
Hjert1 = (0.56419 / vSquare) + (0.846 / vFourth)
Hjert2 = 0.0
Hjert3 = -0.56 / vFourth
Hjert4 = 0.0
#}
#//Approximate Hjerting fn with power expansion in Voigt "a" parameter
#// "Observation & Analysis of Stellar Photospeheres" (D. Gray), 3rd Ed., p. 258:
hjertFn = Hjert0 + a*Hjert1 + a2*Hjert2 + a3*Hjert3 + a4*Hjert4;
logStark = -49.0 #//re-initialize
if (vAbs > 2.0):
#//System.out.println("Adding in Stark wing");
thisPoint = 1.0e8 * abs(linePoints[0][il]) #//cm to A
logThisPoint = math.log(thisPoint)
logDeltaAlpha = logThisPoint - logF0
deltaAlpha = math.exp(logDeltaAlpha)
logStarkTerm = ( math.log(lamOverF0 + deltaAlpha) - logLamOverF0 )
logStark = thisLogK + 0.5*logStarkTerm - 2.5*logDeltaAlpha
#//System.out.println("il " + il + " logDeltaAlpha " + logE*logDeltaAlpha + " logStarkTerm " + logE*logStarkTerm + " logStark " + logE*logStark);
#//console.log("il " + il + " logDeltaAlpha " + logE*logDeltaAlpha + " logStarkTerm " + logE*logStarkTerm + " logStark " + logE*logStark);
#//System.out.println("id " + id + " il " + il + " v[il] " + v[il]
#// + " hjertFn " + hjertFn + " Math.exp(logStark) " + Math.exp(logStark));
#//not here! hjertFn = hjertFn + Math.exp(logStark);
#//System.out.println("LINEGRID: il, v[il]: " + il + " " + v[il] + " lineProf[0][il]: " + lineProf[0][il]);
#//System.out.println("LINEGRID: il, Voigt, H(): " + il + " " + voigt);
#//Convert from H(a,v) in dimensionless Voigt units to physical phi((Delta lambda) profile:
#//logVoigt = Math.log(voigt) + 2.0 * logLam0 - lnSqRtPi - logDopp - logC;
#//System.out.println("stark: Before log... id " + id + " il " + il + " hjertFn " + hjertFn);
#logVoigt = math.log(hjertFn) - lnSqRtPi - logDopp
voigt = hjertFn / sqRtPi / doppler
#//logVoigt = math.log(voigt)
logStark = logStark - logF0
if (vAbs > 2.0):
#//if (id == 24) {
#// System.out.println("il " + il + " v[il] " + v[il] + " logVoigt " + logE*logVoigt + " logStark " + logE*logStark);
#//}
#//voigt = math.exp(logVoigt) + math.exp(logStark)
voigt = voigt + math.exp(logStark)
#//logVoigt = math.log(voigt)
#logVoigt = logVoigt + 2.0 * logLam0 - logC
voigt = voigt * math.pow(lam0, 2) / c
#//lineProf[il][id] = math.exp(logVoigt)
lineProf[il][id] = voigt
if (lineProf[il][id] <= 0.0):
lineProf[il][id] = 1.0e-49
#//if (id == 24) {
#// System.out.println("lam0In " + lam0In);
#// System.out.println("il " + il + " linePoints " + 1.0e7 * linePoints[0][il] + " id " + id + " lineProf[il][id] " + lineProf[il][id]);
#//}
#//System.out.println("LineProf: il, id, lineProf[il][id]: " + il + " " + id + " " + lineProf[il][id]);
#} // il lambda loop
#// if (id == 20) {
#// for (int il = 0; il < numPoints; il++) {
#// System.out.format("Voigt: %20.16f %20.16f%n", linePoints[1][il], logE * Math.log(lineProf[il][id]));
#// }
#// }
#} //id loop
return lineProf
#} //end method stark()
def lineSource(numDeps, tau, temp, lambda2):
    """Compute the log line source function with the equivalent two-level
    atom (ETLA) approximation: S = (1 - eps)*J_lambda + eps*B_lambda.
    CAUTION: input lambda2 is in nm.
    Returns a list of length numDeps holding ln(S_lambda) at each depth."""
    #//thermal photon creation/destruction probability - should decrease with depth??
    epsilon = 0.01
    #//This is an artifact of jayBinner's original purpose:
    grayLevel = 1.0
    #// Dress up Blambda to look like what jayBinner expects:
    BLambda = [ [ 0.0 for i in range(numDeps) ] for j in range(2) ]
    for iD in range(numDeps):
        #//Planck.planck returns log(B_lambda):
        BLambda[0][iD] = math.exp(Planck.planck(temp[0][iD], lambda2))
        #//Row 1 is supposed to be dB/dT, but it is not needed here:
        BLambda[1][iD] = 1.0
    #//CAUTION: planckBin Row 0 is linear lambda-integrated B_lambda; Row 1 is same for dB_lambda/dT
    jayLambda = MulGrayTCorr.jayBinner(numDeps, tau, temp, BLambda, grayLevel)
    #//Coherent scattering - no line profile-weighted average Js and Bs yet:
    lineSource = [0.0 for i in range(numDeps)]
    for iD in range(numDeps):
        linSrc = (1.0 - epsilon) * jayLambda[iD] + epsilon * BLambda[0][iD]
        lineSource[iD] = math.log(linSrc)
    return lineSource
| 30,137
| 42.17765
| 162
|
py
|
ChromaStarPy
|
ChromaStarPy-master/SpecSyn2.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 28 17:03:30 2017
@author: ishort
"""
#/**
# *
# * Create master kappa_lambda(lambda) and tau_lambda(lambda) for
# * FormalSoln.formalSoln()
# *
# * @author Ian
# */
import math
import ToolBox
#plotting:
import matplotlib
import pylab
import numpy
def masterLambda(numLams, numMaster, numNow, masterLams, numPoints, listLineLambdas):
    """Merge the continuum wavelength scale (first numNow entries of
    masterLams) with one line's wavelength grid (listLineLambdas) into a
    single sorted scale of length numNow + numPoints.

    This expects *pure* line opacity - no continuum opacity pre-added!
    Note: the last line point (which holds the line-centre wavelength) is
    deliberately never merged in; the final slot is pinned to the last
    continuum wavelength instead."""
    numTot = numNow + numPoints  #//current dynamic total
    mergedLams = [0.0 for k in range(numTot)]
    #//seed the merge with the first continuum wavelength:
    mergedLams[0] = masterLams[0]
    cntIdx = 1   # next continuum point to consume
    lineIdx = 0  # next line point to consume
    for k in range(1, numTot):
        if cntIdx >= numNow:
            # continuum exhausted - slot stays 0.0, matching the original merge
            continue
        takeContinuum = (masterLams[cntIdx] <= listLineLambdas[lineIdx]) \
            or (lineIdx >= numPoints - 1)
        if takeContinuum:
            mergedLams[k] = masterLams[cntIdx]
            cntIdx += 1
        else:
            mergedLams[k] = listLineLambdas[lineIdx]
            lineIdx += 1
    #//Make sure the final wavelength point is secured:
    mergedLams[numTot - 1] = masterLams[numNow - 1]
    return mergedLams
def masterKappa(numDeps, numLams, numMaster, numNow, masterLams, masterLamsOut, logMasterKaps, \
                numPoints, listLineLambdas, listLogKappaL):
    """Interpolate the continuum log-opacity (logMasterKaps, on the first
    numNow wavelengths of masterLams) and one line's pure-line log-opacity
    (listLogKappaL, on listLineLambdas) onto the merged scale masterLamsOut,
    then add them linearly wavelength-by-wavelength at each depth.

    Returns logMasterKapsOut[numTot][numDeps] = log(kappa_cnt + kappa_line)."""
    numTot = numNow + numPoints
    logMasterKapsOut = [ [ 0.0 for iD in range(numDeps) ] for iL in range(numTot) ]
    #//continuum abscissae currently in use:
    cntLams = masterLams[0:numNow]
    for iD in range(numDeps):
        #//Extract 1D log-opacity vectors at this depth for the interpolator:
        logCntKap1D = [ logMasterKaps[k][iD] for k in range(numNow) ]
        logLineKap1D = [ listLogKappaL[k][iD] for k in range(numPoints) ]
        #//Interpolate continuum and line opacity onto the merged lambda scale:
        logCntKap2 = numpy.interp(masterLamsOut, cntLams, logCntKap1D)
        logLineKap2 = numpy.interp(masterLamsOut, listLineLambdas, logLineKap1D)
        #//Add them linearly, lambda-wise:
        for iL in range(numTot):
            totKap = math.exp(logCntKap2[iL]) + math.exp(logLineKap2[iL])
            logMasterKapsOut[iL][iD] = math.log(totKap)
    return logMasterKapsOut
| 5,805
| 39.601399
| 140
|
py
|
ChromaStarPy
|
ChromaStarPy-master/ToolBox.py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 21 10:41:24 2017
Collection of useful utilities
@author: ishort
"""
import math
import numpy
#JB#
#a function to create a cubic function fit extrapolation
def cubicFit(x, y):
    """Least-squares cubic fit of y(x).

    Returns the coefficient array [A, B, C, D] for A*x^3 + B*x^2 + C*x + D.
    Works for any number of data points (degree is reduced implicitly only
    by numpy.polyfit's own rules)."""
    return numpy.polyfit(x, y, 3)
def valueFromFit(fit, x):
    """Evaluate the cubic fit [A, B, C, D] at abscissa x (Horner form)."""
    return ((fit[0] * x + fit[1]) * x + fit[2]) * x + fit[3]
def interpol(x, y, newX):
    """Linear (first-order Lagrange) interpolation of y(x) at newX.

    Brackets newX with the first pair (x[lo], x[hi]) such that x[hi] >= newX;
    if newX exceeds the whole abscissa range the first two points are used
    (i.e. linear extrapolation from the start of the grid, as in the
    original Java port)."""
    lo = 0
    hi = 1
    for k in range(1, len(x)):
        if x[k] >= newX:
            #// found the upper bracket
            hi = k
            lo = k - 1
            break
    span = x[hi] - x[lo]
    #//First-order Lagrange formula:
    newY = y[hi] * (newX - x[lo]) / span \
        + y[lo] * (x[hi] - newX) / span
    return newY
def interpolV(y, x, newX):
    """Vectorized simple linear 1st-order interpolation of y(x) onto newX.

    Caution: assumes new and old abscissae are in monotonic increasing order.
    Ordinates are renormalized by the maximum of y before interpolating and
    the scale is restored afterwards; points of newX outside the old abscissa
    range receive 0th-order extrapolation (clamped to the end ordinates)."""
    num = len(x)
    #if (num != len(y)):
        #//System.out.println("Toolbox.interpolV(): Old x and y must be same length");
    newNum = len(newX)
    #//System.out.println("interpolV: newNum " + newNum + " num " + num);
    #newY = [0.0 for i in range(newNum)]
    #//Renormalize ordinates by the maximum of y (minMax is the sibling helper
    #// returning [first index of min, first index of max]):
    iMinAndMax = minMax(y)
    norm = y[iMinAndMax[1]]
    #//System.out.println("norm " + norm);
    #yNorm = [0.0 for i in range(num)]
    newYNorm = [0.0 for i in range(newNum)]
    #for i in range(num):
    #    yNorm[i] = y[i] / norm
    # NOTE: the comprehension variable x shadows the parameter x only inside
    # the comprehension (Python 3 scoping) - the abscissa list is untouched.
    yNorm = [ x / norm for x in y ]
    #// Set any newX elements that are *less than* the first x element to the first
    #// x element - "0th order extrapolation"
    #//
    # NOTE(review): the two comparisons below use x[1] rather than x[0]; this
    # mirrors the original Java port but looks suspicious - confirm intended.
    start = 0
    for i in range(newNum):
        if (newX[i] <= x[1]):
            newYNorm[i] = yNorm[0]
            start += 1
        if (newX[i] > x[1]):
            break
    #//System.out.println("start " + start);
    #//System.out.println("x[0] " + x[0] + " x[1] " + x[1] + " newX[start] " + newX[start]);
    #double jWght, jm1Wght, denom;
    if (start < newNum-1):
        j = 1 #//initialize old abscissae index
        #//outer loop over new abscissae
        for i in range(start, newNum):
            #//System.out.println("i " + i + " j " + j);
            #// break out if current element of newX is *greater* than the last x element
            if ( (newX[i] > x[num-1]) or (j > (num-1)) ):
                break
            # advance j until x[j] brackets newX[i] from above
            while (x[j] < newX[i]):
                j += 1
            #//System.out.println("i " + i + " newX[i] " + newX[i] + " j " + j + " x[j-1] " + x[j-1] + " x[j] " + x[j]);
            #//1st order Lagrange method (weights written in ratio form):
            jWght = newX[i] * (1.0 - (x[j-1]/newX[i])) #//(newX[i]-x[j-1])
            jm1Wght = x[j] * (1.0 - (newX[i]/x[j])) #//(x[j]-newX[i])
            denom = x[j] * (1.0 - (x[j-1]/x[j])) #//(x[j]-x[j-1])
            jWght = jWght / denom
            jm1Wght = jm1Wght / denom
            #//newYNorm[i] = (yNorm[j]*(newX[i]-x[j-1])) + (yNorm[j-1]*(x[j]-newX[i]));
            newYNorm[i] = (yNorm[j]*jWght) + (yNorm[j-1]*jm1Wght)
            #//System.out.println("i " + i + " newYNorm[i] " + newYNorm[i] + " j " + j + " yNorm[j-1] " + yNorm[j-1] + " yNorm[j] " + yNorm[j]);
    #// Set any newX elements that are *greater than* the last x element to the last
    #// x element - "0th order extrapolation"
    #//
    for i in range(newNum):
        if (newX[i] >= x[num-1]):
            newYNorm[i] = yNorm[num-1]
    #//Restore ordinate scale
    #for i in range(newNum):
    #    newY[i] = newYNorm[i] * norm
    newY = [ x * norm for x in newYNorm ]
    return newY
def lamPoint(numLams, lambdas, lam):
    """Return the index of the element of lambdas (first numLams entries)
    closest to the desired wavelength lam.

    Ties keep the first occurrence, matching the original strict-< scan.
    Improvement: the original shadowed the builtins `help` and `min` and
    built a throwaway O(n) list; min() with a key does the same scan."""
    return min(range(numLams), key=lambda i: abs(lambdas[i] - lam))
def minMax(x):
    """Return [iMin, iMax]: indices of the minimum and maximum of 1D array x.

    CAUTION: returns the *first* occurrence when the min and/or max value
    appears in multiple places.
    Improvement: the original shadowed the builtins `min` and `max` with
    locals; tracking indices directly avoids that and the extra temporaries."""
    iMin = 0
    iMax = 0
    for i in range(1, len(x)):
        if x[i] < x[iMin]:
            iMin = i
        if x[i] > x[iMax]:
            iMax = i
    return [iMin, iMax]
def minMax2(x):
    """Version of minMax for 2 x N arrays where row 0 holds linear values and
    row 1 the logarithmic ones; only row 0 (the linear values) is searched.

    Returns [iMin, iMax], the *first* occurrences of the minimum and maximum.
    Bug fix: the original computed `num = len(x)[0]`, which raises TypeError
    (len() returns an int); the row length is `len(x[0])`."""
    num = len(x[0])
    iMin = 0
    iMax = 0
    #// Search for minimum and maximum in row 0 - linear values:
    for i in range(1, num):
        if x[0][i] < x[0][iMin]:
            iMin = i
        if x[0][i] > x[0][iMax]:
            iMax = i
    return [iMin, iMax]
def tauPoint(numDeps, tauRos, tau):
    """Return the index of the optical depth array (row 0 of tauRos) closest
    to the desired *linear* (NOT logarithmic) optical depth tau.

    Ties keep the first occurrence, matching the original strict-< scan.
    Improvement: the original shadowed the builtins `help` and `min` and
    built a throwaway O(n) list; this also makes it consistent with lamPoint."""
    return min(range(numDeps), key=lambda i: abs(tauRos[0][i] - tau))
| 7,243
| 24.687943
| 144
|
py
|
miccai2022-roigan
|
miccai2022-roigan-main/main.py
|
import os
import argparse
import yaml
import collections
import itertools
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets
from sklearn.model_selection import train_test_split
from src import models, utils
def main(args, config):
    """Train the region-guided CycleGAN: two ResNet generators (G_AB, G_BA),
    two whole-image PatchGAN discriminators (D_A, D_B) and two ROI
    discriminators (D_ROI_A, D_ROI_B) for HES <-> IHC stain transfer.

    args:   parsed command line (job_number used for output dirs, config path)
    config: namedtuple built from the YAML config file (nb_epochs,
            steps_per_epoch, nb_batch, nb_rois, lambda_* weights, data dirs)
    """
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    utils.write_flush(device)
    # Create sample and checkpoint directories
    os.makedirs('images/%s' % args.job_number, exist_ok=True)
    os.makedirs('saved_models/%s' % args.job_number, exist_ok=True)
    # Losses: least-squares GAN objective; L1 for cycle and identity terms
    criterion_GAN = torch.nn.MSELoss()
    criterion_cycle = torch.nn.L1Loss()
    criterion_identity = torch.nn.L1Loss()
    cuda = torch.cuda.is_available()  # NOTE(review): unused; `device` above already covers this
    input_shape = (3, 256, 256)
    # Initialize generator and discriminator
    G_AB = models.GeneratorResNet(input_shape, config.nb_residuals).to(device)
    G_BA = models.GeneratorResNet(input_shape, config.nb_residuals).to(device)
    D_A = models.Discriminator(input_shape).to(device)
    D_B = models.Discriminator(input_shape).to(device)
    D_ROI_A = models.DiscriminatorROI().to(device)
    D_ROI_B = models.DiscriminatorROI().to(device)
    # Optimizers (both generators share one optimizer; each D has its own)
    optimizer_G = torch.optim.Adam(
        itertools.chain(G_AB.parameters(), G_BA.parameters()), lr=2e-4, betas=(0.5, 0.999)
    )
    optimizer_D_A = torch.optim.Adam(D_A.parameters(), lr=2e-4, betas=(0.5, 0.999))
    optimizer_D_B = torch.optim.Adam(D_B.parameters(), lr=2e-4, betas=(0.5, 0.999))
    optimizer_D_A_ROI = torch.optim.Adam(D_ROI_A.parameters(), lr=2e-4, betas=(0.5, 0.999))
    optimizer_D_B_ROI = torch.optim.Adam(D_ROI_B.parameters(), lr=2e-4, betas=(0.5, 0.999))
    # Learning rate update schedulers: linear decay to 0 starting at epoch 12
    lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(
        optimizer_G, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )
    lr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_A, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )
    lr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_B, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )
    lr_scheduler_D_A_ROI = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_A_ROI, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )
    lr_scheduler_D_B_ROI = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_B_ROI, lr_lambda=utils.LambdaLR(config.nb_epochs, 0, 12).step
    )
    # Buffers of previously generated samples (standard CycleGAN replay trick)
    fake_A_buffer = utils.ReplayBuffer()
    fake_B_buffer = utils.ReplayBuffer()
    # Domain A = HES, domain B = IHC
    hes_images, hes_dfs_list = utils.load_data(config.hes_dir, config.hes_library)
    ihc_images, ihc_dfs_list = utils.load_data(config.ihc_dir, config.ihc_library)
    # Data generators: fixed 90/10 train/test split, seeded for reproducibility
    hes_images_tr, hes_images_te, hes_bboxes_tr,hes_bboxes_te = train_test_split(
        hes_images, hes_dfs_list, test_size=0.1, random_state=42)
    ihc_images_tr, ihc_images_te, ihc_bboxes_tr, ihc_bboxes_te = train_test_split(
        ihc_images, ihc_dfs_list, test_size=0.1, random_state=42)
    # ----------
    #  Training
    # ----------
    gen_A = utils.data_generator(hes_images_tr, hes_bboxes_tr, nb_batch=config.nb_batch, nb_rois=config.nb_rois)
    gen_B = utils.data_generator(ihc_images_tr, ihc_bboxes_tr, nb_batch=config.nb_batch, nb_rois=config.nb_rois)
    for epoch in range(config.nb_epochs):
        for i in range(config.steps_per_epoch):
            # Each batch: images, per-image ROI condition maps, ROI boxes
            data = next(gen_A)
            real_A, condition_A, bboxes_A = (data[0].to(device),
                                             data[1].to(device),
                                             data[2].to(device))
            data = next(gen_B)
            real_B, condition_B, bboxes_B = (data[0].to(device),
                                             data[1].to(device),
                                             data[2].to(device))
            # LSGAN targets: 0 = fake, 1 = valid (patch-level and ROI-level)
            fake = torch.zeros((config.nb_batch, *D_A.output_shape)).to(device)
            valid = torch.ones((config.nb_batch, *D_A.output_shape)).to(device)
            fake_roi = torch.zeros((config.nb_rois,)).to(device)
            valid_roi = torch.ones((config.nb_rois,)).to(device)
            # ------------------
            #  Train Generators
            # ------------------
            G_AB.train()
            G_BA.train()
            optimizer_G.zero_grad()
            # Identity loss: G_BA should leave A images unchanged (and vice versa)
            loss_id_A = criterion_identity(G_BA(real_A), real_A)
            loss_id_B = criterion_identity(G_AB(real_B), real_B)
            loss_identity = (loss_id_A + loss_id_B) / 2
            # GAN loss: fool the whole-image discriminators
            fake_B = G_AB(real_A)
            loss_GAN_AB = criterion_GAN(D_B(fake_B), valid)
            fake_A = G_BA(real_B)
            loss_GAN_BA = criterion_GAN(D_A(fake_A), valid)
            loss_GAN = (loss_GAN_AB + loss_GAN_BA) / 2
            # ROI loss: fool the region discriminators; fakes are judged with
            # the *source* domain's conditions/boxes (the content they carry)
            validity_ROI_A = D_ROI_A(fake_A, condition_B, bboxes_B)
            validity_ROI_B = D_ROI_B(fake_B, condition_A, bboxes_A)
            loss_ROI_A = criterion_GAN(validity_ROI_A, valid_roi)
            loss_ROI_B = criterion_GAN(validity_ROI_B, valid_roi)
            loss_ROI = (loss_ROI_A + loss_ROI_B) / 2
            # Cycle loss: A -> B -> A (and B -> A -> B) must reconstruct
            recov_A = G_BA(fake_B)
            loss_cycle_A = criterion_cycle(recov_A, real_A)
            recov_B = G_AB(fake_A)
            loss_cycle_B = criterion_cycle(recov_B, real_B)
            loss_cycle = (loss_cycle_A + loss_cycle_B) / 2
            # Total loss: weighted sum per the config's lambda_* coefficients
            loss_G = loss_GAN + config.lambda_roi * loss_ROI + config.lambda_cyc * loss_cycle + config.lambda_id * loss_identity
            loss_G.backward()
            optimizer_G.step()
            # -----------------------
            #  Train Discriminator A
            # -----------------------
            optimizer_D_A.zero_grad()
            # Real loss
            loss_real = criterion_GAN(D_A(real_A), valid)
            # Fake loss (on batch of previously generated samples)
            fake_A_ = fake_A_buffer.push_and_pop(fake_A)
            loss_fake = criterion_GAN(D_A(fake_A_.detach()), fake)
            # Total loss
            loss_D_A = (loss_real + loss_fake) / 2
            loss_D_A.backward()
            optimizer_D_A.step()
            # -----------------------
            #  Train Discriminator B
            # -----------------------
            optimizer_D_B.zero_grad()
            # Real loss
            loss_real = criterion_GAN(D_B(real_B), valid)
            # Fake loss (on batch of previously generated samples)
            fake_B_ = fake_B_buffer.push_and_pop(fake_B)
            loss_fake = criterion_GAN(D_B(fake_B_.detach()), fake)
            # Total loss
            loss_D_B = (loss_real + loss_fake) / 2
            loss_D_B.backward()
            optimizer_D_B.step()
            loss_D = (loss_D_A + loss_D_B) / 2
            # --------------------------
            #  Train Discriminator ROI A
            # --------------------------
            # Note: ROI discriminators train on the *current* fakes, not the
            # replay buffer
            optimizer_D_A_ROI.zero_grad()
            roi_outputs = D_ROI_A(real_A, condition_A, bboxes_A)
            real_loss = criterion_GAN(roi_outputs, valid_roi)
            roi_outputs = D_ROI_A(fake_A.detach(), condition_B, bboxes_B)
            fake_loss = criterion_GAN(roi_outputs, fake_roi)
            d_ROI_A_loss = (real_loss + fake_loss) / 2
            d_ROI_A_loss.backward()
            optimizer_D_A_ROI.step()
            # --------------------------
            #  Train Discriminator ROI B
            # --------------------------
            optimizer_D_B_ROI.zero_grad()
            roi_outputs = D_ROI_B(real_B, condition_B, bboxes_B)
            real_loss = criterion_GAN(roi_outputs, valid_roi)
            roi_outputs = D_ROI_B(fake_B.detach(), condition_A, bboxes_A)
            fake_loss = criterion_GAN(roi_outputs, fake_roi)
            d_ROI_B_loss = (real_loss + fake_loss) / 2
            d_ROI_B_loss.backward()
            optimizer_D_B_ROI.step()
            # --------------
            #  Log Progress
            # --------------
            # Print log
            utils.write_flush(
                '\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [D_ROI_A loss: %f] [D_ROI_B loss: %f] [G loss: %f, adv: %f, cycle: %f, identity: %f]'
                % (epoch, config.nb_epochs, i, config.steps_per_epoch, loss_D.item(), d_ROI_A_loss.item(), d_ROI_B_loss.item(), loss_G.item(), loss_GAN.item(), loss_cycle.item(), loss_identity.item()))
            batches_done = epoch * config.steps_per_epoch + i
            # Every 100 batches, dump a qualitative sample grid from the test split
            if batches_done % 100 == 0:
                utils.sample_images(args.job_number, batches_done, G_AB, G_BA, hes_images_te, hes_bboxes_te, ihc_images_te, ihc_bboxes_te, device)
        # Update learning rates (once per epoch)
        lr_scheduler_G.step()
        lr_scheduler_D_A.step()
        lr_scheduler_D_B.step()
        lr_scheduler_D_A_ROI.step()
        lr_scheduler_D_B_ROI.step()
        if epoch % 5 == 0:
            # Save model checkpoints
            torch.save(G_AB.state_dict(), 'saved_models/%s/G_AB_%d.pth' % (args.job_number, epoch))
            torch.save(G_BA.state_dict(), 'saved_models/%s/G_BA_%d.pth' % (args.job_number, epoch))
            torch.save(D_A.state_dict(), 'saved_models/%s/D_A_%d.pth' % (args.job_number, epoch))
            torch.save(D_B.state_dict(), 'saved_models/%s/D_B_%d.pth' % (args.job_number, epoch))
            torch.save(D_ROI_A.state_dict(), 'saved_models/%s/D_A_ROI_%d.pth' % (args.job_number, epoch))
            torch.save(D_ROI_B.state_dict(), 'saved_models/%s/D_B_ROI_%d.pth' % (args.job_number, epoch))
if __name__ == '__main__':
    # CLI: main.py JOB_NUMBER CONFIG_PATH
    parser = argparse.ArgumentParser(description='Region-guided CycleGAN for stain transfer on whole slide images')
    parser.add_argument('job_number', type=int)
    parser.add_argument('config', type=str)
    args = parser.parse_args()
    utils.write_flush(str(args))
    # Load the YAML config and expose it as an immutable namedtuple
    # (fields accessed as config.nb_epochs, config.nb_batch, ...)
    with open(args.config, 'r') as fp:
        cfg = yaml.safe_load(fp)
    config = collections.namedtuple('Config', cfg.keys())(*cfg.values())
    main(args, config)
| 10,158
| 36.487085
| 201
|
py
|
miccai2022-roigan
|
miccai2022-roigan-main/src/utils.py
|
import os
import sys
import h5py
import random
import numpy as np
import pandas as pd
import torch
from torch.autograd import Variable
from torchvision.utils import save_image, make_grid
from torch.utils.data import DataLoader
def write_flush(*text_args, stream=sys.stdout):
    """Write *text_args* comma-joined to *stream* and flush immediately
    (useful for cluster log files that buffer aggressively)."""
    stream.write(', '.join(str(arg) for arg in text_args) + '\n')
    stream.flush()
def load_data(data_dir, library_dir):
    """Load image patches (HDF5, dataset 'x') from data_dir/patches/ and the
    matching per-slide bounding-box CSVs from library_dir.

    Returns (imgs, dfs_list): a stacked image array and, aligned with it,
    one bounding-box DataFrame per tile (filtered on the 'tile' column)."""
    patches_dir = os.path.join(data_dir, 'patches/')
    patches_files = os.listdir(patches_dir)
    images = {}
    for file_name in sorted(patches_files):
        images[file_name] = h5py.File(os.path.join(patches_dir, file_name))['x'][()]
    dfs = {}
    for file_name in patches_files:
        csv_path = os.path.join(library_dir, file_name.split('.')[0] + '.csv')
        dfs[file_name] = pd.read_csv(csv_path, index_col=0)
    imgs = np.vstack([images[key] for key in patches_files])
    dfs_list = []
    for key in patches_files:
        df = dfs[key]
        for tile in range(images[key].shape[0]):
            dfs_list.append(df[df.tile == tile])
    # one DataFrame per stacked tile, in the same order
    assert imgs.shape[0] == len(dfs_list)
    return imgs, dfs_list
def draw_conditions(bboxes, dim):
    """Rasterize ROI boxes into per-class (2-channel) condition and noise maps
    of shape (1, 2, dim, dim).

    Each box [xmin, ymin, xmax, ymax, cls] is shrunk by a 5-pixel margin,
    clipped to the image, then its class channel is set to 1 in `condition`
    and filled with Gaussian noise in `noise`."""
    condition = torch.zeros((1, 2, dim, dim))
    noise = torch.zeros((1, 2, dim, dim))
    for bbox in bboxes:
        xmin, ymin, xmax, ymax, cls = (int(v) for v in bbox)
        # shrink by a 5-pixel margin, clipped to the image bounds
        ymin = max(0, ymin + 5)
        xmin = max(0, xmin + 5)
        ymax = min(dim, ymax - 5)
        xmax = min(dim, xmax - 5)
        condition[0, cls, ymin:ymax, xmin:xmax] = 1
        noise[0, cls, ymin:ymax, xmin:xmax] = torch.randn(1, 1, ymax - ymin, xmax - xmin)
    return condition, noise
def sample_bboxes(df_bboxes, nb_samples):
    """Sample ROI boxes (with replacement) from one tile's annotation table.

    Quota (e.g. for nb_samples = 8):
      * purely negative tile: 3/4 * nb_samples negatives       (6)
      * purely positive tile: 5/4 * nb_samples positives       (10)
      * mixed tile: nb_samples positives + 1/4 * nb_samples negatives (10)
    Because positive and negative tiles are balanced per batch, the total
    ROI count stays fixed and the ROI classes stay roughly balanced.

    Returns an (n, 5) array of [xmin, ymin, xmax, ymax, class] rows.
    """
    negatives = df_bboxes[df_bboxes['class'] == 0]
    positives = df_bboxes[df_bboxes['class'] == 1]
    quarter = nb_samples // 4
    if positives.shape[0] == 0:
        # purely negative tile
        chosen = negatives.sample(3 * quarter, replace=True)
    elif negatives.shape[0] == 0:
        # purely positive tile
        chosen = positives.sample(5 * quarter, replace=True)
    else:
        # mixed tile
        chosen = pd.concat([positives.sample(nb_samples, replace=True),
                            negatives.sample(quarter, replace=True)])
    return chosen[['xmin', 'ymin', 'xmax', 'ymax', 'class']].values
def data_augmentation(x_batch, bbox_batch, img_dim):
    """Randomly mirror the image batch horizontally and/or vertically, and
    mirror the bounding boxes (columns 1-4 = xmin, ymin, xmax, ymax) with it.

    NOTE(review): `np.random.randn() > 0.5` fires with probability ~0.31,
    not 0.5 - confirm `rand()` was not intended; preserved as-is here."""
    if np.random.randn() > 0.5:
        # horizontal flip: mirror width axis and swap/reflect x coordinates
        x_batch = x_batch.flip(dims=(3,))
        left, right = bbox_batch[:, 1].copy(), bbox_batch[:, 3].copy()
        bbox_batch[:, 1], bbox_batch[:, 3] = img_dim - right, img_dim - left
    if np.random.randn() > 0.5:
        # vertical flip: mirror height axis and swap/reflect y coordinates
        x_batch = x_batch.flip(dims=(2,))
        top, bottom = bbox_batch[:, 2].copy(), bbox_batch[:, 4].copy()
        bbox_batch[:, 2], bbox_batch[:, 4] = img_dim - bottom, img_dim - top
    return x_batch, bbox_batch
def data_generator(imgs, bboxes, nb_batch, nb_rois=64):
    """Infinite generator of training batches.

    Each yield is (x_batch, condition_batch, bbox_batch):
      x_batch: (nb_batch, C, H, W) images scaled to [-1, 1], half drawn from
               tiles with positive ROIs and half from negative-only tiles
      condition_batch: (nb_batch, 2, H, W) per-class ROI masks
      bbox_batch: (~nb_rois, 6) rows of [batch index, xmin, ymin, xmax, ymax, class]
    Tiles with an empty annotation table are never sampled.
    """
    idx_non_empty = [idx for idx, df in enumerate(bboxes) if not df.empty]
    idx_pos = [idx for idx in idx_non_empty if 1 in bboxes[idx]['class'].values]
    idx_neg = [idx for idx in idx_non_empty if not 1 in bboxes[idx]['class'].values]
    img_dim = imgs.shape[2]
    while True:
        # half the batch from positive tiles, half from negative-only tiles
        idx_pos_batch = np.random.choice(idx_pos, size=nb_batch // 2)
        idx_neg_batch = np.random.choice(idx_neg, size=nb_batch // 2)
        x_batch = np.vstack([imgs[idx_pos_batch], imgs[idx_neg_batch]])
        # HWC uint8 -> CHW float scaled to [-1, 1]
        x_batch = torch.Tensor(np.moveaxis(x_batch, 3, 1) / 127.5 - 1)
        nb_samples = nb_rois // nb_batch
        df_bbox_batch = [bboxes[i] for i in list(idx_pos_batch) + list(idx_neg_batch)]
        bbox_data = [sample_bboxes(df_bbox, nb_samples) for df_bbox in df_bbox_batch]
        # prepend the in-batch image index as column 0
        # (NOTE: the comprehension variable `bboxes` shadows the parameter
        # only inside the comprehension - Python 3 scoping keeps it local)
        bbox_batch = np.vstack([np.hstack([i * np.ones((bboxes.shape[0], 1)), bboxes])
                                for i, bboxes in enumerate(bbox_data)])
        x_batch, bbox_batch = data_augmentation(x_batch, bbox_batch, img_dim)
        # rasterize each image's ROIs into its 2-channel condition map
        condition_batch = []
        for i in range(nb_batch):
            rois = bbox_batch[bbox_batch[:, 0]==i]
            condition, noise = draw_conditions(rois[:, 1:], img_dim)
            condition_batch.append(condition)
        condition_batch = torch.cat(condition_batch, axis=0)
        yield torch.Tensor(x_batch), condition_batch, torch.Tensor(bbox_batch)
"""
N.B. There is generally on a few hundred samples in the val/test data.
Hence, drawing 25 samples leads to duplicates with high probability
(see the birthday paradox). Furthermore, the duplicates will always
be consecutive in the batch, as the indices are sorted in the data_generator
function.
"""
def sample_images(output_dir, batches_done, G_AB, G_BA, hes_images_te, hes_bboxes_te, ihc_images_te, ihc_bboxes_te, device):
    """Save a qualitative 4-row grid from the test set:
    real A | fake B | real B | fake A (25 tiles per row, 5x5)."""
    batch_A = next(data_generator(hes_images_te, hes_bboxes_te, nb_batch=25))[0].to(device)
    batch_B = next(data_generator(ihc_images_te, ihc_bboxes_te, nb_batch=25))[0].to(device)
    G_AB.eval()
    G_BA.eval()
    translated_B = G_AB(batch_A)
    translated_A = G_BA(batch_B)
    # One 5x5 grid per tensor, stacked vertically
    rows = [make_grid(t, nrow=5, normalize=True)
            for t in (batch_A, translated_B, batch_B, translated_A)]
    image_grid = torch.cat(rows, 1)
    save_image(image_grid, "images/%s/%s.png" % (output_dir, batches_done), normalize=False)
class ReplayBuffer:
    """Pool of previously generated samples (the CycleGAN replay trick):
    discriminators train on a mix of fresh and historical fakes."""

    def __init__(self, max_size=50):
        assert max_size > 0, "Empty buffer or trying to create a black hole. Be careful."
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data):
        """Push each sample of *data* into the buffer and return a same-sized
        batch: while filling, samples pass straight through; once full, each
        sample has a 50% chance of being swapped for a stored one."""
        returned = []
        for sample in data.data:
            sample = torch.unsqueeze(sample, 0)
            if len(self.data) < self.max_size:
                # buffer not yet full: store and pass through
                self.data.append(sample)
                returned.append(sample)
            elif random.uniform(0, 1) > 0.5:
                # swap with a randomly chosen stored sample
                pick = random.randint(0, self.max_size - 1)
                returned.append(self.data[pick].clone())
                self.data[pick] = sample
            else:
                returned.append(sample)
        return Variable(torch.cat(returned))
class LambdaLR:
    """Multiplicative LR schedule: factor 1.0 until `decay_start_epoch`,
    then linear decay reaching 0.0 at `n_epochs` (shifted by `offset`)."""

    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert (n_epochs - decay_start_epoch) > 0, "Decay must start before the training session ends!"
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        # Before decay starts the elapsed term is clamped to 0 (factor 1.0);
        # afterwards the factor falls linearly to zero.
        elapsed = epoch + self.offset - self.decay_start_epoch
        decay_span = self.n_epochs - self.decay_start_epoch
        return 1.0 - max(0, elapsed) / decay_span
| 7,345
| 32.543379
| 124
|
py
|
miccai2022-roigan
|
miccai2022-roigan-main/src/models.py
|
import torch
import torch.nn as nn
from torchvision.ops import RoIAlign
def weights_init_normal(m):
    """DCGAN-style initialisation, applied via `module.apply(...)`.

    Conv weights ~ N(0, 0.02); BatchNorm2d weights ~ N(1, 0.02); biases 0.
    Modules of other types are left untouched.
    """
    name = type(m).__name__
    if 'Conv' in name:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
        # Some conv layers are built with bias=False.
        if hasattr(m, 'bias') and m.bias is not None:
            torch.nn.init.constant_(m.bias.data, 0.0)
    elif 'BatchNorm2d' in name:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class DiscriminatorROI(nn.Module):
    """Discriminator scored per region of interest.

    The 5-channel input (image + condition maps) is downsampled by four
    stride-2 conv blocks (overall stride 16, matching spatial_scale=1/16 in
    RoIAlign); each ROI is pooled to 3x3 and reduced to a single logit.
    """

    def __init__(self, base_filters=64):
        super(DiscriminatorROI, self).__init__()

        def conv_block(in_filters, out_filters, normalise=True):
            # Conv(stride 2) -> optional BatchNorm -> LeakyReLU
            block = [nn.Conv2d(in_filters, out_filters, kernel_size=4,
                               stride=2, padding=1, bias=False)]
            if normalise:
                block.append(nn.BatchNorm2d(out_filters, momentum=0.8))
            block.append(nn.LeakyReLU(0.2, inplace=True))
            return block

        layers = []
        layers += conv_block(5, base_filters, normalise=False)
        layers += conv_block(1 * base_filters, 2 * base_filters)
        layers += conv_block(2 * base_filters, 4 * base_filters)
        layers += conv_block(4 * base_filters, 8 * base_filters)
        self.conv_layers = nn.Sequential(*layers)
        self.roi_pool = RoIAlign(output_size=(3, 3), spatial_scale=0.0625, sampling_ratio=-1)
        self.classifier = nn.Sequential(
            nn.Conv2d(8 * base_filters, 1, kernel_size=3, padding=0, bias=False))
        self.apply(weights_init_normal)

    def forward(self, inputs, condition, bboxes):
        # The last bbox column is dropped here — presumably a label/score
        # column; RoIAlign expects rows of (batch_idx, x1, y1, x2, y2).
        # TODO(review): confirm against the bbox layout built in the data generator.
        rois = bboxes[:, :-1]
        features = self.conv_layers(torch.cat([inputs, condition], axis=1))
        pooled = self.roi_pool(features, rois)
        return self.classifier(pooled).squeeze()
class ResidualBlock(nn.Module):
    """CycleGAN residual block: two reflection-padded 3x3 convolutions with
    InstanceNorm, plus an identity skip connection."""

    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()
        layers = [
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            nn.InstanceNorm2d(in_features),
            nn.ReLU(inplace=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            nn.InstanceNorm2d(in_features),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        # Identity skip: the output keeps the input's shape and channel count.
        return x + self.block(x)
class GeneratorResNet(nn.Module):
    """CycleGAN-style ResNet generator.

    Architecture: c7s1-64 stem, two stride-2 downsampling convs,
    `num_residual_blocks` residual blocks, two upsampling stages, and a
    c7s1-out projection with Tanh output in [-1, 1].
    """

    def __init__(self, input_shape, num_residual_blocks):
        super(GeneratorResNet, self).__init__()
        channels = input_shape[0]

        # NOTE(review): ReflectionPad2d(channels) only equals the padding of 3
        # required by the 7x7 conv when channels == 3 (RGB) — confirm before
        # reusing with other channel counts.
        layers = [
            nn.ReflectionPad2d(channels),
            nn.Conv2d(channels, 64, 7),
            nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True),
        ]

        # Downsampling: 64 -> 128 -> 256 channels, halving resolution each step.
        features = 64
        for _ in range(2):
            layers += [
                nn.Conv2d(features, features * 2, 3, stride=2, padding=1),
                nn.InstanceNorm2d(features * 2),
                nn.ReLU(inplace=True),
            ]
            features *= 2

        # Bottleneck residual blocks at full channel width.
        layers += [ResidualBlock(features) for _ in range(num_residual_blocks)]

        # Upsampling: nearest-neighbour resize followed by a 3x3 conv.
        for _ in range(2):
            layers += [
                nn.Upsample(scale_factor=2),
                nn.Conv2d(features, features // 2, 3, stride=1, padding=1),
                nn.InstanceNorm2d(features // 2),
                nn.ReLU(inplace=True),
            ]
            features //= 2

        # Output projection back to the input channel count.
        layers += [nn.ReflectionPad2d(channels), nn.Conv2d(features, channels, 7), nn.Tanh()]
        self.model = nn.Sequential(*layers)
        self.apply(weights_init_normal)

    def forward(self, x):
        return self.model(x)
class Discriminator(nn.Module):
    """PatchGAN discriminator with five stride-2 downsampling blocks.

    `output_shape` records the size of the patch-logit map,
    (1, height // 32, width // 32), for building real/fake targets.
    """

    def __init__(self, input_shape):
        super(Discriminator, self).__init__()
        channels, height, width = input_shape
        # Five stride-2 convolutions shrink each spatial dimension by 2**5.
        self.output_shape = (1, height // 2 ** 5, width // 2 ** 5)

        def block(in_filters, out_filters, normalize=True):
            """Downsampling block: Conv(stride 2) [+ InstanceNorm] + LeakyReLU."""
            layers = [nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1)]
            if normalize:
                layers.append(nn.InstanceNorm2d(out_filters))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        self.model = nn.Sequential(
            *block(channels, 64, normalize=False),
            *block(64, 128),
            *block(128, 256),
            *block(256, 512),
            *block(512, 512),
            # Asymmetric zero pad + final 4x4 conv produce the patch logits.
            nn.ZeroPad2d((1, 0, 1, 0)),
            nn.Conv2d(512, 1, 4, padding=1),
        )
        self.apply(weights_init_normal)

    def forward(self, img):
        return self.model(img)
| 5,076
| 31.132911
| 99
|
py
|
miccai2022-roigan
|
miccai2022-roigan-main/data/library_utils.py
|
import os
import sys
import h5py
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from cv2 import addWeighted
from PIL import Image, ImageDraw
from skimage.color import rgb2gray
from skimage.filters import gaussian, threshold_otsu
from skimage.morphology import closing, dilation, disk
from skimage.feature import blob_dog, blob_log
from skimage.color import rgb2hed, hed2rgb
from scipy.spatial import ConvexHull
from sklearn.metrics.pairwise import pairwise_distances
from functools import reduce
def write_flush(*text_args, stream=sys.stdout):
    """Write the arguments to `stream` as one comma-separated line and flush.

    Flushing immediately keeps progress messages visible even when the
    stream is block-buffered (e.g. output redirected to a file).
    """
    line = ', '.join(str(arg) for arg in text_args)
    stream.write(line + '\n')
    stream.flush()
def get_cells(input_img, gauss_sigma, min_sigma, max_sigma, threshold):
    """Detect cell centres in an image with a Laplacian-of-Gaussian blob detector.

    Returns an (N, 3) array of (row, col, radius); the detector's sigma is
    converted to an approximate blob radius via r = sigma * sqrt(2).
    """
    smoothed = gaussian(input_img, sigma=gauss_sigma)
    # The blob detector finds bright blobs; cells are dark, so invert first.
    inverted = 1 - smoothed
    blobs = blob_log(inverted, min_sigma=min_sigma, max_sigma=max_sigma,
                     threshold=threshold, exclude_border=True)
    blobs[:, 2] *= np.sqrt(2)
    return blobs
def find_clusters(points):
    """Group points into clusters by single-linkage with a 35-pixel cutoff.

    Two points end up in the same cluster when a chain of points with
    pairwise euclidean distance <= 35 connects them. Points are stored as
    (x, y) tuples — reversed from the (row, col) input — because downstream
    PIL drawing expects (x, y) coordinates.

    @param points: (N, 2) array of (row, col) coordinates
    @return: list of disjoint sets of (x, y) tuples; isolated points are omitted
    """
    # scipy's cdist produces the same euclidean distance matrix as
    # sklearn.metrics.pairwise.pairwise_distances, without pulling in the
    # heavier sklearn dependency (scipy is already used in this module).
    from scipy.spatial.distance import cdist
    dists = cdist(points, points)
    H, W = dists.shape
    clusters = []

    def find_cluster(clusters, point):
        # Linear scan: index of the cluster containing `point`, or None.
        for cluster_idx, cluster in enumerate(clusters):
            if point in cluster:
                return cluster_idx
        return None

    for i in range(H):
        point_i = tuple(points[i])[::-1]  # PIL takes (x, y) coordinates
        for j in range(W):
            if j == i:
                continue
            if dists[i, j] <= 35:
                point_j = tuple(points[j])[::-1]  # PIL takes (x, y) coordinates
                cluster_i = find_cluster(clusters, point_i)
                cluster_j = find_cluster(clusters, point_j)
                if cluster_i is None and cluster_j is None:  # new cluster
                    clusters.append({point_i, point_j})
                elif cluster_i is None:  # i joins j's cluster
                    clusters[cluster_j].add(point_i)
                elif cluster_j is None:  # j joins i's cluster
                    clusters[cluster_i].add(point_j)
                elif cluster_i != cluster_j:  # merge two distinct clusters
                    merged_cluster = clusters[cluster_i].union(clusters[cluster_j])
                    set_i = clusters[cluster_i]
                    set_j = clusters[cluster_j]
                    clusters.remove(set_i)
                    clusters.remove(set_j)
                    clusters.append(merged_cluster)
    return clusters
def get_cluster_mask(clusters, size=(256, 256)):
    """Rasterise clusters into a binary mask by filling each cluster's convex hull.

    Clusters with fewer than 3 points cannot form a polygon and are skipped.

    @param clusters: list of sets of (x, y) point tuples (see find_clusters)
    @param size: (width, height) of the output mask; the default keeps the
                 previously hard-coded 256x256 tile size
    @return: boolean numpy array of shape (height, width)
    """
    mask = Image.new('1', size, 0)
    for cluster in clusters:
        if len(cluster) < 3:
            # Not enough points for a convex hull polygon.
            continue
        cluster_list = list(cluster)
        hull = ConvexHull(cluster_list)
        # Walk the hull vertices in order so the polygon does not self-intersect.
        ordered_points = [tuple(cluster_list[idx]) for idx in hull.vertices]
        ImageDraw.Draw(mask).polygon(ordered_points, outline=1, fill=1)
    return np.array(mask)
def decompose_ihc(input_img):
    """Separate an IHC RGB image into haematoxylin, eosin and DAB stains.

    Applies colour deconvolution (rgb2hed), then re-renders each stain
    channel on its own as an RGB image.

    @return: (ihc_h, ihc_e, ihc_d) RGB images, one per stain
    """
    hed = rgb2hed(input_img)
    blank = np.zeros_like(hed[:, :, 0])

    def render(channel_idx):
        # Keep one HED channel, zero the other two, convert back to RGB.
        channels = [blank, blank, blank]
        channels[channel_idx] = hed[:, :, channel_idx]
        return hed2rgb(np.stack(channels, axis=-1))

    return render(0), render(1), render(2)
def get_crops_image(img, cells, crop_size=48):
    """Extract a square crop centred on each detected cell.

    Cells whose crop would extend past the image border are skipped, so the
    returned lists may be shorter than `cells`.

    @param img: source image array (H, W[, C])
    @param cells: iterable of cell centres; each entry is (y, x) optionally
                  followed by extra columns (e.g. the radius produced by
                  get_cells / blob_log), which are ignored
    @param crop_size: side length of the square crop in pixels
    @return: (crops, bboxes) where each bbox is [left, top, right, bottom]
    """
    crops = []
    bboxes = []
    half = crop_size // 2
    for cell in cells:
        # Only the centre coordinates are used; tolerate (y, x, r, ...) rows,
        # which the previous 2-value unpacking rejected with a ValueError.
        y, x = (int(c) for c in cell[:2])
        top, bottom = y - half, y + half
        left, right = x - half, x + half
        crop = img[top:bottom, left:right]
        # Border crops come out smaller (or empty) and are dropped.
        if crop.shape[0] == crop_size and crop.shape[1] == crop_size:
            crops.append(crop)
            bboxes.append([left, top, right, bottom])
    return crops, bboxes
| 3,897
| 28.755725
| 83
|
py
|
hgp
|
hgp-main/setup.py
|
from setuptools import setup, find_packages

# Minimal packaging metadata for the `hgp` package; all packages under the
# repository root are picked up automatically via find_packages().
setup(
    name="hgp",
    version="0.0.1",
    author="Magnus Ross",
    author_email="magnus.ross@postgrad.manchester.ac.uk",
    url="https://github.com/magnusross/hgp",
    packages=find_packages(),
    python_requires=">=3.9",
)
| 279
| 22.333333
| 57
|
py
|
hgp
|
hgp-main/hgp/__init__.py
|
import hgp.models
import hgp.misc
import hgp.datasets
import hgp.core
| 70
| 13.2
| 19
|
py
|
hgp
|
hgp-main/hgp/core/kernels.py
|
# MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functorch
import numpy as np
import torch
from torch import nn
from torch.distributions import Normal
from torch.nn import init
from hgp.misc.constraint_utils import invsoftplus, softplus
from ..misc.ham_utils import build_J
# Standard-normal prior over the Fourier-feature weights.
prior_weights = Normal(0.0, 1.0)


def sample_normal(shape, seed=None):
    """Draw a standard-normal sample of `shape` as a float32 tensor.

    A NumPy RandomState is used (rather than torch's global RNG) so that a
    fixed `seed` reproduces the same draw independently of torch state;
    RandomState(None) seeds nondeterministically, as before.
    """
    rng = np.random.RandomState(seed)
    return torch.tensor(rng.normal(size=shape).astype(np.float32))
class RBF(torch.nn.Module):
    """
    Squared exponential (RBF) kernel, with kernel computation and with
    weight and frequency sampling for random Fourier features.
    """

    def __init__(self, D_in, D_out=None, dimwise=False, init_ls=2.0, init_var=0.5):
        """
        @param D_in: Number of input dimensions
        @param D_out: Number of output dimensions (defaults to D_in)
        @param dimwise: If True, different kernel parameters are given to output dimensions
        @param init_ls: Initial lengthscale value
        @param init_var: Initial variance value
        """
        super(RBF, self).__init__()
        self.D_in = D_in
        self.D_out = D_in if D_out is None else D_out
        self.dimwise = dimwise
        # Per-output-dimension parameters when dimwise, shared otherwise.
        lengthscales_shape = (self.D_out, self.D_in) if dimwise else (self.D_in,)
        variance_shape = (self.D_out,) if dimwise else (1,)
        # Parameters are stored unconstrained; softplus maps them to positives.
        self.unconstrained_lengthscales = nn.Parameter(
            torch.ones(size=lengthscales_shape), requires_grad=True
        )
        self.unconstrained_variance = nn.Parameter(
            torch.ones(size=variance_shape), requires_grad=True
        )
        self._initialize(init_ls, init_var)

    def _initialize(self, init_ls, init_var):
        # Set the unconstrained parameters so that softplus(...) equals the
        # requested initial lengthscale/variance.
        init.constant_(
            self.unconstrained_lengthscales, invsoftplus(torch.tensor(init_ls)).item()
        )
        init.constant_(
            self.unconstrained_variance, invsoftplus(torch.tensor(init_var)).item()
        )

    @property
    def lengthscales(self):
        """Positive lengthscales obtained via softplus."""
        return softplus(self.unconstrained_lengthscales)

    @lengthscales.setter
    def lengthscales(self, value):
        self.unconstrained_lengthscales = nn.Parameter(
            invsoftplus(value), requires_grad=True
        )

    @property
    def variance(self):
        """Positive signal variance obtained via softplus."""
        return softplus(self.unconstrained_variance)

    @variance.setter
    def variance(self, value):
        self.unconstrained_variance = nn.Parameter(
            invsoftplus(value), requires_grad=True
        )

    def square_dist_dimwise(self, X, X2=None):
        """
        Computes squared euclidean distance (scaled) for the dimwise kernel setting
        @param X: Input 1 (N,D_in)
        @param X2: Input 2 (M,D_in)
        @return: Tensor (D_out,N,M)
        """
        X = X.unsqueeze(0) / self.lengthscales.unsqueeze(1)  # (D_out,N,D_in)
        Xs = torch.sum(torch.pow(X, 2), dim=2)  # (D_out,N)
        if X2 is None:
            # ||x - x'||^2 = ||x||^2 + ||x'||^2 - 2 x.x'
            return (
                -2 * torch.einsum("dnk, dmk -> dnm", X, X)
                + Xs.unsqueeze(-1)
                + Xs.unsqueeze(1)
            )  # (D_out,N,N)
        else:
            X2 = X2.unsqueeze(0) / self.lengthscales.unsqueeze(1)  # (D_out,M,D_in)
            X2s = torch.sum(torch.pow(X2, 2), dim=2)  # (D_out,M)
            return (
                -2 * torch.einsum("dnk, dmk -> dnm", X, X2)
                + Xs.unsqueeze(-1)
                + X2s.unsqueeze(1)
            )  # (D_out,N,M)

    def square_dist(self, X, X2=None):
        """
        Computes squared euclidean distance (scaled) for the non-dimwise kernel setting
        @param X: Input 1 (N,D_in)
        @param X2: Input 2 (M,D_in)
        @return: Tensor (N,M)
        """
        X = X / self.lengthscales  # (N,D_in)
        Xs = torch.sum(torch.pow(X, 2), dim=1)  # (N,)
        if X2 is None:
            return (
                -2 * torch.matmul(X, X.t())
                + torch.reshape(Xs, (-1, 1))
                + torch.reshape(Xs, (1, -1))
            )  # (N,N)
        else:
            X2 = X2 / self.lengthscales  # (M,D_in)
            X2s = torch.sum(torch.pow(X2, 2), dim=1)  # (M,)
            return (
                -2 * torch.matmul(X, X2.t())
                + torch.reshape(Xs, (-1, 1))
                + torch.reshape(X2s, (1, -1))
            )  # (N,M)

    def K(self, X, X2=None):
        """
        Computes the kernel matrix K(X, X2)
        @param X: Input 1 (N,D_in)
        @param X2: Input 2 (M,D_in)
        @return: Tensor (D,N,M) if dimwise else (N,M)
        """
        if self.dimwise:
            sq_dist = torch.exp(-0.5 * self.square_dist_dimwise(X, X2))  # (D_out,N,M)
            return self.variance[:, None, None] * sq_dist  # (D_out,N,M)
        else:
            # NOTE(review): this branch divides the squared distance by an
            # extra factor of 2 compared to the dimwise branch above and to
            # the spectral density used in sample_freq (effectively a
            # lengthscale scaled by sqrt(2)). It does match
            # DerivativeRBF.single_k, so it is left unchanged — confirm
            # which form is intended before modifying either.
            sq_dist = torch.exp(-0.5 * self.square_dist(X, X2) / 2)  # (N,M)
            return self.variance * sq_dist  # (N,M)

    def sample_freq(self, S, seed=None):
        """
        Computes random samples from the spectral density of the squared exponential kernel
        @param S: Number of features
        @param seed: random seed
        @return: Tensor of frequencies (D_in,S,D_out) if dimwise else (D_in,S)
        """
        omega_shape = (self.D_in, S, self.D_out) if self.dimwise else (self.D_in, S)
        omega = sample_normal(omega_shape, seed)  # (D_in, S, D_out) or (D_in, S)
        # Scaling a standard-normal draw by 1/lengthscale gives samples from
        # the SE kernel's spectral density.
        lengthscales = (
            self.lengthscales.T.unsqueeze(1)
            if self.dimwise
            else self.lengthscales.unsqueeze(1)
        )  # (D_in,1,D_out) or (D_in,1)
        return omega / lengthscales  # (D_in, S, D_out) or (D_in, S)
class DerivativeRBF(RBF):
    """
    Squared exponential kernel that additionally implements gradients and
    Hessians of the kernel; only applies to a single output dimension.
    """

    def __init__(self, D_in, init_ls=2.0, init_var=0.5):
        """
        @param D_in: Number of input dimensions; must be even ((q, p) pairs)
        @param init_ls: Initial lengthscale value
        @param init_var: Initial variance value
        """
        assert D_in % 2 == 0, "D_in must be even."
        super(DerivativeRBF, self).__init__(
            D_in, D_out=1, dimwise=False, init_ls=init_ls, init_var=init_var
        )
        # Symplectic structure matrix used for Hamiltonian vector fields.
        self.J = build_J(D_in)

    def single_k(self, xi, yi):
        """Kernel evaluated at a single pair of points (1-D tensors).

        Matches the non-dimwise RBF.K branch (including its extra /2 factor).
        """
        xi = xi / self.lengthscales
        yi = yi / self.lengthscales
        return self.variance[0] * torch.exp(-0.5 * torch.sum((xi - yi) ** 2 / 2))

    def grad_single_k(self, xi, yi, use_J=False):
        """Gradient of the kernel w.r.t. xi, optionally premultiplied by J."""
        J = self.J if use_J else torch.eye(self.D_in)
        return J @ functorch.grad(self.single_k, argnums=0)(xi, yi)

    def grad_K(self, X, X2=None, use_J=False):
        """Gradient of the kernel over point sets, reshaped to (N1*D, N2).

        @param X: (N1, D) tensor
        @param X2: optional (N2, D) tensor; defaults to X
        @param use_J: premultiply each gradient by the symplectic matrix J
        """
        N1D = X.shape[0] * X.shape[1]
        # Bug fix: N2 must be the number of rows of X2 (or X when X2 is
        # None). The previous code left N2 as the full shape tuple in the
        # X2 is None case, which made the final reshape() fail.
        N2 = X.shape[0] if X2 is None else X2.shape[0]
        if X2 is None:
            X2 = X
        return (
            functorch.vmap(
                lambda ti: functorch.vmap(
                    lambda tpi: self.grad_single_k(tpi, ti, use_J=use_J)
                )(X)
            )(X2)
            .permute(2, 1, 0)
            .reshape(N1D, N2)
        )

    def hess_single_k(self, x, xp, use_J=False):
        """Hessian of the kernel at one pair, optionally conjugated by J."""
        J = self.J if use_J else torch.eye(self.D_in)
        return -J @ functorch.hessian(self.single_k)(x, xp) @ J.T

    def hess_K(self, X, X2=None, use_J=False):
        """Hessian of the kernel over a point set, reshaped to (N*D, N*D).

        Only the X2 is None case is supported.
        """
        if X2 is not None:
            raise NotImplementedError
        ND = X.shape[0] * X.shape[1]
        return (
            functorch.vmap(
                lambda ti: functorch.vmap(
                    lambda tpi: self.hess_single_k(ti, tpi, use_J=use_J)
                )(X)
            )(X)
            .permute(2, 0, 3, 1)
            .reshape(ND, ND)
        )
| 8,840
| 34.939024
| 121
|
py
|
hgp
|
hgp-main/hgp/core/flow.py
|
# MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
from torchdiffeq import odeint as odeint_nonadjoint
from torchdiffeq import odeint_adjoint
class ODEfunc(nn.Module):
    """Wraps a differential-equation layer for use with an ODE solver.

    Counts vector-field evaluations and (re)builds the layer's random-draw
    cache before each solve. Modified from https://github.com/rtqichen/ffjord/
    """

    def __init__(self, diffeq):
        """
        @param diffeq: Layer of GPODE/npODE/neuralODE
        """
        super(ODEfunc, self).__init__()
        self.diffeq = diffeq
        # Buffer (not a parameter): moves with .to(device) but is not optimised.
        self.register_buffer("_num_evals", torch.tensor(0.0))

    def before_odeint(self, rebuild_cache):
        """Reset the evaluation counter; optionally redraw the cached random
        quantities that define one posterior sample of the vector field."""
        self._num_evals.fill_(0)
        if rebuild_cache:
            self.diffeq.build_cache()

    def num_evals(self):
        """Number of vector-field evaluations since the last reset."""
        return self._num_evals.item()

    def forward(self, t, states):
        self._num_evals += 1
        return self.diffeq(t, states)
class Flow(nn.Module):
    """ODE flow: wraps a differential-equation layer in an ODEfunc and
    integrates it forward in time with torchdiffeq.

    See https://github.com/rtqichen/torchdiffeq for the available solvers.
    """

    def __init__(
        self, diffeq, solver="dopri5", atol=1e-6, rtol=1e-6, use_adjoint=False
    ):
        """
        @param diffeq: Layer of GPODE/npODE/neuralODE
        @param solver: Solver to be used for ODE numerical integration
        @param atol: Absolute tolerance for the solver
        @param rtol: Relative tolerance for the solver
        @param use_adjoint: Use the adjoint method for loss gradients
            (odeint_adjoint from torchdiffeq)
        """
        super(Flow, self).__init__()
        self.odefunc = ODEfunc(diffeq)
        self.solver = solver
        self.atol = atol
        self.rtol = rtol
        self.use_adjoint = use_adjoint

    def forward(self, x0, ts, return_energy=False):
        """
        Numerically solve the IVP, optionally also evaluating the
        Hamiltonian ("energy") along the trajectory.
        @param x0: Initial state (N,D) tensor x(t_0)
        @param ts: Time sequence of length T; the first value is t_0
        @param return_energy: If True, also return the energy along the path
        @return: xs (N,T,D) tensor, plus energy (N,T,1) when requested
        """
        odeint = odeint_adjoint if self.use_adjoint else odeint_nonadjoint
        # Redraw the cached posterior sample so each solve integrates a
        # fresh draw of the vector field.
        self.odefunc.before_odeint(rebuild_cache=True)
        xs = odeint(
            self.odefunc, x0, ts, atol=self.atol, rtol=self.rtol, method=self.solver
        )
        if not return_energy:
            return xs.permute(1, 0, 2)  # (N,T,D)
        energy = self.odefunc.diffeq.hamiltonian(
            ts, xs.reshape(-1, xs.shape[-1])
        ).reshape(xs.shape[0], xs.shape[1], 1)
        return xs.permute(1, 0, 2), energy.permute(1, 0, 2)  # (N,T,D), (N,T,1)

    def num_evals(self):
        """Number of vector-field evaluations in the most recent solve."""
        return self.odefunc.num_evals()

    def kl(self):
        """
        Calls the KL() computation from the diffeq layer.
        """
        return self.odefunc.diffeq.kl().sum()

    def log_prior(self):
        """
        Calls the log_prior() computation from the diffeq layer.
        """
        return self.odefunc.diffeq.log_prior().sum()
| 4,501
| 36.831933
| 115
|
py
|
hgp
|
hgp-main/hgp/core/constraint_likelihoods.py
|
import torch
import torch.nn as nn
from torch import distributions
from torch.nn import init
from hgp.misc.constraint_utils import invsoftplus, softplus
class Gaussian(nn.Module):
    """
    Gaussian observation likelihood with an (optionally trainable) scale.
    """

    def __init__(
        self, d: int = 1, scale: float = 1.0, requires_grad: bool = True
    ) -> None:
        super(Gaussian, self).__init__()
        # The scale is stored unconstrained and mapped through softplus so
        # it remains positive throughout optimisation.
        self.unconstrained_scale = nn.Parameter(
            torch.ones(d), requires_grad=requires_grad
        )
        self._initialize(scale)

    def _initialize(self, x: float) -> None:
        # Set the unconstrained value so softplus(...) equals `x`.
        init.constant_(self.unconstrained_scale, invsoftplus(torch.tensor(x)).item())

    @property
    def scale(self):
        return softplus(self.unconstrained_scale)

    def distribution(self, loc: torch.Tensor) -> torch.distributions.Distribution:
        return distributions.Normal(loc=loc, scale=self.scale)

    @property
    def variance(self):
        # Variance of the noise model (location-independent).
        return self.distribution(loc=torch.zeros_like(self.scale)).variance

    def log_prob(self, f: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        density = self.distribution(loc=f)
        log_prob = density.log_prob(y)
        assert log_prob.shape == f.shape
        return log_prob
class Laplace(nn.Module):
    """
    Laplace observation likelihood with an (optionally trainable) scale.
    """

    def __init__(
        self, d: int = 1, scale: float = 1.0, requires_grad: bool = True
    ) -> None:
        super(Laplace, self).__init__()
        # Stored unconstrained; softplus keeps the effective scale positive.
        self.unconstrained_scale = nn.Parameter(
            torch.ones(d), requires_grad=requires_grad
        )
        self._initialize(scale)

    def _initialize(self, x: float) -> None:
        # Set the unconstrained value so softplus(...) equals `x`.
        init.constant_(self.unconstrained_scale, invsoftplus(torch.tensor(x)).item())

    @property
    def scale(self):
        return softplus(self.unconstrained_scale)

    def distribution(self, loc):
        return distributions.Laplace(loc=loc, scale=self.scale)

    @property
    def variance(self):
        # Variance of the noise model (location-independent).
        return self.distribution(loc=torch.zeros_like(self.scale)).variance

    def log_prob(self, f: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        density = self.distribution(loc=f)
        log_prob = density.log_prob(y)
        assert log_prob.shape == f.shape
        return log_prob
| 2,282
| 29.44
| 85
|
py
|
hgp
|
hgp-main/hgp/core/dsvgp.py
|
# MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functorch
import numpy as np
import torch
from hgp.core.kernels import RBF, DerivativeRBF
from hgp.misc import transforms
from hgp.misc.ham_utils import build_J
from hgp.misc.param import Param
from hgp.misc.settings import settings
# Allow non-deterministic kernels; some operations used below do not have
# deterministic implementations.
torch.use_deterministic_algorithms(False)
# Diagonal jitter added to kernel matrices before Cholesky factorisation.
jitter = 1e-5
def sample_normal(shape, seed=None):
    """Standard-normal float32 sample of `shape`, moved to the configured device.

    RandomState(None) seeds nondeterministically, matching the previous
    seed-or-default branching.
    """
    rng = np.random.RandomState(seed)
    return torch.tensor(rng.normal(size=shape).astype(np.float32)).to(settings.device)
def sample_uniform(shape, seed=None):
    """Uniform([0,1)) float32 sample of `shape`, moved to the configured device.

    RandomState(None) seeds nondeterministically, matching the previous
    seed-or-default branching.
    """
    rng = np.random.RandomState(seed)
    draw = rng.uniform(low=0.0, high=1.0, size=shape).astype(np.float32)
    return torch.tensor(draw).to(settings.device)
def compute_divergence(dx, y):
    """Exact divergence trace(d dx / d y), one autograd pass per dimension.

    Taken from FFJORD: https://github.com/rtqichen/ffjord/blob/master/lib/layers/odefunc.py

    @param dx: (N,D) tensor that is a differentiable function of y
    @param y: (N,D) input tensor with requires_grad=True
    @return: (N,) tensor of per-sample divergences
    """
    divergence = 0.0
    for dim in range(y.shape[1]):
        # d(dx[:, dim]) / d(y[:, dim]), summed over the batch one dim at a time.
        grad_dim = torch.autograd.grad(dx[:, dim].sum(), y, create_graph=True)[0]
        divergence += grad_dim.contiguous()[:, dim].contiguous()
    return divergence.contiguous()
class DSVGP_Layer(torch.nn.Module):
    """
    A layer class implementing decoupled sampling of the SVGP posterior.
    @InProceedings{pmlr-v119-wilson20a,
    title = {Efficiently sampling functions from {G}aussian process posteriors},
    author = {Wilson, James and Borovitskiy, Viacheslav and Terenin, Alexander and Mostowsky, Peter and Deisenroth, Marc},
    booktitle = {Proceedings of the 37th International Conference on Machine Learning},
    pages = {10292--10302},
    year = {2020},
    editor = {Hal Daumé III and Aarti Singh},
    volume = {119},
    series = {Proceedings of Machine Learning Research},
    publisher = {PMLR},
    pdf = {http://proceedings.mlr.press/v119/wilson20a/wilson20a.pdf}
    }
    """

    def __init__(self, D_in, D_out, M, S, q_diag=False, dimwise=True):
        """
        @param D_in: Number of input dimensions
        @param D_out: Number of output dimensions
        @param M: Number of inducing points
        @param S: Number of features to consider for Fourier feature maps
        @param q_diag: Diagonal approximation for the inducing posterior
        @param dimwise: If True, different kernel parameters are given to output dimensions
        """
        super(DSVGP_Layer, self).__init__()
        self.kern = RBF(D_in, D_out)
        self.q_diag = q_diag
        self.dimwise = dimwise
        self.D_out = D_out
        self.D_in = D_in
        self.M = M
        self.S = S
        self.inducing_loc = Param(
            np.random.normal(size=(M, D_in)), name="Inducing locations"
        )  # (M,D_in)
        # Mean of the (whitened) inducing posterior q(u), initialised at zero.
        self.Um = Param(
            # np.random.normal(size=(M, D_out)) * 1e-1,
            np.zeros((M, D_out)),
            name="Inducing distirbution (mean)",
        )  # (M,D_out)
        # Covariance factor of q(u): per-dimension scales when q_diag,
        # otherwise a full lower-triangular Cholesky factor per output dim.
        if self.q_diag:
            self.Us_sqrt = Param(
                np.ones(shape=(M, D_out)) * 1e-1,  # (M,D_out)
                transform=transforms.SoftPlus(),
                name="Inducing distirbution (scale)",
            )
        else:
            self.Us_sqrt = Param(
                np.stack([np.eye(M)] * D_out) * 1e-1,  # (D_out,M,M)
                transform=transforms.LowerTriangular(M, D_out),
                name="Inducing distirbution (scale)",
            )

    def sample_inducing(self):
        """
        Generate a sample from the inducing posterior q(u) ~ N(m, S)
        @return: inducing sample (M,D) tensor
        """
        epsilon = sample_normal(shape=(self.M, self.D_out), seed=None)  # (M, D_out)
        # Reparameterised draw: u = m + S^{1/2} eps.
        if self.q_diag:
            ZS = self.Us_sqrt() * epsilon  # (M, D_out)
        else:
            ZS = torch.einsum("dnm, md->nd", self.Us_sqrt(), epsilon)  # (M, D_out)
        u_sample = ZS + self.Um()  # (M, D_out)
        return u_sample  # (M, D_out)

    def build_cache(self):
        """
        Builds a cache of computations that uniquely define a sample from the posterior process
        1. Generate and fix parameters of Fourier features
        2. Generate and fix inducing posterior sample
        3. Intermediate computations based on the inducing sample for the pathwise update
        """
        # generate parameters required for the Fourier feature maps
        self.rff_weights = sample_normal((self.S, self.D_out))  # (S,D_out)
        self.rff_omega = self.kern.sample_freq(self.S)  # (D_in,S) or (D_in,S,D_out)
        phase_shape = (1, self.S, self.D_out) if self.dimwise else (1, self.S)
        self.rff_phase = sample_uniform(phase_shape) * 2 * np.pi  # (1,S,D_out) or (1,S)
        # generate sample from the inducing posterior
        inducing_val = self.sample_inducing()  # (M,D)
        # compute the term nu = k(Z,Z)^{-1}(u-f(Z)) in the whitened form of inducing variables
        # equation (13) from http://proceedings.mlr.press/v119/wilson20a/wilson20a.pdf
        Ku = self.kern.K(self.inducing_loc())  # (M,M) or (D,M,M)
        # jitter on the diagonal keeps the Cholesky factorisation stable
        Lu = torch.linalg.cholesky(Ku + torch.eye(self.M) * jitter)  # (M,M) or (D,M,M)
        u_prior = self.rff_forward(self.inducing_loc())  # (M,D)
        # two triangular solves implement Ku^{-1}(u - f(Z)) via the Cholesky factor
        if not self.dimwise:
            nu = torch.linalg.solve_triangular(Lu, u_prior, upper=False)  # (M,D)
            nu = torch.linalg.solve_triangular(
                Lu.T, (inducing_val - nu), upper=True
            )  # (M,D)
        else:
            nu = torch.linalg.solve_triangular(
                Lu, u_prior.T.unsqueeze(2), upper=False
            )  # (D,M,1)
            nu = torch.linalg.solve_triangular(
                Lu.permute(0, 2, 1), (inducing_val.T.unsqueeze(2) - nu), upper=True
            )  # (D,M,1)
        self.nu = nu  # (M,D) or (D,M,1)

    def rff_forward(self, x):
        """
        Calculates samples from the GP prior with random Fourier features
        @param x: input tensor (N,D)
        @return: function values (N,D_out)
        """
        # compute feature map phi(x) = sqrt(var/S) * cos(x.omega + phase)
        xo = torch.einsum(
            "nd,dfk->nfk" if self.dimwise else "nd,df->nf", x, self.rff_omega
        )  # (N,S,D_out) or (N,S)
        phi_ = torch.cos(xo + self.rff_phase)  # (N,S,D_out) or (N,S)
        phi = phi_ * torch.sqrt(self.kern.variance / self.S)  # (N,S,D_out) or (N,S)
        # compute function values as a weighted sum of features
        f = torch.einsum(
            "nfk,fk->nk" if self.dimwise else "nf,fd->nd", phi, self.rff_weights
        )  # (N,D_out)
        return f  # (N,D_out)

    def build_conditional(self, x, full_cov=False):
        """
        Calculates the conditional distribution q(f(x)) = N(m(x), Sigma(x))
        where m(x) = k(x,Z)k(Z,Z)^{-1}u,
        Sigma(x) = k(x,x) + k(x,Z)k(Z,Z)^{-1}(S-K(Z,Z))k(Z,Z)^{-1}k(Z,x)
        @param x: input tensor (N,D)
        @param full_cov: if True, returns the full Sigma(x); else only its diagonal
        @return: m(x), Sigma(x)
        """
        Ku = self.kern.K(self.inducing_loc())  # (M,M) or (D,M,M)
        Lu = torch.linalg.cholesky(Ku + torch.eye(self.M) * jitter)  # (M,M) or (D,M,M)
        Kuf = self.kern.K(self.inducing_loc(), x)  # (M,N) or (D,M,N)
        # A = Lu^{-1} Kuf, so that A^T A = Kfu Ku^{-1} Kuf
        A = torch.linalg.solve_triangular(
            Lu, Kuf, upper=False
        )  # (M,M)@(M,N) --> (M,N) or (D,M,M)@(D,M,N) --> (D,M,N)
        Us_sqrt = (
            self.Us_sqrt().T[:, :, None] if self.q_diag else self.Us_sqrt()
        )  # (D,M,1) or (D,M,M)
        # SK = S - I (whitened form), used for the covariance correction term
        SK = (Us_sqrt @ Us_sqrt.permute(0, 2, 1)) - torch.eye(Ku.shape[1]).unsqueeze(
            0
        )  # (D,M,M)
        B = torch.einsum(
            "dme, den->dmn" if self.dimwise else "dmi, in->dmn", SK, A
        )  # (D,M,N)
        if full_cov:
            delta_cov = torch.einsum(
                "dme, dmn->den" if self.dimwise else "me, dmn->den", A, B
            )  # (D,N,N)
            Kff = (
                self.kern.K(x) if self.dimwise else self.kern.K(x).unsqueeze(0)
            )  # (D,N,N) or (1,N,N)
        else:
            delta_cov = ((A if self.dimwise else A.unsqueeze(0)) * B).sum(1)  # (D,N)
            if self.dimwise:
                Kff = torch.diagonal(self.kern.K(x), dim1=1, dim2=2)  # (D,N)
            else:
                Kff = torch.diagonal(self.kern.K(x), dim1=0, dim2=1)  # (N,)
        var = Kff + delta_cov
        mean = torch.einsum(
            "dmn, md->nd" if self.dimwise else "mn, md->nd", A, self.Um()
        )
        return mean, var.T  # (N,D) , (N,D) or (N,N,D)

    def forward(self, t, x):
        """
        Compute a sample from the SVGP posterior using the decoupled sampling approach
        Involves two steps:
        1. Generate a sample from the prior :: rff_forward
        2. Compute pathwise updates using samples from the inducing posterior :: build_cache
        @param t: time value, usually None as we define time-invariant ODEs
        @param x: input tensor in (N,D)
        @return: f(x) where f is a sample from the GP posterior
        """
        # generate a prior sample using rff
        f_prior = self.rff_forward(x)  # (N,D)
        # compute pathwise updates Kxz nu using the cache built in build_cache()
        if not self.dimwise:
            Kuf = self.kern.K(self.inducing_loc(), x)  # (M,N)
            f_update = torch.einsum("md, mn -> nd", self.nu, Kuf)  # (N,D)
        else:
            Kuf = self.kern.K(self.inducing_loc(), x)  # (D,M,N)
            f_update = torch.einsum("dm, dmn -> nd", self.nu.squeeze(2), Kuf)  # (N,D)
        # sample from the GP posterior = prior sample + pathwise update
        dx = f_prior + f_update  # (N,D)
        return dx  # (N,D)

    def kl(self):
        """
        Computes the KL divergence for the inducing variables in whitened form,
        i.e. KL between the multivariate Gaussians q(u) ~ N(m,S) and p(u) ~ N(0,I)
        @return: KL divergence value tensor
        """
        alpha = self.Um()  # (M,D)
        if self.q_diag:
            Lq = Lq_diag = self.Us_sqrt()  # (M,D)
        else:
            Lq = torch.tril(self.Us_sqrt())  # (D,M,M)
            Lq_diag = torch.diagonal(Lq, dim1=1, dim2=2).t()  # (M,D)
        # compute the Mahalanobis term m^T m (whitened prior has identity covariance)
        mahalanobis = torch.pow(alpha, 2).sum(dim=0, keepdim=True)  # (1,D)
        # log-determinant of the covariance of q(u)
        logdet_qcov = torch.log(torch.pow(Lq_diag, 2)).sum(dim=0, keepdim=True)  # (1,D)
        # trace term tr(S)
        if self.q_diag:
            trace = torch.pow(Lq, 2).sum(dim=0, keepdim=True)  # (M,D) --> (1,D)
        else:
            trace = torch.pow(Lq, 2).sum(dim=(1, 2)).unsqueeze(0)  # (D,M,M) --> (1,D)
        # whitened prior: log|I| = 0 and the constant is -M per output dim
        logdet_pcov = 0.0
        constant = -torch.tensor(self.M)
        twoKL = logdet_pcov - logdet_qcov + mahalanobis + trace + constant
        kl = 0.5 * twoKL.sum()
        return kl
class Hamiltonian_DSVGP_Layer(DSVGP_Layer):
    """DSVGP layer constrained to Hamiltonian dynamics.

    A single-output GP sample provides a scalar H(x); the vector field is
    its symplectic gradient f(x) = J dH/dx.
    """

    def __init__(self, D_in, M, S, q_diag=False):
        """
        @param D_in: Number of input dimensions (even: (q, p) pairs)
        @param M: Number of inducing points
        @param S: Number of features for the Fourier feature maps
        @param q_diag: Diagonal approximation for the inducing posterior
        """
        super(Hamiltonian_DSVGP_Layer, self).__init__(
            D_in, 1, M, S, q_diag=q_diag, dimwise=False
        )
        # Swap in a kernel that supports derivatives and cache the
        # symplectic structure matrix J.
        self.kern = DerivativeRBF(D_in)
        self.J = build_J(D_in)

    def hamiltonian(self, t, x):
        # Scalar GP sample H(x); drop the trailing singleton output dim.
        return super(Hamiltonian_DSVGP_Layer, self).forward(t, x)[:, 0]

    def forward(self, t, x):
        # Hamiltonian dynamics: dx/dt = J dH/dx.
        dHdx = functorch.grad(lambda xi: self.hamiltonian(t, xi).sum())(x)
        return dHdx @ self.J.T
| 12,813
| 39.169279
| 138
|
py
|
hgp
|
hgp-main/hgp/core/nn.py
|
import functorch
import torch
from torch import nn
from hgp.misc.ham_utils import build_J
def Linear(chin, chout, zero_bias=False, orthogonal_init=False):
    """Build an nn.Linear(chin, chout), optionally zeroing the bias and/or
    applying an orthogonal weight initialization with gain 0.5."""
    layer = nn.Linear(chin, chout)
    if orthogonal_init:
        torch.nn.init.orthogonal_(layer.weight, gain=0.5)
    if zero_bias:
        torch.nn.init.zeros_(layer.bias)
    return layer
def FCtanh(chin, chout, zero_bias=False, orthogonal_init=False):
    """Fully-connected layer followed by a tanh activation, with the same
    optional initializations as Linear (zero bias, orthogonal weights)."""
    linear = nn.Linear(chin, chout)
    if zero_bias:
        torch.nn.init.zeros_(linear.bias)
    if orthogonal_init:
        torch.nn.init.orthogonal_(linear.weight, gain=0.5)
    return nn.Sequential(linear, nn.Tanh())
class NNModel(nn.Module):
    """Plain feed-forward network with tanh hidden activations.

    The forward(t, x) signature matches the ODE vector-field interface;
    the time argument is ignored (time-invariant dynamics).
    """

    def __init__(self, D_in, D_out, N_nodes, N_layers):
        super(NNModel, self).__init__()
        self.D_out = D_out
        self.D_in = D_in
        widths = [self.D_in] + [N_nodes] * N_layers
        hidden = [
            FCtanh(w_in, w_out, zero_bias=False, orthogonal_init=False)
            for w_in, w_out in zip(widths[:-1], widths[1:])
        ]
        head = Linear(widths[-1], D_out, zero_bias=False, orthogonal_init=False)
        self.net = nn.Sequential(*hidden, head)

    def forward(self, t, x):
        # Time-invariant: t is unused.
        return self.net(x)

    def build_cache(self):
        # No-op; kept for interface compatibility with GP-based flows.
        pass
class HamiltonianNNModel(NNModel):
    """
    Neural-network vector field with a Hamiltonian restriction:
    f(x) = (grad H(x)) J^T, where the scalar H is a single-output NNModel.
    """

    def __init__(self, D_in, N_nodes, N_layers):
        # Scalar output: the network parameterizes the Hamiltonian H(x).
        super(HamiltonianNNModel, self).__init__(D_in, 1, N_nodes, N_layers)
        self.J = build_J(D_in)

    def hamiltonian(self, t, x):
        # Evaluate H and drop the singleton output dimension.
        return super(HamiltonianNNModel, self).forward(t, x)[:, 0]

    def forward(self, t, x):
        grad_H = functorch.grad(lambda state: self.hamiltonian(t, state).sum())(x)
        return grad_H @ self.J.T
| 1,637
| 25.852459
| 84
|
py
|
hgp
|
hgp-main/hgp/core/states.py
|
# MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import torch
from torch import nn
from torch.distributions import MultivariateNormal
from hgp.misc import settings, transforms
from hgp.misc.param import Param
initial_state_scale = 1e-2  # scale applied to randomly initialized state parameters
jitter = 1e-5  # diagonal jitter added to covariances for numerical stability
def sample_normal(shape, seed=None):
    """Draw standard-normal samples of the given shape as a float32 torch
    tensor, optionally reproducibly via a seeded NumPy RandomState."""
    if seed is None:
        rng = np.random.RandomState()
    else:
        rng = np.random.RandomState(seed)
    draws = rng.normal(size=shape).astype(np.float32)
    return torch.tensor(draws)
class StateInitialDistribution(nn.Module):
    """
    Base class defining the initial-state posterior q(x_0).

    Subclasses implement initialization from data, sampling, log-density,
    and the KL divergence against the prior.
    """

    def __init__(self, dim_n, dim_d):
        """
        @param dim_n: N, number of sequences
        @param dim_d: D, state dimensionality
        """
        super(StateInitialDistribution, self).__init__()
        self.dim_n = dim_n
        self.dim_d = dim_d

    def _initialize(self, x0, xs):
        # Set posterior parameters from observed data.
        raise NotImplementedError

    def sample(self, num_samples, seed=None):
        # Draw (num_samples, N, D) initial-state samples.
        raise NotImplementedError

    def log_prob(self, x):
        raise NotImplementedError

    def kl(self):
        raise NotImplementedError
class DeltaInitialDistrubution(StateInitialDistribution):
    """
    Delta (point-mass) posterior over the initial state: q(x_0) = delta(x_0 - m),
    with a single learnable mean parameter of shape (N,D).

    NOTE: the class name carries a historical typo ("Distrubution"); it is
    kept unchanged for backward compatibility with existing callers.
    """

    def __init__(self, dim_n, dim_d):
        super(DeltaInitialDistrubution, self).__init__(dim_n, dim_d)
        # Fixed typos in the human-readable parameter label.
        self.param_mean = Param(
            np.random.normal(size=(dim_n, dim_d)) * initial_state_scale,
            name="Initial state distribution (mean)",
        )

    def _initialize(self, x, xs):
        # Overwrite the mean parameter with the provided initial states.
        self.param_mean().data = x

    def mean(self):
        return self.param_mean()

    def sample(self, num_samples=1, seed=None):
        # Delta distribution: every "sample" is the mean, replicated S times.
        return self.param_mean().unsqueeze(0).repeat(num_samples, 1, 1)
class StateInitialVariationalGaussian(StateInitialDistribution):
    """
    Full rank multivariate Gaussian approximation for the initial state
    posterior q(x_0) = N(m, S), where x is (N,D), m is (N,D), S is (N,D,D).
    N is the number of sequences, D the number of state dimensions.

    NOTE(review): the method name `distirbution` and the `name=` labels below
    carry historical typos; kept byte-identical for backward compatibility.
    """

    def __init__(self, dim_n, dim_d):
        """
        @param dim_n: N number of sequences
        @param dim_d: D state dimensionality
        """
        super(StateInitialVariationalGaussian, self).__init__(dim_n, dim_d)
        self.param_mean = Param(
            np.random.normal(size=(dim_n, dim_d)) * initial_state_scale,
            name="Initiali state distirbution (mean)",
        )
        # Cholesky factor of the covariance, constrained lower-triangular.
        self.param_lchol = Param(
            np.stack([np.eye(dim_d)] * dim_n) * initial_state_scale,  # NxDxD
            transform=transforms.LowerTriangular(dim_d, dim_n),
            name="Initial state distirbution (scale)",
        )

    def _initialize(self, x, xs):
        # Initialize only the mean; the scale keeps its default value.
        self.param_mean().data = x

    def mean(self):
        return self.param_mean()

    def lchol(self):
        return self.param_lchol()

    def distirbution(self):
        # Build the MultivariateNormal with S = L L^T + jitter*I.
        x0_mean = self.mean()
        x0_lchol = self.lchol()
        x0_qcov = torch.einsum("nij, nkj -> nik", x0_lchol, x0_lchol)
        dist = MultivariateNormal(
            loc=x0_mean,
            covariance_matrix=x0_qcov
            + torch.eye(x0_qcov.shape[-1]).unsqueeze(0) * jitter,
        )
        return dist

    def sample_numpy(self, num_samples=1, seed=None):
        # Reparameterized sampling via a (seedable) NumPy standard normal.
        x0_mean = self.mean().unsqueeze(0)
        x0_lchol = self.lchol()
        epsilon = sample_normal(
            shape=(num_samples, self.dim_n, self.dim_d), seed=seed
        )  # (S,N,D)
        zs = torch.einsum("nij, snj -> sni", x0_lchol, epsilon)
        return zs + x0_mean  # (S,N,D)

    def sample(self, num_samples=1, seed=None):
        # NOTE(review): `seed` is ignored here (rsample uses torch's global RNG).
        s = self.distirbution().rsample((num_samples,))
        return s

    def log_prob(self, x):
        return self.distirbution().log_prob(x)

    def kl(self):
        """Closed-form KL( q(x_0) || N(0, I) ), summed over sequences."""
        alpha = self.mean()  # NxD
        Lq = torch.tril(self.lchol())  # force lower triangle # NxDxD
        Lq_diag = torch.diagonal(Lq, dim1=1, dim2=2)  # NxD
        # Mahalanobis term: m^T m (prior covariance is the identity)
        mahalanobis = torch.pow(alpha, 2).sum(dim=1, keepdim=True)  # Nx1
        # Log-determinant of the covariance of q(x):
        logdet_qcov = torch.log(torch.pow(Lq_diag, 2)).sum(dim=1, keepdim=True)  # Nx1
        # Trace term: tr(S)
        trace = torch.pow(Lq, 2).sum(dim=(1, 2)).unsqueeze(1)  # NxDxD --> Nx1
        logdet_pcov = 0.0
        constant = -torch.tensor(self.dim_d)
        twoKL = logdet_pcov - logdet_qcov + mahalanobis + trace + constant
        kl = 0.5 * twoKL.sum()
        return kl  # scalar (summed over the N sequences)
class StateSequenceVariationalDistribution(nn.Module):
    """
    Base class defining the variational posterior over a full state sequence.

    Subclasses implement initialization, sampling, log-density and entropy,
    and attach an initial-state posterior via _add_initial_state.
    """

    def __init__(self, dim_n, dim_t, dim_d):
        """
        @param dim_n: N, number of sequences
        @param dim_t: T, sequence length
        @param dim_d: D, state dimensionality
        """
        super(StateSequenceVariationalDistribution, self).__init__()
        self.dim_n = dim_n
        self.dim_t = dim_t
        self.dim_d = dim_d

    def _add_initial_state(self):
        # BUG FIX: was misspelled `_add_intial_state`, so the subclasses'
        # `_add_initial_state` implementations never actually overrode it.
        raise NotImplementedError

    def _initialize(self, x0, xs):
        raise NotImplementedError

    def sample(self, num_samples, **kwargs):
        raise NotImplementedError

    def log_prob(self, x):
        raise NotImplementedError

    def entropy(self):
        raise NotImplementedError
class DeltaStateSequenceDistribution(StateSequenceVariationalDistribution):
    """
    Delta (point-mass) posterior over a state sequence: the "distribution" is
    a single learnable mean tensor (N,T,D), paired with a delta posterior
    over the initial state.
    """

    def __init__(self, dim_n, dim_t, dim_d):
        """
        @param dim_n: N number of sequences
        @param dim_t: T sequence length
        @param dim_d: D state dimensionality
        """
        super(DeltaStateSequenceDistribution, self).__init__(dim_n, dim_t, dim_d)
        init_mean = (
            np.random.normal(size=(self.dim_n, self.dim_t, self.dim_d))
            * initial_state_scale
        )
        self.param_mean = Param(init_mean, name="State distribution (mean)")  # (N,T,D)
        self._add_initial_state()

    def _add_initial_state(self):
        self.x0 = DeltaInitialDistrubution(self.dim_n, self.dim_d)

    def _initialize(self, x0, xs):
        self.x0._initialize(x0, None)
        self.param_mean().data = xs

    def mean(self):
        return self.param_mean()

    def sample(self, num_samples=1, seed=None):
        # Replicate the deterministic sequence mean and prepend an
        # initial-state sample along the time axis. `seed` is unused here.
        x0_part = self.x0.sample(num_samples).unsqueeze(2)
        xs_part = self.param_mean().unsqueeze(0).repeat(num_samples, 1, 1, 1)
        return torch.cat([x0_part, xs_part], 2)
class StateSequenceVariationalFactorizedGaussian(StateSequenceVariationalDistribution):
    """
    Time-factorized multivariate Gaussian approximation for the state
    sequence posterior q(x_s) = N(m, S), where x_s is (N,T,D), m is (N,T,D),
    S is (N,T,D,D): full-rank across state dimensions, independent across
    time steps. N is the number of sequences, T the sequence length, D the
    state dimensionality. The initial state x_0 gets its own full-rank
    Gaussian posterior.
    """

    def __init__(self, dim_n, dim_t, dim_d):
        """
        @param dim_n: N number of sequences
        @param dim_t: T sequence length
        @param dim_d: D state dimensionality
        """
        super(StateSequenceVariationalFactorizedGaussian, self).__init__(
            dim_n, dim_t, dim_d
        )
        self.param_mean = Param(
            np.random.normal(size=(self.dim_n, self.dim_t, self.dim_d))
            * initial_state_scale,
            name="State distribution (mean)",
        )  # (N,T,D)
        # Per-timestep Cholesky factors, constrained lower-triangular.
        self.param_lchol = Param(
            np.stack([np.stack([np.eye(self.dim_d)] * self.dim_t)] * self.dim_n)
            * initial_state_scale,
            # (N,T,D,D)
            transform=transforms.StackedLowerTriangular(
                self.dim_d, self.dim_n, self.dim_t
            ),
            name="State distribution (scale)",
        )
        self._add_initial_state()

    def _add_initial_state(self):
        self.x0 = StateInitialVariationalGaussian(self.dim_n, self.dim_d)

    def _initialize(self, x0, xs, xs_std=None):
        """
        Initialize posterior means (and optionally diagonal scales) from data.
        @param x0: initial states (N,D)
        @param xs: state-sequence means (N,T,D)
        @param xs_std: optional per-dimension std devs seeding diagonal Cholesky factors
        """
        self.x0._initialize(x0, None)
        self.param_mean().data = xs
        if xs_std is not None:
            # BUG FIX: originally wrote to `self.param_scale`, which does not
            # exist (AttributeError); the scale parameter is `self.param_lchol`.
            self.param_lchol.optvar.data = self.param_lchol.transform.backward_tensor(
                torch.diag_embed(xs_std)
            ).data

    def mean(self):
        return self.param_mean()

    def lchol(self):
        return self.param_lchol()

    def distribution(self):
        # S = L L^T with diagonal jitter for numerical stability.
        xs_mean = self.mean()
        xs_lchol = self.lchol()
        xs_qcov = torch.einsum("ntij, ntkj -> ntik", xs_lchol, xs_lchol)
        xs_qcov = (
            xs_qcov + torch.eye(xs_qcov.shape[-1]).unsqueeze(0).unsqueeze(0) * jitter
        )
        dist = MultivariateNormal(loc=xs_mean, covariance_matrix=xs_qcov)
        return dist

    def sample_numpy(self, num_samples=1, seed=None):
        # Reparameterized sampling with a seedable NumPy RNG; an
        # initial-state sample is prepended along the time axis.
        epsilon = sample_normal(
            shape=(num_samples, self.dim_n, self.dim_t, self.dim_d), seed=seed
        )  # (S,N,T,D)
        zs = torch.einsum("ntij, sntj->snti", self.lchol(), epsilon)
        return torch.cat(
            [
                self.x0.sample(num_samples, seed).unsqueeze(2),
                zs + self.mean().unsqueeze(0),
            ],
            2,
        )  # (S, N, T+1, D)

    def sample(self, num_samples=1, seed=None):
        return torch.cat(
            [
                self.x0.sample(num_samples).unsqueeze(2),
                self.distribution().rsample((num_samples,)),
            ],
            2,
        )  # (S, N, T+1, D)

    def entropy(self):
        return self.distribution().entropy()

    def log_prob(self, x):
        return self.distribution().log_prob(x)
| 10,358
| 31.990446
| 99
|
py
|
hgp
|
hgp-main/hgp/core/observation_likelihoods.py
|
# MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import torch
import torch.nn as nn
from torch.nn import init
from hgp.misc.constraint_utils import invsoftplus, softplus
class Gaussian(nn.Module):
    """
    Homoscedastic Gaussian likelihood with a softplus-constrained variance.
    """

    def __init__(self, ndim=1, init_val=0.01):
        """
        @param ndim: number of independent variance parameters
        @param init_val: initial (constrained) variance value
        """
        super(Gaussian, self).__init__()
        self.unconstrained_variance = torch.nn.Parameter(
            torch.ones(ndim), requires_grad=True
        )
        self._initialize(init_val)

    def _initialize(self, x):
        # Store the inverse-softplus of the target value so the constrained
        # `variance` property starts exactly at x.
        raw = invsoftplus(torch.tensor(x)).item()
        init.constant_(self.unconstrained_variance, raw)

    @property
    def variance(self):
        return softplus(self.unconstrained_variance)

    @variance.setter
    def variance(self, value):
        self.unconstrained_variance = nn.Parameter(
            invsoftplus(value), requires_grad=True
        )

    def log_prob(self, F, Y):
        # Elementwise Gaussian log-density of Y under N(F, variance).
        var = self.variance
        return -0.5 * (np.log(2.0 * np.pi) + torch.log(var) + (F - Y).pow(2) / var)
| 2,157
| 33.253968
| 88
|
py
|
hgp
|
hgp-main/hgp/models/sequence.py
|
# MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from hgp.misc.torch_utils import insert_zero_t0, compute_ts_dense
from torch import nn
import torch
def stack_segments(unstacked):
    """Flatten all leading dimensions into one, keeping the last (feature) axis."""
    feature_dim = unstacked.shape[-1]
    return unstacked.reshape(-1, feature_dim)
def unstack_segments(stacked, unstacked_shape):
    """Inverse of stack_segments: reshape a flat (K, D) tensor back to the
    given multi-dimensional shape."""
    restored = stacked.reshape(unstacked_shape)
    return restored
class BaseSequenceModel(nn.Module):
    """
    Implements base class for model for learning unknown Hamiltonian system.
    Model setup for observations on non-uniform grid or mini-batching over time can be derived from this class.
    Defines following methods:
        build_flow: given an initial state and time sequence, perform forward ODE integration
        build_flow_and_divergence: performs coupled forward ODE integration for states and density change
        build_lowerbound_terms: given observed states and time, builds individual terms for the lowerbound computation
        build_inducing_kl: computes KL divergence between inducing prior and posterior.
        forward: a wrapper for build_flow method
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        ts_dense_scale=2,
    ):
        """
        @param flow: ODE flow object (callable as flow(x0, ts, ...), with kl())
        @param num_observations: total observation count used to scale KL terms
        @param state_distribution: variational posterior over states
        @param observation_likelihood: likelihood for observed states
        @param constraint_likelihood: likelihood tying shooting segments together
        @param ts_dense_scale: densification factor for the integration time grid
        """
        super(BaseSequenceModel, self).__init__()
        self.flow = flow
        self.num_observations = num_observations
        self.state_distribution = state_distribution
        self.observation_likelihood = observation_likelihood
        self.constraint_likelihood = constraint_likelihood
        self.ts_dense_scale = ts_dense_scale

    def build_flow(self, x0, ts):
        """
        Given an initial state and time sequence, perform forward ODE integration
        Optionally, the time sequnce can be made dense based on self.ts_dense_scale prameter
        @param x0: initial state tensor (N,D)
        @param ts: time sequence tensor (T,)
        @return: forward solution tensor (N,T,D)
        """
        ts = compute_ts_dense(ts, self.ts_dense_scale)
        ys = self.flow(x0, ts, return_energy=False)
        # Subsample the dense solution back to the original grid.
        # NOTE(review): step `ts_dense_scale - 1` assumes compute_ts_dense
        # inserts (ts_dense_scale - 2) points between observations — confirm
        # against compute_ts_dense's contract.
        return ys[:, :: self.ts_dense_scale - 1, :]

    def build_lowerbound_terms(self, ys, ts, **kwargs):
        raise NotImplementedError

    def build_objective(self, ys, ts):
        raise NotImplementedError

    def build_inducing_kl(self):
        """
        Computes KL divergence between inducing prior and posterior.
        @return: inducing KL scaled by the number of observations
        """
        return self.flow.kl() / self.num_observations

    def forward(self, x0, ts):
        """
        A wrapper for build_flow method
        @param x0: initial state tensor (N,D)
        @param ts: time sequence tensor (T,)
        @return: forward solution tensor (N,T,D)
        """
        return self.build_flow(x0, ts)
class NNSequenceModel(BaseSequenceModel):
    """
    Sequence model for neural-network derivative functions: no inducing
    variables and no shooting; trains by direct forward integration from
    the first observed state of each sequence.
    """

    def build_objective(self, ys, ts):
        raise NotImplementedError

    def build_inducing_kl(self):
        # NN vector fields have no inducing variables.
        raise NotImplementedError

    def build_lowerbound_terms(self, ys_batched, ts, num_samples=1):
        """
        Integrate forward from the first observed state of each sequence and
        score the trajectory under the observation likelihood.
        @param ys_batched: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,)
        @param num_samples: must be 1 (sampling not implemented for NN models)
        @return: observation-likelihood loss (e.g. MSE) for the trajectories
        """
        assert num_samples == 1, ">1 sample not implemented for standard model."
        xs = self.build_flow(ys_batched[:, 0, :], ts)
        mse = self.observation_likelihood(xs, ys_batched)
        return mse
class NNUniformShootingModel(BaseSequenceModel):
    """
    Neural network model, with shooting: the trajectory is split into short
    segments integrated from learnable shooting states, with a penalty tying
    each segment end to the next segment's start.
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        shooting_time_factor=None,
        ts_dense_scale=2,
        alpha=100,
    ):
        """
        @param shooting_time_factor: observed steps covered by each shooting segment
        @param alpha: weight of the shooting-constraint penalty
        (remaining parameters as in BaseSequenceModel)
        """
        super(NNUniformShootingModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            ts_dense_scale=ts_dense_scale,
        )
        self.shooting_time_factor = shooting_time_factor
        self.alpha = alpha

    def build_objective(self, ys, ts):
        # Total loss = data-fit loss + weighted shooting-constraint loss.
        loss, shooting_loss = self.build_lowerbound_terms(ys, ts)
        return loss + shooting_loss

    def build_inducing_kl(self):
        # NN vector fields have no inducing variables.
        raise NotImplementedError

    def build_lowerbound_terms(self, ys, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the loss computation
        @param ys: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,), assumed uniformly spaced
        @return: data-fit loss, weighted shooting-constraint loss
        """
        assert num_samples == 1, ">1 sample not implemented for nn model."
        ss_samples = self.state_distribution.sample(
            num_samples=num_samples
        )  # (S,N,(T-1)/shooting_time_factor + 1, D)
        (S, N, N_state, D) = ss_samples.shape
        N_shooting = N_state - 1
        T = ts.shape[0]
        # Sanity-check that the state distribution matches the configured
        # segment length (uniform grid assumed).
        shooting_time_factor = (T - 1) // N_shooting
        assert (
            shooting_time_factor == self.shooting_time_factor
        ), f"{shooting_time_factor}, {T}, {N_shooting}"
        # Integrate every shooting state (except the initial one) over one
        # segment of shooting_time_factor steps, all segments in parallel.
        predicted_xs = self.flow(
            x0=stack_segments(ss_samples[:, :, 1:, :]),
            ts=ts[: shooting_time_factor + 1],
        )  # (SxNxN_state, shooting_time_factor+1, D)
        predicted_xs = unstack_segments(
            predicted_xs[:, 1:], (S, N, T - 1, D)
        )  # (S, N, T-1, D)
        # The initial state is integrated only one step forward.
        predicted_x0 = self.flow(
            x0=stack_segments(ss_samples[:, :, 0, :]),
            ts=ts[:2],
        )
        predicted_x0 = unstack_segments(predicted_x0[:, -1], (S, N, 1, D))
        predicted_xs = torch.cat([predicted_x0, predicted_xs], axis=2)
        loss = self.observation_likelihood(predicted_xs, ys.unsqueeze(0))
        # Penalize mismatch between each shooting state and the end of the
        # previous segment's prediction.
        shooting_loss = self.constraint_likelihood(
            ss_samples[:, :, 1:, :],
            predicted_xs[:, :, 0:-shooting_time_factor:shooting_time_factor, :],
        )
        return loss, self.alpha * shooting_loss
class SequenceModel(BaseSequenceModel):
    """
    Standard ODE model, with no shooting; works for irregular timepoints.
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        ts_dense_scale=2,
    ):
        super(SequenceModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            ts_dense_scale=ts_dense_scale,
        )

    def build_objective(self, ys, ts):
        """
        Compute the (negative) lowerbound objective.
        @param ys: true observation sequence
        @param ts: observation times
        @return: loss = -(observation loglik - initial-state KL - inducing KL)
        """
        observ_loglik, init_state_kl = self.build_lowerbound_terms(ys, ts)
        kl = self.build_inducing_kl()
        loss = -(observ_loglik - init_state_kl - kl)
        return loss

    def build_lowerbound_terms(self, ys, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the lowerbound computation
        @param ys: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,)
        @return: mean observation log-likelihood, scaled initial-state KL
        """
        assert num_samples == 1, ">1 sample not implemented for standard model."
        # Prepend t=0 so integration starts from the (latent) initial state.
        ts = insert_zero_t0(ts)
        x0_samples = self.state_distribution.sample(num_samples=1)[0]
        x0_kl = self.state_distribution.kl()
        # Drop the t=0 point: only observed timepoints are scored.
        xs = self.build_flow(x0_samples, ts)[:, 1:]
        loglik = self.observation_likelihood.log_prob(xs, ys)
        return loglik.mean(), x0_kl.mean() / self.num_observations
class SubSequenceModel(BaseSequenceModel):
    """
    Batched data model, with timepoints on a regular grid: each mini-batch
    sequence is integrated from its first observed state.
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        ts_dense_scale=2,
    ):
        super(SubSequenceModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            ts_dense_scale=ts_dense_scale,
        )

    def build_objective(self, ys, ts):
        """
        Compute objective for GPODE optimization.
        @param ys: true observation sequence
        @param ts: observation times
        @return: loss = -(observation loglik - inducing KL)
        """
        observ_loglik = self.build_lowerbound_terms(ys, ts)
        kl = self.build_inducing_kl()
        loss = -(observ_loglik - kl)
        return loss

    def build_lowerbound_terms(self, ys_batched, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the lowerbound computation
        @param ys_batched: observed sequence tensor (N_batch,T,D)
        @param ts: observed time sequence (T,)
        @return: mean observation log-likelihood
        """
        assert num_samples == 1, ">1 sample not implemented for standard model."
        # Integrate forward from the first observation of each sub-sequence.
        xs = self.build_flow(ys_batched[:, 0, :], ts)
        loglik = self.observation_likelihood.log_prob(xs, ys_batched)
        return loglik.mean()
class UniformShootingModel(BaseSequenceModel):
    """
    Implements shooting model for data observed on uniform time grid.
    Defines following methods:
        compute_segments: integrate all shooting states over one segment in parallel
        build_lowerbound_terms: given observed states and time, builds individual terms for the lowerbound computation
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        shooting_time_factor=None,
        energy_likelihood=None,
        ts_dense_scale=2,
    ):
        """
        @param shooting_time_factor: observed steps covered by each shooting segment
        @param energy_likelihood: likelihood tying segment energies together
            (used by the energy-conserving subclass)
        (remaining parameters as in BaseSequenceModel)
        """
        super(UniformShootingModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            ts_dense_scale=ts_dense_scale,
        )
        self.energy_likelihood = energy_likelihood
        self.constrain_energy = False
        self.shooting_time_factor = shooting_time_factor

    def compute_segments(self, ts, num_samples=1, constrain_energy=False):
        """
        Sample shooting states and integrate each one over a single segment
        (all segments in parallel), optionally tracking trajectory energy.
        @param ts: observed time sequence (T,), assumed uniformly spaced
        @param num_samples: number of reparameterized state samples S
        @param constrain_energy: also return per-segment initial energies
        @return: (ss_samples, predicted_xs[, ss_energy])
        """
        ss_samples = self.state_distribution.sample(
            num_samples=num_samples
        )  # (S,N,(T-1)/shooting_time_factor + 1, D)
        (S, N, N_state, D) = ss_samples.shape
        N_shooting = N_state - 1
        T = ts.shape[0]
        # Sanity-check that the state distribution matches the configured
        # segment length (uniform grid assumed).
        shooting_time_factor = (T - 1) // N_shooting
        assert (
            shooting_time_factor == self.shooting_time_factor
        ), f"{shooting_time_factor}, {T}, {N_shooting}"
        if constrain_energy:
            predicted_xs, predicted_energy = self.flow(
                x0=stack_segments(ss_samples),
                ts=ts[: shooting_time_factor + 1],
                return_energy=True,
            )  # (SxNxN_state, shooting_time_factor+1, D)
            # get the energy of the initial point of each segement
            # include initial x0 in ss energy as it also needs to be penalised
            ss_energy = unstack_segments(predicted_energy[:, 0], (S, N, N_state, 1))
        else:
            predicted_xs = self.flow(
                x0=stack_segments(ss_samples),
                ts=ts[: shooting_time_factor + 1],
                return_energy=False,
            )  # (SxNxN_state, shooting_time_factor+1, D)
        # get additional set of points we don't need from integrating the initial
        # condition too far,
        predicted_xs = unstack_segments(
            predicted_xs[:, 1:], (S, N, T + shooting_time_factor - 1, D)
        )
        # get rid of extraneous intial points
        predicted_xs = torch.cat(
            [
                predicted_xs[:, :, 0:1, :],
                predicted_xs[:, :, shooting_time_factor:, :],
            ],
            axis=2,
        )
        if constrain_energy:
            return ss_samples, predicted_xs, ss_energy
        else:
            return ss_samples, predicted_xs

    def build_lowerbound_terms(self, ys, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the lowerbound computation
        @param ys: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,)
        @param num_samples: number of reparametrized samples used to compute lowerbound
        @return: nll, state cross-entropy, state entropy, initial state KL
        """
        ss_samples, predicted_xs = self.compute_segments(
            ts, num_samples=num_samples, constrain_energy=False
        )
        (S, N, N_state, D) = ss_samples.shape
        N_shooting = N_state - 1
        T = ts.shape[0]
        shooting_time_factor = (T - 1) // N_shooting
        assert (
            shooting_time_factor == self.shooting_time_factor
        ), f"{shooting_time_factor}, {T}, {N_shooting}"
        observation_loglik = self.observation_likelihood.log_prob(
            predicted_xs, ys.unsqueeze(0)
        )  # (S,N,T,D)
        # compute the entropy of variational posteriors for shooting states
        state_entropy = self.state_distribution.entropy()  # (N,T-1)
        # compute the shooting constraint likelihoods: each shooting state
        # should match the end point of the preceding predicted segment
        state_constraint_logprob = self.constraint_likelihood.log_prob(
            ss_samples[:, :, 1:, :],
            predicted_xs[:, :, 0:-shooting_time_factor:shooting_time_factor, :],
        ).sum(
            3
        )  # (S,N,T-1)
        # compute initial state KL
        initial_state_kl = self.state_distribution.x0.kl()  # (1,)
        assert state_entropy.shape == (N, N_shooting)
        assert state_constraint_logprob.shape == (S, N, N_shooting)
        # All KL/entropy terms are scaled by the total observation count.
        total_state_constraint_loglik = (
            state_constraint_logprob.mean(0).sum()
        ) / self.num_observations
        scaled_state_entropy = state_entropy.sum() / self.num_observations
        scaled_initial_state_kl = initial_state_kl / self.num_observations
        return (
            observation_loglik.mean(),
            total_state_constraint_loglik,
            scaled_state_entropy,
            scaled_initial_state_kl,
        )
class ConsUniformShootingModel(UniformShootingModel):
    """
    Energy conserving shooting model, with timepoints on regular grid: adds a
    likelihood term tying consecutive segments' initial energies together.
    """

    def __init__(
        self,
        flow,
        num_observations,
        state_distribution,
        observation_likelihood,
        constraint_likelihood,
        shooting_time_factor=None,
        energy_likelihood=None,
        ts_dense_scale=2,
    ):
        super(ConsUniformShootingModel, self).__init__(
            flow=flow,
            num_observations=num_observations,
            state_distribution=state_distribution,
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            shooting_time_factor=shooting_time_factor,
            energy_likelihood=energy_likelihood,
            ts_dense_scale=ts_dense_scale,
        )
        # Enables the energy branch in compute_segments.
        self.constrain_energy = True

    def build_lowerbound_terms(self, ys, ts, num_samples=1):
        """
        Given observed states and time, builds the individual terms for the lowerbound computation
        @param ys: observed sequence tensor (N,T,D)
        @param ts: observed time sequence (T,)
        @param num_samples: number of reparametrized samples used to compute lowerbound
        @return: nll, state cross-entropy, energy cross-entropy, state entropy, initial state KL
        """
        ss_samples, predicted_xs, ss_energy = self.compute_segments(
            ts, num_samples=num_samples, constrain_energy=True
        )
        (S, N, N_state, D) = ss_samples.shape
        N_shooting = N_state - 1
        T = ts.shape[0]
        shooting_time_factor = (T - 1) // N_shooting
        assert (
            shooting_time_factor == self.shooting_time_factor
        ), f"{shooting_time_factor}, {T}, {N_shooting}"
        observation_loglik = self.observation_likelihood.log_prob(
            predicted_xs, ys.unsqueeze(0)
        )  # (S,N,T,D)
        # compute the entropy of variational posteriors for shooting states
        state_entropy = self.state_distribution.entropy()  # (N,T-1)
        # compute the shooting constraint likelihoods
        state_constraint_logprob = self.constraint_likelihood.log_prob(
            ss_samples[:, :, 1:, :],
            predicted_xs[:, :, 0:-shooting_time_factor:shooting_time_factor, :],
        ).sum(
            3
        )  # (S,N,T-1)
        # compute initial state KL
        initial_state_kl = self.state_distribution.x0.kl()  # (1,)
        assert state_entropy.shape == (N, N_shooting)
        assert state_constraint_logprob.shape == (S, N, N_shooting)
        scaled_state_constraint_loglik = (
            state_constraint_logprob.mean(0).sum()
        ) / self.num_observations
        # compute the energy likelihood: consecutive segments' initial
        # energies should agree (energy conservation along the trajectory)
        energy_constraint_logprob = self.energy_likelihood.log_prob(
            ss_energy[:, :, 1:-1, :], ss_energy[:, :, 2:, :]
        ).squeeze(3)
        scaled_energy_constraint_loglik = (
            energy_constraint_logprob.mean(0).sum() / self.num_observations
        )
        scaled_state_entropy = state_entropy.sum() / self.num_observations
        scaled_initial_state_kl = initial_state_kl / self.num_observations
        return (
            observation_loglik.mean(),
            scaled_state_constraint_loglik,
            scaled_energy_constraint_loglik,
            scaled_state_entropy,
            scaled_initial_state_kl,
        )
| 19,534
| 34.261733
| 118
|
py
|
hgp
|
hgp-main/hgp/models/initialization.py
|
# MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from hgp.misc import constraint_utils
import torch
import numpy as np
from scipy.cluster.vq import kmeans2
from hgp.core.kernels import DerivativeRBF, RBF
from scipy.signal import savgol_filter
from hgp.models.sequence import SequenceModel, UniformShootingModel
from hgp.core.dsvgp import DSVGP_Layer, Hamiltonian_DSVGP_Layer
from plum import dispatch
class MOExactHamiltonian(torch.nn.Module):
    """
    Exact GP regression from observed vector-field values Fx at states X to a
    posterior mean over inducing values at locations Z, using a derivative
    RBF kernel. Used to initialize the inducing variables of a Hamiltonian
    DSVGP layer (see initialize_inducing).
    """

    def __init__(self, X, Fx, Z, init_ls=1.3, init_var=0.5, init_noise=1e-1):
        """
        @param X: observed states (N,D)
        @param Fx: observed vector-field values (row vector over all N*D entries)
        @param Z: inducing locations (M,D)
        @param init_ls: initial kernel lengthscale
        @param init_var: initial kernel variance
        @param init_noise: initial observation-noise variance
        """
        super().__init__()
        self.X = X
        self.D = X.shape[1]
        self.N = X.shape[0]
        self.M = Z.shape[0]
        self.Fx = Fx
        self.Z = Z
        self.kern = DerivativeRBF(X.shape[1], init_ls=init_ls, init_var=init_var)
        # Noise stored in log-space so it stays positive under optimization.
        self.log_noise = torch.nn.Parameter(torch.log(torch.tensor(init_noise)))

    def construct(self):
        # Build and factorize the kernel matrices.
        # NOTE(review): use_J presumably applies the symplectic structure J to
        # the derivative kernel — confirm against DerivativeRBF.
        Ix = torch.eye(self.N * self.D) * torch.exp(self.log_noise)
        self.Kxx = self.kern.hess_K(self.X, use_J=True)  # (2DN,2DN)
        self.Kxz = self.kern.grad_K(self.X, self.Z, use_J=True)  # (M, 2DN)
        Iz = torch.eye(self.M) * 1e-6  # jitter for the inducing block
        self.Kzz = self.kern.K(self.Z)
        self.Lxx = torch.linalg.cholesky(self.Kxx + Ix)  # (N,N) or (D,N,N)
        self.Lzz = torch.linalg.cholesky(self.Kzz + Iz)

    def posterior_mean(self, whiten=True):
        """
        Posterior mean of the GP at the inducing locations, optionally in
        whitened form (premultiplied by Lzz^{-1}).
        @param whiten: return Lzz^{-1} m instead of m
        @return: (M,D)-shaped posterior mean
        """
        self.construct()
        # Solve Kxx^{-1} Fx^T via two triangular solves with the Cholesky factor.
        alpha = torch.linalg.solve_triangular(self.Lxx, self.Fx.T, upper=False)  # (N,D)
        alpha = torch.linalg.solve_triangular(self.Lxx.T, alpha, upper=True)  # (N,D)
        f_update = torch.einsum("nm, nd -> md", self.Kxz, alpha)  # (M,D)
        if whiten:
            return (
                torch.linalg.solve_triangular(
                    self.Lzz, f_update.T.unsqueeze(2), upper=False
                )
                .squeeze(2)
                .T
            )
        else:
            return f_update
@dispatch
def initialize_inducing(
    diffeq: Hamiltonian_DSVGP_Layer, data_ys, data_ts, data_noise=1e-1
):
    """
    Initialization of inducing variables for a Hamiltonian DSVGP layer.
    Inducing locations are initialized at cluster centers;
    inducing values are initialized using empirical data gradients.
    @param diffeq: a GP layer representing the differential function
    @param data_ys: observed sequence (N,T,D)
    @param data_ts: data observation times, assumed to be equally spaced
    @param data_noise: an initial guess for observation noise.
    @return: the GP object after initialization
    """
    # compute empirical gradients and scale them according to observation time.
    f_xt = np.gradient(data_ys, data_ts, axis=1)
    f_xt = f_xt.reshape(-1, data_ys.shape[-1])  # (N*T,D)
    # NOTE(review): f_xt keeps all T gradient rows while data_ys drops the
    # last timestep below, so the shared obs_index pairs gradients with
    # mismatched states for every sequence after the first — verify whether
    # the gradient should also be truncated to T-1 steps.
    data_ys = data_ys[:, :-1, :]  # (N,T-1,D)
    data_ys = data_ys.reshape(-1, data_ys.shape[-1])  # (N*T-1,D)
    with torch.no_grad():
        # Subsample at most 1000 observations to keep the exact-GP solve cheap.
        num_obs_for_initialization = np.minimum(1000, data_ys.shape[0])
        obs_index = np.random.choice(
            data_ys.shape[0], num_obs_for_initialization, replace=False
        )
        # Inducing locations: k-means cluster centers of the observed states.
        inducing_loc = torch.tensor(
            kmeans2(data_ys, k=diffeq.Um().shape[0], minit="points")[0]
        )
        data_ys = torch.tensor(data_ys[obs_index])
        f_xt = torch.tensor(f_xt[obs_index].T.reshape(1, -1))
        # Exact-GP fit from states to empirical gradients; its (whitened)
        # posterior mean at Z seeds the inducing values.
        pre_model = MOExactHamiltonian(
            data_ys,
            f_xt,
            inducing_loc,
            init_noise=0.1,
            init_ls=2.0,
            init_var=0.5,
        )
        pre_model.construct()
        inducing_val = pre_model.posterior_mean(whiten=True)
        diffeq.inducing_loc().data = inducing_loc.data  # (M,D)
        diffeq.Um().data = inducing_val.data  # (M,D)
        # Carry over the kernel hyperparameters fitted by the pre-model.
        diffeq.kern.lengthscales = pre_model.kern.lengthscales.detach()
        diffeq.kern.variance = pre_model.kern.variance.detach()
    return diffeq
def compute_gpode_intial_inducing(kern, N_u, data_ys, empirical_fs, data_noise=1e-1):
    """
    Constructs initial inducing values using the process described in appendix of
    "Bayesian inference of ODEs with Gaussian processes", Hegde et al., 2021

    @param kern: kernel object exposing K(...) and a boolean `dimwise` flag
    @param N_u: number of inducing points M
    @param data_ys: observed sequences (N,T,D)
    @param empirical_fs: empirical gradients at the observation times (N,T,D)
    @param data_noise: variance-like jitter added to the diagonal of Kxx
    @return: (inducing locations (M,D), whitened inducing values (M,D))
    """
    # BUGFIX: trim the last time step from BOTH arrays before flattening so
    # each gradient row stays aligned with its state row. Previously only
    # data_ys was trimmed, which misaligned every trajectory after the first.
    empirical_fs = empirical_fs[:, :-1, :].reshape(
        -1, empirical_fs.shape[-1]
    )  # (N*(T-1),D)
    data_ys = data_ys[:, :-1, :]  # (N,T-1,D)
    data_ys = data_ys.reshape(-1, data_ys.shape[-1])  # (N*(T-1),D)
    with torch.no_grad():
        # subsample at most 1000 (state, gradient) pairs for the exact GP fit
        num_obs_for_initialization = np.minimum(1000, data_ys.shape[0])
        obs_index = np.random.choice(
            data_ys.shape[0], num_obs_for_initialization, replace=False
        )
        inducing_loc = torch.tensor(kmeans2(data_ys, k=N_u, minit="points")[0])
        data_ys = torch.tensor(data_ys[obs_index])
        empirical_fs = torch.tensor(empirical_fs[obs_index])
        Kxx = kern.K(data_ys)  # (N,N) or (D,N,N)
        Kxz = kern.K(data_ys, inducing_loc)  # (N,M) or (D,N,M)
        Kzz = kern.K(inducing_loc)  # (M,M) or (D,M,M)
        Lxx = torch.linalg.cholesky(
            Kxx + torch.eye(Kxx.shape[1]) * data_noise
        )  # (N,N) or (D,N,N)
        Lzz = torch.linalg.cholesky(
            Kzz + torch.eye(Kzz.shape[1]) * 1e-6
        )  # (M,M) or (D,M,M)
        if not kern.dimwise:
            # shared kernel across output dimensions: single triangular solve
            alpha = torch.linalg.solve_triangular(
                Lxx, empirical_fs, upper=False
            )  # (N,D)
            alpha = torch.linalg.solve_triangular(Lxx.T, alpha, upper=True)  # (N,D)
            f_update = torch.einsum("nm, nd -> md", Kxz, alpha)  # (M,D)
        else:
            # one kernel per output dimension: batched triangular solves
            alpha = torch.linalg.solve_triangular(
                Lxx, empirical_fs.T.unsqueeze(2), upper=False
            )  # (N,D)
            alpha = torch.linalg.solve_triangular(
                Lxx.permute(0, 2, 1), alpha, upper=True
            )  # (N,D)
            f_update = torch.einsum("dnm, dn -> md", Kxz, alpha.squeeze(2))  # (M,D)
        # whiten with Lzz^{-1}, matching the whitened variational parameterization
        inducing_val = (
            torch.linalg.solve_triangular(Lzz, f_update.T.unsqueeze(2), upper=False)
            .squeeze(2)
            .T
        )  # (M,D)
    return inducing_loc.data, inducing_val.data
@dispatch
def initialize_inducing(diffeq: DSVGP_Layer, data_ys, data_ts, data_noise=1e-1):
    """
    Initialization of inducing variables for standard DSVGP layer.
    Inducing locations are initialized at cluster centers
    Inducing values are initialized using empirical data gradients.
    @param diffeq: a GP layer representing the differential function
    @param data_ys: observed sequence (N,T,D)
    @param data_ts: data observation times, assumed to be equally spaced
    @param data_noise: an initial guess for observation noise
        (NOTE(review): currently unused here; the helper uses its own default)
    @return: the gp object after initialization
    """
    # empirical time derivatives at all T observation times, shape (N,T,D);
    # NOTE(review): verify these rows stay aligned with the states after the
    # helper trims trajectories to T-1 steps.
    empirical_fs = np.gradient(data_ys, data_ts, axis=1)  # (N,T,D)
    inducing_loc, inducing_val = compute_gpode_intial_inducing(
        diffeq.kern,
        diffeq.Um().shape[0],
        data_ys,
        empirical_fs,
    )
    diffeq.inducing_loc().data = inducing_loc  # (M,D)
    diffeq.Um().data = inducing_val  # (M,D)
    return diffeq
def initialize_latents_with_data(model, data_ys, data_ts, num_samples=20):
    """
    Initializes shooting states from data.
    Initial state distribution is initialized by solving the ODE backward in time from the first
    observation after inducing variables are initialized.
    Other states are initialized at observed values.
    @param model: a gpode.UniformShootingModel object
    @param data_ys: observed state sequence (N,T,D)
    @param data_ts: observation times
    @param num_samples: number of samples to consider for initial state initialization
    @return: the model object after initialization
    """
    with torch.no_grad():
        # this makes sure we only take the data points that align with our shooting states
        try:
            init_xs = torch.tensor(
                data_ys[:, 0 : -model.shooting_time_factor : model.shooting_time_factor]
            )
        except AttributeError:
            # model without a shooting_time_factor: use all but the last step
            init_xs = torch.tensor(data_ys[:, :-1])
        ts = torch.tensor(data_ts)
        # reversed two-point grid [t1, t0]: integrate backward from the first
        # observation to obtain a state estimate at t0
        init_ts = torch.cat([ts[1:2], ts[0:1]])
        init_x0 = []
        # average several stochastic backward solves for a stable x0 estimate
        for _ in range(num_samples):
            init_x0.append(
                model.build_flow(init_xs[:, 0], init_ts).clone().detach().data[:, -1]
            )
        init_x0 = torch.stack(init_x0).mean(0)
        model.state_distribution._initialize(init_x0, init_xs)
    return model
def initalize_noisevar(model, init_noisevar):
    """
    Set the likelihood observation-noise variance to a given value.
    @param model: a gpode.SequenceModel object
    @param init_noisevar: initialization value
    @return: the model object after initialization
    """
    # write through the inverse-softplus so the constrained variance equals
    # init_noisevar after the softplus transform
    raw = constraint_utils.invsoftplus(torch.tensor(init_noisevar))
    model.observation_likelihood.unconstrained_variance.data = raw.data
    return model
def initialize_and_fix_kernel_parameters(
    model, lengthscale_value=1.25, variance_value=0.5, fix=False
):
    """
    Initializes and optionally freezes the kernel hyperparameters.
    @param model: a gpode.SequenceModel object
    @param lengthscale_value: value written to the kernel lengthscales
    @param variance_value: value written to the kernel signal variance
    @param fix: if True, exclude the kernel parameters from optimization
    @return: the model object after initialization
    """
    kern = model.flow.odefunc.diffeq.kern
    # write both hyperparameters through the inverse-softplus so their
    # constrained values equal the requested ones
    for param, value in (
        (kern.unconstrained_lengthscales, lengthscale_value),
        (kern.unconstrained_variance, variance_value),
    ):
        param.data = constraint_utils.invsoftplus(
            value * torch.ones_like(param.data)
        ).data
        if fix:
            param.requires_grad_(False)
    return model
| 11,106
| 37.835664
| 96
|
py
|
hgp
|
hgp-main/hgp/models/__init__.py
|
import hgp.models.builder
import hgp.models.initialization
import hgp.models.sequence
| 86
| 20.75
| 32
|
py
|
hgp
|
hgp-main/hgp/models/builder.py
|
# MIT License
# Copyright (c) 2021 Pashupati Hegde.
# Copyright (c) 2023 Magnus Ross.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import copy
import time
from typing import Union
import torch
from plum import dispatch
from torch.utils.data import DataLoader, Dataset
import hgp
# from scipy.stats import norm
# from scipy.special import logsumexp
import hgp.misc.metrics as metrics
from hgp.core import constraint_likelihoods as constraints
from hgp.core.dsvgp import DSVGP_Layer, Hamiltonian_DSVGP_Layer
from hgp.core.flow import Flow
from hgp.core.nn import HamiltonianNNModel, NNModel
from hgp.core.observation_likelihoods import Gaussian
from hgp.core.states import (
DeltaStateSequenceDistribution,
StateInitialVariationalGaussian,
StateSequenceVariationalFactorizedGaussian,
)
from hgp.misc import train_utils as utils
from hgp.misc.torch_utils import insert_zero_t0, numpy2torch, torch2numpy
from hgp.models.initialization import initialize_inducing, initialize_latents_with_data
from hgp.models.sequence import (
ConsUniformShootingModel,
NNSequenceModel,
NNUniformShootingModel,
SequenceModel,
SubSequenceModel,
UniformShootingModel,
)
@dispatch
def init_and_fit(
    model: Union[UniformShootingModel, SequenceModel],
    args,
    data_ts,
    data_ys,
    return_history=False,
):
    """Initialize a (shooting) HGP model from data and fit it with Adam.

    @param model: the model to train
    @param args: run configuration (num_iter, lr, log_freq, model.*)
    @param data_ts: observation times
    @param data_ys: observed sequences (N,T,D)
    @param return_history: if True, also return the trainer holding the trace
    @return: fitted model, plus the trainer when return_history is set
    """
    if args.model.inducing_init:
        model.flow.odefunc.diffeq = initialize_inducing(
            model.flow.odefunc.diffeq, data_ys, data_ts
        )
    model = initialize_latents_with_data(model, data_ys, data_ts)
    fitted, history = Trainer().train(
        model=model,
        loss_function=compute_loss,
        ys=numpy2torch(data_ys),
        ts=numpy2torch(data_ts),
        num_iter=args.num_iter,
        lr=args.lr,
        log_freq=args.log_freq,
    )
    return (fitted, history) if return_history else fitted
@dispatch
def init_and_fit(
    model: SubSequenceModel,
    args,
    data_ts,
    data_ys,
    return_history=False,
):
    """Initialize a batched HGP model from data and fit it with mini-batches.

    @param model: the subsequence model to train
    @param args: run configuration (num_iter, lr, log_freq, model.*)
    @param data_ts: observation times
    @param data_ys: observed sequences (N,T,D)
    @param return_history: if True, also return the trainer holding the trace
    @return: fitted model, plus the trainer when return_history is set
    """
    if args.model.inducing_init:
        model.flow.odefunc.diffeq = initialize_inducing(
            model.flow.odefunc.diffeq, data_ys, data_ts
        )
    fitted, history = BatchedTrainer().train(
        model=model,
        loss_function=compute_loss,
        ys=numpy2torch(data_ys),
        ts=numpy2torch(data_ts),
        num_iter=args.num_iter,
        lr=args.lr,
        log_freq=args.log_freq,
    )
    return (fitted, history) if return_history else fitted
@dispatch
def init_and_fit(
    model: NNUniformShootingModel,
    args,
    data_ts,
    data_ys,
    return_history=False,
):
    """Initialize shooting states of an NN model from data and fit it.

    @param model: the NN shooting model to train
    @param args: run configuration (num_iter, lr, log_freq)
    @param data_ts: observation times
    @param data_ys: observed sequences (N,T,D)
    @param return_history: if True, also return the trainer holding the trace
    @return: fitted model, plus the trainer when return_history is set
    """
    model = initialize_latents_with_data(model, data_ys, data_ts)
    fitted, history = NNTrainer().train(
        model=model,
        loss_function=compute_loss,
        ys=numpy2torch(data_ys),
        ts=numpy2torch(data_ts),
        num_iter=args.num_iter,
        lr=args.lr,
        log_freq=args.log_freq,
    )
    return (fitted, history) if return_history else fitted
@dispatch
def init_and_fit(
    model: NNSequenceModel,
    args,
    data_ts,
    data_ys,
    return_history=False,
):
    """Fit an NN sequence model with mini-batched subsequence training.

    @param model: the NN sequence model to train
    @param args: run configuration (num_iter, lr, log_freq, model.batch_*)
    @param data_ts: observation times
    @param data_ys: observed sequences (N,T,D)
    @param return_history: if True, also return the trainer holding the trace
    @return: fitted model, plus the trainer when return_history is set
    """
    fitted, history = BatchedNNTrainer().train(
        model=model,
        loss_function=compute_loss,
        ys=numpy2torch(data_ys),
        ts=numpy2torch(data_ts),
        num_iter=args.num_iter,
        lr=args.lr,
        log_freq=args.log_freq,
        batch_length=args.model.batch_length,
        batch_size=args.model.batch_size,
        num_val_epochs=args.model.num_val_epochs,
    )
    return (fitted, history) if return_history else fitted
def build_model(args, data_ys):
    """
    Builds a HGP model based on training sequence
    @param args: model setup arguments
    @param data_ys: observed/training sequence of (N,T,D) dimensions
    @return: a (Cons)UniformShootingModel when args.model.shooting is set,
        otherwise a SequenceModel
    @raises ValueError: if args.model.constraint_type is not gauss/laplace
    """
    N, T, D = data_ys.shape
    gp = Hamiltonian_DSVGP_Layer(
        D_in=D,
        M=args.model.num_inducing,
        S=args.model.num_features,
        q_diag=args.model.q_diag,
    )
    flow = Flow(diffeq=gp, solver=args.solver, use_adjoint=args.use_adjoint)
    observation_likelihood = Gaussian(ndim=D, init_val=args.init_noise)
    if args.model.shooting:
        if args.model.shooting_time_factor is None:
            args.model.shooting_time_factor = 1
        # shooting segments must tile the T-1 transition intervals exactly
        # (message typo fixed: "devisable" -> "divisible")
        assert (
            (T - 1) % args.model.shooting_time_factor
        ) == 0, f"T-1 must be divisible by time factor, T={T}, F={args.model.shooting_time_factor}"
        N_shooting = (T - 1) // args.model.shooting_time_factor
        if args.model.constraint_type not in ["gauss", "laplace"]:
            raise ValueError(
                "invalid constraint likelihood specification, only available options are gauss/laplace"
            )
        constraint_type_class = (
            constraints.Laplace
            if args.model.constraint_type == "laplace"
            else constraints.Gaussian
        )
        constraint_likelihood = constraint_type_class(
            d=1,
            scale=args.model.constraint_initial_scale,
            requires_grad=args.model.constraint_trainable,
        )
        # energy conservation gets its own constraint likelihood when enabled
        energy_likelihood = (
            constraint_type_class(
                d=1,
                scale=args.model.energy_constraint_initial_scale,
                requires_grad=args.model.constraint_trainable,
            )
            if args.model.constrain_energy
            else None
        )
        model = (
            ConsUniformShootingModel
            if args.model.constrain_energy
            else UniformShootingModel
        )(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=StateSequenceVariationalFactorizedGaussian(
                dim_n=N, dim_t=N_shooting, dim_d=D
            ),
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            shooting_time_factor=args.model.shooting_time_factor,
            energy_likelihood=energy_likelihood,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    else:
        model = SequenceModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=StateInitialVariationalGaussian(dim_n=N, dim_d=D),
            observation_likelihood=observation_likelihood,
            constraint_likelihood=None,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    return model
def build_subsequence_model(args, data_ys):
    """
    Builds a HGP-Batched model based on training sequence
    @param args: model setup arguments
    @param data_ys: observed/training sequence of (N,T,D) dimensions
    @return: a SubSequenceModel wrapping a Hamiltonian DSVGP flow
    """
    N, T, D = data_ys.shape
    gp_layer = Hamiltonian_DSVGP_Layer(
        D_in=D,
        M=args.model.num_inducing,
        S=args.model.num_features,
        q_diag=args.model.q_diag,
    )
    return SubSequenceModel(
        flow=Flow(diffeq=gp_layer, solver=args.solver, use_adjoint=args.use_adjoint),
        num_observations=N * T * D,
        state_distribution=None,
        observation_likelihood=Gaussian(ndim=D, init_val=args.init_noise),
        constraint_likelihood=None,
        ts_dense_scale=args.model.ts_dense_scale,
    )
def build_gpode_model(args, data_ys):
    """
    Builds a GP-ODE model based on training sequence
    @param args: model setup arguments
    @param data_ys: observed/training sequence of (N,T,D) dimensions
    @return: a UniformShootingModel when args.model.shooting is set,
        otherwise a SequenceModel
    @raises ValueError: if args.model.constraint_type is not gauss/laplace
    """
    N, T, D = data_ys.shape
    gp = DSVGP_Layer(
        D_in=D,
        D_out=D,
        M=args.model.num_inducing,
        S=args.model.num_features,
        dimwise=args.model.dimwise,
        q_diag=args.model.q_diag,
    )
    flow = Flow(diffeq=gp, solver=args.solver, use_adjoint=args.use_adjoint)
    observation_likelihood = Gaussian(ndim=D, init_val=args.init_noise)
    if args.model.shooting:
        if args.model.shooting_time_factor is None:
            args.model.shooting_time_factor = 1
        # shooting segments must tile the T-1 transition intervals exactly
        # (message typo fixed: "devisable" -> "divisible")
        assert (
            (T - 1) % args.model.shooting_time_factor
        ) == 0, "T-1 must be divisible by time factor"
        N_shooting = (T - 1) // args.model.shooting_time_factor
        if args.model.constraint_type not in ["gauss", "laplace"]:
            raise ValueError(
                "invalid constraint likelihood specification, only available options are gauss/laplace"
            )
        constraint_type_class = (
            constraints.Laplace
            if args.model.constraint_type == "laplace"
            else constraints.Gaussian
        )
        constraint_likelihood = constraint_type_class(
            d=1,
            scale=args.model.constraint_initial_scale,
            requires_grad=args.model.constraint_trainable,
        )
        model = UniformShootingModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=StateSequenceVariationalFactorizedGaussian(
                dim_n=N, dim_t=N_shooting, dim_d=D
            ),
            observation_likelihood=observation_likelihood,
            constraint_likelihood=constraint_likelihood,
            shooting_time_factor=args.model.shooting_time_factor,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    else:
        model = SequenceModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=StateInitialVariationalGaussian(dim_n=N, dim_d=D),
            observation_likelihood=observation_likelihood,
            constraint_likelihood=None,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    return model
def build_nn_model(args, data_ys):
    """
    Builds a NN model based on training sequence
    @param args: model setup arguments
    @param data_ys: observed/training sequence of (N,T,D) dimensions
    @return: an NNUniformShootingModel when args.model.shooting is set,
        otherwise an NNSequenceModel
    @raises ValueError: if args.model.flow_type is not 'hnn' or 'node'
    """
    N, T, D = data_ys.shape
    if args.model.flow_type == "hnn":
        nn = HamiltonianNNModel(
            D_in=D,
            N_layers=args.model.N_layers,
            N_nodes=args.model.N_nodes,
        )
    elif args.model.flow_type == "node":
        nn = NNModel(
            D_in=D,
            D_out=D,
            N_layers=args.model.N_layers,
            N_nodes=args.model.N_nodes,
        )
    else:
        # was a bare `raise ValueError` with no message; keep the exception
        # type but tell the user what went wrong
        raise ValueError(
            f"unknown flow_type {args.model.flow_type!r}, expected 'hnn' or 'node'"
        )
    flow = Flow(diffeq=nn, solver=args.solver, use_adjoint=args.use_adjoint)
    if args.model.shooting:
        model = NNUniformShootingModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=DeltaStateSequenceDistribution(
                dim_n=N, dim_t=T - 1, dim_d=D
            ),
            observation_likelihood=torch.nn.L1Loss(),
            constraint_likelihood=torch.nn.L1Loss(),
            shooting_time_factor=args.model.shooting_time_factor,
            ts_dense_scale=args.model.ts_dense_scale,
            alpha=args.model.alpha,
        )
    else:
        model = NNSequenceModel(
            flow=flow,
            num_observations=N * T * D,
            state_distribution=None,
            observation_likelihood=torch.nn.L1Loss(),
            constraint_likelihood=None,
            ts_dense_scale=args.model.ts_dense_scale,
        )
    # hack to get likelihoods to show nan as the model doesn't have one
    model.observation_likelihood.variance = torch.tensor(torch.nan)
    return model
@dispatch
def compute_loss(model: NNSequenceModel, ys, ts):
    """
    Compute loss for model optimization.
    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @return: loss, nan, nan (placeholders keep the trainer interface uniform)
    """
    loss = model.build_lowerbound_terms(ys, ts)
    fillers = (torch.tensor(torch.nan), torch.tensor(torch.nan))
    return (loss, *fillers)
@dispatch
def compute_loss(model: NNUniformShootingModel, ys, ts):
    """
    Compute loss for model optimization.
    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @return: total loss, observation loss, shooting loss
    """
    obs_term, shooting_term = model.build_lowerbound_terms(ys, ts)
    total = obs_term + shooting_term
    return total, obs_term, shooting_term
@dispatch
def compute_loss(model: UniformShootingModel, ys, ts, **kwargs):
    """
    Compute loss for model optimization.
    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @param kwargs: forwarded to model.build_lowerbound_terms()
    @return: loss, nll, state constraint nll, nan, initial_state_kl, inducing_kl
    """
    terms = model.build_lowerbound_terms(ys, ts, **kwargs)
    obs_loglik, constraint_logprob, entropy, x0_kl = terms
    ind_kl = model.build_inducing_kl()
    # negative ELBO: reward likelihood/constraint/entropy, penalize the KLs
    elbo = obs_loglik + constraint_logprob + entropy - x0_kl - ind_kl
    return (
        -elbo,
        -obs_loglik,
        -constraint_logprob,
        torch.tensor(torch.nan),
        x0_kl,
        ind_kl,
    )
@dispatch
def compute_loss(model: ConsUniformShootingModel, ys, ts, **kwargs):
    """
    Compute loss for model optimization.
    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @param kwargs: forwarded to model.build_lowerbound_terms()
    @return: loss, nll, state constraint nll, energy constraint nll,
        initial_state_kl, inducing_kl
    """
    terms = model.build_lowerbound_terms(ys, ts, **kwargs)
    obs_loglik, constraint_logprob, energy_logprob, entropy, x0_kl = terms
    ind_kl = model.build_inducing_kl()
    # negative ELBO with an extra energy-conservation constraint term
    elbo = (
        obs_loglik + constraint_logprob + energy_logprob + entropy - x0_kl - ind_kl
    )
    return (
        -elbo,
        -obs_loglik,
        -constraint_logprob,
        -energy_logprob,
        x0_kl,
        ind_kl,
    )
@dispatch
def compute_loss(model: SequenceModel, ys, ts, **kwargs):
    """
    Compute loss for model optimization, no shooting.
    @param model: a model object
    @param ys: true observation sequence
    @param ts: observation times
    @return: loss, nll, nan, nan, initial_state_kl, inducing_kl
    """
    obs_loglik, x0_kl = model.build_lowerbound_terms(ys, ts)
    ind_kl = model.build_inducing_kl()
    elbo = obs_loglik - x0_kl - ind_kl
    return (
        -elbo,
        -obs_loglik,
        torch.tensor(torch.nan),
        torch.tensor(torch.nan),
        x0_kl,
        ind_kl,
    )
@dispatch
def compute_loss(model: SubSequenceModel, ys, ts, **kwargs):
    """
    Compute loss for model optimization, batched training.
    @param model: a gpode.SequenceModel object
    @param ys: true observation sequence
    @param ts: observation times
    @param kwargs: forwarded (unused) keyword arguments
    @return: loss, nll, inducing_kl
    """
    obs_loglik = model.build_lowerbound_terms(ys, ts)
    ind_kl = model.build_inducing_kl()
    elbo = obs_loglik - ind_kl
    return -elbo, -obs_loglik, ind_kl
@dispatch
def compute_single_prediction(
    model: Union[
        UniformShootingModel, ConsUniformShootingModel, NNUniformShootingModel
    ],
    ts,
):
    """Draw one predictive trajectory from the optimized initial state.

    @param model: a trained shooting model
    @param ts: prediction times; t=0 is prepended to cover the initial state
    @return: one sampled trajectory
    """
    padded_ts = insert_zero_t0(ts)
    x0_draw = model.state_distribution.x0.sample().squeeze(0)
    return model(x0_draw, padded_ts)
@dispatch
def compute_single_prediction(model: Union[SequenceModel, NNSequenceModel], ts):
    """Draw one predictive trajectory from the optimized initial state.

    @param model: a trained sequence model
    @param ts: prediction times; t=0 is prepended to cover the initial state
    @return: one sampled trajectory
    """
    padded_ts = insert_zero_t0(ts)
    x0_draw = model.state_distribution.sample().squeeze(0)
    return model(x0_draw, padded_ts)
def compute_predictions(model, ts, eval_sample_size=10):
    """Sample predictive trajectories starting from the optimized initial state.

    @param model: a trained model
    @param ts: observation/prediction times
    @param eval_sample_size: number of Monte Carlo draws
    @return: stacked samples with the artificial t=0 time point stripped
    """
    model.eval()
    with torch.no_grad():
        draws = [
            compute_single_prediction(model, ts) for _ in range(eval_sample_size)
        ]
    # drop the prepended t=0 entry added by compute_single_prediction
    return torch.stack(draws, 0)[:, :, 1:]
def compute_test_predictions(model, y0, ts, eval_sample_size=10):
    """Sample predictive trajectories from a given initial state.

    @param model: a gpode.SequenceModel object
    @param y0: initial state for computing predictions (N,D)
    @param ts: observation times
    @param eval_sample_size: number of Monte Carlo draws
    @return: samples stacked along a new leading dimension
    """
    model.eval()
    with torch.no_grad():
        draws = [model(y0, ts) for _ in range(eval_sample_size)]
    return torch.stack(draws, 0)
def compute_summary(actual, predicted, noise_var, ys=1.0, squeeze_time=True):
    """
    Computes MLL, MSE and relative error between actual and predicted sequences.
    @param actual: true observation sequence
    @param predicted: predicted sequence
    @param noise_var: noise variance predicted by the model
    @param ys: optional scaling factor for standardized data
    @param squeeze_time: if True, average metrics over the time dimension
    @return: (MLL, MSE, relative error)
    """
    # undo data standardization before scoring; variance scales quadratically
    actual, predicted = actual * ys, predicted * ys
    noise_var = noise_var * ys**2 + 1e-8
    ll = metrics.log_lik(actual, predicted, noise_var)
    err = metrics.mse(actual, predicted)
    rel = metrics.rel_err(actual, predicted)
    if squeeze_time:
        return ll.mean(), err.mean(), rel.mean()
    return ll.mean(2), err.mean(2), rel
class Trainer:
    """
    A trainer class for models. Stores optimization trace for monitoring/plotting purpose
    """

    def __init__(self):
        # exponentially-smoothed running averages of each loss component
        self.loss_meter = utils.CachedRunningAverageMeter(0.98)
        self.observation_nll_meter = utils.CachedRunningAverageMeter(0.98)
        self.state_kl_meter = utils.CachedRunningAverageMeter(0.98)
        self.energy_kl_meter = utils.CachedRunningAverageMeter(0.98)
        self.init_kl_meter = utils.CachedRunningAverageMeter(0.98)
        self.inducing_kl_meter = utils.CachedRunningAverageMeter(0.98)
        self.time_meter = utils.CachedAverageMeter()
        self.compute_loss = compute_loss

    def train(self, model, loss_function, ys, ts, num_iter, lr, log_freq, **kwargs):
        """Run full-batch Adam optimization.

        @param model: model whose parameters are optimized
        @param loss_function: callable returning (loss, obs_nll, state_kl,
            energy_kl, init_kl, inducing_kl)
        @param ys: observation tensor
        @param ts: observation times
        @param num_iter: iteration budget; NOTE(review): range(1, num_iter)
            performs num_iter - 1 iterations — confirm this is intended
        @param lr: Adam learning rate
        @param log_freq: iterations between progress prints
        @return: (model, self), where self holds the optimization trace
        """
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        print("Fitting model...")
        for itr in range(1, num_iter):
            try:
                model.train()
                begin = time.time()
                optimizer.zero_grad()
                (
                    loss,
                    observation_nll,
                    state_kl,
                    energy_kl,
                    init_kl,
                    inducing_kl,
                ) = loss_function(model, ys, ts, **kwargs)
                loss.backward()
                optimizer.step()
                # record each loss component for later plotting/monitoring
                self.loss_meter.update(loss.item(), itr)
                self.observation_nll_meter.update(observation_nll.item(), itr)
                self.state_kl_meter.update(state_kl.item(), itr)
                self.energy_kl_meter.update(energy_kl.item(), itr)
                self.init_kl_meter.update(init_kl.item(), itr)
                self.inducing_kl_meter.update(inducing_kl.item(), itr)
                self.time_meter.update(time.time() - begin, itr)
                if itr % log_freq == 0:
                    log_message = (
                        "Iter {:04d} | Loss {:.2f}({:.2f}) |"
                        "OBS NLL {:.2f}({:.2f}) | XS KL {:.2f}({:.2f}) |"
                        " E KL {:.2f}({:.2f}) |"
                        "X0 KL {:.2f}({:.2f}) | IND KL {:.2f}({:.2f})".format(
                            itr,
                            self.loss_meter.val,
                            self.loss_meter.avg,
                            self.observation_nll_meter.val,
                            self.observation_nll_meter.avg,
                            self.state_kl_meter.val,
                            self.state_kl_meter.avg,
                            self.energy_kl_meter.val,
                            self.energy_kl_meter.avg,
                            self.init_kl_meter.val,
                            self.init_kl_meter.avg,
                            self.inducing_kl_meter.val,
                            self.inducing_kl_meter.avg,
                        )
                    )
                    print(log_message)
            except KeyboardInterrupt:
                # allow Ctrl-C to stop training and still return the model
                break
        return model, self
class MultiDataset(Dataset):
    """In-memory dataset of fixed-length subsequences sharing one time grid.

    Exposes .ts (shared times), .ys (items, length, dim), .len,
    .batch_length and .d for consumers such as the batched trainers.
    """

    def __init__(self, ts, ys):
        self.ts, self.ys = ts, ys
        self.len, self.batch_length = ys.shape[0], ys.shape[1]
        self.d = ys.shape[-1]

    def __getitem__(self, index):
        # every item shares self.ts, so only the sequence is returned
        return self.ys[index]

    def __len__(self):
        return self.len
class BatchedTrainer:
    """
    A trainer class for batched models. Stores optimization trace for monitoring/plotting purpose
    """

    def __init__(self):
        # exponentially-smoothed running averages of the loss components
        self.loss_meter = utils.CachedRunningAverageMeter(0.98)
        self.observation_nll_meter = utils.CachedRunningAverageMeter(0.98)
        self.state_kl_meter = utils.CachedRunningAverageMeter(0.98)

    def train(
        self,
        model,
        loss_function,
        ys,
        ts,
        num_iter,
        lr,
        log_freq,
        batch_length=10,
        batch_size=32,
        num_val_epochs=10,
        **kwargs,
    ):
        """Mini-batch training on overlapping subsequences of length batch_length.

        Every num_val_epochs epochs the model is validated by rolling out from
        the first observed state; the model with the best (lowest) mean NLL is
        kept and returned.
        @return: (best_model, self), where self holds the optimization trace
        """
        # NOTE(review): isinstance(...) would also accept subclasses; this
        # exact-type check does not — confirm the restriction is intended
        if type(model) != SubSequenceModel:
            raise ValueError("Batch training only supported for SubSequenceModel")
        # only single examples for now
        # slide a window of batch_length across time to build the training set
        batch_ys = torch.stack(
            [
                ys[:, i : i + batch_length]
                for i in range(ys.shape[1] - batch_length + 1)
            ],
            axis=0,
        )
        batch_ys = batch_ys.reshape(-1, batch_ys.shape[2], batch_ys.shape[-1])
        # assumes equally spaced times so every window can reuse ts[:batch_length]
        batch_ts = ts[:batch_length]
        dataset = MultiDataset(batch_ts, batch_ys)
        trainloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        i = 0
        best_metric = 1e6
        best_model = copy.deepcopy(model)
        print("Fitting model...")
        # outer epoch loop; actual stopping is the `i > num_iter` check below
        for itr in range(1, 100000):
            try:
                for ysi in trainloader:
                    i += 1
                    model.train()
                    optimizer.zero_grad()
                    # print(ysi.shape)
                    # ysi = ysi.reshape(-1, dataset.batch_length, dataset.d)
                    loss, obs_like, initial_kl = loss_function(
                        model, ysi, dataset.ts, **kwargs
                    )
                    loss.backward()
                    optimizer.step()
                    self.loss_meter.update(loss.item(), i)
                    self.state_kl_meter.update(initial_kl.item(), i)
                    self.observation_nll_meter.update(obs_like.item(), i)
            except KeyboardInterrupt:
                # allow Ctrl-C to stop training; best model so far is returned
                break
            if num_val_epochs:
                if itr % num_val_epochs == 0:
                    # validation: roll out from the first observed state and
                    # score mean log-likelihood on the full sequences
                    preds = hgp.models.builder.compute_test_predictions(
                        model,
                        numpy2torch(ys[:, 0, :]),
                        numpy2torch(ts),
                        eval_sample_size=2,
                    )
                    mll, _, _ = hgp.models.builder.compute_summary(
                        torch2numpy(ys),
                        torch2numpy(preds),
                        torch2numpy(model.observation_likelihood.variance),
                    )
                    mnll = -mll
                    # keep a snapshot of the best model by validation NLL
                    if mnll < best_metric:
                        best_model = copy.deepcopy(model)
                        best_metric = mnll
            if itr % log_freq == 0:
                log_message = "Iter {:04d} | Loss {:.3f}({:.3f}) | OBS {:.3f}({:.3f}) | KL {:.3f}({:.3f}) | Best Metric {:.3f}".format(
                    i,
                    self.loss_meter.val,
                    self.loss_meter.avg,
                    self.observation_nll_meter.val,
                    self.observation_nll_meter.avg,
                    self.state_kl_meter.val,
                    self.state_kl_meter.avg,
                    best_metric,
                )
                print(log_message)
            if i > num_iter:
                break
        return best_model, self
class BatchedNNTrainer:
    """
    A trainer class for batched NN models. Stores optimization trace for monitoring/plotting purpose
    """

    def __init__(self):
        # exponentially-smoothed running averages of the loss components
        self.loss_meter = utils.CachedRunningAverageMeter(0.98)
        self.observation_nll_meter = utils.CachedRunningAverageMeter(0.98)
        self.state_kl_meter = utils.CachedRunningAverageMeter(0.98)

    def train(
        self,
        model,
        loss_function,
        ys,
        ts,
        num_iter,
        lr,
        log_freq,
        batch_length=5,
        batch_size=32,
        num_val_epochs=10,
        **kwargs,
    ):
        """Mini-batch training of an NN model on overlapping subsequences.

        Every num_val_epochs epochs the model is validated by rolling out from
        the first observed state; the model with the lowest MSE is kept.
        @return: (best_model, self), where self holds the optimization trace
        """
        # only single examples for now
        # slide a window of batch_length across time to build the training set
        batch_ys = torch.stack(
            [
                ys[:, i : i + batch_length]
                for i in range(ys.shape[1] - batch_length + 1)
            ],
            axis=0,
        )
        batch_ys = batch_ys.reshape(-1, batch_ys.shape[2], batch_ys.shape[-1])
        batch_ts = ts[:batch_length]
        dataset = MultiDataset(batch_ts, batch_ys)
        trainloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        i = 0
        best_metric = 1e6
        best_model = copy.deepcopy(model)
        print("Fitting model...")
        # outer epoch loop; actual stopping is the `i > num_iter` check below
        for itr in range(1, 100000):
            try:
                for ysi in trainloader:
                    i += 1
                    model.train()
                    optimizer.zero_grad()
                    # print(ysi.shape)
                    loss, obs_loss, shooting_loss = loss_function(
                        model, ysi, dataset.ts, **kwargs
                    )
                    loss.backward()
                    optimizer.step()
                    self.loss_meter.update(loss.item(), i)
                    # BUGFIX: obs_loss and shooting_loss were written to the
                    # swapped meters, contradicting both the meter names and
                    # the "OBS"/"KL" labels in the log line below (NNTrainer
                    # records them the way they are recorded here).
                    self.observation_nll_meter.update(obs_loss.item(), i)
                    self.state_kl_meter.update(shooting_loss.item(), i)
            except KeyboardInterrupt:
                # allow Ctrl-C to stop training; best model so far is returned
                break
            if num_val_epochs:
                if itr % num_val_epochs == 0:
                    # validation: roll out from the first observed state and
                    # score MSE on the full sequences
                    # NOTE(review): ys/ts are typically already tensors here —
                    # confirm numpy2torch tolerates tensor input
                    preds = hgp.models.builder.compute_test_predictions(
                        model,
                        numpy2torch(ys[:, 0, :]),
                        numpy2torch(ts),
                        eval_sample_size=1,
                    )
                    _, mse, _ = hgp.models.builder.compute_summary(
                        torch2numpy(ys),
                        torch2numpy(preds),
                        torch2numpy(model.observation_likelihood.variance),
                    )
                    # keep a snapshot of the best model by validation MSE
                    if mse < best_metric:
                        best_model = copy.deepcopy(model)
                        best_metric = mse
            if itr % log_freq == 0:
                log_message = "Iter {:04d} | Loss {:.3f}({:.3f}) | OBS {:.3f}({:.3f}) | KL {:.3f}({:.3f}) | Best Metric {:.3f}".format(
                    i,
                    self.loss_meter.val,
                    self.loss_meter.avg,
                    self.observation_nll_meter.val,
                    self.observation_nll_meter.avg,
                    self.state_kl_meter.val,
                    self.state_kl_meter.avg,
                    best_metric,
                )
                print(log_message)
            if i > num_iter:
                break
        return best_model, self
class NNTrainer:
    """
    A trainer class for NN models. Stores optimization trace for monitoring/plotting purpose
    """

    def __init__(self):
        # exponentially-smoothed running averages of the loss components
        self.loss_meter = utils.CachedRunningAverageMeter(0.98)
        self.observation_nll_meter = utils.CachedRunningAverageMeter(0.98)
        self.state_kl_meter = utils.CachedRunningAverageMeter(0.98)

    def train(self, model, loss_function, ys, ts, num_iter, lr, log_freq, **kwargs):
        """Run full-batch Adam optimization of an NN model.

        @param model: model whose parameters are optimized
        @param loss_function: callable returning (loss, obs_loss, shooting_loss)
        @param ys: observation tensor
        @param ts: observation times
        @param num_iter: iteration budget; NOTE(review): range(1, num_iter)
            performs num_iter - 1 iterations — confirm this is intended
        @param lr: Adam learning rate
        @param log_freq: iterations between progress prints
        @return: (model, self), where self holds the optimization trace
        """
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)
        print("Fitting model...")
        for itr in range(1, num_iter):
            try:
                model.train()
                begin = time.time()
                optimizer.zero_grad()
                loss, obs_loss, shooting_loss = loss_function(model, ys, ts, **kwargs)
                loss.backward()
                optimizer.step()
                # record the loss components for later plotting/monitoring
                self.loss_meter.update(loss.item(), itr)
                self.observation_nll_meter.update(obs_loss.item(), itr)
                self.state_kl_meter.update(shooting_loss.item(), itr)
                if itr % log_freq == 0:
                    log_message = "Iter {:04d} | Loss {:.3f}({:.3f}) | Obs {:.3f}({:.3f}) | Shooting {:.3f}({:.3f}) |".format(
                        itr,
                        self.loss_meter.val,
                        self.loss_meter.avg,
                        self.observation_nll_meter.val,
                        self.observation_nll_meter.avg,
                        self.state_kl_meter.val,
                        self.state_kl_meter.avg,
                    )
                    print(log_message)
            except KeyboardInterrupt:
                # allow Ctrl-C to stop training and still return the model
                break
        return model, self
| 31,938
| 30.716981
| 135
|
py
|
hgp
|
hgp-main/hgp/datasets/__init__.py
| 0
| 0
| 0
|
py
|
|
hgp
|
hgp-main/hgp/datasets/hamiltonians.py
|
# import numpy as np
import functorch
import numpy as np
import torch
from torchdiffeq import odeint
from hgp.misc.ham_utils import build_J
from hgp.misc.torch_utils import numpy2torch, torch2numpy
# from scipy.integrate import odeint
class Data:
    """Dataset-style wrapper pairing trajectories with their shared time grid.

    Both arrays are cast to float32 once at construction so downstream torch
    tensors created from them default to single precision.
    """

    def __init__(self, ys, ts):
        self.ts, self.ys = ts.astype(np.float32), ys.astype(np.float32)

    def __len__(self):
        # number of trajectories
        return self.ys.shape[0]

    def __getitem__(self, index):
        # every item shares the same observation times
        return self.ys[index, ...], self.ts
class HamiltonianSystem:
    """
    Base class for simulated Hamiltonian systems.

    Subclasses must implement `hamiltonian(x)` and `sample_ics(N, rng, ic_mode)`.
    Construction integrates the exact dynamics with `torchdiffeq.odeint` to
    create noisy training data (`self.trn`) and noise-free test data
    (`self.tst`), each wrapped in a `Data(ys, ts)` container.
    """

    def __init__(
        self,
        state_dimension,
        frequency_train=4,
        T_train=6.0,
        frequency_test=None,
        T_test=None,
        x0=None,
        x0_test=None,
        N_x0s=None,
        N_x0s_test=None,
        noise_var=0.01,
        noise_rel=False,
        device="cpu",
        seed=121,
        ic_mode=None,
    ):
        # Independent RNG streams for observation noise and train/test ICs so
        # changing one does not perturb the others.
        noise_rng = np.random.RandomState(seed)
        init_rng_train = np.random.RandomState(seed + 1)
        init_rng_test = np.random.RandomState(seed + 2)
        # Test-time defaults fall back to the training values.
        frequency_test = (
            frequency_test if frequency_test is not None else frequency_train
        )
        T_test = T_test if T_test is not None else T_train
        # BUGFIX: the sampling frequencies were previously swapped
        # (S_test held frequency_train and S_train held frequency_test).
        self.S_test = frequency_test
        self.T_test = T_test
        self.S_train = frequency_train
        self.T_train = T_train
        if N_x0s is None:
            N_x0s = 10
        if x0 is None:
            x0 = self.sample_ics(N_x0s, ic_mode=ic_mode, rng=init_rng_train)
        if x0_test is None and N_x0s_test is not None:
            x0_test = self.sample_ics(N_x0s_test, ic_mode=ic_mode, rng=init_rng_test)
        if N_x0s_test is None:
            N_x0s_test = N_x0s
        if x0_test is None:
            x0_test = x0
        self.state_dimension = state_dimension
        # Symplectic structure matrix J = [[0, I], [-I, 0]].
        self.J = build_J(state_dimension).float()
        self.x0 = x0
        self.x0_test = x0_test
        self.noise_var = noise_var
        # NOTE(review): the +1 on the training sequence length (and not on the
        # test one) reproduces the original behaviour -- confirm it is intended.
        xs_train, ts_train = self.generate_sequence(
            x0=self.x0, sequence_length=int(frequency_train * T_train) + 1, T=T_train
        )
        xs_test, ts_test = self.generate_sequence(
            x0=self.x0_test, sequence_length=int(frequency_test * T_test), T=T_test
        )
        # Additive Gaussian observation noise; when noise_rel is set it is
        # scaled by each trajectory's per-dimension standard deviation.
        xs_train = xs_train + noise_rng.normal(size=xs_train.shape) * (
            self.noise_var**0.5
        ) * (1.0 if not noise_rel else xs_train.std(axis=(1))[:, None, :])
        self.trn = Data(ys=xs_train, ts=ts_train)
        self.tst = Data(ys=xs_test, ts=ts_test)
        # Training-set statistics used by scale_output / unscale_output.
        self.mean_std_ys = self.trn.ys.mean(axis=(0, 1)), self.trn.ys.std(axis=(0, 1))
        self.max_trn = self.trn.ts.max()

    def f(self, t, x):
        """
        Computes derivative function from H
        """
        # Hamiltonian dynamics: dx/dt = J . grad H(x).
        dHdx = functorch.grad(lambda xi: self.hamiltonian(xi).sum())(x)
        return dHdx @ self.J.T

    def generate_sequence(self, x0, sequence_length, T):
        """
        Generates trajectories given derivative function
        """
        with torch.no_grad():
            ts = torch.linspace(0, 1, sequence_length) * T
            x0 = torch.tensor(x0, dtype=torch.float32, requires_grad=False)
            # odeint returns (T, N, D); permute to (N, T, D).
            xs = torch2numpy(
                odeint(
                    self.f,
                    x0,
                    ts,
                ).permute(1, 0, 2)
            )
        return xs, torch2numpy(ts)

    def scale_output(self, x):
        """Standardize states with the training mean/std."""
        return (x - self.mean_std_ys[0]) / self.mean_std_ys[1]

    def unscale_output(self, x):
        """Invert `scale_output`."""
        return x * self.mean_std_ys[1] + self.mean_std_ys[0]

    def scale_t(self, t):
        """Normalize time by the last training timestamp."""
        return t / self.max_trn

    def unscale_t(self, t):
        """Invert `scale_t`."""
        return t * self.max_trn

    def scale_ts(self):
        """Normalize both splits' time grids in place."""
        self.trn.ts = self.scale_t(self.trn.ts)
        self.tst.ts = self.scale_t(self.tst.ts)

    def scale_ys(self):
        """Standardize both splits' states and the stored ICs in place."""
        self.tst.ys = self.scale_output(self.tst.ys)
        self.trn.ys = self.scale_output(self.trn.ys)
        self.x0_test = self.scale_output(self.x0_test)
        self.x0 = self.scale_output(self.x0)
class SimplePendulum(HamiltonianSystem):
    """Planar pendulum with 2-D state (angle q, momentum p)."""

    def __init__(
        self,
        **kwargs,
    ):
        super(SimplePendulum, self).__init__(2, **kwargs)
        # Plot limits padded slightly beyond the test trajectories.
        q_max = self.tst.ys[:, :, 0].max()
        p_max = self.tst.ys[:, :, 1].max()
        self.xlim = (-q_max - 0.1, q_max + 0.1)
        self.ylim = (-p_max - 0.1, p_max + 0.1)
        self.name = "simple-pendulum"

    def sample_ics(self, N, rng, ic_mode=None):
        """Rejection-sample N states in [-1, 1]^2 with energy below m*g*r."""
        accepted = []
        while len(accepted) < N:
            candidate = rng.rand(2) * 2.0 - 1.0
            energy = self.hamiltonian(numpy2torch(candidate))
            if energy < 9.81:
                accepted.append(candidate)
        return np.array(accepted)

    def hamiltonian(self, x, m=1, g=9.81, r=1):
        """H(q, p) = m g r (1 - cos q) + p^2 / (2 m r^2)."""
        q, p = torch.split(x, x.shape[-1] // 2, dim=-1)
        return m * g * r * (1 - torch.cos(q)) + 0.5 / (r**2 * m) * p**2
class SpringPendulum(HamiltonianSystem):
    """Pendulum on a spring: 4-D state (extension, angle, and their momenta)."""

    def __init__(
        self,
        **kwargs,
    ):
        # Physical constants: mass, rest length, spring constant, gravity.
        self.m = 1.0
        self.l0 = 3.0
        self.k = 10
        self.g = 9.81
        super(SpringPendulum, self).__init__(4, **kwargs)
        self.lim = self.trn.ys.max()
        self.name = "spring-pendulum"

    def sample_ics(self, N, rng, ic_mode=None):
        """Draw N initial states uniformly from [-0.25, 0.25]^4."""
        return rng.uniform(low=-0.25, high=0.25, size=(N, 4))

    def hamiltonian(self, x):
        """Kinetic + elastic + gravitational potential energy."""
        q, p = torch.split(x, x.shape[-1] // 2, dim=-1)
        radial = q[..., 0] + self.l0
        kinetic = (
            0.5
            * (1 / self.m)
            * (p[..., 0] ** 2 + p[..., 1] ** 2 / radial**2)
        )
        elastic = 0.5 * self.k * (q[..., 0]) ** 2
        potential = -self.m * self.g * radial * torch.cos(q[..., 1])
        return kinetic + elastic + potential
class HenonHeiles(HamiltonianSystem):
    """Henon-Heiles system (4-D state) with nonlinearity strength `mu`."""

    def __init__(
        self,
        **kwargs,
    ):
        self.mu = 0.8
        super(HenonHeiles, self).__init__(4, **kwargs)
        self.lim = self.trn.ys.max()
        self.name = "henon-heiles"

    def sample_ics(self, N, rng, ic_mode=None):
        """Rejection-sample N states in [-1, 1]^4 below the escape energy."""
        accepted = []
        while len(accepted) < N:
            candidate = rng.uniform(low=-1.0, high=1.0, size=4)
            energy = self.hamiltonian(numpy2torch(candidate))
            # Bounded-motion threshold for this parameterization.
            if energy < 1 / (6 * self.mu**2):
                accepted.append(candidate)
        return np.array(accepted)

    def hamiltonian(self, x):
        """H = mu * (q1^2 q2 - q2^3 / 3) + 0.5 * |x|^2."""
        q, p = torch.split(x, x.shape[-1] // 2, dim=-1)
        cubic = self.mu * (q[..., 0] ** 2 * q[..., 1] - q[..., 1] ** 3 / 3)
        quadratic = 0.5 * (x**2).sum(-1)
        return cubic + quadratic
def load_system_from_name(name):
    """Look up a Hamiltonian system class by its string identifier.

    Raises KeyError for unknown names (behaviour relied on by callers).
    """
    registry = {
        "simple-pendulum": SimplePendulum,
        "henon-heiles": HenonHeiles,
        "spring-pendulum": SpringPendulum,
    }
    return registry[name]
| 6,888
| 26.890688
| 86
|
py
|
hgp
|
hgp-main/hgp/misc/plot_utils.py
|
from hgp.misc.torch_utils import torch2numpy, numpy2torch
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib
import shutil
import numpy as np
from hgp.models.builder import compute_summary
def plot_predictions(data, test_pred, save=None, test_true=None, model_name="Model"):
    """
    Plot sampled test-trajectory predictions against the true trajectory.

    Only the first test sequence (index 0) is drawn, one panel per state
    dimension (q components on top, p components below).

    @param data: dataset with `trn`/`tst` splits and `state_dimension`
    @param test_pred: prediction samples -- assumed (S, N, T, D); confirm with caller
    @param save: if given, save the figure to this path and close it
    @param test_true: unused; kept for call-site compatibility
    @param model_name: label used in the legend and title
    """
    test_ts, test_ys = data.tst.ts, data.tst.ys
    fig, axs = plt.subplots(
        data.state_dimension,
        1,
        figsize=(
            12,
            2 * data.state_dimension,
        ),
        sharex=True,
        sharey=True,
        squeeze=True,
    )
    for d in range(data.state_dimension):
        # Sample paths (colored), ground truth (black line), training points on top.
        axs[d].plot(test_ts, test_pred[:, 0, :, d].T, c=cm.Set1(2), alpha=0.1, zorder=4)
        axs[d].plot(test_ts, test_ys[0, :, d], c="k", alpha=0.7, zorder=2)
        axs[d].scatter(
            data.trn.ts,
            data.trn.ys[0, :, d],
            c="k",
            s=20,
            marker=".",
            zorder=200,
        )
        axs[d].set_xlabel("$t$")
        # Empty artists exist only to populate the legend.
        # FIX: raw strings avoid the invalid "\m" escape-sequence warning
        # while producing the exact same label text.
        axs[d].scatter([], [], c="k", s=20, marker=".", label="Training data")
        axs[d].plot([], [], c="k", alpha=0.7, label=r"True $\mathbf{x}(t)$")
        axs[d].plot(
            [],
            [],
            c=cm.Set1(2),
            alpha=0.9,
            label=model_name + r" Pred. $\mathbf{x}(t)$",
        )
    axs[0].legend(loc="upper right")
    axs[0].set_title(model_name)
    for i in range(data.state_dimension):
        # First half of the dims are positions q, second half momenta p.
        half_d = data.state_dimension // 2
        ax_label = ("q_" if i < half_d else "p_") + str(i % half_d + 1)
        axs[i].set_ylabel(f"${ax_label}$")
    plt.tight_layout()
    if save:
        plt.savefig(save)
        plt.close()
    else:
        plt.show()
def plot_longitudinal(data, test_pred, save=None, test_true=None):
    """
    Plot per-trajectory predictive posteriors: sample mean, +/-2 std posterior
    and predictive bands, individual sample paths, the true trajectory, and
    (when train/test sequences align) the training observations.

    @param data: dataset with `trn`/`tst` splits and `state_dimension`
    @param test_pred: prediction samples -- assumed (S, N, T, D); confirm with caller
    @param noisevar: observation-noise variance added to the posterior variance
    @param save: path prefix; one figure per trajectory is written to `<save>t<n>.pdf`
    @param test_true: optional (ts, ys) pair overriding the stored ground truth
    """
    # Moments over the sample axis.
    test_pred_mean, test_pred_postvar = test_pred.mean(0), test_pred.var(0)
    # Predictive variance = posterior variance + observation noise.
    test_pred_predvar = test_pred_postvar + noisevar
    if test_true is None:
        test_ts, test_ys = data.tst.ts, data.tst.ys
    else:
        test_ts, test_ys = test_true
    # One figure per test trajectory.
    for n in range(test_pred_mean.shape[0]):
        fig, axs = plt.subplots(
            data.state_dimension,
            1,
            figsize=(
                3 * data.state_dimension,
                8 * 1,
            ),
        )
        for d in range(data.state_dimension):
            axs[d].plot(test_ts, test_pred_mean[n, :, d], c="r", alpha=0.7, zorder=3)
            # +/- 2 standard deviations of the posterior (red band).
            axs[d].fill_between(
                test_ts,
                test_pred_mean[n, :, d] - 2 * test_pred_postvar[n, :, d] ** 0.5,
                test_pred_mean[n, :, d] + 2 * test_pred_postvar[n, :, d] ** 0.5,
                color="r",
                alpha=0.1,
                zorder=1,
                label="posterior",
            )
            # +/- 2 standard deviations of the predictive distribution (blue band).
            axs[d].fill_between(
                test_ts,
                test_pred_mean[n, :, d] - 2 * test_pred_predvar[n, :, d] ** 0.5,
                test_pred_mean[n, :, d] + 2 * test_pred_predvar[n, :, d] ** 0.5,
                color="b",
                alpha=0.1,
                zorder=0,
                label="predictive",
            )
            axs[d].plot(test_ts, test_pred[:, n, :, d].T, c="g", alpha=0.1, zorder=4)
            axs[d].plot(test_ts, test_ys[n, :, d], c="k", alpha=0.7, zorder=2)
            # Overlay training observations only when the splits align 1:1.
            if data.trn.ys.shape[0] == data.tst.ys.shape[0]:
                axs[d].scatter(
                    data.trn.ts,
                    data.trn.ys[n, :, d],
                    c="k",
                    s=100,
                    marker=".",
                    zorder=200,
                )
            axs[d].set_title("State {}".format(d + 1))
            axs[d].set_xlabel("Time")
            # Empty artists populate the legend.
            axs[d].scatter([], [], c="k", s=10, marker=".", label="train obs")
            axs[d].plot([], [], c="k", alpha=0.7, label="true")
            axs[d].plot([], [], c="r", alpha=0.7, label="predicted")
            axs[d].legend(loc="upper right")
        fig.suptitle("Predictive posterior")
        fig.subplots_adjust(wspace=0.2, hspace=0.2)
        plt.tight_layout()
        if save:
            plt.savefig(save + f"t{n}.pdf")
            plt.close()
        else:
            return fig, axs
def plot_traces(model, data, test_pred, save=None):
    """Plot per-timestep MLL, MSE, prediction variance, and energy error."""
    # NOTE(review): the sibling `plot_comparison_traces` unpacks THREE values
    # from compute_summary (mll, mse, rel_err) with the same arguments; if
    # compute_summary returns three values this two-way unpack raises
    # ValueError -- confirm against hgp.models.builder.compute_summary.
    mll, mse = compute_summary(
        data.tst.ys,
        torch2numpy(test_pred),
        torch2numpy(model.observation_likelihood.variance),
        squeeze_time=False,
    )
    fig, axs = plt.subplots(2, 2, figsize=(20, 10))
    (ax1, ax2, ax3, ax4) = axs.flatten()
    ax1.plot(data.tst.ts, mll.T)
    ax1.set_title("MLL")
    ax2.plot(data.tst.ts, mse.T)
    ax2.set_title("MSE")
    # Spread of the prediction samples, averaged over state dimensions.
    ax3.plot(data.tst.ts, np.var(torch2numpy(test_pred), axis=0).mean(-1).T)
    ax3.set_title("Variance")
    # Squared energy error of the mean prediction vs. the true Hamiltonian.
    pred_energy = torch2numpy(data.hamiltonian(numpy2torch(test_pred)))
    true_energy = torch2numpy(data.hamiltonian(numpy2torch(data.tst.ys)))
    energy_err = np.power(true_energy - pred_energy.mean(0), 2)
    ax4.plot(data.tst.ts, energy_err.T)
    ax4.set_title("Energy MSE")
    if save:
        plt.savefig(save)
    else:
        plt.show()
def plot_comparison_traces(test_preds, obs_noises, data, save=None, names=None):
    """
    Overlay per-model test-time metric traces (MLL, relative error, predictive
    variance, energy error) for any number of models.

    @param test_preds: list of prediction sample arrays/tensors, one per model
    @param obs_noises: list of observation-noise variances, one per model
    @param data: dataset providing the `tst` split and `hamiltonian`
    @param save: if given, save the figure instead of showing it
    @param names: optional list of legend labels, one per model
    """
    fig, axs = plt.subplots(2, 2, figsize=(20, 10))
    # BUGFIX: the default used to be hard-coded to [None, None], which made the
    # zip below silently truncate when more than two models were passed.
    names = [None] * len(test_preds) if names is None else names
    for i, (noise, test_pred, name) in enumerate(zip(obs_noises, test_preds, names)):
        mll, mse, rel_err = compute_summary(
            data.tst.ys,
            torch2numpy(test_pred),
            torch2numpy(noise),
            squeeze_time=False,
        )
        (ax1, ax2, ax3, ax4) = axs.flatten()
        ax1.plot(data.tst.ts, mll.T, c=cm.Set2(i), alpha=0.7)
        # Empty artist provides the legend entry for this model.
        ax1.plot([], [], c=cm.Set2(i), label=name)
        ax1.set_title("MLL")
        ax2.plot(data.tst.ts, rel_err.T, c=cm.Set2(i), alpha=0.7)
        ax2.set_title("Relative Error")
        ax3.plot(
            data.tst.ts,
            np.var(torch2numpy(test_pred), axis=0).mean(-1).T,
            c=cm.Set2(i),
            alpha=0.7,
        )
        ax3.set_title("Variance")
        # Energy error of the mean prediction vs. the true Hamiltonian.
        pred_energy = torch2numpy(data.hamiltonian(numpy2torch(test_pred)))
        true_energy = torch2numpy(data.hamiltonian(numpy2torch(data.tst.ys)))
        # NOTE(review): this is a root-squared-error but the panel is titled
        # "Energy MSE" -- confirm the intended label.
        energy_err = np.sqrt(np.power(true_energy - pred_energy.mean(0), 2))
        ax4.plot(data.tst.ts, np.squeeze(energy_err).T, c=cm.Set2(i), alpha=0.7)
        ax4.set_title("Energy MSE")
    for ax in axs.flatten():
        # Mark where extrapolation beyond the training window begins.
        ax.axvline(
            data.trn.ts.max(),
            ls=":",
            c="grey",
            alpha=0.5,
            label="End of train period",
        )
        ax.set_xlabel("T (s)")
    ax1.legend()
    if save:
        plt.savefig(save)
    else:
        plt.show()
def plot_learning_curve(history, save=None):
    """Plot optimization traces: total loss, observation NLL, and state KL."""
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 3))
    ax1.plot(history.loss_meter.iters, history.loss_meter.vals)
    ax1.set_title("Loss function")
    ax1.set_yscale("log")
    try:
        ax2.plot(
            history.observation_nll_meter.iters, history.observation_nll_meter.vals
        )
        ax2.set_title("Observation NLL")
        # ax2.set_yscale("log")
        ax3.plot(history.state_kl_meter.iters, history.state_kl_meter.vals)
        ax3.set_title("State KL")
        ax3.set_yscale("log")
        # deals with nn plotting
        # (NN trainers may lack the NLL/KL meters; AttributeError skips panels)
    except AttributeError:
        pass
    if save:
        plt.savefig(save)
    else:
        plt.show()
| 7,650
| 31.012552
| 88
|
py
|
hgp
|
hgp-main/hgp/misc/settings.py
|
import torch
import numpy
class Settings:
    """Global numeric-precision and device configuration, exposed as properties."""

    def __init__(self):
        pass

    @property
    def torch_int(self):
        # Default torch integer dtype.
        return torch.int32

    @property
    def numpy_int(self):
        # Default numpy integer dtype.
        return numpy.int32

    @property
    def torch_float(self):
        # Default torch floating-point dtype.
        return torch.float32

    @property
    def numpy_float(self):
        # Default numpy floating-point dtype.
        return numpy.float32

    @property
    def device(self):
        # Prefer the first GPU when available, otherwise fall back to CPU.
        return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    @property
    def jitter(self):
        # Diagonal jitter added to kernel matrices for numerical stability.
        return 1e-5


settings = Settings()
| 630
| 16.054054
| 77
|
py
|
hgp
|
hgp-main/hgp/misc/ham_utils.py
|
import torch
def build_J(D_in):
    """Return the D_in x D_in symplectic matrix J = [[0, I], [-I, 0]]."""
    assert D_in % 2 == 0
    half = D_in // 2
    eye = torch.eye(half)
    zero = torch.zeros((half, half))
    top = torch.cat((zero, eye), dim=1)
    bottom = torch.cat((-eye, zero), dim=1)
    return torch.cat((top, bottom), dim=0)
| 241
| 21
| 47
|
py
|
hgp
|
hgp-main/hgp/misc/train_utils.py
|
import random
import numpy as np
import torch
def seed_everything(seed):
    """Seed python, numpy, and torch RNGs and force deterministic torch ops."""
    # Same seeding order as before; only the shape of the code changed.
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed):
        seeder(seed)
    torch.use_deterministic_algorithms(True)
def get_logger(logpath, filepath, add_stdout=True):
    """
    Configure and return the root logger, appending INFO+ records to `logpath`.

    @param logpath: file the log is appended to
    @param filepath: identifier (e.g. the running script) logged as the first record
    @param add_stdout: also echo log records to the console
    @return: the configured root logger
    """
    # BUGFIX: this module never imported `logging` at the top level, so every
    # call raised NameError; import it locally to keep the fix self-contained.
    import logging

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    info_file_handler = logging.FileHandler(logpath, mode="a")
    info_file_handler.setLevel(logging.INFO)
    logger.addHandler(info_file_handler)
    logger.info(filepath)
    if add_stdout:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
        logger.addHandler(console_handler)
    return logger
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.sum = self.sum + n * val
        self.count = self.count + n
        self.avg = self.sum / self.count
class CachedAverageMeter(object):
    """Computes and stores the average and current value over optimization iterations"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear statistics and the cached per-iteration trace."""
        self.val = self.avg = self.sum = self.count = 0
        self.vals, self.iters = [], []

    def update(self, val, iter, n=1):
        """Record `val` (weight n) at iteration `iter` and cache it for plotting."""
        self.vals.append(val)
        self.iters.append(iter)
        self.val = val
        self.sum = self.sum + n * val
        self.count = self.count + n
        self.avg = self.sum / self.count
class RunningAverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget the running average; the first update re-initializes it."""
        self.val = None
        self.avg = 0

    def update(self, val):
        """Fold `val` into the exponential moving average."""
        if self.val is None:
            self.avg = val
        else:
            self.avg = self.momentum * self.avg + (1 - self.momentum) * val
        self.val = val
class CachedRunningAverageMeter(object):
    """Computes and stores the average and current value over optimization iterations"""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget the running average and the cached trace."""
        self.val = None
        self.avg = 0
        self.vals, self.iters = [], []

    def update(self, val, iter):
        """Fold `val` into the EMA and cache it against iteration `iter`."""
        if self.val is None:
            self.avg = val
        else:
            self.avg = self.momentum * self.avg + (1 - self.momentum) * val
        self.val = val
        self.vals.append(val)
        self.iters.append(iter)
| 2,731
| 23.836364
| 88
|
py
|
hgp
|
hgp-main/hgp/misc/torch_utils.py
|
from hgp.misc.settings import settings
import numpy as np
import torch
# Module-wide tensor placement defaults pulled from the global settings object.
device = settings.device
dtype = settings.torch_float
def numpy2torch(x):
    """Convert a numpy array to a tensor with the module dtype, or move an
    existing tensor, onto the configured device."""
    # Strict type check (not isinstance) kept to match original behaviour.
    if type(x) is np.ndarray:
        return torch.tensor(x, dtype=dtype).to(device)
    return x.to(device)
def torch2numpy(x):
    """Return `x` as a numpy array; numpy inputs pass through unchanged."""
    if type(x) is np.ndarray:
        return x
    return x.detach().cpu().numpy()
def restore_model(model, filename):
    """Load a checkpoint written by `save_model` into `model` and return it."""
    # map_location keeps tensors where they were serialized.
    checkpoint = torch.load(filename, map_location=lambda storage, location: storage)
    model.load_state_dict(checkpoint["state_dict"])
    return model
def save_model(model, filename):
    """Write the model's state_dict to `filename` as a checkpoint dict."""
    checkpoint = {"state_dict": model.state_dict()}
    torch.save(checkpoint, filename)
def save_model_optimizer(model, optimizer, filename):
    """Write model and optimizer state_dicts to `filename` for later resume."""
    checkpoint = {
        "state_dict": model.state_dict(),
        "optimizer_state_dict": optimizer.state_dict(),
    }
    torch.save(checkpoint, filename)
def insert_zero_t0(ts):
    """Shift all timestamps forward by one grid step and prepend t = 0."""
    shifted = ts + ts[1] - ts[0]
    return torch.cat([torch.tensor([0.0]), shifted])
def compute_ts_dense(ts, ts_dense_scale):
    """
    Given a time sequence ts, this makes it dense by adding intermediate time points.
    @param ts: time sequence
    @param ts_dense_scale: dense factor
    @return: dense time sequence
    """
    if ts_dense_scale <= 1:
        # Nothing to densify.
        return ts
    # Subdivide each interval; drop the right endpoint to avoid duplicates,
    # then append the final timestamp once.
    pieces = [
        torch.linspace(t1, t2, ts_dense_scale)[:-1]
        for (t1, t2) in zip(ts[:-1], ts[1:])
    ]
    pieces.append(ts[-1:])
    return torch.cat(pieces)
| 1,487
| 22.619048
| 85
|
py
|
hgp
|
hgp-main/hgp/misc/metrics.py
|
import numpy as np
from scipy.special import logsumexp
from scipy.stats import norm
def log_lik(actual, predicted, noise_var):
    """Monte-Carlo marginal log-likelihood, averaging over the sample axis 0."""
    n_samples = predicted.shape[0]
    sample_logpdf = norm.logpdf(actual, loc=predicted, scale=noise_var**0.5)
    # log( (1/S) * sum_s exp(logpdf_s) ) computed stably.
    return logsumexp(sample_logpdf, 0, b=1 / float(n_samples))
def mse(actual, predicted):
    """Elementwise squared error between `actual` and the sample-mean prediction."""
    mean_prediction = predicted.mean(0)
    return (actual - mean_prediction) ** 2
def rel_err(actual, predicted):
    """Relative RMSE over the last axis, normalized by the combined magnitudes
    of the mean prediction and the ground truth."""
    mean_prediction = predicted.mean(0)
    numerator = np.sqrt(((actual - mean_prediction) ** 2).mean(-1))
    denominator = np.sqrt((mean_prediction**2).mean(-1)) + np.sqrt(
        (actual**2).mean(-1)
    )
    return numerator / denominator
| 561
| 27.1
| 82
|
py
|
hgp
|
hgp-main/hgp/misc/__init__.py
| 0
| 0
| 0
|
py
|
|
hgp
|
hgp-main/hgp/misc/constraint_utils.py
|
import torch
import torch.nn.functional as F
def softplus(x):
    """Softplus with a tiny positive floor so the output is strictly positive."""
    floor = 1e-12
    return F.softplus(x) + floor
def invsoftplus(x):
    """Numerically stable inverse of `softplus` (up to the 1e-12 floor)."""
    floor = 1e-12
    # Keep the argument away from zero before taking logs.
    xs = torch.max(x - floor, torch.tensor(torch.finfo(x.dtype).eps).to(x))
    return xs + torch.log(-torch.expm1(-xs))
| 276
| 18.785714
| 75
|
py
|
hgp
|
hgp-main/hgp/misc/param.py
|
import numpy as np
import torch
from hgp.misc import transforms
from hgp.misc.settings import settings
class Param(torch.nn.Module):
    """
    A class to handle constrained --> unconstrained optimization using variable transformations.
    Similar to Parameter class in GPflow : https://github.com/GPflow/GPflow/blob/develop/gpflow/base.py
    """

    def __init__(self, value, transform=transforms.Identity(), name="var"):
        """
        @param value: initial value in the constrained space
        @param transform: bijection between constrained and unconstrained space
        @param name: display name used by __repr__
        """
        # NOTE: the shared Identity() default is safe only because Identity is
        # stateless.
        super(Param, self).__init__()
        self.transform = transform
        self.name = name
        # Optimization happens in the unconstrained space.
        value_ = self.transform.backward(value)
        # BUGFIX: build the tensor on the target device *before* wrapping it in
        # a Parameter. Calling .to(device) on a Parameter returns a plain,
        # non-leaf tensor copy when it actually moves devices, so the variable
        # would not be registered or optimized correctly on GPU.
        self.optvar = torch.nn.Parameter(
            torch.tensor(
                data=np.array(value_),
                dtype=settings.torch_float,
                device=settings.device,
            )
        )

    def __call__(self):
        """Return the parameter mapped back to the constrained space."""
        return self.transform.forward_tensor(self.optvar)

    def __repr__(self):
        return "{} parameter with {}".format(self.name, self.transform.__str__())
| 913
| 31.642857
| 103
|
py
|
hgp
|
hgp-main/hgp/misc/transforms.py
|
from hgp.misc.settings import settings
import numpy as np
import torch
import torch.nn.functional as F
class Identity:
    """No-op transform: constrained and unconstrained spaces coincide."""

    def __init__(self):
        pass

    def __str__(self):
        return "Identity transformation"

    def forward(self, x):
        """Unconstrained -> constrained (numpy); identity."""
        return x

    def backward(self, y):
        """Constrained -> unconstrained (numpy); identity."""
        return y

    def forward_tensor(self, x):
        """Unconstrained -> constrained (torch); identity."""
        return x

    def backward_tensor(self, y):
        """Constrained -> unconstrained (torch); identity."""
        return y
class SoftPlus:
    """Softplus transform mapping the real line onto (lower, inf)."""

    def __init__(self, lower=1e-12):
        self._lower = lower

    def __str__(self):
        return "Softplus transformation"

    def forward(self, x):
        """Numpy softplus: log(1 + e^x) + lower, computed stably."""
        return np.logaddexp(0, x) + self._lower

    def backward(self, y):
        """Numpy inverse softplus, clipped away from zero for stability."""
        ys = np.maximum(y - self._lower, np.finfo(settings.numpy_float).eps)
        return ys + np.log(-np.expm1(-ys))

    def forward_tensor(self, x):
        """Torch softplus + lower."""
        return F.softplus(x) + self._lower

    def backward_tensor(self, y):
        """Torch inverse softplus, clipped away from zero for stability."""
        ys = torch.max(y - self._lower, torch.tensor(torch.finfo(y.dtype).eps).to(y))
        return ys + torch.log(-torch.expm1(-ys))
class LowerTriangular:
    """Packs/unpacks a batch of lower-triangular matrices to/from flat vectors."""

    def __init__(self, N, num_matrices=1):
        self.N = N
        self.num_matrices = num_matrices  # We need to store this for reconstruction.

    def __str__(self):
        return "Lower cholesky transformation"

    def forward(self, x):
        """Flat (num_matrices, N(N+1)/2) -> full (num_matrices, N, N) numpy."""
        full = np.zeros((self.num_matrices, self.N, self.N), dtype=settings.numpy_float)
        tril = np.tril_indices(self.N, 0)
        batch = np.zeros(len(tril[0])).astype(int)
        for m in range(self.num_matrices):
            full[(batch + m,) + tril] = x[m, :]
        return full

    def backward(self, y):
        """Full matrices -> stacked lower-triangle entries (numpy)."""
        tril = np.tril_indices(self.N)
        return np.vstack([mat[tril] for mat in y])

    def forward_tensor(self, x):
        """Flat -> full lower-triangular matrices (torch)."""
        full = torch.zeros(
            (self.num_matrices, self.N, self.N),
            dtype=settings.torch_float,
            device=settings.device,
        )
        tril = np.tril_indices(self.N, 0)
        batch = np.zeros(len(tril[0])).astype(int)
        for m in range(self.num_matrices):
            full[(batch + m,) + tril] = x[m, :]
        return full

    def backward_tensor(self, y):
        """Full matrices -> stacked lower-triangle entries (torch)."""
        tril = np.tril_indices(self.N)
        return torch.stack([mat[tril] for mat in y])
class StackedLowerTriangular:
    """Packs/unpacks an (num_n, num_m) grid of lower-triangular matrices."""

    def __init__(self, N, num_n, num_m):
        self.N = N
        self.num_n = num_n  # We need to store this for reconstruction.
        self.num_m = num_m

    def __str__(self):
        return "Lower cholesky transformation for stack sequence of covariance matrices"

    def forward(self, x):
        """Flat (num_n, num_m, N(N+1)/2) -> full (num_n, num_m, N, N) numpy."""
        full = np.zeros(
            (self.num_n, self.num_m, self.N, self.N), dtype=settings.numpy_float
        )
        tril = np.tril_indices(self.N, 0)
        batch = np.zeros(len(tril[0])).astype(int)
        for i in range(self.num_n):
            for j in range(self.num_m):
                full[
                    (
                        batch + i,
                        batch + j,
                    )
                    + tril
                ] = x[i, j, :]
        return full

    def backward(self, y):
        """Full matrix grid -> flat lower-triangle entries (numpy)."""
        tril = np.tril_indices(self.N)
        return np.stack([np.stack([mat[tril] for mat in row]) for row in y])

    def forward_tensor(self, x):
        """Flat -> full lower-triangular matrix grid (torch)."""
        full = torch.zeros(
            (self.num_n, self.num_m, self.N, self.N),
            dtype=settings.torch_float,
            device=settings.device,
        )
        tril = np.tril_indices(self.N, 0)
        batch = np.zeros(len(tril[0])).astype(int)
        for i in range(self.num_n):
            for j in range(self.num_m):
                full[
                    (
                        batch + i,
                        batch + j,
                    )
                    + tril
                ] = x[i, j, :]
        return full

    def backward_tensor(self, y):
        """Full matrix grid -> flat lower-triangle entries (torch)."""
        tril = np.tril_indices(self.N)
        return torch.stack([torch.stack([mat[tril] for mat in row]) for row in y])
| 3,936
| 27.323741
| 88
|
py
|
hgp
|
hgp-main/tests/test_kernels.py
|
import pytest
import hgp.core.kernels as kernels
import torch
@pytest.fixture()
def t():
    # 10 random 4-D evaluation points shared by the kernel tests.
    return 1 * torch.randn(10, 4)
@pytest.fixture()
def kernel():
    # A fresh 4-dimensional derivative-RBF kernel for each test.
    k = kernels.DerivativeRBF(4)
    return k
def test_single_k(t, kernel):
    # The vectorized Gram matrix K must agree with pairwise single_k calls.
    K = kernel.K(t)
    for i in range(10):
        for j in range(10):
            assert torch.isclose(K[i, j], kernel.single_k(t[i], t[j]))
def test_grad_single_k(t, kernel):
    # grad_single_k must match the analytic RBF gradient in every input dim.
    def analytic_dk(xi, yi, dim):
        # d k(x, y) / dx_dim for an RBF kernel with per-dim lengthscales.
        return (
            -0.5
            * (1 / kernel.lengthscales[dim] ** 2)
            * (xi[dim] - yi[dim])
            * kernel.single_k(xi, yi)
        )
    for j in range(10):
        for k in range(10):
            for i in range(4):
                print(i)
                assert torch.isclose(
                    kernel.grad_single_k(t[k], t[j])[i],
                    analytic_dk(t[k], t[j], i),
                )
def test_hess_single_k(t, kernel):
    """hess_single_k must match the analytic RBF Hessian entry-by-entry."""

    def analytic_ddk(xi, yi, dim1, dim2):
        # d^2 k / dx_dim1 dy_dim2 for an RBF kernel with per-dim lengthscales.
        return (
            0.25
            * (1 / kernel.lengthscales[dim2] ** 2)
            * (
                2 * (dim1 == dim2)
                - (1 / kernel.lengthscales[dim1] ** 2)
                * (xi[dim1] - yi[dim1])
                * (xi[dim2] - yi[dim2])
            )
            * kernel.single_k(xi, yi)
        )

    # BUGFIX: the original nested loops reused `j` as both a point index and a
    # dimension index, so first-argument points 4..9 were never tested and the
    # outer point loop was redundant. Distinct names cover all pairs/dims.
    for a in range(10):
        for b in range(10):
            for d1 in range(4):
                for d2 in range(4):
                    pred = kernel.hess_single_k(t[a], t[b])[d1][d2]
                    ana = analytic_ddk(t[a], t[b], d1, d2)
                    assert torch.isclose(
                        pred,
                        ana,
                    )
def test_grad_K(t, kernel):
    # Vectorized gradient Gram matrix vs. analytic entries. Rows are stacked
    # dim-major in blocks of 10: row i -> point i % 10, dimension i // 10.
    dK = kernel.grad_K(t, t[:9])
    def analytic_dk(xi, yi, dim):
        # d k(x, y) / dx_dim for an RBF kernel with per-dim lengthscales.
        return (
            -0.5
            * (1 / kernel.lengthscales[dim] ** 2)
            * (xi[dim] - yi[dim])
            * kernel.single_k(xi, yi)
        )
    # print(dK.shape)
    for i in range(4 * 10):
        for j in range(9):
            assert torch.isclose(
                dK[i, j], analytic_dk(t[i % 10], t[j], i // 10), atol=1e-6
            )
def test_hess_K(t, kernel):
    # Vectorized Hessian Gram matrix vs. analytic entries; rows and columns
    # are both stacked dim-major in blocks of 10 (index k -> point k % 10,
    # dimension k // 10).
    ddK = kernel.hess_K(t)
    def analytic_ddk(xi, yi, dim1, dim2):
        # d^2 k / dx_dim1 dy_dim2 for an RBF kernel with per-dim lengthscales.
        return (
            0.25
            * (1 / kernel.lengthscales[dim2] ** 2)
            * (
                2 * (dim1 == dim2)
                - (1 / kernel.lengthscales[dim1] ** 2)
                * (xi[dim1] - yi[dim1])
                * (xi[dim2] - yi[dim2])
            )
            * kernel.single_k(xi, yi)
        )
    for i in range(4 * 10):
        for j in range(4 * 10):
            assert torch.isclose(
                ddK[i, j],
                analytic_ddk(t[i % 10], t[j % 10], i // 10, j // 10),
                atol=1e-6,
            )
def test_hess_K_PSD(t, kernel):
    # Cholesky succeeds iff the (jittered) Hessian Gram matrix is positive
    # definite; failure raises and fails the test.
    ddK = kernel.hess_K(t)
    torch.linalg.cholesky(ddK + torch.eye(ddK.shape[1]) * 1e-5)
def test_variance_setter(kernel):
    """The `variance` property setter must round-trip its value."""
    target = torch.tensor(2.70)
    kernel.variance = target
    assert torch.isclose(kernel.variance, target)
def test_lengthscale_setter(kernel):
    """The `lengthscales` property setter must round-trip its values."""
    target = torch.ones(4) * 2.7
    kernel.lengthscales = target
    assert torch.all(torch.isclose(kernel.lengthscales, target))
| 3,261
| 24.286822
| 77
|
py
|
hgp
|
hgp-main/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
hgp
|
hgp-main/experiments/initial_pendulum/experiment.py
|
import logging
import os
import hydra
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib import cm
from matplotlib.collections import LineCollection
from matplotlib.legend import _get_legend_handles_labels
from omegaconf import DictConfig
import hgp
from hgp.datasets.hamiltonians import load_system_from_name
from hgp.misc.torch_utils import numpy2torch, torch2numpy
from hgp.misc.train_utils import seed_everything
from hgp.misc.settings import settings
device = settings.device
# With a GPU present, make CUDA float tensors the global default so tensors
# created below land on the GPU without explicit .to(device) calls.
if device.type == 'cuda':
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
log = logging.getLogger(__name__)
@hydra.main(config_path=".", config_name="config", version_base="1.2")
def run_experiment(config: DictConfig):
    """
    Fit HGP and GPODE on a simple pendulum observed for half / one full cycle,
    then plot the true and inferred vector fields, posterior sample
    trajectories, and energy errors into ./figure4.pdf.
    """
    print("Working directory : {}".format(os.getcwd()))
    seed_everything(3)
    # One row of four panels per training-window length.
    (fig, axs) = plt.subplots(
        2,
        4,
        figsize=(12, 6),
        # sharex=True,
        # sharey=True,
    )
    cycle_lengths = [0.52, 1]
    model_names = ["hgp", "gpode"]
    titles1 = ["Inferred vector field", "Posterior samples"]
    titles2 = [", $\\frac{1}{2}$ cycle", ", $1$ cycle"]
    for c, cycle_length in enumerate(cycle_lengths):
        # Pendulum period scales with 2*pi/sqrt(g); train on a fraction of it.
        train_time = cycle_length * 2 * np.pi / np.sqrt(9.81)
        system = load_system_from_name("simple-pendulum")(
            frequency_train=16,
            T_train=train_time,
            frequency_test=20,
            T_test=(8 * np.pi / np.sqrt(9.81)),
            noise_var=0.01,
            noise_rel=True,
            seed=3,
            N_x0s=1,
        )
        models = []
        preds = []
        # Fit both model variants on the same data and collect predictions.
        for model_name in model_names:
            model = (
                hgp.models.builder.build_model(config, system.trn.ys)
                if model_name == "hgp"
                else hgp.models.builder.build_gpode_model(config, system.trn.ys)
            )
            model = hgp.models.builder.init_and_fit(
                model, config, system.trn.ts, system.trn.ys
            )
            models.append(model)
            preds.append(
                hgp.models.builder.compute_predictions(
                    model,
                    numpy2torch(system.tst.ts),
                    eval_sample_size=config.eval_samples,
                )
            )
        (ax1, ax2, ax3, ax4) = axs[c]
        # Phase-space grid (padded by `factor`) for vector-field evaluation.
        grid_size = 30
        xlim = system.xlim
        ylim = system.ylim
        factor = 1.5
        xx, yy = np.meshgrid(
            np.linspace(xlim[0] * factor, xlim[1] * factor, grid_size),
            np.linspace(ylim[0] * factor, ylim[1] * factor, grid_size),
        )
        grid_x = np.concatenate([xx.reshape(-1, 1), yy.reshape(-1, 1)], 1)
        grid_f = []
        # True dynamics evaluated on the grid.
        for gx in grid_x:
            grid_f.append(
                torch2numpy(system.f(None, torch.tensor(gx, dtype=torch.float32)))
            )
        grid_f = np.stack(grid_f)
        # Panel 1: true vector field (drawn once, on the first row only).
        if c == 0:
            ax1.streamplot(
                xx,
                yy,
                grid_f[:, 0].reshape(xx.shape),
                grid_f[:, 1].reshape(xx.shape),
                color="grey",
                density=0.5,
            )
            ax1.set_title("True vector field")
            ax1.scatter(
                [None], [None], marker=".", c="k", alpha=0.8, label="Training data"
            )
            ax1.plot(
                [None],
                [None],
                color="k",
                linestyle="solid",
                alpha=1.0,
                zorder=4,
                label="True trajectory",
            )
            ax1.set_ylabel("$p$")
            ax1.set_xlabel("$q$")
            # ax1.legend(loc="lower right")
        else:
            ax1.axis("off")
        grid_x = torch.tensor(
            np.concatenate([xx.reshape(-1, 1), yy.reshape(-1, 1)], 1),
            dtype=torch.float32,
        )
        # Panels 2-4: learned vector field, posterior samples, energy error.
        for i, model in enumerate(models):
            grid_f = []
            color = cm.Set2(i)
            # Average the learned vector field over 100 posterior draws.
            with torch.no_grad():
                for _ in range(100):
                    model.flow.odefunc.diffeq.build_cache()
                    grid_f.append(model.flow.odefunc.diffeq.forward(None, grid_x))
            grid_f = torch2numpy(torch.stack(grid_f))
            sp = ax2.streamplot(
                xx,
                yy,
                grid_f.mean(0)[:, 0].reshape(xx.shape),
                grid_f.mean(0)[:, 1].reshape(xx.shape),
                color=color,
                arrowsize=1.1,
                arrowstyle="<|-",
                density=0.5,
            )
            ax2.set_title(titles1[0] + titles2[c])
            if c == 0:
                ax2.plot(
                    [None],
                    [None],
                    linestyle=":" if i == 0 else "solid",
                    color=color,
                    label=model_names[i].upper(),
                )
            sp.lines.set(alpha=0.8, ls=":" if i == 0 else "solid")
            # Draw up to 10 posterior sample trajectories as line collections.
            for s in range(min(preds[i].shape[0], 10)):
                for n in range(preds[i].shape[1]):
                    points = preds[i][s, n].reshape(-1, 1, 2)
                    segments = np.concatenate([points[:-1], points[1:]], axis=1)
                    lc = LineCollection(
                        segments,
                        linestyle=":" if i == 0 else "solid",
                        alpha=0.3,
                        color=color,
                    )
                    lc.set_linewidth(2.5)
                    ax3.add_collection(lc)
            ax3.set_title(titles1[1] + titles2[c])
            # Per-sample energy error against the true Hamiltonian.
            pred_energy = torch2numpy(system.hamiltonian(numpy2torch(preds[i])))
            true_energy = torch2numpy(system.hamiltonian(numpy2torch(system.tst.ys)))
            energy_err = np.sqrt(np.power(true_energy - pred_energy, 2))
            ax4.plot(
                system.tst.ts,
                np.squeeze(energy_err).T,
                linestyle=":" if i == 0 else "solid",
                alpha=0.3,
                color=color,
            )
            ax4.set_title("Energy MSE" + titles2[c])
            ax4.set_xlabel("$t$")
        # Overlay the true test trajectories and training points on panel 3.
        for n in range(system.tst.ys.shape[0]):
            points = system.tst.ys[n].reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)
            lc = LineCollection(
                segments, color="k", linestyle="solid", alpha=1.0, zorder=4
            )
            lc.set_linewidth(0.5)
            ax3.add_collection(lc)
        ax3.scatter(
            system.trn.ys[:, :, 0], system.trn.ys[:, :, 1], marker=".", c="k", alpha=1
        )
        for ax in [ax1, ax2, ax3]:
            ax.set_xlim(xlim[0] * factor, xlim[1] * factor)
            ax.set_ylim(ylim[0] * factor, ylim[1] * factor)
    fig.legend(*_get_legend_handles_labels(fig.axes), loc=(0.1, 0.3))
    plt.tight_layout()
    plt.savefig("./figure4.pdf")
# Hydra entry point when executed as a script.
if __name__ == "__main__":
    run_experiment()
| 6,928
| 31.078704
| 86
|
py
|
hgp
|
hgp-main/experiments/forward_trajectory/experiment.py
|
import logging
import os
import pickle
from distutils.dir_util import copy_tree
from pathlib import Path
import hydra
import numpy as np
import torch
from omegaconf import DictConfig
import hgp
from hgp.datasets.hamiltonians import load_system_from_name
from hgp.misc.plot_utils import (
plot_comparison_traces,
plot_learning_curve,
plot_longitudinal,
)
from hgp.misc.torch_utils import numpy2torch, torch2numpy
from hgp.misc.train_utils import seed_everything
from hgp.misc.settings import settings
device = settings.device
# With a GPU present, make CUDA float tensors the global default so tensors
# created below land on the GPU without explicit .to(device) calls.
if device.type == 'cuda':
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
log = logging.getLogger(__name__)
@hydra.main(config_path="conf", config_name="config", version_base="1.2")
def run_experiment(config: DictConfig):
    """
    Single-trajectory forecasting experiment: train on one trajectory, predict
    twice as far ahead, compute extrapolation metrics (RMSE / MLL / relative
    error), and save plots plus pickled results.
    """
    seed_everything(config.system.seed)
    # Forecast horizon: test window is twice the training window.
    times_ahead = 2
    system = load_system_from_name(config.system.system_name)(
        frequency_train=config.system.frequency_train,
        T_train=config.system.data_obs_T,
        frequency_test=config.system.frequency_test,
        T_test=times_ahead * config.system.data_obs_T,
        noise_var=config.system.data_obs_noise_var,
        noise_rel=config.system.noise_rel,
        seed=config.system.seed,
        N_x0s=1,
    )
    # Normalize time and standardize states before fitting.
    system.scale_ts()
    system.scale_ys()
    # Build the requested model variant.
    if config.model.model_type == "hgp":
        model = hgp.models.builder.build_model(config, system.trn.ys)
    elif config.model.model_type == "hgp_subseq":
        model = hgp.models.builder.build_subsequence_model(config, system.trn.ys)
    elif config.model.model_type == "gpode":
        model = hgp.models.builder.build_gpode_model(config, system.trn.ys)
    elif config.model.model_type == "nn":
        model = hgp.models.builder.build_nn_model(config, system.trn.ys)
    else:
        raise ValueError("Model type not valid.")
    model, history = hgp.models.builder.init_and_fit(
        model, config, system.trn.ts, system.trn.ys, return_history=True
    )
    plot_learning_curve(
        history, save=os.path.join(os.getcwd(), f"lc_{config.model.name}.pdf")
    )
    print("Generating predictions...")
    # GP-based models predict from their inferred state; others roll out from
    # the first training observation.
    preds = (
        hgp.models.builder.compute_predictions(
            model,
            numpy2torch(system.tst.ts),
            eval_sample_size=config.eval_samples,
        )
        if config.model.model_type in ["hgp", "gpode"]
        else hgp.models.builder.compute_test_predictions(
            model,
            numpy2torch(system.trn.ys[:, 0, :]),
            numpy2torch(system.tst.ts),
            eval_sample_size=config.eval_samples,
        )
    )
    model_vars = model.observation_likelihood.variance
    # we need to only compute the metrics outside the training region
    # test data here also contains noise free function in traing range
    test_idx = system.tst.ts > system.trn.ts.max()
    mll, mse, rel_err = hgp.models.builder.compute_summary(
        system.tst.ys[:, test_idx, :],
        torch2numpy(preds[:, :, test_idx, :]),
        torch2numpy(model_vars),
        squeeze_time=False,
    )
    plot_longitudinal(
        system,
        torch2numpy(preds),
        torch2numpy(model_vars),
        save=os.path.join(os.getcwd(), f"{config.model.name}_trajpost"),
    )
    # print(model)
    # Summary (scalar) and full (per-timestep) metric dictionaries.
    res_dict = {}
    full_res_dict = {}
    full_res_dict["rmse"] = np.sqrt(mse)
    full_res_dict["mll"] = mll
    full_res_dict["rel_err"] = rel_err
    full_res_dict["preds"] = torch2numpy(preds)
    res_dict["rmse"] = np.sqrt(mse.mean())
    res_dict["mll"] = mll.mean()
    res_dict["rel_err"] = rel_err.mean()
    log.info(res_dict)
    # also save data for plotting
    full_res_dict["system"] = system
    with open(os.path.join(os.getcwd(), "full_metrics.pickle"), "wb") as handle:
        pickle.dump(full_res_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(os.getcwd(), "summary_metrics.pickle"), "wb") as handle:
        pickle.dump(res_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    plot_comparison_traces(
        [preds],
        [model_vars],
        system,
        save=os.path.join(os.getcwd(), "comparison_traces.pdf"),
        names=config.model.name,
    )
    # Optionally mirror the hydra run directory into data/results/<exp_dir>.
    if config.exp_dir:
        cwd = os.getcwd()
        last_path = cwd.split("/")[-1]
        main_path = f"data/results/{config.exp_dir}/"
        # Multirun sub-directories are short numeric names; keep them nested.
        is_multi = last_path if len(last_path) <= 3 else ""
        res_path = hydra.utils.to_absolute_path(main_path + is_multi)
        filepath = Path(res_path)
        filepath.mkdir(parents=True, exist_ok=True)
        copy_tree(os.getcwd(), res_path)
# Hydra entry point when executed as a script.
if __name__ == "__main__":
    run_experiment()
| 4,625
| 30.469388
| 83
|
py
|
hgp
|
hgp-main/experiments/multiple_trajectory/experiment.py
|
import logging
import os
import pickle
from distutils.dir_util import copy_tree
from pathlib import Path
import hydra
import numpy as np
import torch
from omegaconf import DictConfig
import hgp
from hgp.datasets.hamiltonians import load_system_from_name
from hgp.misc.plot_utils import (
plot_comparison_traces,
plot_learning_curve,
plot_longitudinal,
)
from hgp.misc.torch_utils import numpy2torch, torch2numpy
from hgp.misc.train_utils import seed_everything
from hgp.misc.settings import settings
# Make newly created torch tensors default to CUDA floats when a GPU is present.
# NOTE(review): torch.set_default_tensor_type is deprecated in recent torch
# releases — consider torch.set_default_device / set_default_dtype.
device = settings.device
if device.type == 'cuda':
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
# Module-level logger; handlers are configured by Hydra at runtime.
log = logging.getLogger(__name__)
@hydra.main(config_path="conf", config_name="config", version_base="1.2")
def run_experiment(config: DictConfig):
    """Train a model on a Hamiltonian system and evaluate multi-trajectory predictions.

    Builds the dataset named in ``config.system``, fits the model selected by
    ``config.model.model_type``, computes test/train predictions, logs summary
    metrics (RMSE / MLL / relative error), saves pickled metrics and diagnostic
    plots into the Hydra run directory, and optionally mirrors the run
    directory into ``data/results/<config.exp_dir>``.
    """
    seed_everything(config.system.seed)
    # assert config.shooting == True
    # Test horizon is a multiple of the training observation window.
    times_ahead = 3
    system = load_system_from_name(config.system.system_name)(
        frequency_train=config.system.frequency_train,
        T_train=config.system.data_obs_T,
        frequency_test=config.system.frequency_test,
        T_test=times_ahead * config.system.data_obs_T,
        noise_var=config.system.data_obs_noise_var,
        noise_rel=config.system.noise_rel,
        seed=config.system.seed,
        N_x0s=config.system.num_traj,
        N_x0s_test=25,
    )
    # Normalize time stamps and observations in-place.
    system.scale_ts()
    system.scale_ys()
    # Dispatch on model type; fail loudly on unknown configurations.
    if config.model.model_type == "hgp":
        model = hgp.models.builder.build_model(config, system.trn.ys)
    elif config.model.model_type == "hgp_subseq":
        model = hgp.models.builder.build_subsequence_model(config, system.trn.ys)
    elif config.model.model_type == "gpode":
        model = hgp.models.builder.build_gpode_model(config, system.trn.ys)
    elif config.model.model_type == "nn":
        model = hgp.models.builder.build_nn_model(config, system.trn.ys)
    else:
        raise ValueError("Model type not valid.")
    model, history = hgp.models.builder.init_and_fit(
        model, config, system.trn.ts, system.trn.ys, return_history=True
    )
    plot_learning_curve(
        history, save=os.path.join(os.getcwd(), f"lc_{config.model.name}.pdf")
    )
    model_vars = model.observation_likelihood.variance
    print("Generating predictions...")
    preds = hgp.models.builder.compute_test_predictions(
        model,
        system.x0_test,
        numpy2torch(system.tst.ts),
        eval_sample_size=config.eval_samples,
    )
    # hgp/gpode models predict directly on the training grid; other model
    # types roll forward from the first training observation instead.
    train_preds = (
        hgp.models.builder.compute_predictions(
            model,
            numpy2torch(system.trn.ts),
            eval_sample_size=config.eval_samples,
        )
        if config.model.model_type in ["hgp", "gpode"]
        else hgp.models.builder.compute_test_predictions(
            model,
            numpy2torch(system.trn.ys[:, 0, :]),
            numpy2torch(system.trn.ts),
            eval_sample_size=config.eval_samples,
        )
    )
    mll, mse, rel_err = hgp.models.builder.compute_summary(
        system.tst.ys,
        torch2numpy(preds),
        torch2numpy(model.observation_likelihood.variance),
        squeeze_time=False,
    )
    # Plot at most 5 test trajectories.
    plot_longitudinal(
        system,
        torch2numpy(preds[:, : min(np.shape(preds)[1], 5)]),
        torch2numpy(model.observation_likelihood.variance),
        save=os.path.join(os.getcwd(), f"{config.model.name}_trajpost"),
    )
    # BUGFIX: this slice previously used max(..., 5), which plotted every
    # training trajectory; use min(..., 5) to cap the plot at 5 trajectories,
    # consistent with the test-set plot above.
    plot_longitudinal(
        system,
        torch2numpy(train_preds[:, : min(np.shape(train_preds)[1], 5)]),
        torch2numpy(model.observation_likelihood.variance),
        save=os.path.join(os.getcwd(), f"train_{config.model.name}_trajpost"),
        test_true=(system.trn.ts, system.trn.ys),
    )
    # Per-trajectory metrics go into full_res_dict; res_dict holds the means.
    res_dict = {}
    full_res_dict = {}
    full_res_dict["rmse"] = np.sqrt(mse)
    full_res_dict["mll"] = mll
    full_res_dict["rel_err"] = rel_err
    full_res_dict["preds"] = torch2numpy(preds)
    res_dict["rmse"] = np.sqrt(mse.mean())
    res_dict["mll"] = mll.mean()
    res_dict["rel_err"] = rel_err.mean()
    log.info(res_dict)
    # also save the system itself so plots can be regenerated later
    full_res_dict["system"] = system
    with open(os.path.join(os.getcwd(), "metrics.pickle"), "wb") as handle:
        pickle.dump(full_res_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open(os.path.join(os.getcwd(), "summary_metrics.pickle"), "wb") as handle:
        pickle.dump(res_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
    plot_comparison_traces(
        [preds],
        [model_vars],
        system,
        save=os.path.join(os.getcwd(), "comparison_traces.pdf"),
        names=[config.model.name],
    )
    if config.exp_dir:
        # Mirror this Hydra run directory into the shared results tree.
        # NOTE(review): distutils.dir_util.copy_tree is deprecated (removed in
        # Python 3.12); consider shutil.copytree(..., dirs_exist_ok=True).
        cwd = os.getcwd()
        last_path = cwd.split("/")[-1]
        main_path = f"data/results/{config.exp_dir}/"
        # Multirun sweeps end in short numeric subdirectories; keep that suffix.
        is_multi = last_path if len(last_path) <= 3 else ""
        res_path = hydra.utils.to_absolute_path(main_path + is_multi)
        filepath = Path(res_path)
        filepath.mkdir(parents=True, exist_ok=True)
        copy_tree(os.getcwd(), res_path)
# Invoke the Hydra-decorated entry point when run as a script.
if __name__ == "__main__":
    run_experiment()
| 5,012
| 29.944444
| 83
|
py
|
SubGNN
|
SubGNN-main/config.py
|
from pathlib import Path
# Directory where data and results will be stored.
# TODO: *UPDATE TO YOUR DIRECTORY OF CHOICE*
PROJECT_ROOT = Path('/mnt/subgraphs/data_to_release')
# Sentinel used throughout SubGNN to pad variable-length node-id tensors.
PAD_VALUE = 0
| 202
| 17.454545
| 53
|
py
|
SubGNN
|
SubGNN-main/SubGNN/test.py
|
import sys
sys.path.insert(0, '..') # add config to path
import config
import train as tr
import os
import json
import random
import numpy as np
import argparse
class Namespace:
    """Lightweight attribute bag: ``Namespace(a=1).a == 1``."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
def parse_arguments():
    """Build and parse the command-line arguments for running SubGNN."""
    arg_parser = argparse.ArgumentParser(description="Run SubGNN")
    arg_parser.add_argument('-task', type=str, default=None,
                            help='Task name (e.g. hpo_metab)')
    arg_parser.add_argument('-tb_name', type=str, default="sg",
                            help='Base Model Name for Tensorboard Log')
    arg_parser.add_argument('-restoreModelPath', type=str, default=None,
                            help='Parent directory of model, hparams, kwargs')
    arg_parser.add_argument("-max_epochs", type=int, default=200,
                            help="Max number of epochs to train")
    arg_parser.add_argument("-random_seeds", action="store_true",
                            help="Use random seeds from 0-9. Otherwise use random random seeds")
    arg_parser.add_argument('-tb_dir', default="tensorboard_test", type=str)
    arg_parser.add_argument('-no_train', action="store_true")
    return arg_parser.parse_args()
def main(args_script):
    """Train/evaluate SubGNN across 10 seeds and aggregate the test metrics.

    For each round, either trains a fresh model (default) or restores a saved
    checkpoint (``-no_train``). Collects test accuracy, micro-F1 and AUROC per
    seed, then writes per-run values plus mean/std to experiment_results.json
    inside the tensorboard directory.
    """
    args_to_function = {
        "task" : args_script.task,
        "tb_name" : args_script.tb_name,
        "restoreModelPath" : args_script.restoreModelPath,
        "max_epochs" : args_script.max_epochs,
        "tb_dir" : args_script.tb_dir,
        ## Defaults
        "checkpoint_k": 1,
        "no_checkpointing" : False, #0 and True or 1 and False
        "tb_logging": True,
        "runTest" : False,
        "no_save" : False,
        "print_train_times" : False,
        "monitor_metric":'val_micro_f1',
        "opt_n_trials":None,
        "debug_mode":False,
        "subset_data":False,
        "restoreModelName":None,
        "noTrain":False,
        "log_path":None
    }
    args = Namespace(**args_to_function)
    # dict to keep track of results
    exp_results = {
        "test_acc_mean":0, "test_acc_sd":0,"test_micro_f1_mean":0,"test_micro_f1_sd":0,
        "test_auroc_mean":0, "test_auroc_sd":0,
        "test_acc" : [], "test_micro_f1": [], "test_auroc" : [],
        "call" : args_to_function
    }
    # Make tb_dir absolute once, outside the loop. It used to be re-joined on
    # every iteration, which only worked because joining an already-absolute
    # path is a no-op.
    args.tb_dir = os.path.join(config.PROJECT_ROOT, args.tb_dir)
    # for each seed, train a new model
    for seed in range(10):
        print(f"Running Round {seed+1}")
        # either use a random seed from 0 to 1000000 or use the default random seeds 0-9
        args.seed = random.randint(0, 1000000) if args_script.random_seeds else seed
        print('Seed used: ', args.seed)
        args.tb_version = f"version_{seed}"
        if not args_script.no_train: #train the model from scratch
            args.noTrain = False
            args.runTest = True
            test_results = tr.train_model(args)
        else: #read in the model - NOTE that this doesn't differentiate .ckpt files if multiple are saved
            model_path = os.path.join(config.PROJECT_ROOT,args.tb_dir, args.tb_name, args.tb_version)
            outpath = None
            for file in os.listdir(model_path):
                if file.endswith(".ckpt") and file.startswith("epoch"):
                    outpath = file
            # BUGFIX: previously 'outpath' was left unbound (UnboundLocalError)
            # when no checkpoint matched; fail with a clear error instead.
            if outpath is None:
                raise FileNotFoundError(f"No 'epoch*.ckpt' checkpoint found in {model_path}")
            args.noTrain = True
            args.no_save = True
            args.restoreModelPath = model_path
            args.restoreModelName = outpath
            test_results = tr.train_model(args)
        # keep track of test results for each random seed run
        exp_results['test_micro_f1'].append(float(test_results['test_micro_f1']))
        exp_results['test_acc'].append(float(test_results['test_acc']))
        exp_results['test_auroc'].append(float(test_results['test_auroc']))
    # aggregate over the seeds
    exp_results["test_acc_mean"] = np.mean(exp_results['test_acc'])
    exp_results["test_acc_sd"] = np.std(exp_results['test_acc'])
    exp_results["test_micro_f1_mean"] = np.mean(exp_results['test_micro_f1'])
    exp_results["test_micro_f1_sd"] = np.std(exp_results['test_micro_f1'])
    exp_results["test_auroc_mean"] = np.mean(exp_results['test_auroc'])
    exp_results["test_auroc_sd"] = np.std(exp_results['test_auroc'])
    print("OVERALL RESULTS:") # across all random seeds
    print(exp_results)
    # write results for all runs to file; 'with' guarantees the file is closed
    results_path = os.path.join(config.PROJECT_ROOT, args.tb_dir, args.tb_name, "experiment_results.json")
    with open(results_path, "w") as exp_results_file:
        exp_results_file.write(json.dumps(exp_results, indent=4))
# CLI entry point: parse args, then run the 10-seed experiment loop.
if __name__ == "__main__":
    args = parse_arguments()
    main(args)
| 4,436
| 40.858491
| 130
|
py
|
SubGNN
|
SubGNN-main/SubGNN/anchor_patch_samplers.py
|
# General
import numpy as np
import random
from collections import defaultdict
import networkx as nx
import sys
import time
# Pytorch
import torch
# Our Methods
sys.path.insert(0, '..') # add config to path
import config
import subgraph_utils
#######################################################
# Triangular Random Walks
def is_triangle(graph, a, b, c):
    '''
    Return True when node c closes a triangle over the pair (a, b),
    i.e. c is adjacent to both a and b in the graph.
    '''
    shared_neighbors = set(graph.neighbors(a)) & set(graph.neighbors(b))
    return c in shared_neighbors
def get_neighbors(networkx_graph, subgraph, all_valid_border_nodes, prev_node, curr_node, inside):
    '''
    Split curr_node's neighbors into triangle-closing and non-triangle
    neighbors with respect to the edge (prev_node, curr_node).

    For internal walks (inside=True) only the subgraph is consulted; for
    border walks, candidates are additionally restricted to
    all_valid_border_nodes.
    '''
    # internal walks never leave the subgraph; border walks use the full graph
    graph = subgraph if inside else networkx_graph
    candidates = graph.neighbors(curr_node)
    if not inside:
        # border walks may only visit nodes in the precomputed valid set
        candidates = (n for n in candidates if n in all_valid_border_nodes)
    # bucket each candidate by whether it closes a triangle with the last edge
    triangular, non_triangular = [], []
    for neighbor in candidates:
        bucket = triangular if is_triangle(graph, prev_node, curr_node, neighbor) else non_triangular
        bucket.append(neighbor)
    return triangular, non_triangular
def triangular_random_walk(hparams, networkx_graph, anchor_patch_subgraph, walk_len, in_border_nodes, all_valid_nodes, inside):
    '''
    Perform a triangular random walk
    This is used (1) to sample anchor patches and (2) to generate internal/border representations of the anchor patches
    when using function for (1), in_border_nodes, non_subgraph_nodes, all_valid_nodes = None; inside = True & anchor_patch_subgraph is actually the entire networkx graph
    Inputs:
        - hparams: dict of hyperparameters (uses 'rw_beta', the probability of stepping to a triangle-closing neighbor)
        - networkx graph: base underlying graph
        - anchor patch subgraph: this is either the anchor patch subgraph or in the case of (1), it's actually the base underlying graph
        - walk_len: maximum length of the random walk (the walk ends early at a dead end)
        - in_border_nodes: nodes that are in the subgraph, but that have an edge to a node external to the subgraph
        - all_valid_nodes: the union of in_border_nodes + nodes that are not in the subgraph but are in the underlying graph
        - inside: whether this random walk is internal or border to the subgraph (note that when using this method to sample anchor patches, inside=True)
    Output:
        - visited: list of node ids visited during the walk (length 1..walk_len)
    '''
    if inside:
        # randomly sample a start node from the subgraph/graph
        prev_node = np.random.choice(list(anchor_patch_subgraph.nodes()))
        # get all of the neighbors for the start node
        neighbor_nodes = list(anchor_patch_subgraph.neighbors(prev_node))
        # sample node from neighbors
        curr_node = np.random.choice(neighbor_nodes) if len(neighbor_nodes) > 0 else config.PAD_VALUE #we're using the PAD_VALUE as a sentinel that the first node has no neighbors
        all_valid_nodes = None
    else:
        # randomly sample a start node from the list of 'in_border_nodes' and restrict neighboring nodes to only those in 'all_valid_nodes'
        prev_node = np.random.choice(in_border_nodes, 1)[0]
        neighbor_nodes = [n for n in list(networkx_graph.neighbors(prev_node)) if n in all_valid_nodes]
        curr_node = np.random.choice(neighbor_nodes, 1)[0] if len(neighbor_nodes) > 0 else config.PAD_VALUE
    # if the first node has no neighbors, the random walk is only length 1 & we return it immediately
    if curr_node == config.PAD_VALUE:
        return [prev_node]
    visited = [prev_node, curr_node]
    # now that we've already performed a walk of length 2, let's perform the rest of the walk
    for k in range(walk_len - 2):
        #get the triangular and non-triangular neighbors for the current node given the previously visited node
        triangular_neighbors, non_triangular_neighbors = get_neighbors(networkx_graph, anchor_patch_subgraph, all_valid_nodes, prev_node, curr_node, inside=inside)
        neighbors = triangular_neighbors + non_triangular_neighbors
        if len(neighbors) == 0: break # if there are no neighbors, end walk early
        else:
            # if there are no neighbors of one type, sample from the other type
            if len(triangular_neighbors) == 0:
                next_node = np.random.choice(non_triangular_neighbors)
            elif len(non_triangular_neighbors) == 0:
                next_node = np.random.choice(triangular_neighbors)
            # with probability 'rw_beta', we go to a triangular node
            # NOTE(review): the 'len(triangular_neighbors) != 0' clause below is
            # redundant — the empty case was already handled by the first branch.
            elif random.uniform(0, 1) <= hparams['rw_beta'] and len(triangular_neighbors) != 0:
                next_node = np.random.choice(triangular_neighbors)
            # otherwise we go to a non-triangular node
            else:
                next_node = np.random.choice(non_triangular_neighbors)
            prev_node = curr_node
            curr_node = next_node
            visited.append(next_node)
    # we return a list of the node ids visited during the walk
    return visited
#######################################################
# Perform random walks over the sampled structure anchor patches
def perform_random_walks(hparams, networkx_graph, anchor_patch_ids, inside):
    '''
    Performs triangular random walks over the sampled anchor patches.

    If inside=True, walks stay within each anchor-patch subgraph; otherwise
    walks run over the subgraph border (border nodes of the patch plus nodes
    external to the patch).

    Inputs:
        - hparams: dict with 'n_triangular_walks' and 'random_walk_len'
        - networkx_graph: base underlying graph
        - anchor_patch_ids: LongTensor (n sampled anchor patches, max patch
          length), padded with config.PAD_VALUE
        - inside: whether to walk inside each patch or along its border
    Output:
        - padded LongTensor of visited node ids with shape
          (n sampled anchor patches, n_triangular_walks, random_walk_len)
    '''
    n_sampled_patches, _ = anchor_patch_ids.shape
    n_walks = hparams['n_triangular_walks']
    rw_len = hparams['random_walk_len']
    all_patch_walks = []
    for anchor_patch in anchor_patch_ids:
        curr_anchor_patch = anchor_patch[anchor_patch != config.PAD_VALUE] #remove any padding
        # if anchor patch is only padding, emit an all-PAD walk block to keep alignment
        if curr_anchor_patch.shape[0] == 0:
            all_patch_walks.append(torch.full((n_walks, rw_len), config.PAD_VALUE, dtype=torch.long))
        else:
            anchor_patch_subgraph = networkx_graph.subgraph(curr_anchor_patch.numpy()) # create a networkx graph from the anchor patch
            if not inside:
                # get nodes in subgraph that have an edge to a node not in the subgraph & all of the nodes that are not in the subgraph
                in_border_nodes, non_subgraph_nodes = subgraph_utils.get_border_nodes(networkx_graph, anchor_patch_subgraph)
                # the border random walk can operate over all nodes on the border of the subgraph + all nodes external to the subgraph
                all_valid_nodes = set(in_border_nodes).union(set(non_subgraph_nodes))
            else: in_border_nodes, non_subgraph_nodes, all_valid_nodes = None, None, None
            # perform 'n_triangular_walks' walks over the patch, right-padding
            # each walk to 'random_walk_len' with PAD_VALUE
            walks = []
            for _ in range(n_walks):
                walk = triangular_random_walk(hparams, networkx_graph, anchor_patch_subgraph, rw_len, in_border_nodes, all_valid_nodes, inside=inside)
                fill_len = rw_len - len(walk)
                # torch.full replaces the old uninitialized-then-fill_ LongTensor idiom
                walk = torch.cat([torch.LongTensor(walk), torch.full((fill_len,), config.PAD_VALUE, dtype=torch.long)])
                walks.append(walk)
            all_patch_walks.append(torch.stack(walks))
    # (n sampled anchor patches, n_triangular_walks, random_walk_len)
    all_patch_walks = torch.stack(all_patch_walks).view(n_sampled_patches, n_walks, rw_len)
    return all_patch_walks
#######################################################
# Sample anchor patches
def sample_neighborhood_anchor_patch(hparams, networkx_graph, cc_ids, border_set, sample_inside=True ):
    '''
    Returns a tensor of shape (batch_sz, max_n_cc, n_anchor_patches_N_in OR n_anchor_patches_N_out) that contains the sampled
    neighborhood internal or border anchor patches
    '''
    batch_sz, max_n_cc, _ = cc_ids.shape
    components = cc_ids.view(cc_ids.shape[0]*cc_ids.shape[1], -1) #(batch_sz * max_n_cc, max_cc_len)
    # sample internal N anchor patch
    if sample_inside:
        all_samples = []
        for i in range(hparams['n_anchor_patches_N_in']):
            # to efficiently sample a random element from each connected component (with variable lengths),
            # we generate and pad a random matrix then take the argmax. This gives a randomly sampled node ID from within the component.
            rand = torch.randn(components.shape)
            rand[components == config.PAD_VALUE] = config.PAD_VALUE
            # NOTE(review): a row that is entirely padding yields PAD_VALUE as its
            # "sample" — presumably intended as a sentinel; confirm downstream.
            sample = components[range(len(components)), torch.argmax(rand, dim=1)]
            all_samples.append(sample)
        # stack per-draw samples, then move the draw axis last: (rows, n_draws)
        samples = torch.transpose(torch.stack(all_samples), 0, 1)
    # sample border N anchor patch
    else:
        border_set_reshaped = border_set.view(border_set.shape[0]*border_set.shape[1], -1)
        all_samples = []
        for i in range(hparams['n_anchor_patches_N_out']): # number of neighborhood border AP to sample
            # same approach as internally, except that we're sampling from the border_set instead of within the connected component
            rand = torch.randn(border_set_reshaped.shape)
            rand[border_set_reshaped == config.PAD_VALUE] = config.PAD_VALUE
            sample = border_set_reshaped[range(len(border_set_reshaped)), torch.argmax(rand, dim=1)]
            all_samples.append(sample)
        samples = torch.transpose(torch.stack(all_samples),0,1)
    # Reshape and return
    anchor_patches = samples.view(batch_sz, max_n_cc, -1)
    return anchor_patches
def sample_position_anchor_patches(hparams, networkx_graph, subgraph = None):
    '''
    Sample position anchor patches (single nodes, drawn with replacement).

    When a non-empty subgraph is given, 'n_anchor_patches_pos_in' nodes are
    drawn from it; otherwise 'n_anchor_patches_pos_out' nodes are drawn from
    the entire base graph.
    '''
    if not subgraph:  # border position anchor patches from the full graph
        pool = list(networkx_graph.nodes)
        n_to_draw = hparams['n_anchor_patches_pos_out']
    else:  # internal position anchor patches from within the subgraph
        pool = subgraph
        n_to_draw = hparams['n_anchor_patches_pos_in']
    return list(np.random.choice(pool, n_to_draw, replace = True))
def sample_structure_anchor_patches(hparams, networkx_graph, device, max_sim_epochs):
    '''
    Generate a large pool of structure anchor patches to sample from later.

    Inputs:
        - hparams: dict of hyperparameters; uses 'n_anchor_patches_structure',
          'n_layers', 'structure_patch_type', and either
          'structure_anchor_patch_radius' or 'sample_walk_len'
        - networkx_graph: base underlying graph
        - device: unused here; kept for interface compatibility
        - max_sim_epochs: multiplication factor to ensure we generate more AP
          than are actually needed
    Returns:
        - LongTensor of shape (n sampled patches, max patch length), padded
          with config.PAD_VALUE
    Raises:
        - NotImplementedError for an unknown 'structure_patch_type'
    '''
    # number of anchor patches to sample
    n_samples = max_sim_epochs * hparams['n_anchor_patches_structure'] * hparams['n_layers']
    all_patches = []
    start_nodes = list(np.random.choice(list(networkx_graph.nodes), n_samples, replace = True))
    for node in start_nodes:  # (enumerate index was unused)
        # two sampling strategies: 'ego_graph' or 'triangular_random_walk' (the default)
        if hparams['structure_patch_type'] == 'ego_graph':
            # patch = ego graph of radius 'structure_anchor_patch_radius' around the start node
            subgraph = list(nx.ego_graph(networkx_graph, node, radius=hparams['structure_anchor_patch_radius']).nodes)
        elif hparams['structure_patch_type'] == 'triangular_random_walk':
            # patch = nodes visited by a triangular random walk of length 'sample_walk_len'
            subgraph = triangular_random_walk(hparams, networkx_graph, networkx_graph, hparams['sample_walk_len'], None, None, True)
        else:
            raise NotImplementedError
        all_patches.append(subgraph)
    # right-pad each sampled patch to the longest patch length
    max_anchor_len = max([len(s) for s in all_patches])
    padded_all_patches = []
    for s in all_patches:
        fill_len = max_anchor_len - len(s)
        # torch.full replaces the old uninitialized-then-fill_ LongTensor idiom
        padded_all_patches.append(torch.cat([torch.LongTensor(s), torch.full((fill_len,), config.PAD_VALUE, dtype=torch.long)]))
    return torch.stack(padded_all_patches).long() # (n sampled patches, max patch length)
#######################################################
# Initialize anchor patches
def init_anchors_neighborhood(split, hparams, networkx_graph, device, train_cc_ids, val_cc_ids, test_cc_ids, train_N_border, val_N_border, test_N_border):
    '''
    Sample internal and border neighborhood anchor patches for every dataset
    split and every SubGNN layer.

    Inputs:
        - split: one of 'all', 'train_val', 'test'
        - hparams: dict of hyperparameters (uses 'n_layers' and the
          sample_neighborhood_anchor_patch counts)
        - *_cc_ids / *_N_border: per-split connected-component ids and
          precomputed border sets
    Returns:
        - anchors_int_neigh: dict of dicts mapping from dataset name & layer number -> sampled internal N anchor patches
        - anchors_border_neigh: same as above, but stores border N anchor patches
    Raises:
        - ValueError for an unrecognized split
    '''
    # get datasets to process based on split
    if split == 'all':
        dataset_names = ['train', 'val', 'test']
        datasets = [train_cc_ids, val_cc_ids, test_cc_ids]
        border_sets = [train_N_border, val_N_border, test_N_border]
    elif split == 'train_val':
        dataset_names = ['train', 'val']
        datasets = [train_cc_ids, val_cc_ids]
        border_sets = [train_N_border, val_N_border]
    elif split == 'test':
        dataset_names = ['test']
        datasets = [test_cc_ids]
        border_sets = [test_N_border]
    else:
        # BUGFIX: previously fell through to an UnboundLocalError below
        raise ValueError(f"Unrecognized split: {split!r}")
    #initialize internal and border neighborhood anchor patch dicts
    anchors_int_neigh = defaultdict(dict)
    anchors_border_neigh = defaultdict(dict)
    # for each dataset, for each layer, sample internal and border neighborhood anchor patches
    # we can use the precomputed border set to speed up the calculation
    for dataset_name, dataset, border_set in zip(dataset_names, datasets, border_sets):
        for n in range(hparams['n_layers']):
            anchors_int_neigh[dataset_name][n] = sample_neighborhood_anchor_patch(hparams, networkx_graph, dataset, border_set, sample_inside=True)
            anchors_border_neigh[dataset_name][n] = sample_neighborhood_anchor_patch(hparams, networkx_graph, dataset, border_set, sample_inside=False)
    return anchors_int_neigh, anchors_border_neigh
def init_anchors_pos_int(split, hparams, networkx_graph, device, train_cc_ids, val_cc_ids, test_cc_ids):
    '''
    Sample internal position anchor patches for every dataset split and layer.

    Inputs:
        - split: one of 'all', 'train_val', 'test'
    Returns:
        - anchors_pos_int: dict of dicts mapping from dataset name (e.g train, val, etc.) and layer number to the sampled internal position anchor patches
    Raises:
        - ValueError for an unrecognized split
    '''
    # get datasets to process based on split
    if split == 'all':
        dataset_names = ['train', 'val', 'test']
        datasets = [train_cc_ids, val_cc_ids, test_cc_ids]
    elif split == 'train_val':
        dataset_names = ['train', 'val']
        datasets = [train_cc_ids, val_cc_ids]
    elif split == 'test':
        dataset_names = ['test']
        datasets = [test_cc_ids]
    else:
        # BUGFIX: previously fell through to an UnboundLocalError below
        raise ValueError(f"Unrecognized split: {split!r}")
    anchors_pos_int = defaultdict(dict)
    # for each dataset, for each layer, sample internal position anchor patches
    for dataset_name, dataset in zip(dataset_names, datasets):
        for n in range(hparams['n_layers']):
            anchors = [sample_position_anchor_patches(hparams, networkx_graph, sg) for sg in dataset]
            anchors_pos_int[dataset_name][n] = torch.stack([torch.tensor(l) for l in anchors])
    return anchors_pos_int
def init_anchors_pos_ext(hparams, networkx_graph, device):
    '''
    Sample border position anchor patches for every SubGNN layer.

    Returns:
        - anchors_pos_ext: dict mapping from layer number in SubGNN -> tensor
          of sampled border position anchor patches
    '''
    return {
        layer: torch.tensor(sample_position_anchor_patches(hparams, networkx_graph))
        for layer in range(hparams['n_layers'])
    }
def init_anchors_structure(hparams, structure_anchors, int_structure_anchor_rw, bor_structure_anchor_rw):
    '''
    For each SubGNN layer, sample 'n_anchor_patches_structure' structure anchor
    patches together with their precomputed internal & border random walks.

    Returns:
        - anchors_struc: dict from layer number -> tuple(sampled structure
          anchor patches, indices of the selected patches in the larger pool,
          matching internal random walks, matching border random walks)
    '''
    n_patches = hparams['n_anchor_patches_structure']
    pool_size = structure_anchors.shape[0]
    anchors_struc = {}
    for layer in range(hparams['n_layers']):
        chosen = list(np.random.choice(range(pool_size), n_patches, replace = True))
        anchors_struc[layer] = (
            structure_anchors[chosen, :],
            chosen,
            int_structure_anchor_rw[chosen, :, :],
            bor_structure_anchor_rw[chosen, :, :],
        )
    return anchors_struc
#######################################################
# Retrieve anchor patches
def get_anchor_patches(dataset_type, hparams, networkx_graph, node_matrix, \
    subgraph_idx, cc_ids, cc_embed_mask, lstm, anchors_neigh_int, anchors_neigh_border, \
    anchors_pos_int, anchors_pos_ext, anchors_structure, layer_num, channel, inside, \
    device=None):
    '''
    Retrieve the precomputed anchor patches for one channel/layer and embed them.

    Inputs:
        - dataset_type: train, val, etc.
        - hparams: dictionary of hyperparameters
        - networkx_graph: base underlying graph
        - node_matrix: matrix containing node embeddings for every node in base graph
        - subgraph_idx: indices selecting the subgraphs in the current batch
        - cc_ids / cc_embed_mask: connected-component node ids and their validity mask
        - lstm: recurrent encoder used only by the 'structure' channel
        - anchors_*: precomputed anchor-patch lookups (see the init_anchors_* functions)
        - layer_num / channel / inside: which layer, channel ('neighborhood',
          'position', 'structure') and side (internal vs border) to fetch
    Returns:
        - anchor_patches: tensor of shape (batch_sz, max_n_cc, n_anchor_patches, max_length_anchor_patch) containing the node ids associated with each anchor patch
        - anchor_mask: tensor of shape (batch_sz, max_n_cc, n_anchor_patches, max_length_anchor_patch) containing a mask over the anchor patches so we know which are just padding
        - anchor_embeds: tensor of shape (batch_sz, max_n_cc, n_anchor_patches, embed_dim) containing embeddings for each anchor patch
    Raises:
        - Exception for an invalid channel
    '''
    batch_sz, max_n_cc, max_size_cc = cc_ids.shape
    if channel == 'neighborhood':
        # look up precomputed anchor patches
        if inside:
            anchor_patches = anchors_neigh_int[dataset_type][layer_num][subgraph_idx].squeeze(1)
        else:
            anchor_patches = anchors_neigh_border[dataset_type][layer_num][subgraph_idx].squeeze(1)
        anchor_patches = anchor_patches.to(cc_ids.device)
        # Get anchor patch embeddings: return shape is (batch_sz, max_n_cc, n_sampled_patches, hidden_dim)
        anchor_embeds, anchor_mask = embed_anchor_patch(node_matrix, anchor_patches, device)
        # neighborhood patches are single nodes; add a trailing length-1 axis
        anchor_patches = anchor_patches.unsqueeze(-1)
        anchor_mask = anchor_mask.unsqueeze(-1)
    elif channel == 'position':
        # Get precomputed anchor patch ids: return shape is (batch_sz, max_n_cc, n_sampled_patches)
        if inside:
            anchors_tensor = anchors_pos_int[dataset_type][layer_num][subgraph_idx].squeeze(1)
            anchor_patches = anchors_tensor.unsqueeze(1).repeat(1,max_n_cc,1) # repeat anchor patches for each CC
            anchor_patches[~cc_embed_mask] = config.PAD_VALUE #mask CC that are just padding
        else:
            # border position patches are shared across the batch; broadcast them
            anchor_patches = anchors_pos_ext[layer_num].unsqueeze(0).unsqueeze(0).repeat(batch_sz,max_n_cc,1)
            anchor_patches[~cc_embed_mask] = config.PAD_VALUE #mask CC that are just padding
        # Get anchor patch embeddings: return shape is (batch_sz, max_n_cc, n_sampled_patches, hidden_dim)
        anchor_embeds, anchor_mask = embed_anchor_patch(node_matrix, anchor_patches, device)
        # position patches are also single nodes; add a trailing length-1 axis
        anchor_patches = anchor_patches.unsqueeze(-1)
        anchor_mask = anchor_mask.unsqueeze(-1)
    elif channel == 'structure':
        anchor_patches, indices, int_anchor_rw, bor_anchor_rw = anchors_structure[layer_num] #(n_anchor_patches_sampled, max_length_anchor_patch)
        # Get anchor patch embeddings: return shape is (n_sampled_patches, hidden_dim)
        anchor_rw = int_anchor_rw if inside else bor_anchor_rw
        anchor_embeds = aggregate_structure_anchor_patch(hparams, networkx_graph, lstm, node_matrix, anchor_patches, anchor_rw, inside=inside, device=cc_ids.device)
        # expand anchor patches/embeddings to be batch_sz, max_n_cc and pad them
        # return shape of anchor_patches = (bs, n_cc, n_anchor_patches_sampled, max_length_anchor_patch)
        anchor_patches = anchor_patches.unsqueeze(0).unsqueeze(0).repeat(batch_sz,max_n_cc,1,1)
        anchor_patches[~cc_embed_mask] = config.PAD_VALUE # mask CC that are just padding
        anchor_mask = (anchor_patches != config.PAD_VALUE).bool()
        anchor_embeds = anchor_embeds.unsqueeze(0).unsqueeze(0).repeat(batch_sz,max_n_cc,1,1)
        anchor_embeds[~cc_embed_mask] = config.PAD_VALUE
    else:
        raise Exception('An invalid channel has been entered.')
    return anchor_patches, anchor_mask, anchor_embeds
#######################################################
# Embed anchor patches
def embed_anchor_patch(node_matrix, anchor_patch_ids, device):
    '''
    Look up node embeddings for the given anchor patch ids.

    Returns:
        - the embedding tensor produced by node_matrix for the ids
        - a boolean mask that is True wherever the id is not padding
    '''
    embeddings = node_matrix(anchor_patch_ids.to(device))
    padding_mask = (anchor_patch_ids != config.PAD_VALUE).bool()
    return embeddings, padding_mask
def aggregate_structure_anchor_patch(hparams, networkx_graph, lstm, node_matrix, anchor_patch_ids, all_patch_walks, inside, device):
    '''
    Computes embedding for structure anchor patch by (1) retrieving node embeddings for nodes visited in precomputed triangular random walks,
    (2) feeding the RW embeddings into an bi-lstm, and (3) summing the resulting embedding for each random walk to generate a
    final embedding of shape (n sampled anchor batches, node_embed_dim)
    '''
    # anchor_patch_ids here is 2-D: (n_sampled_patches, max_patch_len)
    # (an earlier comment claimed a 4-D batch/CC layout, which does not match
    # the unpack below; only the first dim is actually used)
    n_sampled_patches, max_patch_len = anchor_patch_ids.shape
    # Get embeddings for each walk: (n_patch, n_walk, walk_len, embed_sz)
    walk_embeds, _ = embed_anchor_patch(node_matrix, all_patch_walks, device) # n_patch, n_walk, walk_len, embed_sz
    # flatten patch/walk dims so every walk is a separate sequence for the RNN
    walk_embeds_reshaped = walk_embeds.view(n_sampled_patches * hparams['n_triangular_walks'], hparams['random_walk_len'], hparams['node_embed_size'])
    # input into RNN & aggregate over walk len
    # NOTE(review): 'lstm' is assumed to return one vector per sequence
    # (not the full (output, state) tuple) — confirm against the model code.
    walk_hidden = lstm(walk_embeds_reshaped)
    walk_hidden = walk_hidden.view(n_sampled_patches, hparams['n_triangular_walks'], -1)
    # Sum over random walks
    return torch.sum(walk_hidden, dim=1)
| 23,117
| 51.901602
| 179
|
py
|
SubGNN
|
SubGNN-main/SubGNN/subgraph_utils.py
|
# General
import typing
import sys
import numpy as np
#Networkx
import networkx as nx
# Sklearn
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import f1_score, accuracy_score
# Pytorch
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn.functional import one_hot
# Our methods
sys.path.insert(0, '..') # add config to path
import config
def read_subgraphs(sub_f, split = True):
    '''
    Read subgraphs and their labels from a tab-separated file.

    Each line has the form "<node-ids joined by '-'>\\t<labels joined by '-'>\\t<split>",
    where split is one of train/val/test. Labels are mapped to integer ids in
    order of first appearance; a line with multiple labels switches the whole
    file into multi-label mode.

    Args
        - sub_f (str): filename where subgraphs are stored
        - split (bool): unused; kept for interface compatibility

    Return for each train, val, test split:
        - sub_G (list): list of nodes belonging to each subgraph
        - sub_G_label: labels for each subgraph (LongTensor when single-label,
          list of label-id lists when multi-label)

    NOTE: if the file contains fewer 'val' than 'test' lines, the val and test
    outputs are swapped so the larger of the two is returned in the val slot.
    '''
    # Enumerate/track labels
    label_idx = 0
    labels = {}
    # Train/Val/Test subgraphs
    train_sub_G = []
    val_sub_G = []
    test_sub_G = []
    # Train/Val/Test subgraph labels
    train_sub_G_label = []
    val_sub_G_label = []
    test_sub_G_label = []
    # Train/Val/Test masks
    train_mask = []
    val_mask = []
    test_mask = []
    multilabel = False
    # Parse data
    with open(sub_f) as fin:
        subgraph_idx = 0
        for line in fin:
            # split each line once instead of re-splitting per field (the old
            # code called line.split("\t") up to four times per line)
            fields = line.split("\t")
            nodes = [int(n) for n in fields[0].split("-") if n != ""]
            if len(nodes) != 0:
                if len(nodes) == 1: print(nodes)
                l = fields[1].split("-")
                if len(l) > 1: multilabel = True
                for lab in l:
                    if lab not in labels:
                        labels[lab] = label_idx
                        label_idx += 1
                which_split = fields[2].strip()
                if which_split == "train":
                    train_sub_G.append(nodes)
                    train_sub_G_label.append([labels[lab] for lab in l])
                    train_mask.append(subgraph_idx)
                elif which_split == "val":
                    val_sub_G.append(nodes)
                    val_sub_G_label.append([labels[lab] for lab in l])
                    val_mask.append(subgraph_idx)
                elif which_split == "test":
                    test_sub_G.append(nodes)
                    test_sub_G_label.append([labels[lab] for lab in l])
                    test_mask.append(subgraph_idx)
                subgraph_idx += 1
    if not multilabel:
        train_sub_G_label = torch.tensor(train_sub_G_label).long().squeeze()
        val_sub_G_label = torch.tensor(val_sub_G_label).long().squeeze()
        test_sub_G_label = torch.tensor(test_sub_G_label).long().squeeze()
    if len(val_mask) < len(test_mask):
        return train_sub_G, train_sub_G_label, test_sub_G, test_sub_G_label, val_sub_G, val_sub_G_label
    return train_sub_G, train_sub_G_label, val_sub_G, val_sub_G_label, test_sub_G, test_sub_G_label
def calc_f1(logits, labels, avg_type='macro', multilabel_binarizer=None):
    '''
    Compute the F1 score (macro or micro, per 'avg_type') for logits/labels.

    Multi-label mode (multilabel_binarizer is not None) thresholds per-class
    sigmoid probabilities at 0.5; otherwise predictions are the argmax class.
    '''
    if multilabel_binarizer is None:  # multi-class, single-label prediction
        pred = torch.argmax(logits, dim=-1)
    else:  # multi-label prediction: independent sigmoid + 0.5 threshold
        probs = torch.sigmoid(logits)
        thresh = torch.tensor([0.5]).to(probs.device)
        pred = probs > thresh
    score = f1_score(labels.cpu().detach(), pred.cpu().detach(), average=avg_type)
    return torch.tensor([score])
def calc_accuracy(logits, labels, multilabel_binarizer=None):
    '''
    Calculate the accuracy for the specified logits and labels.

    Args:
        logits: raw model outputs, shape (n_examples, n_classes)
        labels: ground-truth labels (multi-hot rows when multi-label, class indices otherwise)
        multilabel_binarizer: if not None, predictions are treated as multi-label
    Returns:
        a 1-element torch tensor containing the accuracy
    '''
    if multilabel_binarizer is not None:  # multi-label prediction
        # sigmoid each logit independently & predict positive where probability > 0.5
        probs = torch.sigmoid(logits)
        thresh = torch.tensor([0.5]).to(probs.device)
        pred = (probs > thresh)
    else:
        # predicted class = index of the max logit (dim=-1 for consistency with calc_f1)
        pred = torch.argmax(logits, dim=-1)
    # single scoring call shared by both branches (previously duplicated)
    acc = accuracy_score(labels.cpu().detach(), pred.cpu().detach())
    return torch.tensor([acc])
def get_border_nodes(graph, subgraph):
    '''
    Returns (1) an array containing the border nodes of the subgraph (i.e. all nodes that have
    an edge to a node not in the subgraph, but are themselves in the subgraph)
    and (2) an array containing all of the nodes in the base graph that aren't in the subgraph.

    Args:
        graph: networkx graph for the full base graph (nodes assumed labeled starting at 1)
        subgraph: networkx graph whose nodes are a subset of `graph`'s nodes
    '''
    # get all of the nodes in the base graph that are not in the subgraph
    non_subgraph_nodes = np.array(list(set(graph.nodes()).difference(set(subgraph.nodes()))))
    subgraph_nodes = np.array(list(subgraph.nodes()))
    # NOTE(review): .todense() on networkx's sparse adjacency yields an np.matrix; the
    # matrix-specific flatten()/boolean-indexing semantics below depend on that type —
    # do not swap in np.asarray without re-testing.
    A = nx.adjacency_matrix(graph).todense()
    # subset adjacency matrix to get edges between subgraph and non-subgraph nodes
    border_A = A[np.ix_(subgraph_nodes - 1,non_subgraph_nodes - 1)] # NOTE: Need to subtract 1 bc nodes are indexed starting at 1
    # the nodes in the subgraph are border nodes if they have at least one edge to a node that is not in the subgraph
    border_edge_exists = (np.sum(border_A, axis=1) > 0).flatten()
    # boolean-matrix indexing selects the border nodes as a flat array
    border_nodes = subgraph_nodes[np.newaxis][border_edge_exists]
    return border_nodes, non_subgraph_nodes
def get_component_border_neighborhood_set(networkx_graph, component, k, ego_graph_dict=None):
    '''
    Return the set of nodes in the k-hop border of the specified component.

    Args:
        networkx_graph: base graph (only consulted when ego_graph_dict is None)
        component: 1D tensor of node IDs in the component (with possible PAD_VALUE padding),
            or any iterable of node IDs
        k: number of hops around the component that is included in the border set
        ego_graph_dict: optional dict mapping (node id - 1) -> precomputed ego-graph node list
    Returns:
        set of node IDs within k hops of the component but not in the component itself
    '''
    # First, remove any padding that exists in the component
    if isinstance(component, torch.Tensor):
        component_inds_non_neg = (component != config.PAD_VALUE).nonzero().view(-1)
        component_set = {int(n) for n in component[component_inds_non_neg]}
    else:
        component_set = set(component)
    # union of the k-hop ego graphs of every node in the connected component
    neighborhood = set()
    for node in component_set:
        if ego_graph_dict is None:
            # not precomputed: calculate the ego graph (induced subgraph of neighbors
            # centered at `node` with the specified radius) on the fly
            ego_g = nx.ego_graph(networkx_graph, node, radius=k).nodes()
        else:
            # NOTE: nodes in the dict are 0-indexed, while our graph nodes start at 1
            ego_g = ego_graph_dict[node - 1]
        neighborhood = neighborhood.union(set(ego_g))
    # remove the component's own nodes, leaving only the k-hop border
    return neighborhood.difference(component_set)
# THE BELOW FUNCTIONS ARE COPIED FROM ALLEN NLP
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
    """
    Compute an attention-weighted sum of the rows of `matrix`.

    Given a "matrix" of vectors and an "attention" distribution over its rows, return
    the weighted sum over the second-to-last dimension of the matrix — the standard
    post-attention computation. Higher-order tensors are supported: every dimension of
    the matrix before the last must be matched in the attention tensor, and any extra
    attention dimensions must sit directly after the batch dimension.

    For example, with a matrix of shape `(batch_size, num_queries, num_words,
    embedding_dim)`, valid attention shapes include `(batch_size, num_queries,
    num_words)` and `(batch_size, num_documents, num_queries, num_words)`, producing
    outputs of shape `(batch_size, num_queries, embedding_dim)` and
    `(batch_size, num_documents, num_queries, embedding_dim)` respectively.
    """
    # Fast paths: these shapes map directly onto batched matrix multiplication,
    # for which pytorch has efficient (if cryptically named) kernels.
    if matrix.dim() == 3:
        if attention.dim() == 2:
            # (B, R) x (B, R, D) -> (B, D)
            return attention.unsqueeze(1).bmm(matrix).squeeze(1)
        if attention.dim() == 3:
            # (B, Q, R) x (B, R, D) -> (B, Q, D)
            return attention.bmm(matrix)
    # General path: broadcast the matrix up to the attention's extra dimensions.
    if matrix.dim() - 1 < attention.dim():
        target_shape = list(matrix.size())
        n_missing = attention.dim() - matrix.dim() + 1
        for axis in range(n_missing):
            matrix = matrix.unsqueeze(1)
            target_shape.insert(axis + 1, attention.size(axis + 1))
        matrix = matrix.expand(*target_shape)
    # Multiply each row by its weight, then sum over the row dimension.
    weighted = attention.unsqueeze(-1).expand_as(matrix) * matrix
    return weighted.sum(dim=-2)
def masked_sum(
    vector: torch.Tensor, mask: torch.BoolTensor, dim: int, keepdim: bool = False) -> torch.Tensor:
    """
    **
    Adapted from AllenNLP's masked mean:
    https://github.com/allenai/allennlp/blob/90e98e56c46bc466d4ad7712bab93566afe5d1d0/allennlp/nn/util.py
    **
    Sum `vector` along dimension `dim`, treating masked-out entries as zero.
    # Parameters
    vector : `torch.Tensor`
        The vector to sum over.
    mask : `torch.BoolTensor`
        The mask of the vector. It must be broadcastable with vector.
    dim : `int`
        The dimension to sum along.
    keepdim : `bool`
        Whether to keep the reduced dimension.
    # Returns
    `torch.Tensor`
        A `torch.Tensor` containing the masked sums.
    """
    # zero out every position the mask excludes, then reduce with a plain sum
    zeroed = vector.masked_fill(~mask, 0.0)
    return torch.sum(zeroed, dim=dim, keepdim=keepdim)
| 10,206
| 41.886555
| 172
|
py
|
SubGNN
|
SubGNN-main/SubGNN/subgraph_mpn.py
|
# General
import numpy as np
import sys
from multiprocessing import Pool
import time
# Pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
# Pytorch Geometric
from torch_geometric.utils import add_self_loops
from torch_geometric.nn import MessagePassing
# Our methods
sys.path.insert(0, '..') # add config to path
import config
class SG_MPN(MessagePassing):
    '''
    A single subgraph-level message passing layer.

    Messages are passed from anchor patch to connected component and weighted by the
    channel-specific similarity between the two. The resulting messages for a single
    component are aggregated and used to update the embedding for the component.
    '''
    def __init__(self, hparams):
        # hparams: dict of hyperparameters; this layer reads 'node_embed_size',
        # 'use_mpn_projection', and (optionally) 'norm_pos_struc_embed'.
        super(SG_MPN, self).__init__(aggr='add') # "Add" aggregation.
        self.hparams = hparams
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # maps the concatenation [prev cc embedding ; aggregated messages] -> node_embed_size
        self.linear = nn.Linear(hparams['node_embed_size'] * 2, hparams['node_embed_size']).to(self.device)
        # scores each raw anchor-patch message to build the property-aware embedding
        self.linear_position = nn.Linear(hparams['node_embed_size'],1).to(self.device)

    def create_patch_embedding_matrix(self,cc_embeds, cc_embed_mask, anchor_embeds, anchor_mask):
        '''
        Concatenate the connected component and anchor patch embeddings into a single matrix.
        This will be used as input for the pytorch geometric message passing framework.

        Anchor patch embeddings are stacked ABOVE the cc embeddings — later index math
        (create_edge_index, get_similarities, forward) relies on this ordering.
        '''
        batch_sz, max_n_cc, cc_hidden_dim = cc_embeds.shape
        anchor_hidden_dim = anchor_embeds.shape[-1]
        # reshape connected component & anchor patch embedding matrices
        reshaped_cc_embeds = cc_embeds.view(-1, cc_hidden_dim) #(batch_sz * max_n_cc , hidden_dim)
        reshaped_anchor_embeds = anchor_embeds.view(-1, anchor_hidden_dim) #(batch_sz * max_n_cc * n_sampled_patches, hidden_dim)
        # concatenate the anchor patch and connected component embeddings into single matrix
        patch_embedding_matrix = torch.cat([reshaped_anchor_embeds, reshaped_cc_embeds])
        return patch_embedding_matrix

    def create_edge_index(self, reshaped_cc_ids, reshaped_anchor_patch_ids, anchor_mask, n_anchor_patches):
        '''
        Create edge matrix of shape (2, # edges) where edges exist between connected components
        and their associated anchor patches.
        Note that edges don't exist between components or between anchor patches.
        Returns the masked edge index and the boolean mask selecting valid (non-padding) edges.
        '''
        # get indices into patch matrix corresponding to anchor patches
        anchor_inds = torch.tensor(range(reshaped_anchor_patch_ids.shape[0]))
        # get indices into patch matrix corresponding to connected components
        # (offset by the anchor count because anchors are stacked first in the patch matrix)
        cc_inds = torch.tensor(range(reshaped_cc_ids.shape[0])) + reshaped_anchor_patch_ids.shape[0]
        # repeat CC indices n_anchor_patches times
        cc_inds_matched = cc_inds.repeat_interleave(n_anchor_patches)
        # stack together two indices to create (2,E) edge matrix
        edge_index = torch.stack((anchor_inds, cc_inds_matched)).to(device=self.device)
        # an edge is kept when its anchor patch row is real (first mask element) — padding is dropped
        mask_inds = anchor_mask.view(-1, anchor_mask.shape[-1])[:,0]
        return edge_index[:,mask_inds], mask_inds

    def get_similarities(self, networkx_graph, edge_index, sims, cc_ids, anchor_ids, anchors_sim_index):
        '''
        Reshape similarities tensor of shape (n edges, 1) that contains similarity value for each edge in the edge index.
        sims: (batch_size, max_n_cc, n possible anchor patches)
        edge_index: (2, number of edges between components and anchor patches)
        anchors_sim_index: indices into sims matrix for the structure channel that specify
            which anchor patches we're using (None for neighborhood/position channels)
        '''
        n_cc = cc_ids.shape[0]
        n_anchor_patches = anchor_ids.shape[0]
        batch_sz, max_n_cc, n_patch_options = sims.shape
        sims = sims.view(batch_sz * max_n_cc, n_patch_options)
        # NOTE(review): `!= None` / `== None` below rely on anchors_sim_index being a plain
        # list or None; if it were a tensor, element-wise comparison would break this check.
        if anchors_sim_index != None: anchors_sim_index = anchors_sim_index * torch.unique(edge_index[1,:]).shape[0] # n unique CC
        # NOTE: edge_index contains stacked anchor, cc embeddings
        if anchors_sim_index == None: # neighborhood, position channels
            anchor_indices = anchor_ids[edge_index[0,:],:] - 1 # get the indices into the similarity matrix of which anchors were sampled
            cc_indices = edge_index[1,:] - n_anchor_patches # get indices of the conneced components into the similarity matrix
            similarities = sims[cc_indices, anchor_indices.squeeze()]
        else: #structure channel
            # get indices of the conneced components into the similarity matrix
            cc_indices = edge_index[1,:] - n_anchor_patches #indexing into edge index is different than indexing into sims because patch matrix from which edge index was derived stacks anchor paches before the cc embeddings
            similarities = sims[cc_indices, torch.tensor(anchors_sim_index)] # anchors_sim_index provides indexing into the big similarity matrix - it tells you which anchors we actually sampled
        # ensure a trailing singleton dim so similarity broadcasts against message vectors
        if len(similarities.shape) == 1: similarities = similarities.unsqueeze(-1)
        return similarities

    def generate_pos_struc_embeddings(self, raw_msgs, cc_ids, anchor_ids, edge_index, edge_index_mask):
        '''
        Generates the property aware position/structural embeddings for each connected component.

        raw_msgs: un-aggregated messages (one per real anchor-cc edge)
        Returns a tensor of shape (n subgraphs * n_cc, n_anchors_per_cc).
        '''
        # Generate position/structure embeddings
        n_cc = cc_ids.shape[0]
        n_anchor_patches = anchor_ids.shape[0]
        embed_sz = raw_msgs.shape[1]
        n_anchors_per_cc = int(n_anchor_patches/n_cc)
        # 1) add masked CC back in & reshape
        # raw_msgs doesn't include padding so we need to add padding back in
        # NOTE: while these are named as position embeddings, these apply to structure channel as well
        pos_embeds = torch.zeros((n_cc * n_anchors_per_cc, embed_sz)).to(device=self.device) + config.PAD_VALUE
        pos_embeds[edge_index_mask] = raw_msgs # raw_msgs doesn't include padding so we need to add padding back in
        pos_embeds_reshaped = pos_embeds.view(-1, n_anchors_per_cc, embed_sz)
        # 2) linear layer + normalization
        position_out = self.linear_position(pos_embeds_reshaped).squeeze(-1)
        # optionally normalize the output of the linear layer (this is what P-GNN paper did)
        if 'norm_pos_struc_embed' in self.hparams and self.hparams['norm_pos_struc_embed']:
            position_out = F.normalize(position_out, p=2, dim=-1)
        else: # otherwise, just push through a relu
            position_out = F.relu(position_out)
        return position_out #(n subgraphs * n_cc, n_anchors_per_cc )

    def forward(self, networkx_graph, sims, cc_ids, cc_embeds, cc_embed_mask, \
        anchor_patches, anchor_embeds, anchor_mask, anchors_sim_index):
        '''
        Performs a single message passing layer.
        Returns:
            - cc_embed_matrix_reshaped: order-invariant hidden representation (batch_sz, max_n_cc, node embed dim)
            - position_struc_out_reshaped: property aware cc representation (batch_sz, max_n_cc, n_anchor_patches)
        '''
        # reshape anchor patches & CC embeddings & stack together
        # NOTE: anchor patches then CC stacked in matrix
        patch_matrix = self.create_patch_embedding_matrix(cc_embeds, cc_embed_mask, anchor_embeds, anchor_mask)
        # reshape cc & anchor patch id matrices
        batch_sz, max_n_cc, max_size_cc = cc_ids.shape
        cc_ids = cc_ids.view(-1, max_size_cc) # (batch_sz * max_n_cc, max_size_cc)
        anchor_ids = anchor_patches.contiguous().view(-1, anchor_patches.shape[-1]) # (batch_sz * max_n_cc * n_sampled_patches, anchor patch size)
        n_anchor_patches_sampled = anchor_ids.shape[0]
        # create edge index
        edge_index, edge_index_mask = self.create_edge_index(cc_ids, anchor_ids, anchor_mask, anchor_patches.shape[2])
        # get similarity values for each edge index
        similarities = self.get_similarities( networkx_graph, edge_index, sims, cc_ids, anchor_ids, anchors_sim_index)
        # Perform Message Passing
        # propagated_msgs: (length of concatenated anchor patches & cc, node dim size)
        propagated_msgs, raw_msgs = self.propagate(edge_index, x=patch_matrix, similarity=similarities)
        # Generate Position/Structure Embeddings
        position_struc_out = self.generate_pos_struc_embeddings(raw_msgs, cc_ids, anchor_ids, edge_index, edge_index_mask)
        # index resulting propagated messagaes to get updated CC embeddings & reshape
        # (CC rows sit AFTER the anchor rows in the patch matrix, hence the offset slice)
        cc_embed_matrix = propagated_msgs[n_anchor_patches_sampled:,:]
        cc_embed_matrix_reshaped = cc_embed_matrix.view(batch_sz , max_n_cc ,-1)
        # reshape property aware position/structure embeddings
        position_struc_out_reshaped = position_struc_out.view(batch_sz, max_n_cc, -1)
        return cc_embed_matrix_reshaped, position_struc_out_reshaped

    def propagate(self, edge_index, size=None, **kwargs):
        # We need to reimplement propagate instead of relying on base class implementation because we need
        # to return the raw messages to generate the position/structure embeddings.
        # Everything else is identical to propagate function from Pytorch Geometric.
        # NOTE(review): this mirrors the internals of a specific PyG version
        # (__check_input__/__collect__/inspector); upgrading torch_geometric may break it.
        r"""The initial call to start propagating messages.
        Args:
            edge_index (Tensor or SparseTensor): A :obj:`torch.LongTensor` or a
                :obj:`torch_sparse.SparseTensor` that defines the underlying
                graph connectivity/message passing flow.
                :obj:`edge_index` holds the indices of a general (sparse)
                assignment matrix of shape :obj:`[N, M]`.
                If :obj:`edge_index` is of type :obj:`torch.LongTensor`, its
                shape must be defined as :obj:`[2, num_messages]`, where
                messages from nodes in :obj:`edge_index[0]` are sent to
                nodes in :obj:`edge_index[1]`
                (in case :obj:`flow="source_to_target"`).
                If :obj:`edge_index` is of type
                :obj:`torch_sparse.SparseTensor`, its sparse indices
                :obj:`(row, col)` should relate to :obj:`row = edge_index[1]`
                and :obj:`col = edge_index[0]`.
                The major difference between both formats is that we need to
                input the *transposed* sparse adjacency matrix into
                :func:`propagate`.
            size (tuple, optional): The size :obj:`(N, M)` of the assignment
                matrix in case :obj:`edge_index` is a :obj:`LongTensor`.
                If set to :obj:`None`, the size will be automatically inferred
                and assumed to be quadratic.
                This argument is ignored in case :obj:`edge_index` is a
                :obj:`torch_sparse.SparseTensor`. (default: :obj:`None`)
            **kwargs: Any additional data which is needed to construct and
                aggregate messages, and to update node embeddings.
        """
        size = self.__check_input__(edge_index, size)
        # run both functions in separation.
        coll_dict = self.__collect__(self.__user_args__, edge_index, size,
                                     kwargs)
        msg_kwargs = self.inspector.distribute('message', coll_dict)
        msg_out = self.message(**msg_kwargs)
        aggr_kwargs = self.inspector.distribute('aggregate', coll_dict)
        out = self.aggregate(msg_out, **aggr_kwargs)
        update_kwargs = self.inspector.distribute('update', coll_dict)
        out = self.update(out, **update_kwargs)
        # return the aggregated output AND the raw per-edge messages
        return out, msg_out

    def message(self, x_j, similarity): #default is source to target
        '''
        The message is the anchor patch representation weighted by the similarity between
        the patch and the component.
        '''
        return similarity * x_j

    def update(self, aggr_out, x):
        '''
        Update the connected component embedding from the result of the aggregation. The default is to 'use_mpn_projection',
        i.e. concatenate the aggregated messages with the previous cc embedding and push through a relu.
        '''
        if self.hparams['use_mpn_projection']:
            return F.relu(self.linear(torch.cat([x, aggr_out], dim=1)))
        else:
            return aggr_out
| 12,371
| 50.123967
| 223
|
py
|
SubGNN
|
SubGNN-main/SubGNN/datasets.py
|
# Pytorch
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
# Typing
from typing import List
class SubgraphDataset(Dataset):
    '''
    Stores subgraphs and their associated labels, as well as precomputed similarities
    and border sets for the subgraphs.
    '''
    def __init__(self, subgraph_list: List, labels, cc_ids, N_border, NP_sim, I_S_sim, B_S_sim, multilabel, multilabel_binarizer):
        '''
        Args:
            subgraph_list: list of subgraphs, each a list of node IDs
            labels: per-subgraph labels (list of label lists when multilabel)
            cc_ids: per-subgraph connected-component node id tensors
            N_border: precomputed neighborhood border sets, or None
            NP_sim, I_S_sim, B_S_sim: precomputed similarity matrices, or None
            multilabel: whether the task is multi-label classification
            multilabel_binarizer: fitted sklearn MultiLabelBinarizer (used when multilabel)
        '''
        # subgraph ids & labels
        self.subgraph_list = subgraph_list
        self.cc_ids = cc_ids
        self.labels = labels
        # precomputed border set
        self.N_border = N_border
        # precomputed similarity matrices
        self.NP_sim = NP_sim
        self.I_S_sim = I_S_sim
        self.B_S_sim = B_S_sim
        # necessary for handling multi-label classification
        self.multilabel = multilabel
        self.multilabel_binarizer = multilabel_binarizer

    def __len__(self):
        '''
        Returns the number of subgraphs.
        '''
        return len(self.subgraph_list)

    def __getitem__(self, idx):
        '''
        Returns a single example from the dataset.
        '''
        subgraph_ids = torch.LongTensor(self.subgraph_list[idx]) # list of node IDs in subgraph
        cc_ids = self.cc_ids[idx]
        # The precomputed quantities are optional; identity comparison with None avoids
        # accidentally triggering element-wise __ne__ on tensor-like containers.
        N_border = self.N_border[idx] if self.N_border is not None else None
        NP_sim = self.NP_sim[idx] if self.NP_sim is not None else None
        I_S_sim = self.I_S_sim[idx] if self.I_S_sim is not None else None
        B_S_sim = self.B_S_sim[idx] if self.B_S_sim is not None else None
        if self.multilabel:
            # multi-hot encode the label list via the fitted binarizer
            label = torch.LongTensor(self.multilabel_binarizer.transform([self.labels[idx]]))
        else:
            label = torch.LongTensor([self.labels[idx]])
        idx = torch.LongTensor([idx])
        return (subgraph_ids, cc_ids, N_border, NP_sim, I_S_sim, B_S_sim, idx, label)
| 1,904
| 31.844828
| 130
|
py
|
SubGNN
|
SubGNN-main/SubGNN/train_config.py
|
# General
import numpy as np
import random
import argparse
import tqdm
import pickle
import json
import commentjson
import joblib
import os
import sys
import pathlib
from collections import OrderedDict
import random
import string
# Pytorch
import torch
from torch.utils.data import DataLoader
from torch.nn.functional import one_hot
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.profiler import AdvancedProfiler
# Optuna
import optuna
from optuna.samplers import TPESampler
from optuna.integration import PyTorchLightningPruningCallback
# Our Methods
import SubGNN as md
sys.path.insert(0, '..') # add config to path
import config
def parse_arguments():
    '''
    Read the command-line arguments; returns a namespace whose `config_path`
    attribute holds the path to the config file (or None).
    '''
    arg_parser = argparse.ArgumentParser(description="Learn subgraph embeddings")
    arg_parser.add_argument('-config_path', type=str, default=None, help='Load config file')
    return arg_parser.parse_args()
def read_json(fname):
    '''
    Parse the (comment-tolerant) JSON file at `fname` into an OrderedDict.
    '''
    with open(fname, 'rt') as json_file:
        parsed = commentjson.load(json_file, object_hook=OrderedDict)
    return parsed
def get_optuna_suggest(param_dict, name, trial):
    '''
    Sample a value for the hyperparameter `name` from the search space described by `param_dict`.

    name: string identifying the hyperparameter (e.g. 'batch_size')
    trial: active optuna trial
    param_dict: describes which trial.suggest_* method to call and with what arguments, e.g.
        {
            "type" : "suggest_categorical",
            "args" : [[ 64, 128]]
        }
    An optional "kwargs" entry is forwarded as keyword arguments.
    '''
    suggest_fn = getattr(trial, param_dict['type'])  # e.g. trial.suggest_categorical
    call_args = [name, *param_dict['args']]          # e.g. ['batch_size', [64, 128]]
    call_kwargs = dict(param_dict.get("kwargs", {}))
    return suggest_fn(*call_args, **call_kwargs)
def get_hyperparams_optuna(run_config, trial):
    '''
    Merge the fixed and optuna-sampled hyperparameters from the run config into one dict.

    Returns: dict mapping hyperparameter name (e.g. batch_size) to its value for this trial.
    '''
    # start from the fixed hyperparameters
    hyperparams = dict(run_config["hyperparams_fix"])
    # sample each variable hyperparameter from the range specified in the run config,
    # overriding any fixed value of the same name
    for hyp_name in dict(run_config["hyperparams_optuna"]).keys():
        hyperparams[hyp_name] = get_optuna_suggest(run_config["hyperparams_optuna"][hyp_name], hyp_name, trial)
    return hyperparams
def build_model(run_config, trial = None):
    '''
    Instantiate SubGNN from the hyperparameters specified in the run config.
    Returns the model together with the resolved hyperparameter dict.
    '''
    # resolve the hyperparameters for the current optuna trial
    hparams = get_hyperparams_optuna(run_config, trial)
    # Seed every RNG for reproducibility
    seed = hparams['seed']
    torch.manual_seed(seed)
    np.random.seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # build SubGNN with all of the required data paths from the run config
    subgnn_model = md.SubGNN(hparams, run_config["graph_path"],
                             run_config["subgraphs_path"], run_config["embedding_path"],
                             run_config["similarities_path"], run_config["shortest_paths_path"],
                             run_config['degree_sequence_path'], run_config['ego_graph_path'])
    return subgnn_model, hparams
def build_trainer(run_config, hyperparameters, trial = None):
    '''
    Set up the pytorch-lightning trainer (logging, checkpointing, optional pruning) for one trial.

    Returns (trainer, trainer_kwargs, log_dir) where log_dir is the tensorboard version folder.
    '''
    if 'progress_bar_refresh_rate' in hyperparameters:
        p_refresh = hyperparameters['progress_bar_refresh_rate']
    else:
        p_refresh = 5
    # set epochs, gpus, gradient clipping, etc.
    # if 'no_gpu' in run config, then use CPU
    trainer_kwargs={'max_epochs': hyperparameters['max_epochs'],
                    "gpus": 0 if 'no_gpu' in run_config else 1,
                    "num_sanity_val_steps":0,
                    "progress_bar_refresh_rate":p_refresh,
                    "gradient_clip_val": hyperparameters['grad_clip']
                    }
    # set auto learning rate finder param
    if 'auto_lr_find' in hyperparameters and hyperparameters['auto_lr_find']:
        trainer_kwargs['auto_lr_find'] = hyperparameters['auto_lr_find']
    # Create tensorboard logger
    lgdir = os.path.join(run_config['tb']['dir_full'], run_config['tb']['name'])
    if not os.path.exists(lgdir):
        os.makedirs(lgdir)
    # random version suffix avoids collisions when several trials log under the same name
    logger = TensorBoardLogger(run_config['tb']['dir_full'], name=run_config['tb']['name'], version="version_"+ str(random.randint(0, 10000000)))
    if not os.path.exists(logger.log_dir):
        os.makedirs(logger.log_dir)
    print("Tensorboard logging at ", logger.log_dir)
    trainer_kwargs["logger"] = logger
    # Save top three model checkpoints, ranked by the monitored validation metric
    trainer_kwargs["checkpoint_callback"] = ModelCheckpoint(
        filepath= os.path.join(logger.log_dir, "{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}"),
        save_top_k = 3,
        verbose=True,
        monitor=run_config['optuna']['monitor_metric'],
        mode='max'
    )
    # if we use pruning, use the pytorch lightning pruning callback
    if run_config["optuna"]['pruning']:
        trainer_kwargs['early_stop_callback'] = PyTorchLightningPruningCallback(trial, monitor=run_config['optuna']['monitor_metric'])
    trainer = pl.Trainer(**trainer_kwargs)
    return trainer, trainer_kwargs, logger.log_dir
def train_model(run_config, trial = None):
    '''
    Train a single model whose hyperparameters are specified in the run config.

    Writes the resolved hyperparameters, trainer kwargs, and final metric scores to the
    trial's results directory as JSON.
    Returns the max (or min) of the metric specified by 'monitor_metric' in the run config.
    '''
    # get model and hyperparameter dict
    model, hyperparameters = build_model(run_config, trial)
    # build optuna trainer
    trainer, trainer_kwargs, results_path = build_trainer(run_config, hyperparameters, trial)
    # dump hyperparameters to results dir
    hparam_file = open(os.path.join(results_path, "hyperparams.json"),"w")
    hparam_file.write(json.dumps(hyperparameters, indent=4))
    hparam_file.close()
    # dump trainer args to results dir; drop the non-JSON-serializable entries first
    tkwarg_file = open(os.path.join(results_path, "trainer_kwargs.json"),"w")
    pop_keys = [key for key in ['logger','profiler','early_stop_callback','checkpoint_callback'] if key in trainer_kwargs.keys()]
    [trainer_kwargs.pop(key) for key in pop_keys]
    tkwarg_file.write(json.dumps(trainer_kwargs, indent=4))
    tkwarg_file.close()
    # train the model
    trainer.fit(model)
    # write results to the results dir
    if results_path is not None:
        hparam_file = open(os.path.join(results_path, "final_metric_scores.json"),"w")
        # tensors -> floats so the scores are JSON serializable
        results_serializable = {k:float(v) for k,v in model.metric_scores[-1].items()}
        hparam_file.write(json.dumps(results_serializable, indent=4))
        hparam_file.close()
    # return the max (or min) metric specified by 'monitor_metric' in the run config
    all_scores = [score[run_config['optuna']['monitor_metric']].numpy() for score in model.metric_scores]
    if run_config['optuna']['opt_direction'] == "maximize":
        return(np.max(all_scores))
    else:
        return(np.min(all_scores))
def main():
    '''
    Perform an optuna run according to the hyperparameters and directory locations
    specified in the config file passed via -config_path.
    '''
    torch.autograd.set_detect_anomaly(True)
    args = parse_arguments()
    # read in config file
    run_config = read_json(args.config_path)
    ## Set paths to data
    task = run_config['data']['task']
    embedding_type = run_config['hyperparams_fix']['embedding_type']
    # paths to subgraphs, edge list, and shortest paths between all nodes in the graph
    run_config["subgraphs_path"] = os.path.join(task, "subgraphs.pth")
    run_config["graph_path"] = os.path.join(task, "edge_list.txt")
    run_config['shortest_paths_path'] = os.path.join(task, "shortest_path_matrix.npy")
    run_config['degree_sequence_path'] = os.path.join(task, "degree_sequence.txt")
    run_config['ego_graph_path'] = os.path.join(task, "ego_graphs.txt")
    # directory where similarity calculations will be stored
    run_config["similarities_path"] = os.path.join(task, "similarities/")
    # get location of node embeddings
    if embedding_type == 'gin':
        run_config["embedding_path"] = os.path.join(task, "gin_embeddings.pth")
    elif embedding_type == 'graphsaint':
        run_config["embedding_path"] = os.path.join(task, "graphsaint_gcn_embeddings.pth")
    else:
        raise NotImplementedError
    # create a tensorboard directory in the folder specified by dir in the PROJECT ROOT folder
    if 'local' in run_config['tb'] and run_config['tb']['local']:
        run_config['tb']['dir_full'] = run_config['tb']['dir']
    else:
        run_config['tb']['dir_full'] = os.path.join(config.PROJECT_ROOT, run_config['tb']['dir'])
    ntrials = run_config['optuna']['opt_n_trials']
    print(f'Running {ntrials} Trials of optuna')
    if run_config['optuna']['pruning']:
        pruner = optuna.pruners.MedianPruner()
    else:
        pruner = None
    # the complete study path is the tensorboard directory + the study name
    run_config['study_path'] = os.path.join(run_config['tb']['dir_full'], run_config['tb']['name'])
    print("Logging to ", run_config['study_path'])
    pathlib.Path(run_config['study_path']).mkdir(parents=True, exist_ok=True)
    # get database file
    db_file = os.path.join(run_config['study_path'], 'optuna_study_sqlite.db')
    # specify sampler
    # NOTE(review): if 'sampler' is none of grid/tpe/random, `sampler` below is unbound and
    # create_study raises NameError — confirm the config schema guarantees one of the three.
    if run_config['optuna']['sampler'] == "grid" and "grid_search_space" in run_config['optuna']:
        sampler = optuna.samplers.GridSampler(run_config['optuna']['grid_search_space'])
    elif run_config['optuna']['sampler'] == "tpe":
        sampler = optuna.samplers.TPESampler()
    elif run_config['optuna']['sampler'] == "random":
        sampler = optuna.samplers.RandomSampler()
    # create an optuna study with the specified sampler, pruner, direction (e.g. maximize)
    # A SQLlite database is used to keep track of results
    # Will load in existing study if one exists
    study = optuna.create_study(direction=run_config['optuna']['opt_direction'],
                                sampler=sampler,
                                pruner=pruner,
                                storage= 'sqlite:///' + db_file,
                                study_name=run_config['study_path'],
                                load_if_exists=True)
    study.optimize(lambda trial: train_model(run_config, trial), n_trials=run_config['optuna']['opt_n_trials'], n_jobs =run_config['optuna']['opt_n_cores'])
    optuna_results_path = os.path.join(run_config['study_path'], 'optuna_study.pkl')
    print("Saving Study Results to", optuna_results_path)
    joblib.dump(study, optuna_results_path)
    print(study.best_params)
# Script entry point: run the optuna hyperparameter search described by -config_path.
if __name__ == "__main__":
    main()
| 11,383
| 39.226148
| 156
|
py
|
SubGNN
|
SubGNN-main/SubGNN/SubGNN.py
|
# General
import os
import numpy as np
from pathlib import Path
import typing
import time
import json
import copy
from typing import Dict, List
import multiprocessing
from multiprocessing import Pool
from itertools import accumulate
from collections import OrderedDict
import pickle
import sys
from functools import partial
#Sklearn
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import roc_auc_score
# Pytorch
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from torch.nn.utils.rnn import pad_sequence, pack_sequence, pad_packed_sequence
import torch.nn.functional as F
from torch.nn.functional import one_hot
from torch.nn.parameter import Parameter
import matplotlib.pyplot as plt
# Pytorch lightning
import pytorch_lightning as pl
# Pytorch Geometric
from torch_geometric.utils.convert import to_networkx
from torch_geometric.nn import MessagePassing, GINConv
# Similarity calculations
from fastdtw import fastdtw
# Networkx
import networkx as nx
# Our Methods
sys.path.insert(0, '..') # add config to path
import config
import subgraph_utils
from subgraph_mpn import SG_MPN
from datasets import SubgraphDataset
import anchor_patch_samplers
from anchor_patch_samplers import *
import gamma
import attention
class LSTM(nn.Module):
    '''
    Bidirectional LSTM followed by a linear head that maps back to the input feature size.
    '''
    def __init__(self, n_features, h, dropout=0.0, num_layers=1, batch_first=True, aggregator='last'):
        '''
        n_features: input (and output) feature dimension
        h: hidden size per direction
        aggregator: how to pool the per-step hidden states ('last' or 'sum')
        '''
        super().__init__()
        self.num_layers = num_layers   # number of stacked LSTM layers
        self.aggregator = aggregator   # pooling strategy over time steps
        # submodules are created in this order so parameter initialization is reproducible
        self.lstm = nn.LSTM(n_features, h, num_layers=num_layers, batch_first=batch_first, dropout=dropout, bidirectional=True)
        # bidirectional -> each step's hidden state has size 2*h
        self.linear = nn.Linear(h * 2, n_features)

    def forward(self, input):
        # input: (batch_sz, seq_len, n_features)
        hidden_states, _ = self.lstm(input)
        # pool over the time dimension: either the final step or the sum of all steps
        if self.aggregator == 'sum':
            pooled = torch.sum(hidden_states, dim=1)
        elif self.aggregator == 'last':
            pooled = hidden_states[:, -1, :]
        else:
            raise NotImplementedError
        return self.linear(pooled)
class SubGNN(pl.LightningModule):
'''
Pytorch lightning class for SubGNN
'''
    def __init__(self, hparams: Dict, graph_path: str, subgraph_path: str,
            embedding_path: str, similarities_path: str, shortest_paths_path:str,
            degree_dict_path: str, ego_graph_path: str):
        '''
        Build the SubGNN model.

        Args:
            hparams: dictionary of hyperparameters (channel toggles, layer counts,
                hidden dims, dropout rates, anchor-patch counts, etc.)
            graph_path: edge-list file of the underlying base graph
            subgraph_path: file with the node ids & labels of each subgraph
            embedding_path: pretrained node embedding tensor
            similarities_path: directory where precomputed similarities are cached
            shortest_paths_path: precomputed shortest-path matrix location
            degree_dict_path: node-degree dictionary (used by the structure channel)
            ego_graph_path: JSON of per-node ego graphs (used by the neighborhood channel)
        '''
        super(SubGNN, self).__init__()
        # NOTE(review): LightningModule normally manages `device`; assigning it
        # directly relies on the pytorch-lightning version in use — confirm
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        # dictionary of hyperparameters
        self.hparams = hparams
        # paths where data is stored
        self.graph_path = graph_path
        self.subgraph_path = subgraph_path
        self.embedding_path = embedding_path
        self.similarities_path = Path(similarities_path)
        self.shortest_paths_path = shortest_paths_path
        self.degree_dict_path = degree_dict_path
        self.ego_graph_path = ego_graph_path
        # read in graph, subgraphs, labels, and pretrained node embeddings
        # (also sets self.multilabel, self.num_classes, self.node_embeddings)
        self.read_data()
        # initialize MPN layers for each channel (neighborhood, structure, position; internal, border)
        # and each layer (up to 'n_layers'); hid_dim accumulates the width of the
        # concatenated per-channel outputs fed to the feed-forward head
        hid_dim = self.hparams['node_embed_size']
        self.neighborhood_mpns = nn.ModuleList()
        if self.hparams['use_neighborhood']:
            # each layer contributes an internal + a border CC embedding
            hid_dim += self.hparams['n_layers'] * 2 * self.hparams['node_embed_size'] #automatically infer hidden dimension
            for l in range(self.hparams['n_layers']):
                curr_layer = nn.ModuleDict()
                curr_layer['internal'] = SG_MPN(self.hparams)
                curr_layer['border'] = SG_MPN(self.hparams)
                # optionally add batch_norm (separate modules for internal/border outputs)
                if 'batch_norm' in self.hparams and self.hparams['batch_norm']:
                    curr_layer['batch_norm'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
                    curr_layer['batch_norm_out'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
                self.neighborhood_mpns.append(curr_layer)
        self.position_mpns = nn.ModuleList()
        if self.hparams['use_position']:
            # position channel contributes one scalar per anchor patch (internal + external) per layer
            hid_dim = hid_dim + (self.hparams['n_anchor_patches_pos_in'] + self.hparams['n_anchor_patches_pos_out']) * self.hparams['n_layers']
            for l in range(self.hparams['n_layers']):
                curr_layer = nn.ModuleDict()
                curr_layer['internal'] = SG_MPN(self.hparams)
                curr_layer['border'] = SG_MPN(self.hparams)
                # optionally add batch_norm
                if 'batch_norm' in self.hparams and self.hparams['batch_norm']:
                    curr_layer['batch_norm'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
                    curr_layer['batch_norm_out'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
                self.position_mpns.append(curr_layer)
        self.structure_mpns = nn.ModuleList()
        if self.hparams['use_structure']:
            # structure channel contributes internal + border scalars per structure anchor patch per layer
            hid_dim += 2 * self.hparams['n_anchor_patches_structure'] * self.hparams['n_layers']
            for l in range(self.hparams['n_layers']):
                curr_layer = nn.ModuleDict()
                curr_layer['internal'] = SG_MPN(self.hparams)
                curr_layer['border'] = SG_MPN(self.hparams)
                # optionally add batch_norm
                if 'batch_norm' in self.hparams and self.hparams['batch_norm']:
                    curr_layer['batch_norm'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
                    curr_layer['batch_norm_out'] = nn.BatchNorm1d(self.hparams['node_embed_size']).to(self.device)
                self.structure_mpns.append(curr_layer)
        # initialize 3 FF layers on top of MPN layers (classification head)
        self.lin = nn.Linear(hid_dim, self.hparams['linear_hidden_dim_1'])
        self.lin2 = nn.Linear(self.hparams['linear_hidden_dim_1'], self.hparams['linear_hidden_dim_2'])
        self.lin3 = nn.Linear(self.hparams['linear_hidden_dim_2'], self.num_classes)
        # optional dropout on the linear layers
        self.lin_dropout = nn.Dropout(p=self.hparams['lin_dropout'])
        self.lin_dropout2 = nn.Dropout(p=self.hparams['lin_dropout'])
        # initialize loss: BCE-with-logits for multilabel datasets, CE otherwise
        if self.multilabel:
            self.loss = nn.BCEWithLogitsLoss()
        else:
            self.loss = nn.CrossEntropyLoss()
        # initialize LSTM - this is used in the structure channel for embedding anchor patches
        self.lstm = LSTM(self.hparams['node_embed_size'], self.hparams['node_embed_size'], \
            dropout=self.hparams['lstm_dropout'], num_layers=self.hparams['lstm_n_layers'], \
            aggregator=self.hparams['lstm_aggregator'])
        # optionally, use feedforward attention instead of plain sum over CCs
        if 'ff_attn' in self.hparams and self.hparams['ff_attn']:
            self.attn_vector = torch.nn.Parameter(torch.zeros((hid_dim,1), dtype=torch.float).to(self.device), requires_grad=True)
            nn.init.xavier_uniform_(self.attn_vector)
            self.attention = attention.AdditiveAttention(hid_dim, hid_dim)
        # default similarity function for the structure channel is dynamic time warping
        if 'structure_similarity_fn' not in self.hparams:
            self.hparams['structure_similarity_fn'] = 'dtw'
        # track metrics across epochs (used for optuna)
        self.metric_scores = []
##################################################
# forward pass
def run_mpn_layer(self, dataset_type, mpn_fn, subgraph_ids, subgraph_idx, cc_ids, \
cc_embeds, cc_embed_mask, sims, layer_num, channel, inside=True):
'''
Perform a single message-passing layer for the specified 'channel' and internal/border
Returns:
- cc_embed_matrix: updated connected component embedding matrix
- position_struc_out: property aware embedding matrix (for position & structure channels)
'''
# batch_sz, max_n_cc, max_size_cc = cc_ids.shape
# self.graph.x (n_nodes, hidden dim)
# Get Anchor Patches
anchor_patches, anchor_mask, anchor_embeds = get_anchor_patches(dataset_type, self.hparams, \
self.networkx_graph, self.node_embeddings, subgraph_idx, cc_ids, cc_embed_mask, self.lstm,
self.anchors_neigh_int, self.anchors_neigh_border, self.anchors_pos_int, \
self.anchors_pos_ext, self.anchors_structure, layer_num, channel, inside, self.device)
# for the structure channel, we need to also pass in indices into larger matrix of pre-sampled structure AP
if channel == 'structure': anchors_sim_index = self.anchors_structure[layer_num][1]
else: anchors_sim_index = None
# one layer of message passing
cc_embed_matrix, position_struc_out = mpn_fn(self.networkx_graph, sims, cc_ids,
cc_embeds, cc_embed_mask, anchor_patches, anchor_embeds,
anchor_mask, anchors_sim_index)
return cc_embed_matrix, position_struc_out
    def forward(self, dataset_type, N_I_cc_embed, N_B_cc_embed, \
        S_I_cc_embed, S_B_cc_embed, P_I_cc_embed, P_B_cc_embed, \
        subgraph_ids, cc_ids, subgraph_idx, NP_sim, \
        I_S_sim, B_S_sim):
        '''
        Full SubGNN forward pass: embed connected components (CCs), run
        `n_layers` rounds of channel-specific message passing, aggregate the
        CC embeddings into a subgraph embedding, and classify it.

        Args:
            dataset_type: 'train', 'val', or 'test' (selects anchor patches downstream)
            N/S/P *_cc_embed: per-channel internal/border CC embedding matrices
            subgraph_ids: (batch_sz, max_len_sugraph)
            cc_ids: (batch_sz, max_n_cc, max_len_cc)
            subgraph_idx: indices of this batch's subgraphs into the split
            NP_sim: similarities used by the neighborhood & position channels
            I_S_sim, B_S_sim: internal/border structure-channel similarities
        Returns:
            logits of shape (batch_sz, num_classes)
        '''
        # create cc_embeds matrix for each channel: (batch_sz, max_n_cc, hidden_dim)
        init_cc_embeds = self.initialize_cc_embeddings(cc_ids, self.hparams['cc_aggregator'])
        if not self.hparams['trainable_cc']: # if the cc embeddings, aren't trainable, we clone them
            N_in_cc_embeds = init_cc_embeds.clone()
            N_out_cc_embeds = init_cc_embeds.clone()
            P_in_cc_embeds = init_cc_embeds.clone()
            P_out_cc_embeds = init_cc_embeds.clone()
            S_in_cc_embeds = init_cc_embeds.clone()
            S_out_cc_embeds = init_cc_embeds.clone()
        else: # otherwise, we index into the intialized cc embeddings for each channel using the subgraph ids for the given batch
            N_in_cc_embeds = torch.index_select(N_I_cc_embed, 0, subgraph_idx.squeeze(-1))
            N_out_cc_embeds = torch.index_select(N_B_cc_embed, 0, subgraph_idx.squeeze(-1))
            P_in_cc_embeds = torch.index_select(P_I_cc_embed, 0, subgraph_idx.squeeze(-1))
            P_out_cc_embeds = torch.index_select(P_B_cc_embed, 0, subgraph_idx.squeeze(-1))
            S_in_cc_embeds = torch.index_select(S_I_cc_embed, 0, subgraph_idx.squeeze(-1))
            S_out_cc_embeds = torch.index_select(S_B_cc_embed, 0, subgraph_idx.squeeze(-1))
        batch_sz, max_n_cc, _ = init_cc_embeds.shape
        # get mask for cc_embeddings
        cc_embed_mask = (cc_ids != config.PAD_VALUE)[:,:,0] # only take first element bc only need mask over n_cc, not n_nodes in cc
        # for each layer in SubGNN, collect the per-channel outputs to concatenate at the end
        outputs = []
        for l in range(self.hparams['n_layers']):
            # neighborhood channel: output is the updated CC embeddings themselves
            if self.hparams['use_neighborhood']:
                # message passing layer for N internal and border
                N_in_cc_embeds, _ = self.run_mpn_layer(dataset_type, self.neighborhood_mpns[l]['internal'], subgraph_ids, subgraph_idx, cc_ids, N_in_cc_embeds, cc_embed_mask, NP_sim, layer_num=l, channel='neighborhood', inside=True)
                N_out_cc_embeds, _ = self.run_mpn_layer(dataset_type, self.neighborhood_mpns[l]['border'], subgraph_ids, subgraph_idx, cc_ids, N_out_cc_embeds, cc_embed_mask, NP_sim, layer_num=l, channel='neighborhood', inside=False)
                if 'batch_norm' in self.hparams and self.hparams['batch_norm']: #optional batch norm
                    N_in_cc_embeds = self.neighborhood_mpns[l]['batch_norm'](N_in_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
                    N_out_cc_embeds = self.neighborhood_mpns[l]['batch_norm_out'](N_out_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
                outputs.extend([N_in_cc_embeds, N_out_cc_embeds])
            # position channel: output is the property-aware embedding, not the CC embeddings
            if self.hparams['use_position']:
                # message passing layer for P internal and border
                P_in_cc_embeds, P_in_position_embed = self.run_mpn_layer(dataset_type, self.position_mpns[l]['internal'], subgraph_ids, subgraph_idx, cc_ids, P_in_cc_embeds, cc_embed_mask, NP_sim, layer_num=l, channel='position', inside=True)
                P_out_cc_embeds, P_out_position_embed = self.run_mpn_layer(dataset_type, self.position_mpns[l]['border'], subgraph_ids, subgraph_idx, cc_ids, P_out_cc_embeds, cc_embed_mask, NP_sim, layer_num=l, channel='position', inside=False)
                if 'batch_norm' in self.hparams and self.hparams['batch_norm']: #optional batch norm
                    P_in_cc_embeds = self.position_mpns[l]['batch_norm'](P_in_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
                    P_out_cc_embeds = self.position_mpns[l]['batch_norm_out'](P_out_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
                outputs.extend([P_in_position_embed, P_out_position_embed])
            # structure channel: also outputs the property-aware embedding;
            # note internal/border use different similarity matrices (I_S_sim vs B_S_sim)
            if self.hparams['use_structure']:
                # message passing layer for S internal and border
                S_in_cc_embeds, S_in_struc_embed = self.run_mpn_layer(dataset_type, self.structure_mpns[l]['internal'], subgraph_ids, subgraph_idx, cc_ids, S_in_cc_embeds, cc_embed_mask, I_S_sim, layer_num=l, channel='structure', inside=True)
                S_out_cc_embeds, S_out_struc_embed = self.run_mpn_layer(dataset_type, self.structure_mpns[l]['border'], subgraph_ids, subgraph_idx, cc_ids, S_out_cc_embeds, cc_embed_mask, B_S_sim, layer_num=l, channel='structure', inside=False)
                if 'batch_norm' in self.hparams and self.hparams['batch_norm']: #optional batch norm
                    S_in_cc_embeds = self.structure_mpns[l]['batch_norm'](S_in_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
                    S_out_cc_embeds = self.structure_mpns[l]['batch_norm_out'](S_out_cc_embeds.view(batch_sz*max_n_cc,-1)).view(batch_sz,max_n_cc, -1 )
                outputs.extend([S_in_struc_embed, S_out_struc_embed])
        # concatenate initial embeddings + all layers' channel outputs along the feature dim
        all_cc_embeds = torch.cat([init_cc_embeds] + outputs, dim=-1)
        # aggregate across CCs: attention-weighted sum or masked sum
        if 'ff_attn' in self.hparams and self.hparams['ff_attn']:
            batched_attn = self.attn_vector.squeeze().unsqueeze(0).repeat(all_cc_embeds.shape[0],1)
            attn_weights = self.attention(batched_attn, all_cc_embeds, cc_embed_mask)
            subgraph_embedding = subgraph_utils.weighted_sum(all_cc_embeds, attn_weights)
        else:
            subgraph_embedding = subgraph_utils.masked_sum(all_cc_embeds, cc_embed_mask.unsqueeze(-1), dim=1, keepdim=False)
        # Fully Con Layers + Optional Dropout
        subgraph_embedding_out = F.relu(self.lin(subgraph_embedding))
        subgraph_embedding_out = self.lin_dropout(subgraph_embedding_out)
        subgraph_embedding_out = F.relu(self.lin2(subgraph_embedding_out))
        subgraph_embedding_out = self.lin_dropout2(subgraph_embedding_out)
        subgraph_embedding_out = self.lin3(subgraph_embedding_out)
        return subgraph_embedding_out
##################################################
# training, val, test steps
def training_step(self, train_batch, batch_idx):
'''
Runs a single training step over the batch
'''
# get subgraphs and labels
subgraph_ids = train_batch['subgraph_ids']
cc_ids = train_batch['cc_ids']
subgraph_idx = train_batch['subgraph_idx']
labels = train_batch['label'].squeeze(-1)
# get similarities for batch
NP_sim = train_batch['NP_sim']
I_S_sim = train_batch['I_S_sim']
B_S_sim = train_batch['B_S_sim']
# forward pass
logits = self.forward('train', self.train_N_I_cc_embed, self.train_N_B_cc_embed, \
self.train_S_I_cc_embed, self.train_S_B_cc_embed, self.train_P_I_cc_embed, self.train_P_B_cc_embed, \
subgraph_ids, cc_ids, subgraph_idx, NP_sim, I_S_sim, B_S_sim)
# calculate loss
if len(labels.shape) == 0: labels = labels.unsqueeze(-1)
if self.multilabel:
loss = self.loss(logits.squeeze(1), labels.type_as(logits))
else:
loss = self.loss(logits, labels)
# calculate accuracy
acc = subgraph_utils.calc_accuracy(logits, labels, multilabel_binarizer=self.multilabel_binarizer)
logs = {'train_loss': loss, 'train_acc': acc} # used for tensorboard
return {'loss': loss, 'log': logs}
def val_test_step(self, batch, batch_idx, is_test = False):
'''
Runs a single validation or test step over the batch
'''
# get subgraphs and labels
subgraph_ids = batch['subgraph_ids']
cc_ids = batch['cc_ids']
subgraph_idx = batch['subgraph_idx']
labels = batch['label'].squeeze(-1)
# get similarities for batch
NP_sim = batch['NP_sim']
I_S_sim = batch['I_S_sim']
B_S_sim = batch['B_S_sim']
# forward pass
if not is_test:
logits = self.forward('val', self.val_N_I_cc_embed, self.val_N_B_cc_embed, \
self.val_S_I_cc_embed, self.val_S_B_cc_embed, self.val_P_I_cc_embed, self.val_P_B_cc_embed, \
subgraph_ids, cc_ids, subgraph_idx, NP_sim, I_S_sim, B_S_sim)
else:
logits = self.forward('test', self.test_N_I_cc_embed, self.test_N_B_cc_embed, \
self.test_S_I_cc_embed, self.test_S_B_cc_embed, self.test_P_I_cc_embed, self.test_P_B_cc_embed, \
subgraph_ids, cc_ids, subgraph_idx, NP_sim, I_S_sim, B_S_sim)
# calc loss
if len(labels.shape) == 0: labels = labels.unsqueeze(-1)
if self.multilabel:
loss = self.loss(logits.squeeze(1), labels.type_as(logits))
else:
loss = self.loss(logits, labels)
# calc accuracy and macro F1
acc = subgraph_utils.calc_accuracy(logits, labels, multilabel_binarizer=self.multilabel_binarizer)
macro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='macro', multilabel_binarizer=self.multilabel_binarizer)
if not is_test: # for tensorboard
return {'val_loss': loss, 'val_acc': acc, 'val_macro_f1': macro_f1, 'val_logits': logits, 'val_labels': labels}
else:
return {'test_loss': loss, 'test_acc': acc, 'test_macro_f1': macro_f1, 'test_logits': logits, 'test_labels': labels}
def validation_step(self, val_batch, batch_idx):
'''
wrapper for self.val_test_step
'''
return self.val_test_step(val_batch, batch_idx, is_test = False)
def test_step(self, test_batch, batch_idx):
'''
wrapper for self.val_test_step
'''
return self.val_test_step(test_batch, batch_idx, is_test = True)
##################################################
# validation & test epoch end
    def validation_epoch_end(self, outputs):
        '''
        Called at the end of the validation epoch.

        Input:
            - outputs: list with what validation_step returned for each batch,
                e.g. [{'val_loss': ..., 'val_logits': ..., ...}, ...]
        Returns:
            dict with 'avg_val_loss' and a 'log' dict of epoch-level metrics.
        Side effects:
            appends the metrics to self.metric_scores and (optionally)
            re-initializes CC embeddings / anchor patches for the next epoch.
        '''
        # aggregate the logits, labels, and metrics for all batches
        logits = torch.cat([x['val_logits'] for x in outputs], dim=0)
        labels = torch.cat([x['val_labels'] for x in outputs], dim=0)
        # epoch-level F1 / accuracy over the concatenated predictions
        macro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='macro', multilabel_binarizer=self.multilabel_binarizer).squeeze()
        micro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='micro', multilabel_binarizer=self.multilabel_binarizer).squeeze()
        acc = subgraph_utils.calc_accuracy(logits, labels, multilabel_binarizer=self.multilabel_binarizer).squeeze()
        # calc AUC: sigmoid per class for multilabel; softmax otherwise
        if self.multilabel:
            auroc = roc_auc_score(labels.cpu(), torch.sigmoid(logits).cpu(), multi_class = 'ovr')
        elif len(torch.unique(labels)) == 2: #binary case: score is P(class 1)
            auroc = roc_auc_score(labels.cpu(), F.softmax(logits, dim=1).cpu()[:,1])
        else: #multiclass: one-vs-rest AUROC
            auroc = roc_auc_score(labels.cpu(), F.softmax(logits, dim=1).cpu(), multi_class = 'ovr')
        # get average loss, acc, and macro F1 over batches
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean().cpu()
        avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
        avg_macro_f1 = torch.stack([x['val_macro_f1'] for x in outputs]).mean()
        tensorboard_logs = {'val_loss': avg_loss, 'val_micro_f1': micro_f1, 'val_macro_f1': macro_f1, \
            'val_acc': acc, 'avg_val_acc': avg_acc, 'avg_macro_f1':avg_macro_f1, 'val_auroc':auroc }
        # add per-class AUROC to tensorboard
        if self.multilabel:
            for c in range(logits.shape[1]): #n_classes
                tensorboard_logs['val_auroc_class_' + str(c)] = roc_auc_score(labels[:, c].cpu(), torch.sigmoid(logits)[:, c].cpu())
        else:
            one_hot_labels = one_hot(labels, num_classes = logits.shape[1])
            for c in range(logits.shape[1]): #n_classes
                tensorboard_logs['val_auroc_class_' + str(c)] = roc_auc_score(one_hot_labels[:, c].cpu(), logits[:, c].cpu())
        # Re-Initialize cc_embeds (non-trainable embeddings are recomputed each epoch)
        if not self.hparams['trainable_cc']:
            self.init_all_embeddings(split = 'train_val', trainable = self.hparams['trainable_cc'])
        # Optionally re initialize anchor patches each epoch (defaults to false)
        if self.hparams['resample_anchor_patches']:
            if self.hparams['use_neighborhood']:
                self.anchors_neigh_int, self.anchors_neigh_border = init_anchors_neighborhood('train_val', self.hparams, self.networkx_graph, self.device, self.train_cc_ids, self.val_cc_ids, self.test_cc_ids)
            if self.hparams['use_position']:
                self.anchors_pos_int = init_anchors_pos_int('train_val', self.hparams, self.networkx_graph, self.device, self.train_sub_G, self.val_sub_G, self.test_sub_G)
                self.anchors_pos_ext = init_anchors_pos_ext(self.hparams, self.networkx_graph, self.device)
            if self.hparams['use_structure']:
                self.anchors_structure = init_anchors_structure(self.hparams, self.structure_anchors, self.int_structure_anchor_random_walks, self.bor_structure_anchor_random_walks)
        self.metric_scores.append(tensorboard_logs) # keep track for optuna
        return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
    def test_epoch_end(self, outputs):
        '''
        Called at end of the test epoch.

        Mirrors validation_epoch_end's metric aggregation (without the
        embedding/anchor re-initialization) and stores the metrics dict
        on self.test_results.
        '''
        # aggregate the logits, labels, and metrics for all batches
        logits = torch.cat([x['test_logits'] for x in outputs], dim=0)
        labels = torch.cat([x['test_labels'] for x in outputs], dim=0)
        # epoch-level F1 / accuracy over the concatenated predictions
        macro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='macro', multilabel_binarizer=self.multilabel_binarizer).squeeze()
        micro_f1 = subgraph_utils.calc_f1(logits, labels, avg_type='micro', multilabel_binarizer=self.multilabel_binarizer).squeeze()
        acc = subgraph_utils.calc_accuracy(logits, labels, multilabel_binarizer=self.multilabel_binarizer).squeeze()
        # calc AUC: sigmoid per class for multilabel; softmax otherwise
        if self.multilabel:
            auroc = roc_auc_score(labels.cpu(), torch.sigmoid(logits).cpu(), multi_class = 'ovr')
        elif len(torch.unique(labels)) == 2: #binary case: score is P(class 1)
            auroc = roc_auc_score(labels.cpu(), F.softmax(logits, dim=1).cpu()[:,1])
        else: #multiclass: one-vs-rest AUROC
            auroc = roc_auc_score(labels.cpu(), F.softmax(logits, dim=1).cpu(), multi_class = 'ovr')
        # get average loss, acc, and macro F1 over batches
        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean().cpu()
        avg_acc = torch.stack([x['test_acc'] for x in outputs]).mean()
        avg_macro_f1 = torch.stack([x['test_macro_f1'] for x in outputs]).mean()
        tensorboard_logs = {'test_loss': avg_loss, 'test_micro_f1': micro_f1, 'test_macro_f1': macro_f1, \
            'test_acc': acc, 'avg_test_acc': avg_acc, 'test_avg_macro_f1':avg_macro_f1, 'test_auroc':auroc }
        # add ROC for each class to tensorboard
        if self.multilabel:
            for c in range(logits.shape[1]): #n_classes
                tensorboard_logs['test_auroc_class_' + str(c)] = roc_auc_score(labels[:, c].cpu(), torch.sigmoid(logits)[:, c].cpu())
        else:
            one_hot_labels = one_hot(labels, num_classes = logits.shape[1])
            for c in range(logits.shape[1]): #n_classes
                tensorboard_logs['test_auroc_class_' + str(c)] = roc_auc_score(one_hot_labels[:, c].cpu(), logits[:, c].cpu())
        # stash results so callers can read them after trainer.test()
        self.test_results = tensorboard_logs
        return {'avg_test_loss': avg_loss, 'log': tensorboard_logs}
##################################################
# Read in data
def reindex_data(self, data):
'''
Relabel node indices in the train/val/test sets to be 1-indexed instead of 0 indexed
so that we can use 0 for padding
'''
new_subg = []
for subg in data:
new_subg.append([c + 1 for c in subg])
return new_subg
    def read_data(self):
        '''
        Read in the base graph, the subgraphs & their labels, and the
        pretrained node embeddings.

        Side effects (attributes set): networkx_graph, train/val/test_sub_G
        (+ _label), multilabel, multilabel_binarizer, num_classes,
        node_embeddings, and hparams['node_embed_size'].
        '''
        # read networkx graph from edge list
        self.networkx_graph = nx.read_edgelist(config.PROJECT_ROOT / self.graph_path)
        # readin list of node ids for each subgraph & their labels
        self.train_sub_G, self.train_sub_G_label, self.val_sub_G, \
            self.val_sub_G_label, self.test_sub_G, self.test_sub_G_label \
            = subgraph_utils.read_subgraphs(config.PROJECT_ROOT / self.subgraph_path)
        # check if the dataset is multilabel (e.g. HPO-NEURO): labels come back
        # as a list of label-lists rather than a tensor
        if type(self.train_sub_G_label) == list:
            self.multilabel=True
            all_labels = self.train_sub_G_label + self.val_sub_G_label + self.test_sub_G_label
            self.multilabel_binarizer = MultiLabelBinarizer().fit(all_labels)
        else:
            self.multilabel = False
            self.multilabel_binarizer = None
        # Optionally subset the data for debugging purposes to the batch size
        if 'subset_data' in self.hparams and self.hparams['subset_data']:
            print("****WARNING: SUBSETTING DATA*****")
            self.train_sub_G, self.train_sub_G_label, self.val_sub_G, \
                self.val_sub_G_label, self.test_sub_G, self.test_sub_G_label = self.train_sub_G[0:self.hparams['batch_size']], self.train_sub_G_label[0:self.hparams['batch_size']], self.val_sub_G[0:self.hparams['batch_size']], \
                self.val_sub_G_label[0:self.hparams['batch_size']], self.test_sub_G[0:self.hparams['batch_size']], self.test_sub_G_label[0:self.hparams['batch_size']]
        # get the number of classes for prediction (max label id + 1)
        if type(self.train_sub_G_label) == list: # if multi-label
            self.num_classes = max([max(l) for l in self.train_sub_G_label + self.val_sub_G_label + self.test_sub_G_label]) + 1
        else:
            self.num_classes = int(torch.max(torch.cat((self.train_sub_G_label, self.val_sub_G_label, self.test_sub_G_label)))) + 1
        # renumber nodes to start with index 1 instead of 0 (index 0 = padding)
        mapping = {n:int(n)+1 for n in self.networkx_graph.nodes()}
        self.networkx_graph = nx.relabel_nodes(self.networkx_graph, mapping)
        self.train_sub_G = self.reindex_data(self.train_sub_G)
        self.val_sub_G = self.reindex_data(self.val_sub_G)
        self.test_sub_G = self.reindex_data(self.test_sub_G)
        # Initialize pretrained node embeddings (loaded onto CPU)
        pretrained_node_embeds = torch.load(config.PROJECT_ROOT / self.embedding_path, torch.device('cpu')) # feature matrix should be initialized to the node embeddings
        self.hparams['node_embed_size'] = pretrained_node_embeds.shape[1]
        zeros = torch.zeros(1, pretrained_node_embeds.shape[1])
        embeds = torch.cat((zeros, pretrained_node_embeds), 0) #there's a zeros in the first index for padding
        # optionally freeze the node embeddings
        self.node_embeddings = nn.Embedding.from_pretrained(embeds, freeze=self.hparams['freeze_node_embeds'], padding_idx=config.PAD_VALUE).to(self.device)
        print('--- Finished reading in data ---')
##################################################
# Initialize connected components & associated embeddings for each channel in SubGNN
def initialize_cc_ids(self, subgraph_ids):
'''
Initialize the 3D matrix of (n_subgraphs X max number of cc X max length of cc)
Input:
- subgraph_ids: list of subgraphs where each subgraph is a list of node ids
Output:
- reshaped_cc_ids_pad: padded tensor of shape (n_subgraphs, max_n_cc, max_len_cc)
'''
n_subgraphs = len(subgraph_ids) # number of subgraphs
# Create connected component ID list from subgraphs
cc_id_list = []
for curr_subgraph_ids in subgraph_ids:
subgraph = nx.subgraph(self.networkx_graph, curr_subgraph_ids) #networkx version of subgraph
con_components = list(nx.connected_components(subgraph)) # get connected components in subgraph
cc_id_list.append([torch.LongTensor(list(cc_ids)) for cc_ids in con_components])
# pad number of connected components
max_n_cc = max([len(cc) for cc in cc_id_list]) #max number of cc across all subgraphs
for cc_list in cc_id_list:
while True:
if len(cc_list) == max_n_cc: break
cc_list.append(torch.LongTensor([config.PAD_VALUE]))
# pad number of nodes in connected components
all_pad_cc_ids = [cc for cc_list in cc_id_list for cc in cc_list]
assert len(all_pad_cc_ids) % max_n_cc == 0
con_component_ids_pad = pad_sequence(all_pad_cc_ids, batch_first=True, padding_value=config.PAD_VALUE) # (batch_sz * max_n_cc, max_cc_len)
reshaped_cc_ids_pad = con_component_ids_pad.view(n_subgraphs, max_n_cc, -1) # (batch_sz, max_n_cc, max_cc_len)
return reshaped_cc_ids_pad # (n_subgraphs, max_n_cc, max_len_cc)
def initialize_cc_embeddings(self, cc_id_list, aggregator='sum'):
'''
Initialize connected component embeddings as either the sum or max of node embeddings in the connected component
Input:
- cc_id_list: 3D tensor of shape (n subgraphs, max n CC, max length CC)
Output:
- 3D tensor of shape (n_subgraphs, max n_cc, node embedding dim)
'''
if aggregator == 'sum':
return torch.sum(self.node_embeddings(cc_id_list.to(self.device)), dim=2)
elif aggregator == 'max':
return torch.max(self.node_embeddings(cc_id_list.to(self.device)), dim=2)[0]
def initialize_channel_embeddings(self, cc_embeddings, trainable = False):
'''
Initialize CC embeddings for each channel (N, S, P X internal, border)
'''
if trainable: # if the embeddings are trainable, make them a parameter
N_I_cc_embeds = Parameter(cc_embeddings.detach().clone())
N_B_cc_embeds = Parameter(cc_embeddings.detach().clone())
S_I_cc_embeds = Parameter(cc_embeddings.detach().clone())
S_B_cc_embeds = Parameter(cc_embeddings.detach().clone())
P_I_cc_embeds = Parameter(cc_embeddings.detach().clone())
P_B_cc_embeds = Parameter(cc_embeddings.detach().clone())
else:
N_I_cc_embeds = cc_embeddings
N_B_cc_embeds = cc_embeddings
S_I_cc_embeds = cc_embeddings
S_B_cc_embeds = cc_embeddings
P_I_cc_embeds = cc_embeddings
P_B_cc_embeds = cc_embeddings
return (N_I_cc_embeds, N_B_cc_embeds, S_I_cc_embeds, S_B_cc_embeds, P_I_cc_embeds, P_B_cc_embeds)
def init_all_embeddings(self, split = 'all', trainable = False):
'''
Initialize the CC and channel-specific CC embeddings for the subgraphs in the specified split
('all', 'train_val', 'train', 'val', or 'test')
'''
if split in ['all','train_val','train']:
# initialize CC embeddings
train_cc_embeddings = self.initialize_cc_embeddings(self.train_cc_ids, self.hparams['cc_aggregator'])
# initialize CC embeddings for each channel
self.train_N_I_cc_embed, self.train_N_B_cc_embed, self.train_S_I_cc_embed, \
self.train_S_B_cc_embed, self.train_P_I_cc_embed, self.train_P_B_cc_embed \
= self.initialize_channel_embeddings(train_cc_embeddings, trainable)
if split in ['all','train_val','val']:
val_cc_embeddings = self.initialize_cc_embeddings( self.val_cc_ids, self.hparams['cc_aggregator'])
self.val_N_I_cc_embed, self.val_N_B_cc_embed, self.val_S_I_cc_embed, \
self.val_S_B_cc_embed, self.val_P_I_cc_embed, self.val_P_B_cc_embed \
= self.initialize_channel_embeddings(val_cc_embeddings, trainable=False)
if split in ['all','test']:
test_cc_embeddings = self.initialize_cc_embeddings( self.test_cc_ids, self.hparams['cc_aggregator'])
self.test_N_I_cc_embed, self.test_N_B_cc_embed, self.test_S_I_cc_embed, \
self.test_S_B_cc_embed, self.test_P_I_cc_embed, self.test_P_B_cc_embed \
= self.initialize_channel_embeddings(test_cc_embeddings, trainable=False)
##################################################
# Initialize node border sets surrounding each CC for each subgraph
    def initialize_border_sets(self, fname, cc_ids, radius, ego_graph_dict=None):
        '''
        Creates and saves to file a matrix containing the node ids in the k-hop border set
        of each CC for each subgraph.

        Args:
            fname: path the resulting matrix is saved to (via np.save)
            cc_ids: (n_subgraphs, max_n_cc, max_len_cc) padded CC node-id tensor
            radius: number of hops k defining the border set
            ego_graph_dict: optional precomputed per-node ego graphs used by the helper
        Returns:
            LongTensor of shape (n_subgraphs, max_n_cc, max_border_set_sz),
            padded with config.PAD_VALUE.
        '''
        n_subgraphs, max_n_cc, _ = cc_ids.shape
        all_border_sets = []
        # for each component in each subgraph, calculate the k-hop node border of the connected component
        for s, subgraph in enumerate(cc_ids):
            border_sets = []
            for c, component in enumerate(subgraph):
                # radius specifies the size of the border set - i.e. the k number of hops away the node can be from any node in the component to be in the border set
                component_border = subgraph_utils.get_component_border_neighborhood_set(self.networkx_graph, component, radius, ego_graph_dict)
                border_sets.append(component_border)
            all_border_sets.append(border_sets)
        # fill in matrix with padding
        max_border_set_len = max([len(s) for l in all_border_sets for s in l])
        border_set_matrix = torch.zeros((n_subgraphs, max_n_cc, max_border_set_len), dtype=torch.long).fill_(config.PAD_VALUE)
        for s, subgraph in enumerate(all_border_sets):
            for c,component in enumerate(subgraph):
                fill_len = max_border_set_len - len(component)
                # torch.LongTensor(fill_len) allocates a length-`fill_len` tensor
                # (int arg = size, not value) which is then filled with PAD_VALUE
                border_set_matrix[s,c,:] = torch.cat([torch.LongTensor(list(component)),torch.LongTensor((fill_len)).fill_(config.PAD_VALUE)])
        # save border set to file so later runs can load instead of recompute
        np.save(fname, border_set_matrix.cpu().numpy())
        return border_set_matrix # n_subgraphs, max_n_cc, max_border_set_sz
    def get_border_sets(self, split):
        '''
        Populate self.{train,val,test}_N_border with the node ids in the k-hop border
        of each subgraph (k = hparams['neigh_sample_border_size']) for the given
        split ('test' or 'train_val'), loading from cache when possible.

        Border sets are only needed by the neighborhood channel or by the
        structure channel's 'edit_distance' similarity; otherwise they are None.
        '''
        # location where similarities are stored
        sim_path = config.PROJECT_ROOT / self.similarities_path
        # position-channel border sets are unused in this implementation
        self.train_P_border = None
        self.val_P_border = None
        self.test_P_border = None
        # We need the border sets if we're using the neighborhood channel or if we're using the edit distance similarity function in the structure channel
        if self.hparams['use_neighborhood'] or (self.hparams['use_structure'] and self.hparams['structure_similarity_fn'] == 'edit_distance'):
            # load ego graphs dictionary (JSON keys come back as strings -> convert to int)
            ego_graph_path = config.PROJECT_ROOT / self.ego_graph_path
            if ego_graph_path.exists():
                with open(str(ego_graph_path), 'r') as f:
                    ego_graph_dict = json.load(f)
                ego_graph_dict = {int(key): value for key, value in ego_graph_dict.items()}
            else: ego_graph_dict = None
            # cache file names encode the border radius and the pad value
            train_neigh_path = sim_path / (str(self.hparams["neigh_sample_border_size"]) + '_' + str(config.PAD_VALUE) + '_train_border_set.npy')
            val_neigh_path = sim_path / (str(self.hparams["neigh_sample_border_size"]) + '_' + str(config.PAD_VALUE) + '_val_border_set.npy')
            test_neigh_path = sim_path / (str(self.hparams["neigh_sample_border_size"]) + '_' + str(config.PAD_VALUE) + '_test_border_set.npy')
            # either load the border sets from cache or recompute (and re-save) them
            if split == 'test':
                if test_neigh_path.exists() and not self.hparams['compute_similarities']:
                    self.test_N_border = torch.tensor(np.load(test_neigh_path, allow_pickle=True))
                else:
                    self.test_N_border = self.initialize_border_sets(test_neigh_path, self.test_cc_ids, self.hparams["neigh_sample_border_size"], ego_graph_dict)
            elif split == 'train_val':
                if train_neigh_path.exists() and not self.hparams['compute_similarities']:
                    self.train_N_border = torch.tensor(np.load(train_neigh_path, allow_pickle=True))
                else:
                    self.train_N_border = self.initialize_border_sets(train_neigh_path, self.train_cc_ids, self.hparams["neigh_sample_border_size"], ego_graph_dict)
                if val_neigh_path.exists() and not self.hparams['compute_similarities']:
                    self.val_N_border = torch.tensor(np.load(val_neigh_path, allow_pickle=True))
                else:
                    self.val_N_border = self.initialize_border_sets(val_neigh_path, self.val_cc_ids, self.hparams["neigh_sample_border_size"], ego_graph_dict)
        else: # otherwise, we can just set these to None
            self.train_N_border = None
            self.val_N_border = None
            self.test_N_border = None
##################################################
# Compute similarities between the anchor patches & the subgraphs
    def compute_shortest_path_similarities(self, fname, shortest_paths, cc_ids):
        '''
        Creates a similarity matrix with shape (n_subgraphs, max num cc, number of nodes in graph)
        that stores the shortest path between each cc (for each subgraph) and all nodes in the graph
        (the minimum over the component's nodes), saving the result to `fname`.

        Args:
            fname: Path where the matrix is saved via np.save
            shortest_paths: precomputed pairwise shortest-path matrix, indexed by
                0-based node id (hence the -1 offset below)
            cc_ids: (n_subgraphs, max_n_cc, max_len_cc) padded, 1-indexed CC node ids
        '''
        print('---- Precomputing Shortest Path Similarities ----')
        n_subgraphs, max_n_cc, _ = cc_ids.shape
        n_nodes_in_graph = len(self.networkx_graph.nodes()) #get number of nodes in the underlying base graph
        # True for real (non-padding) components
        cc_id_mask = (cc_ids[:,:,0] != config.PAD_VALUE)
        similarities = torch.zeros((n_subgraphs, max_n_cc, n_nodes_in_graph)) \
            .fill_(config.PAD_VALUE)
        #NOTE: could use multiprocessing to speed up this calculation
        for s, subgraph in enumerate(cc_ids):
            for c, component in enumerate(subgraph):
                non_padded_component = component[component != config.PAD_VALUE].cpu().numpy() #remove padding
                if len(non_padded_component) > 0:
                    # NOTE: indexing is off by 1 bc node ids are indexed starting at 1
                    similarities[s,c,:] = torch.tensor(np.min(shortest_paths[non_padded_component - 1,:], axis=0))
        # add padding (because each subgraph has variable # CC) & save to file
        if not fname.parent.exists(): fname.parent.mkdir(parents=True)
        print('---- Saving Shortest Path Similarities ----')
        similarities[~cc_id_mask] = config.PAD_VALUE
        np.save(fname, similarities.cpu().numpy())
        return similarities
def compute_structure_patch_similarities(self, degree_dict, fname, internal, cc_ids, sim_path, dataset_type, border_set=None):
    '''
    Compute the similarity between the sampled structure anchor patches and each
    connected component. The default similarity function is DTW over the degree
    sequences of the patch and the component.
    Returns a tensor of shape (n_subgraphs, max_n_cc, n anchor patches), which is
    also saved to `fname`.
    NOTE(review): `sim_path`, `dataset_type`, and `border_set` are accepted for
    interface compatibility but unused here.
    '''
    print('---Computing Structure Patch Similarities---')
    n_anchors = self.structure_anchors.shape[0]
    n_subgraphs, max_n_cc, _ = cc_ids.shape
    valid_cc = (cc_ids[:, :, 0] != config.PAD_VALUE)
    if self.hparams['structure_similarity_fn'] != 'dtw':
        # hook for alternative structure similarity functions
        raise NotImplementedError
    # degree sequence of every anchor patch
    anchor_seqs = [
        gamma.get_degree_sequence(self.networkx_graph, patch, degree_dict, internal=internal)
        for patch in self.structure_anchors
    ]
    # degree sequence of every connected component (flattened across subgraphs)
    flat_cc = cc_ids.view(n_subgraphs * max_n_cc, -1)
    comp_seqs = [
        gamma.get_degree_sequence(self.networkx_graph, comp, degree_dict, internal=internal)
        for comp in flat_cc
    ]
    # all (component, anchor) pairs, component-major, fed to a worker pool
    job_args = [(c_seq, a_seq) for c_seq in comp_seqs for a_seq in anchor_seqs]
    # DTW between each component and each anchor patch, computed in parallel;
    # starmap preserves input order, so the reshape below is well-defined
    with multiprocessing.Pool(processes=self.hparams['n_processes']) as pool:
        flat_sims = pool.starmap(gamma.calc_dtw, job_args)
    sims = torch.tensor(flat_sims, dtype=torch.float).view(n_subgraphs, max_n_cc, -1)
    # add padding & save to file
    print('---- Saving Similarities ----')
    if not fname.parent.exists():
        fname.parent.mkdir(parents=True)
    sims[~valid_cc] = config.PAD_VALUE
    np.save(fname, sims.cpu().numpy())
    return sims
def get_similarities(self, split):
    '''
    For the N/P channels: precomputes the shortest paths between all connected components (for all subgraphs) and all nodes in the graph
    For the S channel: precomputes structure anchor patches & random walks as well as structure similarity calculations between the anchor patches and all connected components

    split: either 'test' or 'train_val'; selects which splits' similarity tensors are populated.
    Results are stored as attributes on self (e.g. self.train_neigh_pos_similarities), not returned.
    Cached .npy files are reused unless the 'compute_similarities' hparam forces recomputation.
    '''
    # path where similarities are stored
    sim_path = config.PROJECT_ROOT / self.similarities_path
    # If we're using the position or neighborhood channels, we need to calculate the relevant shortest path similarities
    if self.hparams['use_position'] or self.hparams['use_neighborhood']:
        # read in precomputed shortest paths between all nodes in the graph
        pairwise_shortest_paths_path = config.PROJECT_ROOT / self.shortest_paths_path
        pairwise_shortest_paths = np.load(pairwise_shortest_paths_path, allow_pickle=True)
        # Read in precomputed similarities if they exist. If they don't, calculate them
        train_np_path = sim_path / (str(config.PAD_VALUE) + '_train_similarities.npy')
        val_np_path = sim_path / (str(config.PAD_VALUE) + '_val_similarities.npy')
        test_np_path = sim_path / (str(config.PAD_VALUE) + '_test_similarities.npy')
        if split == 'test':
            if test_np_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Position Similarities from File ---')
                self.test_neigh_pos_similarities = torch.tensor(np.load(test_np_path, allow_pickle=True))#.to(self.device)
            else:
                self.test_neigh_pos_similarities = self.compute_shortest_path_similarities(test_np_path, pairwise_shortest_paths, self.test_cc_ids)
        elif split == 'train_val':
            if train_np_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Train Position Similarities from File ---')
                self.train_neigh_pos_similarities = torch.tensor(np.load(train_np_path, allow_pickle=True))#.to(self.device)
            else:
                self.train_neigh_pos_similarities = self.compute_shortest_path_similarities(train_np_path, pairwise_shortest_paths, self.train_cc_ids)
            if val_np_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Val Position Similarities from File ---')
                self.val_neigh_pos_similarities = torch.tensor(np.load(val_np_path, allow_pickle=True))#.to(self.device)
            else:
                self.val_neigh_pos_similarities = self.compute_shortest_path_similarities(val_np_path, pairwise_shortest_paths, self.val_cc_ids)
    else: # if we're only using the structure channel, we can just set these to None
        self.train_neigh_pos_similarities = None
        self.val_neigh_pos_similarities = None
        self.test_neigh_pos_similarities = None
    if self.hparams['use_structure']:
        # load in degree dictionary {node id: degree}
        degree_path = config.PROJECT_ROOT / self.degree_dict_path
        if degree_path.exists():
            with open(str(degree_path), 'r') as f:
                degree_dict = json.load(f)
            # JSON keys are strings; convert them back to int node ids
            degree_dict = {int(key): value for key, value in degree_dict.items()}
        else: degree_dict = None
        # (1) sample structure anchor patches
        # sample walk len: length of the random walk used to sample the anchor patches
        # structure_patch_type: either 'triangular_random_walk' (default) or 'ego_graph'
        # max_sim_epochs: controls how many anchor patches are pre-sampled (also part of the cache filename)
        struc_anchor_patches_path = sim_path / ('struc_patches_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '.npy')
        if struc_anchor_patches_path.exists() and not self.hparams['compute_similarities']:
            self.structure_anchors = torch.tensor(np.load(struc_anchor_patches_path, allow_pickle=True))
        else:
            self.structure_anchors = sample_structure_anchor_patches(self.hparams, self.networkx_graph, self.device, self.hparams['max_sim_epochs'])
            np.save(struc_anchor_patches_path, self.structure_anchors.cpu().numpy())
        # (2) perform internal and border random walks over sampled anchor patches
        #border
        bor_struc_patch_random_walks_path = sim_path / ('bor_struc_patch_random_walks_' + str(self.hparams['n_triangular_walks']) + '_' + str(self.hparams['random_walk_len']) + '_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '.npy')
        if bor_struc_patch_random_walks_path.exists() and not self.hparams['compute_similarities']:
            self.bor_structure_anchor_random_walks = torch.tensor(np.load(bor_struc_patch_random_walks_path, allow_pickle=True))#.to(self.device)
        else:
            self.bor_structure_anchor_random_walks = perform_random_walks(self.hparams, self.networkx_graph, self.structure_anchors, inside=False)
            np.save(bor_struc_patch_random_walks_path, self.bor_structure_anchor_random_walks.cpu().numpy())
        #internal
        int_struc_patch_random_walks_path = sim_path / ('int_struc_patch_random_walks_' + str(self.hparams['n_triangular_walks']) + '_' + str(self.hparams['random_walk_len']) + '_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '.npy')
        if int_struc_patch_random_walks_path.exists() and not self.hparams['compute_similarities']:
            self.int_structure_anchor_random_walks = torch.tensor(np.load(int_struc_patch_random_walks_path, allow_pickle=True))#.to(self.device)
        else:
            self.int_structure_anchor_random_walks = perform_random_walks(self.hparams, self.networkx_graph, self.structure_anchors, inside=True)
            np.save(int_struc_patch_random_walks_path, self.int_structure_anchor_random_walks.cpu().numpy())
        # (3) calculate similarities between anchor patches and connected components
        # filenames where outputs will be stored
        struc_sim_type_fname = '_' + self.hparams['structure_similarity_fn'] if self.hparams['structure_similarity_fn'] != 'dtw' else '' #we only add info about the structure similarity function to the filename if it's not the default dtw
        train_int_struc_path = sim_path / ('int_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_train_similarities.npy')
        val_int_struc_path = sim_path / ('int_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_val_similarities.npy')
        test_int_struc_path = sim_path / ('int_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_test_similarities.npy')
        train_bor_struc_path = sim_path / ('bor_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_train_similarities.npy')
        val_bor_struc_path = sim_path / ('bor_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_val_similarities.npy')
        test_bor_struc_path = sim_path / ('bor_struc_' + str(self.hparams['sample_walk_len']) + '_' + self.hparams['structure_patch_type'] + '_' + str(self.hparams['max_sim_epochs']) + '_' + str(config.PAD_VALUE) + struc_sim_type_fname + '_test_similarities.npy')
        # internal structure similarities (within the anchor patch / component)
        if split == 'test':
            if test_int_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Test Structure Similarities from File ---', flush=True)
                self.test_int_struc_similarities = torch.tensor(np.load(test_int_struc_path, allow_pickle=True))#.to(self.device)
            else:
                self.test_int_struc_similarities = self.compute_structure_patch_similarities(degree_dict, test_int_struc_path, True, self.test_cc_ids, sim_path, 'test', self.test_N_border)
        elif split == 'train_val':
            if train_int_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Train Structure Similarities from File ---', flush=True)
                self.train_int_struc_similarities = torch.tensor(np.load(train_int_struc_path, allow_pickle=True))#.to(self.device)
            else:
                self.train_int_struc_similarities = self.compute_structure_patch_similarities(degree_dict, train_int_struc_path, True, self.train_cc_ids, sim_path, 'train', self.train_N_border)
            if val_int_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Val Structure Similarities from File ---', flush=True)
                self.val_int_struc_similarities = torch.tensor(np.load(val_int_struc_path, allow_pickle=True))#.to(self.device)
            else:
                self.val_int_struc_similarities = self.compute_structure_patch_similarities(degree_dict, val_int_struc_path, True, self.val_cc_ids, sim_path, 'val', self.val_N_border)
        print('Done computing internal structure similarities', flush=True)
        # read in structure similarities
        # border structure similarities (at the boundary of the anchor patch / component)
        print('computing border structure sims')
        if split == 'test':
            if test_bor_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Test Structure Similarities from File ---')
                self.test_bor_struc_similarities = torch.tensor(np.load(test_bor_struc_path, allow_pickle=True))
            else:
                self.test_bor_struc_similarities = self.compute_structure_patch_similarities(degree_dict, test_bor_struc_path, False, self.test_cc_ids, sim_path, 'test', self.test_N_border)
        if split == 'train_val':
            if train_bor_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Train Structure Similarities from File ---')
                self.train_bor_struc_similarities = torch.tensor(np.load(train_bor_struc_path, allow_pickle=True))
            else:
                self.train_bor_struc_similarities = self.compute_structure_patch_similarities(degree_dict, train_bor_struc_path, False, self.train_cc_ids, sim_path,'train', self.train_N_border)
            if val_bor_struc_path.exists() and not self.hparams['compute_similarities']:
                print('--- Loading Val Structure Similarities from File ---')
                self.val_bor_struc_similarities = torch.tensor(np.load(val_bor_struc_path, allow_pickle=True))
            else:
                self.val_bor_struc_similarities = self.compute_structure_patch_similarities(degree_dict, val_bor_struc_path, False, self.val_cc_ids, sim_path, 'val', self.val_N_border)
        print('Done computing border structure similarities')
    else: # if we're not using the structure channel, we can just set these to None
        self.structure_anchors = None
        self.train_int_struc_similarities = None
        self.val_int_struc_similarities = None
        self.test_int_struc_similarities = None
        self.train_bor_struc_similarities = None
        self.val_bor_struc_similarities = None
        self.test_bor_struc_similarities = None
##################################################
# Prepare data
def prepare_test_data(self):
    '''
    Same as prepare_data, but for test dataset
    NOTE: the steps are order-dependent (cc ids -> embeddings -> border sets -> similarities -> anchor patches).
    '''
    print('--- Started Preparing Test Data ---')
    # connected component matrix for the test subgraphs
    self.test_cc_ids = self.initialize_cc_ids(self.test_sub_G)
    print('--- Initialize embeddings ---')
    self.init_all_embeddings(split = 'test', trainable = self.hparams['trainable_cc'])
    print('--- Getting Border Sets ---')
    self.get_border_sets(split='test')
    print('--- Getting Similarities ---')
    self.get_similarities(split='test')
    print('--- Initializing Anchor Patches ---')
    # note that we don't need to initialize border position & structure anchor patches because those are shared
    if self.hparams['use_neighborhood']:
        # train/val cc ids and border sets are passed as None for the test split
        self.anchors_neigh_int, self.anchors_neigh_border = init_anchors_neighborhood('test', \
            self.hparams, self.networkx_graph, self.device, None, None, \
            self.test_cc_ids, None, None, self.test_N_border)
    else: self.anchors_neigh_int, self.anchors_neigh_border = None, None
    if self.hparams['use_position']:
        self.anchors_pos_int = init_anchors_pos_int('test', self.hparams, self.networkx_graph, self.device, self.train_sub_G, self.val_sub_G, self.test_sub_G)
    else: self.anchors_pos_int = None
    print('--- Finished Preparing Test Data ---')
def prepare_data(self):
    '''
    Initialize connected components, precomputed similarity calculations, and anchor patches
    for the train and validation splits.
    NOTE: the steps are order-dependent (cc ids -> embeddings -> border sets -> similarities -> anchor patches).
    '''
    print('--- Started Preparing Data ---', flush=True)
    # Intialize connected component matrix (n_subgraphs, max_n_cc, max_len_cc)
    self.train_cc_ids = self.initialize_cc_ids(self.train_sub_G)
    self.val_cc_ids = self.initialize_cc_ids(self.val_sub_G)
    # initialize embeddings for each cc
    # 'trainable_cc' flag determines whether the cc embeddings are trainable
    print('--- Initializing CC Embeddings ---', flush=True)
    self.init_all_embeddings(split = 'train_val', trainable = self.hparams['trainable_cc'])
    # Initialize border sets for each cc
    print('--- Initializing CC Border Sets ---', flush=True)
    self.get_border_sets(split='train_val')
    # calculate similarities
    print('--- Getting Similarities ---', flush=True)
    self.get_similarities(split='train_val')
    # Initialize neighborhood, position, and structure anchor patches
    print('--- Initializing Anchor Patches ---', flush=True)
    if self.hparams['use_neighborhood']:
        self.anchors_neigh_int, self.anchors_neigh_border = init_anchors_neighborhood('train_val', \
            self.hparams, self.networkx_graph, self.device, self.train_cc_ids, self.val_cc_ids, \
            None, self.train_N_border, self.val_N_border, None) # we pass in None for the test_N_border
    else: self.anchors_neigh_int, self.anchors_neigh_border = None, None
    if self.hparams['use_position']:
        self.anchors_pos_int = init_anchors_pos_int('train_val', self.hparams, self.networkx_graph, self.device, self.train_sub_G, self.val_sub_G, self.test_sub_G)
        self.anchors_pos_ext = init_anchors_pos_ext(self.hparams, self.networkx_graph, self.device)
    else: self.anchors_pos_int, self.anchors_pos_ext = None, None
    if self.hparams['use_structure']:
        # pass in precomputed sampled structure anchor patches and random walks from which to further subsample
        self.anchors_structure = init_anchors_structure(self.hparams, self.structure_anchors, self.int_structure_anchor_random_walks, self.bor_structure_anchor_random_walks)
    else: self.anchors_structure = None
    print('--- Finished Preparing Data ---', flush=True)
##################################################
# Data loaders
def _pad_collate(self, batch):
    '''
    Collate a list of examples into batch tensors of shape (batch_sz, ..., ...),
    trimming padding from the border-set and connected-component tensors, which
    were originally padded to the max length across the whole dataset rather than
    across the batch.
    '''
    subgraph_ids, con_component_ids, N_border, NP_sim, I_S_sim, B_S_sim, idx, labels = zip(*batch)

    def stack_or_none(items):
        # a similarity tensor is either present for every example or absent for all
        return None if None in items else torch.stack(items)

    def trim(mat):
        # drop columns that are all zero: tensors were padded to the dataset-wide
        # max length, so a given batch usually needs fewer columns
        bsz, n_cc, _ = mat.shape
        flat = mat.view(bsz * n_cc, -1)
        keep = (torch.sum(torch.abs(flat), dim=0) != 0)
        return flat[:, keep].view(bsz, n_cc, -1)

    # subgraph_ids: (batch_sz, n_nodes_in_subgraph); pad to (batch_sz, max_subgraph_len)
    subgraph_ids_pad = pad_sequence(subgraph_ids, batch_first=True, padding_value=config.PAD_VALUE)
    # stack similarity matrices (neighborhood/position, internal & border structure)
    NP_sim = stack_or_none(NP_sim)
    I_S_sim = stack_or_none(I_S_sim)
    B_S_sim = stack_or_none(B_S_sim)
    # stack & trim the matrix of nodes in each component's border
    N_border_trimmed = None if None in N_border else trim(torch.stack(N_border))
    labels = torch.stack(labels).squeeze()  # (batch_sz, 1) -> (batch_sz,)
    idx = torch.stack(idx)
    # con_component_ids: (batch_sz, n_con_components, n_nodes_in_cc); trim to batch needs
    cc_ids_trimmed = trim(torch.stack(con_component_ids))
    return {'subgraph_ids': subgraph_ids_pad, 'cc_ids': cc_ids_trimmed, 'N_border': N_border_trimmed,
            'NP_sim': NP_sim, 'I_S_sim': I_S_sim, 'B_S_sim': B_S_sim,
            'subgraph_idx': idx, 'label': labels}
def train_dataloader(self):
    '''
    Build the DataLoader over the training subgraphs.
    '''
    train_set = SubgraphDataset(
        self.train_sub_G, self.train_sub_G_label, self.train_cc_ids,
        self.train_N_border, self.train_neigh_pos_similarities,
        self.train_int_struc_similarities, self.train_bor_struc_similarities,
        self.multilabel, self.multilabel_binarizer)
    # drop the last (partial) batch whenever the training set holds at least one
    # full batch; this condition will usually evaluate to true
    return DataLoader(
        train_set,
        batch_size=self.hparams['batch_size'],
        shuffle=True,
        collate_fn=self._pad_collate,
        drop_last=self.hparams['batch_size'] <= len(self.train_sub_G))
def val_dataloader(self):
    '''
    Build the DataLoader over the validation subgraphs (no shuffling).
    '''
    val_set = SubgraphDataset(
        self.val_sub_G, self.val_sub_G_label, self.val_cc_ids,
        self.val_N_border, self.val_neigh_pos_similarities,
        self.val_int_struc_similarities, self.val_bor_struc_similarities,
        self.multilabel, self.multilabel_binarizer)
    return DataLoader(val_set, batch_size=self.hparams['batch_size'],
                      shuffle=False, collate_fn=self._pad_collate)
def test_dataloader(self):
    '''
    Build the DataLoader over the test subgraphs (no shuffling).
    Test data (cc ids, similarities, anchor patches) is prepared lazily here.
    '''
    self.prepare_test_data()
    test_set = SubgraphDataset(
        self.test_sub_G, self.test_sub_G_label, self.test_cc_ids,
        self.test_N_border, self.test_neigh_pos_similarities,
        self.test_int_struc_similarities, self.test_bor_struc_similarities,
        self.multilabel, self.multilabel_binarizer)
    return DataLoader(test_set, batch_size=self.hparams['batch_size'],
                      shuffle=False, collate_fn=self._pad_collate)
##################################################
# Optimization
def configure_optimizers(self):
    '''
    Adam optimizer over all model parameters at the configured learning rate.
    '''
    return torch.optim.Adam(self.parameters(), lr=self.hparams['learning_rate'])
def backward(self, trainer, loss, optimizer, optimizer_idx):
    # Override of the Lightning backward hook; trainer/optimizer/optimizer_idx are
    # part of the hook signature and unused here.
    # retain_graph=True keeps the computation graph alive after backward --
    # presumably because parts of the graph are reused across steps; NOTE(review):
    # confirm this is required, as it increases memory usage.
    loss.backward(retain_graph=True)
| 64,806
| 54.676117
| 326
|
py
|
SubGNN
|
SubGNN-main/SubGNN/gamma.py
|
# General
import sys
import time
import numpy as np
# Pytorch & Networkx
import torch
import networkx as nx
# Dynamic time warping
from fastdtw import fastdtw
# Our methods
sys.path.insert(0, '..') # add config to path
import config
###########################################
# DTW of degree sequences
def get_degree_sequence(graph, nodes, degree_dict=None, internal=True):
    '''
    Return the sorted degree sequence for a list of nodes.
    internal=True : degrees measured inside the induced subgraph (internal structure channel).
    internal=False: external degrees, i.e. full-graph degree minus internal degree
                    (border structure channel).
    '''
    # strip padding & move node ids to numpy
    nodes = nodes[nodes != config.PAD_VALUE].cpu().numpy()
    induced = graph.subgraph(nodes)
    internal_degrees = [deg for _, deg in induced.degree(nodes)]
    if internal:
        return sorted(internal_degrees)
    # if we have the degree dict, use it instead of recomputing each node's degree
    if degree_dict is None:
        full_degrees = [deg for _, deg in graph.degree(nodes)]
    else:
        # node ids are 1-indexed, hence the shift into the 0-indexed dict
        full_degrees = [degree_dict[n - 1] for n in nodes]
    return sorted(full - inner for full, inner in zip(full_degrees, internal_degrees))
def calc_dist(a, b):
    '''Ratio-based distance between two degrees: (max+1)/(min+1) - 1; 0 when equal.'''
    hi, lo = (a, b) if a >= b else (b, a)
    return (hi + 1) / (lo + 1) - 1
def calc_dtw(component_degree, patch_degree):
    '''
    Dynamic time warping between the component degree sequence and the anchor
    patch degree sequence, mapped to a similarity score in (0, 1].
    '''
    distance, _ = fastdtw(component_degree, patch_degree, dist=calc_dist)
    return 1. / (distance + 1.)
| 1,810
| 28.209677
| 126
|
py
|
SubGNN
|
SubGNN-main/SubGNN/attention.py
|
import torch
from torch.nn.parameter import Parameter
# All of the below code is taken from AllenAI's AllenNLP library
def tiny_value_of_dtype(dtype: torch.dtype):
    """
    Return a moderately tiny value for the given PyTorch floating-point dtype,
    used to avoid numerical issues such as division by zero.
    (Different from `info_value_of_dtype(dtype).tiny`, which causes some NaN bugs.)
    Only supports floating point dtypes.
    """
    if not dtype.is_floating_point:
        raise TypeError("Only supports floating point dtypes.")
    if dtype in (torch.float, torch.double):
        return 1e-13
    if dtype == torch.half:
        return 1e-4
    raise TypeError("Does not support dtype " + str(dtype))
def masked_softmax(
    vector: torch.Tensor, mask: torch.BoolTensor, dim: int = -1, memory_efficient: bool = False,
) -> torch.Tensor:
    """
    `torch.nn.functional.softmax(vector)` does not work if some elements of `vector` should be
    masked. This performs a softmax on just the non-masked portions of `vector`. Passing
    `None` in for the mask is also acceptable; you'll just get a regular softmax.
    `vector` can have an arbitrary number of dimensions; the only requirement is that `mask` is
    broadcastable to `vector's` shape. If `mask` has fewer dimensions than `vector`, we will
    unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
    do it yourself before passing the mask into this function.
    If `memory_efficient` is set to true, we will simply use a very large negative number for those
    masked positions so that the probabilities of those positions would be approximately 0.
    This is not accurate in math, but works for most cases and consumes less memory.
    In the case that the input vector is completely masked and `memory_efficient` is false, this function
    returns an array of `0.0`. This behavior may cause `NaN` if this is used as the last layer of
    a model that uses categorical cross-entropy loss. Instead, if `memory_efficient` is true, this function
    will treat every element as equal, and do softmax over equal numbers.
    """
    if mask is None:
        return torch.nn.functional.softmax(vector, dim=dim)
    # broadcast the mask up to the vector's rank
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    if not memory_efficient:
        # To limit numerical errors from large vector elements outside the mask, we zero these out.
        result = torch.nn.functional.softmax(vector * mask, dim=dim)
        result = result * mask
        return result / (
            result.sum(dim=dim, keepdim=True) + tiny_value_of_dtype(result.dtype)
        )
    # BUG FIX: this branch originally called `min_value_of_dtype`, a helper that was
    # never defined in this file (it was dropped when the code was copied from
    # AllenNLP), so memory_efficient=True raised NameError. For floating dtypes,
    # AllenNLP's min_value_of_dtype(dtype) is torch.finfo(dtype).min, inlined here.
    masked_vector = vector.masked_fill(~mask, torch.finfo(vector.dtype).min)
    return torch.nn.functional.softmax(masked_vector, dim=dim)
class Attention(torch.nn.Module):
    """
    Base class for attention modules: takes a (batched) query vector and a matrix,
    plus an optional mask over the matrix rows, computes a similarity between the
    vector and each row, and (optionally) applies a softmax over the rows.
    Inputs:
    - vector: shape `(batch_size, embedding_dim)`
    - matrix: shape `(batch_size, num_rows, embedding_dim)`
    - matrix_mask: shape `(batch_size, num_rows)`, specifying which rows are just padding.
    Output:
    - attention: shape `(batch_size, num_rows)`.
    # Parameters
    normalize : `bool`, optional (default = `True`)
        If true, the computed similarities are normalized with a softmax, yielding
        a probability distribution; if false, the raw similarity scores are returned.
    """

    def __init__(self, normalize: bool = True) -> None:
        super().__init__()
        self._normalize = normalize

    def forward(
        self, vector: torch.Tensor, matrix: torch.Tensor, matrix_mask: torch.BoolTensor = None
    ) -> torch.Tensor:
        scores = self._forward_internal(vector, matrix)
        if not self._normalize:
            return scores
        return masked_softmax(scores, matrix_mask)

    def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
        # Subclasses implement the actual similarity computation.
        raise NotImplementedError
class DotProductAttention(Attention):
    """
    Attention scored by the dot product between the vector and each matrix row.
    Registered as an `Attention` with name "dot_product".
    """

    def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
        # (b, rows, dim) @ (b, dim, 1) -> (b, rows, 1) -> (b, rows)
        return torch.bmm(matrix, vector.unsqueeze(-1)).squeeze(-1)
class AdditiveAttention(Attention):
    """
    Additive (a.k.a. concat, Bahdanau) attention: with learned matrices `W`, `U`
    and vector `V`, the similarity between vector `x` and matrix `y` is
    `V tanh(Wx + Uy)`. Introduced in <https://arxiv.org/abs/1409.0473> by
    Bahdanau et al. Registered as an `Attention` with name "additive".
    # Parameters
    vector_dim : `int`, required
        Dimension of the vector `x` (i.e. `x.size()[-1]`); needed to size the
        weight matrix `W` correctly.
    matrix_dim : `int`, required
        Dimension of the matrix rows `y` (i.e. `y.size()[-1]`); needed to size
        the weight matrix `U` correctly.
    normalize : `bool`, optional (default : `True`)
        If true, normalize the computed similarities with a softmax to return a
        probability distribution; if false, return raw similarity scores.
    """

    def __init__(self, vector_dim: int, matrix_dim: int, normalize: bool = True) -> None:
        super().__init__(normalize)
        self._w_matrix = Parameter(torch.Tensor(vector_dim, vector_dim))
        self._u_matrix = Parameter(torch.Tensor(matrix_dim, vector_dim))
        self._v_vector = Parameter(torch.Tensor(vector_dim, 1))
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier-uniform initialization for all three weight tensors
        for weight in (self._w_matrix, self._u_matrix, self._v_vector):
            torch.nn.init.xavier_uniform_(weight)

    def _forward_internal(self, vector: torch.Tensor, matrix: torch.Tensor) -> torch.Tensor:
        # (b, 1, d) + (b, rows, d) -> tanh -> project down with V -> (b, rows)
        combined = torch.tanh(vector.matmul(self._w_matrix).unsqueeze(1) + matrix.matmul(self._u_matrix))
        return combined.matmul(self._v_vector).squeeze(2)
| 6,880
| 47.801418
| 107
|
py
|
SubGNN
|
SubGNN-main/SubGNN/train.py
|
# General
import numpy as np
import random
import argparse
import tqdm
import pickle
import json
import joblib
import os
import time
import sys
import pathlib
import random
import string
# Pytorch
import torch
from torch.utils.data import DataLoader
from torch.nn.functional import one_hot
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.profiler import AdvancedProfiler
# Optuna
import optuna
from optuna.samplers import TPESampler
from optuna.integration import PyTorchLightningPruningCallback
# Our Methods
import SubGNN as md
sys.path.insert(0, '..') # add config to path
import config
'''
There are several options for running `train.py`:
(1) Specify a model path via restoreModelPath. This script will use the hyperparameters at that path to train a model.
(2) Specify opt_n_trials != None and restoreModelPath == None. This script will use the hyperparameter ranges set
in the `get_hyperparams_optuma` function to run optuna trials.
(3) Specify opt_n_trials == None and restoreModelPath == None. This script will use the hyperparameters in the
`get_hyperparams` function to train/test the model.
'''
###################################################
# Parse arguments
def parse_arguments():
    '''
    Collect and parse command-line arguments to the training script.
    Returns the parsed `argparse.Namespace`.
    '''
    parser = argparse.ArgumentParser(description="Learn subgraph embeddings")
    parser.add_argument('-embedding_path', type=str, help='Directory where node embeddings are saved')
    parser.add_argument('-subgraphs_path', type=str, help='File where subgraphs are saved')
    # FIX: the two help strings below previously carried copy-pasted text from other arguments
    parser.add_argument('-shortest_paths_path', type=str, help='File where precomputed pairwise shortest paths are saved')
    parser.add_argument('-graph_path', type=str, help='File where graph is saved')
    parser.add_argument('-similarities_path', type=str, help='Directory where precomputed similarities are saved')
    parser.add_argument('-task', type=str, help='Task name (e.g. hpo_metab)')
    # Max Epochs
    parser.add_argument("-max_epochs", type=int, default=None, help="Max number of epochs to train")
    parser.add_argument("-seed", type=int, default=None, help="Random Seed")
    # Log
    parser.add_argument('-log_path', type=str, default=None, help='Place to store results. By default, use tensorboard directory unless -no_save.')
    parser.add_argument('-no_save', action='store_true', help='Makes model not save any specifications.')
    parser.add_argument('-print_train_times', action='store_true', help='Print train times.')
    # Tensorboard Arguments
    parser.add_argument('-tb_logging', action='store_true', help='Log to Tensorboard')
    parser.add_argument('-tb_dir', type=str, default="tensorboard", help='Directory for Tensorboard Logs')
    parser.add_argument('-tb_name', type=str, default="sg", help='Base Model Name for Tensorboard Log')
    parser.add_argument('-tb_version', help='Version Name for Tensorboard Log. (By default, created automatically.)')
    # Checkpoint
    parser.add_argument('-no_checkpointing', action='store_true', help='Specifies not to do model checkpointing.')
    parser.add_argument('-checkpoint_k', type=int, default=3, help='Frequency with which to save model checkpoints')
    parser.add_argument('-monitor_metric', type=str, default='val_micro_f1', help='Metric to monitor for checkpointing/stopping')
    # Optuna (FIX: "optuma" typos corrected in the comment and help strings)
    parser.add_argument("-opt_n_trials", type=int, default=None, help="Number of optuna trials to run")
    parser.add_argument("-opt_n_cores", type=int, default=-1, help="Number of cores (-1 = all available)")
    parser.add_argument("-opt_prune", action='store_true', help="Prune trials early if not promising")
    parser.add_argument("-grid_search", action='store_true', help="Grid search")
    # Debug
    parser.add_argument('-debug_mode', action='store_true', help='Plot gradients + GPU usage')
    parser.add_argument('-subset_data', action='store_true', help='Subset data to one batch per dataset')
    # Restore Model
    parser.add_argument('-restoreModelPath', type=str, default=None, help='Parent directory of model, hparams, kwargs')
    parser.add_argument('-restoreModelName', type=str, default=None, help='Name of model to restore')
    # Test set
    parser.add_argument('-runTest', action='store_true', help='Run on the test set')
    parser.add_argument('-noTrain', action='store_true', help='No training')
    args = parser.parse_args()
    return args
###################################################
# Set Hyperparameters
# TODO: change the values here if you run this script
def get_hyperparams(args):
    '''
    Return the default hyperparameter dictionary for a single (non-optuna) run.

    Edit these values to suit your model/run.
    NOTE: this dictionary is only used when args.opt_n_trials is None and
    restoreModelPath is None.
    '''
    # Training schedule, optimization, and reproducibility
    run_settings = {
        "max_epochs": 200,
        "seed": 3,
        "batch_size": 64,
        "learning_rate": 0.00025420762516423353,
        "grad_clip": 0.2160947806012501,
        "auto_lr_find": True,
        "n_processes": 4,
        "print_train_times": False,
    }
    # Which SubGNN channels are enabled
    channel_settings = {
        "use_neighborhood": True,
        "use_structure": True,
        "use_position": True,
    }
    # Architecture / embedding sizes
    architecture_settings = {
        "node_embed_size": 128,
        "n_layers": 1,
        "linear_hidden_dim_1": 64,
        "linear_hidden_dim_2": 32,
        "lin_dropout": 0.04617609616314509,
        "set2set": False,
        "ff_attn": False,
        "use_mpn_projection": True,
        "freeze_node_embeds": False,
    }
    # Anchor-patch sampling
    anchor_settings = {
        "structure_patch_type": "triangular_random_walk",
        "resample_anchor_patches": False,
        "neigh_sample_border_size": 1,
        "n_anchor_patches_pos_out": 123,
        "n_anchor_patches_pos_in": 34,
        "n_anchor_patches_N_in": 19,
        "n_anchor_patches_N_out": 69,
        "n_anchor_patches_structure": 37,
    }
    # Random walks + LSTM used by the structure channel
    walk_settings = {
        "sample_walk_len": 50,
        "n_triangular_walks": 5,
        "random_walk_len": 10,
        "rw_beta": 0.65,
        "lstm_aggregator": "last",
        "lstm_dropout": 0.21923625197416907,
        "lstm_n_layers": 2,
    }
    # Connected-component aggregation & similarity precomputation
    misc_settings = {
        "compute_similarities": False,
        "cc_aggregator": "max",
        "trainable_cc": True,
    }
    return {
        **run_settings,
        **channel_settings,
        **architecture_settings,
        **anchor_settings,
        **walk_settings,
        **misc_settings,
    }
def get_hyperparams_optuma(args, trial):
    '''
    Build a hyperparameter dict by sampling from optuna search ranges.

    Used when args.opt_n_trials is not None (and restoreModelPath is None):
    each call samples one configuration from the ranges below via `trial`.

    Args
        - args: parsed command-line arguments (supplies max_epochs, print_train_times)
        - trial (optuna Trial): trial object used to sample hyperparameter values
    Return
        - hyperparameters (dict): one sampled configuration for a single run
    '''
    hyperparameters = {
        'seed': 42,
        'batch_size': trial.suggest_int('batch_size', 64, 150),
        'learning_rate': trial.suggest_float('learning_rate', 1e-5, 1e-3, log=True),  # learning rate
        'grad_clip': trial.suggest_float('grad_clip', 0, 0.5),  # gradient clipping
        'max_epochs': args.max_epochs,  # max number of epochs
        'node_embed_size': 32,  # dim of node embedding
        # BUG FIX: this trial parameter was previously registered under the name
        # 'gamma_shortest_max_distance_N' (a copy-paste leftover), mislabeling it
        # in optuna's study storage.
        'n_layers': trial.suggest_int('n_layers', 1, 5),  # number of layers
        'n_anchor_patches_pos_in': trial.suggest_int('n_anchor_patches_pos_in', 25, 75),  # number of anchor patches (P, INTERNAL)
        'n_anchor_patches_pos_out': trial.suggest_int('n_anchor_patches_pos_out', 50, 200),  # number of anchor patches (P, BORDER)
        'n_anchor_patches_N_in': trial.suggest_int('n_anchor_patches_N_in', 10, 25),  # number of anchor patches (N, INTERNAL)
        'n_anchor_patches_N_out': trial.suggest_int('n_anchor_patches_N_out', 25, 75),  # number of anchor patches (N, BORDER)
        'n_anchor_patches_structure': trial.suggest_int('n_anchor_patches_structure', 15, 40),  # number of anchor patches (S, INTERNAL & BORDER)
        'neigh_sample_border_size': trial.suggest_int('neigh_sample_border_size', 1, 2),
        # BUG FIX: both hidden dims were sampled under the single trial name
        # 'linear_hidden_dim', which makes optuna return the same value for both.
        # Distinct names let the two layer widths vary independently.
        'linear_hidden_dim_1': trial.suggest_int('linear_hidden_dim_1', 16, 96),
        'linear_hidden_dim_2': trial.suggest_int('linear_hidden_dim_2', 16, 96),
        'n_triangular_walks': trial.suggest_int('n_triangular_walks', 5, 15),
        'random_walk_len': trial.suggest_int('random_walk_len', 18, 26),
        'sample_walk_len': trial.suggest_int('sample_walk_len', 18, 26),
        'rw_beta': trial.suggest_float('rw_beta', 0.1, 0.9),  # triangular random walk parameter, beta
        'lstm_aggregator': 'last',
        'lstm_dropout': trial.suggest_float('lstm_dropout', 0.0, 0.4),
        'lstm_n_layers': trial.suggest_int('lstm_n_layers', 1, 2),  # number of layers in LSTM used for embedding structural anchor patches
        'n_processes': 4,  # multiprocessing
        'lin_dropout': trial.suggest_float('lin_dropout', 0.0, 0.6),
        'resample_anchor_patches': False,
        'compute_similarities': False,
        'use_mpn_projection': True,
        'use_neighborhood': True,
        'use_structure': False,
        'use_position': False,
        'cc_aggregator': trial.suggest_categorical('cc_aggregator', ['sum', 'max']),  # approach for aggregating node embeddings in components
        'trainable_cc': trial.suggest_categorical('trainable_cc', [True, False]),
        'freeze_node_embeds': False,
        'print_train_times': args.print_train_times
    }
    return hyperparameters
###################################################
def get_paths(args, hyperparameters):
    '''
    Resolve the locations of all data files used by the model.

    If args.task is set, every path is derived from the task directory and
    the 'embedding_type' hyperparameter; otherwise the explicit per-file
    paths supplied on the command line are returned unchanged.

    Returns a 7-tuple: (graph, subgraphs, embeddings, similarities,
    shortest paths, degree sequence, ego graphs).
    '''
    if args.task is None:
        # Caller supplied every path explicitly on the command line
        return (args.graph_path, args.subgraphs_path, args.embedding_path,
                args.similarities_path, args.shortest_paths_path,
                args.degree_sequence_path, args.ego_graph_path)
    task_dir = args.task
    # Subgraphs, edge list, and precomputed similarity inputs live in the task directory
    subgraphs_path = os.path.join(task_dir, "subgraphs.pth")
    graph_path = os.path.join(task_dir, "edge_list.txt")
    shortest_paths_path = os.path.join(task_dir, "shortest_path_matrix.npy")
    degree_sequence_path = os.path.join(task_dir, "degree_sequence.txt")
    ego_graph_path = os.path.join(task_dir, "ego_graphs.txt")
    # Directory where similarity calculations will be stored
    similarities_path = os.path.join(task_dir, "similarities/")
    # Node embedding file depends on which GNN produced the embeddings
    embedding_type = hyperparameters['embedding_type']
    if embedding_type == 'gin':
        embedding_path = os.path.join(task_dir, "gin_embeddings.pth")
    elif embedding_type == 'graphsaint':
        embedding_path = os.path.join(task_dir, "graphsaint_gcn_embeddings.pth")
    else:
        raise NotImplementedError
    return (graph_path, subgraphs_path, embedding_path, similarities_path,
            shortest_paths_path, degree_sequence_path, ego_graph_path)
def build_model(args, trial = None):
    '''
    Create a SubGNN model from hyperparameters specified in one of three places:
    (1) a hyperparams.json under args.restoreModelPath,
    (2) optuna search ranges (get_hyperparams_optuma) when a trial is given, or
    (3) the hard-coded defaults in get_hyperparams.

    Also seeds torch/numpy for reproducibility and optionally restores model
    weights from a checkpoint named args.restoreModelName.

    Returns (model, hyperparameters).
    '''
    # get hyperparameters (priority: restore file > optuna trial > defaults)
    if args.restoreModelPath is not None: # load in hyperparameters from file
        print("Loading Hyperparams")
        with open(os.path.join(args.restoreModelPath, "hyperparams.json")) as data_file:
            hyperparameters = json.load(data_file)
        # a CLI-supplied max_epochs overrides the restored value
        if args.max_epochs:
            hyperparameters['max_epochs'] = args.max_epochs
    elif trial is not None: # select hyperparams from ranges specified in trial
        hyperparameters = get_hyperparams_optuma(args, trial)
    else: # get hyperparams from passed in args
        hyperparameters = get_hyperparams(args)
    # set subset_data (restricts each dataset to one batch for debugging)
    if args.subset_data:
        hyperparameters['subset_data'] = True
    # a CLI-supplied seed overrides the hyperparameter seed
    if hasattr(args,"seed") and args.seed is not None:
        hyperparameters['seed'] = args.seed
    # set for reproducibility: seed torch (CPU + all GPUs) and numpy,
    # and force deterministic cuDNN kernels
    torch.manual_seed(hyperparameters['seed'])
    np.random.seed(hyperparameters['seed'])
    torch.cuda.manual_seed(hyperparameters['seed'])
    torch.cuda.manual_seed_all(hyperparameters['seed'])
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # get locations of file paths & instantiate model
    graph_path, subgraphs_path, embedding_path, similarities_path, shortest_paths_path, degree_dict_path, ego_graph_path = get_paths(args, hyperparameters)
    model = md.SubGNN(hyperparameters, graph_path, subgraphs_path, embedding_path, similarities_path, shortest_paths_path, degree_dict_path, ego_graph_path)
    # Restore previous weights, if relevant; only parameters whose names exist in
    # the freshly built model are loaded from the checkpoint
    if args.restoreModelName:
        checkpoint_path = os.path.join(args.restoreModelPath, args.restoreModelName)
        if not torch.cuda.is_available():
            # map GPU-saved tensors onto the CPU
            checkpoint = torch.load(checkpoint_path, torch.device('cpu') )
        else:
            checkpoint = torch.load(checkpoint_path)
        model_dict = model.state_dict()
        pretrain_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
        model.load_state_dict(pretrain_dict)
    return model, hyperparameters
def build_trainer(args, hyperparameters, trial = None):
    '''
    Build the PyTorch Lightning trainer (optionally wired to an optuna trial).

    Configures epochs, GPUs, gradient clipping, tensorboard logging,
    checkpointing, optuna pruning, and debug profiling from args/hyperparameters.

    Returns (trainer, trainer_kwargs, results_path); results_path is None when
    args.no_save is set.

    NOTE(review): kwargs such as 'checkpoint_callback', 'early_stop_callback',
    and 'progress_bar_refresh_rate' match an older pytorch-lightning API --
    verify against the pinned lightning version.
    '''
    if 'progress_bar_refresh_rate' in hyperparameters:
        p_refresh = hyperparameters['progress_bar_refresh_rate']
    else:
        p_refresh = 5
    # set epochs, gpus, gradient clipping, etc.
    # (gpus defaults to 1 here; corrected to 0 below when CUDA is unavailable)
    trainer_kwargs = {'max_epochs': hyperparameters['max_epochs'],
                      "gpus": 1,
                      "num_sanity_val_steps": 0,
                      "progress_bar_refresh_rate": p_refresh,
                      "gradient_clip_val": hyperparameters['grad_clip']
                      }
    # set auto learning rate finder param
    if 'auto_lr_find' in hyperparameters and hyperparameters['auto_lr_find']:
        trainer_kwargs['auto_lr_find'] = hyperparameters['auto_lr_find']
    # Create tensorboard logger (random version name unless one was supplied)
    if not args.no_save and args.tb_logging:
        lgdir = os.path.join(args.tb_dir, args.tb_name)
        if not os.path.exists(lgdir):
            os.makedirs(lgdir)
        if args.tb_version is not None:
            tb_version = args.tb_version
        else:
            tb_version = "version_" + str(random.randint(0, 10000000))
        logger = TensorBoardLogger(args.tb_dir, name=args.tb_name, version=tb_version)
        if not os.path.exists(logger.log_dir):
            os.makedirs(logger.log_dir)
        print("Tensorboard logging at ", logger.log_dir)
        trainer_kwargs["logger"] = logger
    # set up model saving: explicit -log_path wins, else the tensorboard dir
    results_path = None
    if not args.no_save:
        if args.log_path:
            results_path = args.log_path
        elif args.tb_logging:
            results_path = logger.log_dir
        else:
            raise Exception('No results path has been specified.')
    # checkpoint the top-k models by the monitored metric (assumed higher-is-better)
    if (not args.no_save) and (not args.no_checkpointing):
        trainer_kwargs["checkpoint_callback"] = ModelCheckpoint(
            filepath=os.path.join(results_path, "{epoch}-{val_micro_f1:.2f}-{val_acc:.2f}-{val_auroc:.2f}"),
            save_top_k=args.checkpoint_k,
            verbose=True,
            monitor=args.monitor_metric,
            mode='max'
        )
    # let optuna prune unpromising trials via early stopping
    if trial is not None and args.opt_prune:
        trainer_kwargs['early_stop_callback'] = PyTorchLightningPruningCallback(trial, monitor=args.monitor_metric)
    # enable debug mode: gradient norms, GPU memory, and an advanced profiler
    if args.debug_mode:
        print("\n**** DEBUG MODE ON! ****\n")
        trainer_kwargs["track_grad_norm"] = 2
        trainer_kwargs["log_gpu_memory"] = True
        trainer_kwargs['print_nan_grads'] = False
        if not args.no_save:
            profile_path = os.path.join(results_path, "profiler.log")
            print("Profiling to ", profile_path)
            trainer_kwargs["profiler"] = AdvancedProfiler(output_filename=profile_path)
        else:
            trainer_kwargs["profiler"] = AdvancedProfiler()
    # fall back to CPU when no GPU is available
    if not torch.cuda.is_available():
        trainer_kwargs['gpus'] = 0
    trainer = pl.Trainer(**trainer_kwargs)
    return trainer, trainer_kwargs, results_path
def train_model(args, trial = None):
    '''
    Train (and optionally test) a single model.

    Hyperparameters come from build_model (restore file, optuna trial, or
    defaults). Saves hyperparameters, trainer kwargs, and metric scores under
    results_path when saving is enabled.

    Return
        - test results dict when args.runTest or args.noTrain is set; otherwise
          the max (or min, for val_loss) of the monitored validation metric.
    '''
    model, hyperparameters = build_model(args, trial)
    trainer, trainer_kwargs, results_path = build_trainer(args, hyperparameters, trial)
    random.seed(hyperparameters['seed'])
    # save hyperparams and trainer kwargs to file (non-serializable kwargs are
    # dropped from the saved trainer kwargs first)
    if results_path is not None:
        hparam_file = open(os.path.join(results_path, "hyperparams.json"), "w")
        hparam_file.write(json.dumps(hyperparameters, indent=4))
        hparam_file.close()
        tkwarg_file = open(os.path.join(results_path, "trainer_kwargs.json"), "w")
        pop_keys = [key for key in ['logger', 'profiler', 'early_stop_callback', 'checkpoint_callback'] if key in trainer_kwargs.keys()]
        [trainer_kwargs.pop(key) for key in pop_keys]
        tkwarg_file.write(json.dumps(trainer_kwargs, indent=4))
        tkwarg_file.close()
    # optionally train the model
    if not args.noTrain:
        trainer.fit(model)
    # optionally test the model
    if args.runTest or args.noTrain:
        # reproducibility: re-seed before evaluation
        torch.manual_seed(hyperparameters['seed'])
        np.random.seed(hyperparameters['seed'])
        torch.cuda.manual_seed(hyperparameters['seed'])
        torch.cuda.manual_seed_all(hyperparameters['seed'])
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
        # reload saved checkpoint weights (only params present in the model)
        # NOTE(review): if several "epoch*.ckpt" files exist, the last one
        # listdir returns wins -- confirm this selects the intended checkpoint
        if not args.no_checkpointing:
            for file in os.listdir(results_path):
                if file.endswith(".ckpt") and file.startswith("epoch"):
                    print(f"Loading model {file}")
                    if not torch.cuda.is_available():
                        checkpoint = torch.load(os.path.join(results_path, file), torch.device('cpu') )
                    else:
                        checkpoint = torch.load(os.path.join(results_path, file))
                    model_dict = model.state_dict()
                    pretrain_dict = {k: v for k, v in checkpoint['state_dict'].items() if k in model_dict}
                    model.load_state_dict(pretrain_dict)
        trainer.test(model)
    # save results (validation metrics, plus test metrics when testing)
    if results_path is not None:
        scores_file = open(os.path.join(results_path, "final_metric_scores.json"), "w")
        results_serializable = {k: float(v) for k, v in model.metric_scores[-1].items()}
        scores_file.write(json.dumps(results_serializable, indent=4))
        scores_file.close()
        if args.runTest:
            scores_file = open(os.path.join(results_path, "test_results.json"), "w")
            results_serializable = {k: float(v) for k, v in model.test_results.items()}
            scores_file.write(json.dumps(results_serializable, indent=4))
            scores_file.close()
    # print/return results: test results when testing, else the best value of
    # the monitored validation metric across epochs
    if args.runTest:
        print(model.test_results)
        return model.test_results
    elif args.noTrain:
        print(model.test_results)
        return model.test_results
    else:
        all_scores = [score[args.monitor_metric].numpy() for score in model.metric_scores]
        if args.monitor_metric == "val_loss":
            return(np.min(all_scores))
        else:
            return(np.max(all_scores))
def main(args):
    '''
    Entry point: run a single training job, or an optuna hyperparameter study
    when args.opt_n_trials is set.
    '''
    torch.autograd.set_detect_anomaly(True)
    # specify tensorboard directory relative to the project root
    if args.tb_dir is not None:
        args.tb_dir = os.path.join(config.PROJECT_ROOT, args.tb_dir)
    # if args.opt_n_trials is None, then we either read in hparams from file or
    # use the hyperparameters in get_hyperparams
    if args.opt_n_trials is None:
        return train_model(args)
    else:
        print(f'Running {args.opt_n_trials} Trials of optuna')
        if args.opt_prune:
            pruner = optuna.pruners.MedianPruner()
        else:
            pruner = None
        # minimize losses, maximize every other metric
        if args.monitor_metric == 'val_loss':
            direction = "minimize"
        else:
            direction = "maximize"
        # NOTE(review): if neither -log_path nor -tb_logging is given,
        # study_path is never assigned and the lines below raise NameError --
        # confirm one of the two is required for optuna runs
        if args.log_path:
            study_path = args.log_path
        elif args.tb_logging:
            study_path = os.path.join(args.tb_dir, args.tb_name)
        print("Logging to ", study_path)
        db_file = os.path.join(study_path, 'optuma_study_sqlite.db')
        pathlib.Path(study_path).mkdir(parents=True, exist_ok=True)
        # set up optuna study: exhaustive grid over a small space, or random search
        if args.grid_search:
            search_space = {
                'neigh_sample_border_size': [1, 2],
                'gamma_shortest_max_distance_P': [3, 4, 5, 6]
            }
            sampler = optuna.samplers.GridSampler(search_space)
        else:
            sampler = optuna.samplers.RandomSampler()
        # persist the study in sqlite so interrupted studies can be resumed
        study = optuna.create_study(direction=direction,
                                    sampler=sampler,
                                    pruner=pruner,
                                    storage='sqlite:///' + db_file,
                                    study_name=study_path,
                                    load_if_exists=True)
        study.optimize(lambda trial: train_model(args, trial), n_trials=args.opt_n_trials, n_jobs=args.opt_n_cores)
        optuma_results_path = os.path.join(study_path, 'optuna_study.pkl')
        print("Saving Study Results to", optuma_results_path)
        joblib.dump(study, optuma_results_path)
        print(study.best_params)
if __name__ == "__main__":
    # Parse CLI arguments and launch training / hyperparameter search.
    main(parse_arguments())
| 21,662
| 42.5
| 170
|
py
|
SubGNN
|
SubGNN-main/prepare_dataset/train_node_emb.py
|
# General
import numpy as np
import random
import argparse
import os
import config_prepare_dataset as config
import preprocess
import model as mdl
import utils
# Pytorch
import torch
from torch_geometric.utils.convert import to_networkx, to_scipy_sparse_matrix
from torch_geometric.data import Data, DataLoader, NeighborSampler
if config.MINIBATCH == "GraphSaint": from torch_geometric.data import GraphSAINTRandomWalkSampler
from torch_geometric.utils import negative_sampling
# Global Variables shared across train()/test()/generate_emb()
# NOTE: the log file is opened at import time and stays open for the run
log_f = open(str(config.DATASET_DIR / "node_emb.log"), "w")
all_data = None           # full graph (set by generate_emb)
device = None             # placeholder; reassigned just below
best_val_acc = -1         # best mean validation accuracy seen so far
best_embeddings = None    # embeddings from the best model (set by generate_emb)
best_model = None         # model achieving best_val_acc
all_losses = {}           # per-hyperparameter-config training loss curves
eps = 10e-4               # tolerance when comparing validation accuracies (1e-3)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
best_hyperparameters = dict()
if device.type == 'cuda': print(torch.cuda.get_device_name(0))
# Hyperparameter search space + the current working configuration (initialized
# to the first value of each range); keys depend on the minibatching strategy
if config.MINIBATCH == "NeighborSampler":
    all_hyperparameters = {'batch_size': config.POSSIBLE_BATCH_SIZES, 'hidden': config.POSSIBLE_HIDDEN, 'output': config.POSSIBLE_OUTPUT, 'lr': config.POSSIBLE_LR, 'wd': config.POSSIBLE_WD, 'nb_size': config.POSSIBLE_NB_SIZE, 'dropout': config.POSSIBLE_DROPOUT}
    curr_hyperparameters = {'batch_size': config.POSSIBLE_BATCH_SIZES[0], 'hidden': config.POSSIBLE_HIDDEN[0], 'output': config.POSSIBLE_OUTPUT[0], 'lr': config.POSSIBLE_LR[0], 'wd': config.POSSIBLE_WD[0], 'nb_size': config.POSSIBLE_NB_SIZE[0], 'dropout': config.POSSIBLE_DROPOUT[0]}
elif config.MINIBATCH == "GraphSaint":
    all_hyperparameters = {'batch_size': config.POSSIBLE_BATCH_SIZES, 'hidden': config.POSSIBLE_HIDDEN, 'output': config.POSSIBLE_OUTPUT, 'lr': config.POSSIBLE_LR, 'wd': config.POSSIBLE_WD, 'walk_length': config.POSSIBLE_WALK_LENGTH, 'num_steps': config.POSSIBLE_NUM_STEPS, 'dropout': config.POSSIBLE_DROPOUT}
    curr_hyperparameters = {'batch_size': config.POSSIBLE_BATCH_SIZES[0], 'hidden': config.POSSIBLE_HIDDEN[0], 'output': config.POSSIBLE_OUTPUT[0], 'lr': config.POSSIBLE_LR[0], 'wd': config.POSSIBLE_WD[0], 'walk_length': config.POSSIBLE_WALK_LENGTH[0], 'num_steps': config.POSSIBLE_NUM_STEPS[0], 'dropout': config.POSSIBLE_DROPOUT[0]}
def train(epoch, model, optimizer):
    """
    Run one training epoch of link prediction over minibatches of the graph.

    Args
        - epoch (int): current epoch (used for logging only)
        - model: GNN being trained
        - optimizer: optimizer over model parameters
    Return
        - total_loss: sum of the (non-NaN) minibatch losses
    Side effects
        - updates globals best_val_acc / best_model / best_hyperparameters and
          saves the model weights to disk when mean validation accuracy improves
    """
    global all_data, best_val_acc, best_embeddings, best_model, curr_hyperparameters, best_hyperparameters
    # Accumulators for per-minibatch validation metrics
    total_loss = 0
    roc_val = []
    ap_val = []
    f1_val = []
    acc_val = []
    # Minibatches: build a loader for the configured sampling strategy
    if config.MINIBATCH == "NeighborSampler":
        loader = NeighborSampler(all_data.edge_index, sizes = [curr_hyperparameters['nb_size']], batch_size = curr_hyperparameters['batch_size'], shuffle = True)
    elif config.MINIBATCH == "GraphSaint":
        all_data.num_classes = torch.tensor([2])
        loader = GraphSAINTRandomWalkSampler(all_data, batch_size=curr_hyperparameters['batch_size'], walk_length=curr_hyperparameters['walk_length'], num_steps=curr_hyperparameters['num_steps'])
    # Iterate through minibatches
    for data in loader:
        if config.MINIBATCH == "NeighborSampler": data = preprocess.set_data(data, all_data, config.MINIBATCH)
        # Positive train edges + negative samples (1 negative per 4 positives);
        # labels: 1 for positives, 0 for sampled negatives
        curr_train_pos = data.edge_index[:, data.train_mask]
        curr_train_neg = negative_sampling(curr_train_pos, num_neg_samples=curr_train_pos.size(1) // 4)
        curr_train_total = torch.cat([curr_train_pos, curr_train_neg], dim=-1)
        data.y = torch.zeros(curr_train_total.size(1)).float()
        data.y[:curr_train_pos.size(1)] = 1.
        # Perform training: forward pass, edge scores via element-wise dot, loss
        data.to(device)
        optimizer.zero_grad()
        out = model(data.x, data.edge_index)
        curr_dot_embed = utils.el_dot(out, curr_train_total)
        loss = utils.calc_loss_both(data, curr_dot_embed)
        # skip the update when the loss is NaN
        if torch.isnan(loss) == False:
            total_loss += loss
            loss.backward()
            optimizer.step()
        # Training metrics for this minibatch
        curr_train_pos_mask = torch.zeros(curr_train_total.size(1)).bool()
        curr_train_pos_mask[:curr_train_pos.size(1)] = 1
        curr_train_neg_mask = (curr_train_pos_mask == 0)
        roc_score, ap_score, train_acc, train_f1 = utils.calc_roc_score(pred_all = curr_dot_embed.T[1], pos_edges = curr_train_pos_mask, neg_edges = curr_train_neg_mask)
        print(">>>>>>Train: (ROC) ", roc_score, " (AP) ", ap_score, " (ACC) ", train_acc, " (F1) ", train_f1)
        # Validation metrics on the same minibatch's val edges (+ fresh negatives)
        curr_val_pos = data.edge_index[:, data.val_mask]
        curr_val_neg = negative_sampling(curr_val_pos, num_neg_samples=curr_val_pos.size(1) // 4)
        curr_val_total = torch.cat([curr_val_pos, curr_val_neg], dim=-1)
        curr_val_pos_mask = torch.zeros(curr_val_total.size(1)).bool()
        curr_val_pos_mask[:curr_val_pos.size(1)] = 1
        curr_val_neg_mask = (curr_val_pos_mask == 0)
        val_dot_embed = utils.el_dot(out, curr_val_total)
        data.y = torch.zeros(curr_val_total.size(1)).float()
        data.y[:curr_val_pos.size(1)] = 1.
        roc_score, ap_score, val_acc, val_f1 = utils.calc_roc_score(pred_all = val_dot_embed.T[1], pos_edges = curr_val_pos_mask, neg_edges = curr_val_neg_mask)
        roc_val.append(roc_score)
        ap_val.append(ap_score)
        acc_val.append(val_acc)
        f1_val.append(val_f1)
    # Epoch summary (means over minibatches), echoed to stdout and the log file
    res = "\t".join(["Epoch: %04d" % (epoch + 1), "train_loss = {:.5f}".format(total_loss), "val_roc = {:.5f}".format(np.mean(roc_val)), "val_ap = {:.5f}".format(np.mean(ap_val)), "val_f1 = {:.5f}".format(np.mean(f1_val)), "val_acc = {:.5f}".format(np.mean(acc_val))])
    print(res)
    log_f.write(res + "\n")
    # Save best model and parameters (eps tolerance keeps ties/near-ties)
    if best_val_acc <= np.mean(acc_val) + eps:
        best_val_acc = np.mean(acc_val)
        with open(str(config.DATASET_DIR / "best_model.pth"), 'wb') as f:
            torch.save(model.state_dict(), f)
        best_hyperparameters = curr_hyperparameters
        best_model = model
    return total_loss
def test(model):
    """
    Evaluate the best saved model on the held-out test edges.

    Loads the weights saved by train() from best_model.pth, scores positive
    test edges against sampled negatives using the precomputed
    best_embeddings (global, set by generate_emb), and writes ROC/AP/accuracy/F1
    to stdout and the log file.
    """
    global all_data, best_embeddings, best_hyperparameters, all_losses
    model.load_state_dict(torch.load(str(config.DATASET_DIR / "best_model.pth")))
    model.to(device)
    model.eval()
    # Positive test edges + negative samples (1 negative per 4 positives)
    test_pos = all_data.edge_index[:, all_data.test_mask]
    test_neg = negative_sampling(test_pos, num_neg_samples=test_pos.size(1) // 4)
    test_total = torch.cat([test_pos, test_neg], dim=-1)
    test_pos_edges = torch.zeros(test_total.size(1)).bool()
    test_pos_edges[:test_pos.size(1)] = 1
    test_neg_edges = (test_pos_edges == 0)
    # test=True returns raw sigmoid dot-product scores (no class stacking)
    dot_embed = utils.el_dot(best_embeddings, test_total, test = True)
    # also renders the training-loss / ROC / PR plots to train_plots.pdf
    roc_score, ap_score, test_acc, test_f1 = utils.calc_roc_score(pred_all = dot_embed, pos_edges = test_pos_edges.flatten(), neg_edges = test_neg_edges.flatten(), loss = all_losses, save_plots = config.DATASET_DIR / "train_plots.pdf")
    print('Test ROC score: {:.5f}'.format(roc_score))
    print('Test AP score: {:.5f}'.format(ap_score))
    print('Test Accuracy: {:.5f}'.format(test_acc))
    print('Test F1 score: {:.5f}'.format(test_f1))
    log_f.write('Test ROC score: {:.5f}\n'.format(roc_score))
    log_f.write('Test AP score: {:.5f}\n'.format(ap_score))
    log_f.write('Test Accuracy: {:.5f}\n'.format(test_acc))
    log_f.write('Test F1 score: {:.5f}\n'.format(test_f1))
def generate_emb():
    """
    Hyperparameter search + training loop for node embeddings.

    Performs a greedy coordinate search: hyperparameter types are visited in
    random order; for each type, every candidate value is trained for
    config.EPOCHS epochs, then that coordinate is fixed to the best value found
    so far before moving to the next type. Finally evaluates the best model on
    the test split and saves its node embeddings to disk.
    """
    global all_data, best_embeddings, best_model, all_hyperparameters, curr_hyperparameters, best_hyperparameters, all_losses, device
    all_data = preprocess.read_graphs(str(config.DATASET_DIR / "edge_list.txt"))
    # Iterate through hyperparameter types (shuffled)
    shuffled_param_type = random.sample(all_hyperparameters.keys(), len(all_hyperparameters.keys()))
    for param_type in shuffled_param_type:
        # Iterate through hyperparameter values of the specified type (shuffled)
        shuffled_param_val = random.sample(all_hyperparameters[param_type], len(all_hyperparameters[param_type]))
        for param_val in shuffled_param_val:
            # Initiate current hyperparameter
            curr_hyperparameters[param_type] = param_val
            print(curr_hyperparameters)
            log_f.write(str(curr_hyperparameters) + "\n")
            # Set up a fresh model + optimizer for this configuration
            model = mdl.TrainNet(all_data.x.shape[1], curr_hyperparameters['hidden'], curr_hyperparameters['output'], config.CONV.lower().split("_")[1], curr_hyperparameters['dropout']).to(device)
            optimizer = torch.optim.Adam(model.parameters(), lr = curr_hyperparameters['lr'], weight_decay = curr_hyperparameters['wd'])
            # Train model, recording the loss curve keyed by the config values
            model.train()
            curr_losses = []
            for epoch in range(config.EPOCHS):
                loss = train(epoch, model, optimizer)
                curr_losses.append(loss)
            all_losses[";".join([str(v) for v in curr_hyperparameters.values()])] = curr_losses
        # Fix this coordinate to the best value before searching the next one
        curr_hyperparameters[param_type] = best_hyperparameters[param_type]
    print("Best Hyperparameters: ", best_hyperparameters)
    print("Optimization finished!")
    log_f.write("Best Hyperparameters: %s \n" % best_hyperparameters)
    # Compute best embeddings on CPU with the best model
    device = torch.device('cpu')
    best_model = best_model.to(device)
    best_embeddings = utils.get_embeddings(best_model, all_data, device)
    # Test
    test(best_model)
    # Save best embeddings
    torch.save(best_embeddings, config.DATASET_DIR / (config.CONV.lower() + "_embeddings.pth"))
| 9,326
| 48.611702
| 334
|
py
|
SubGNN
|
SubGNN-main/prepare_dataset/utils.py
|
# General
import random
import numpy as np
# Pytorch
import torch
import torch.nn.functional as F
from torch.nn import Sigmoid
from torch_geometric.data import Dataset
# Matplotlib
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Sci-kit Learn
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, f1_score, roc_curve, precision_recall_curve
# Global variables
# Prefer GPU when available; shared by the helpers in this module.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def calc_loss_both(data, dot_pred):
    """
    Compute the link-prediction loss.

    Args
        - data (Data object): graph minibatch; data.y holds 0/1 edge labels
        - dot_pred (tensor, shape=(edges, 2)): per-edge class scores from el_dot
    Return
        - loss (tensor): negative log-likelihood of the true edge labels
    """
    log_probs = F.log_softmax(dot_pred.to(device), dim=-1)
    loss = F.nll_loss(log_probs, data.y.long())
    # NOTE(review): el_dot detaches its inputs, so the loss is re-marked as
    # requiring grad here; gradients cannot flow back through el_dot itself --
    # verify this is intentional.
    loss.requires_grad = True
    return loss
def el_dot(embed, edges, test=False):
    """
    Score edges by the sigmoid of the dot product of their endpoint embeddings.

    Args
        - embed (tensor, shape=(nodes, dim)): node embeddings
        - edges (tensor long, shape=(2, E)): source/target node index pairs
        - test (bool): if True, return only the raw sigmoid scores
    Return
        - if test: tensor shape (E,) of sigmoid dot-product scores
        - else: tensor shape (E, 2) of (1 - score, score) class columns

    Note: inputs are moved to CPU and detached, so no gradients flow through
    this function (matches the original behavior).
    """
    embed = embed.cpu().detach()
    edges = edges.cpu().detach()
    source = torch.index_select(embed, 0, edges[0, :])
    target = torch.index_select(embed, 0, edges[1, :])
    # Row-wise dot product. This replaces the previous bmm + numpy-squeeze
    # reshaping, which collapsed to a 0-dim tensor (and broke torch.stack)
    # when only a single edge was scored.
    dots = torch.sigmoid((source * target).sum(dim=1))
    if test:
        return dots
    return torch.stack((1.0 - dots, dots), 1)
def calc_roc_score(pred_all, pos_edges=[], neg_edges=[], true_all=[], save_plots="", loss = [], multi_class=False, labels=[], multilabel=False):
    """
    Calculate ROC / AP / accuracy / F1 metrics, optionally rendering plots.

    Args
        - pred_all: predicted scores (all edges for binary mode, or an
          (samples, classes) score matrix for multi-class mode)
        - pos_edges, neg_edges: boolean masks selecting positive/negative
          entries of pred_all (binary mode only)
        - true_all: one-hot / multi-hot ground-truth matrix (multi-class mode)
        - save_plots: path for a PDF of ROC/PR/loss plots ("" disables plotting)
        - loss: per-config loss curves forwarded to plot_roc_ap
        - multi_class (bool): multi-class mode when True, link-prediction
          binary mode when False
        - labels: class names (multi-class mode)
        - multilabel (bool): threshold at 0.5 instead of argmax (multi-class)
    Return
        - binary mode: (roc_auc, ap_score, acc, f1)
        - multi-class: (roc_auc, acc, f1_micro), plus per-class
          (class_roc, class_ap, class_f1) when save_plots is given
    NOTE(review): mutable default arguments ([]) are never mutated here, so
    they are harmless, but worth tidying if this signature is ever revised.
    """
    if multi_class:
        # per-class curves/metrics only computed when plotting is requested
        if save_plots != "":
            class_roc, class_ap, class_f1 = plot_roc_ap(true_all, pred_all, save_plots, loss = loss, labels = labels, multilabel = multilabel)
        roc_auc = roc_auc_score(true_all, pred_all, multi_class = 'ovr')
        if multilabel:
            # multilabel: independent 0.5 threshold per class
            pred_all = (pred_all > 0.5)
        else:
            # single-label: pick the argmax class
            true_all = torch.argmax(true_all, axis = 1)
            pred_all = torch.argmax(torch.tensor(pred_all), axis = 1)
        f1_micro = f1_score(true_all, pred_all, average = "micro")
        acc = accuracy_score(true_all, pred_all)
        if save_plots != "": return roc_auc, acc, f1_micro, class_roc, class_ap, class_f1
        return roc_auc, acc, f1_micro
    else:
        # binary link prediction: concatenate positive and negative scores,
        # labels are 1 for positives followed by 0 for negatives
        pred_pos = pred_all[pos_edges]
        pred_neg = pred_all[neg_edges]
        pred_all = torch.cat((pred_pos, pred_neg), 0).cpu().detach().numpy()
        true_all = torch.cat((torch.ones(len(pred_pos)), torch.zeros(len(pred_neg))), 0).cpu().detach().numpy()
        roc_auc = roc_auc_score(true_all, pred_all)
        ap_score = average_precision_score(true_all, pred_all)
        acc = accuracy_score(true_all, (pred_all > 0.5))
        f1 = f1_score(true_all, (pred_all > 0.5))
        if save_plots != "": plot_roc_ap(true_all, pred_all, save_plots, loss, multilabel = multilabel)
        return roc_auc, ap_score, acc, f1
def plot_roc_ap(y_true, y_pred, save_plots, loss = {}, labels = [], multilabel = False):
    """
    Render ROC, precision-recall, and (optionally) training-loss plots to a
    multi-page PDF, and compute per-class F1 scores.

    Args
        - y_true: ground-truth labels (matrix in multi-class mode, vector in
          binary mode)
        - y_pred: predicted scores, same leading shape as y_true
        - save_plots: output PDF path
        - loss (dict): per-config loss curves; plotted when non-empty
        - labels: class names; non-empty triggers per-class (multi-class) mode
        - multilabel (bool): threshold at 0.5 instead of argmax for F1
    Return
        - (roc, ap, f1): per-class dicts/arrays in multi-class mode, scalars
          (and f1 == []) in binary mode
    """
    with PdfPages(save_plots) as pdf:
        # --- Page 1: ROC curve(s) ---
        fpr = dict()
        tpr = dict()
        roc = dict()
        if len(labels) > 0: # Multiclass classification: one curve per class
            for c in range(y_true.shape[1]):
                fpr[c], tpr[c], _ = roc_curve(y_true[:, c], y_pred[:, c])
                roc[c] = roc_auc_score(y_true[:, c], y_pred[:, c])
                plt.plot(fpr[c], tpr[c], label = str(labels[c]) + " (area = {:.5f})".format(roc[c]))
                print("[ROC] " + str(labels[c]) + ": {:.5f}".format(roc[c]))
        else: # Binary classification: single curve
            fpr, tpr, _ = roc_curve(y_true, y_pred)
            roc = roc_auc_score(y_true, y_pred)
            plt.plot(fpr, tpr, label = "ROC = {:.5f}".format(roc))
        plt.plot([0, 1], [0, 1], linestyle='--') # chance diagonal
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel("False Positive Rate")
        plt.ylabel("True Positive Rate")
        plt.legend(loc="best")
        plt.title("ROC")
        pdf.savefig()
        plt.close()
        # --- Page 2: Precision-Recall curve(s) ---
        precision = dict()
        recall = dict()
        ap = dict()
        if len(labels) > 0: # Multiclass classification
            for c in range(y_true.shape[1]):
                precision[c], recall[c], _ = precision_recall_curve(y_true[:, c], y_pred[:, c])
                ap[c] = average_precision_score(y_true[:, c], y_pred[:, c])
                plt.plot(recall[c], precision[c], label = str(labels[c]) + " (area = {:.5f})".format(ap[c]))
                print("[AP] " + str(labels[c]) + ": {:.5f}".format(ap[c]))
        else: # Binary classification
            precision, recall, _ = precision_recall_curve(y_true, y_pred)
            ap = average_precision_score(y_true, y_pred)
            # horizontal baseline at the positive-class prevalence
            n_true = sum(y_true)/len(y_true)
            plt.plot(recall, precision, label = "AP = {:.5f}".format(ap))
            plt.plot([0, 1], [n_true, n_true], linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel("Recall")
        plt.ylabel("Precision")
        if len(labels) > 0: plt.legend(loc="best")
        plt.title("Precision-recall curve")
        pdf.savefig()
        plt.close()
        # --- Page 3 (optional): training-loss curves, one per config key ---
        if len(loss) > 0:
            max_epochs = max([len(l) for k, l in loss.items()])
            for k, l in loss.items():
                plt.plot(np.arange(max_epochs), l, label = k)
            plt.xlabel("Epochs")
            plt.ylabel("Loss")
            plt.xlim([0, max_epochs])
            plt.legend(loc="best")
            plt.title("Training Loss")
            pdf.savefig()
            plt.close()
        # --- Per-class F1 (multi-class mode only; [] otherwise) ---
        f1 = []
        if len(labels) > 0: # Multiclass classification
            if not multilabel:
                y_true = torch.argmax(y_true, axis = 1)
                y_pred = torch.argmax(torch.tensor(y_pred), axis = 1)
            else: y_pred = (y_pred > 0.5)
            f1 = f1_score(y_true, y_pred, range(len(labels)), average = None)
            for c in range(len(f1)):
                print("[F1] " + str(labels[c]) + ": {:.5f}".format(f1[c]))
        return roc, ap, f1
######################################################
# Get best embeddings
#def get_embeddings(model, data_loader, device):
@torch.no_grad()
def get_embeddings(model, data, device):
    """
    Compute node embeddings for the whole graph with the trained model.

    Args
        - model (torch object): trained model (set to eval mode here)
        - data (Data object): graph with .x features and .edge_index
        - device (torch object): cpu or cuda
    Return
        - tensor of embeddings, one row per node (gradients disabled)
    """
    model.eval()
    data = data.to(device)
    embeddings = model(data.x, data.edge_index)
    print(embeddings.shape)
    return embeddings
| 7,302
| 32.810185
| 144
|
py
|
SubGNN
|
SubGNN-main/prepare_dataset/model.py
|
# Pytorch
import torch
import torch.nn as nn
from torch.nn import Linear, LayerNorm, ReLU
from torch_geometric.nn import GINConv, GCNConv
import torch.nn.functional as F
# General
import numpy as np
import torch
import utils
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class TrainNet(nn.Module):
    """Two-layer GNN (GIN or GCN) producing node embeddings for link prediction.

    Args
        - nfeat: input feature dimension
        - nhid: hidden dimension
        - nclass: output (embedding) dimension
        - conv_type: "gin" or "gcn"
        - dropout: dropout probability applied between the two layers
    """

    def __init__(self, nfeat, nhid, nclass, conv_type, dropout):
        super(TrainNet, self).__init__()
        if conv_type == "gin":
            # GIN layers each wrap a single-linear MLP
            self.conv1 = GINConv(nn.Sequential(nn.Linear(nfeat, nhid)))
            self.conv2 = GINConv(nn.Sequential(nn.Linear(nhid, nclass)))
        if conv_type == "gcn":
            self.conv1 = GCNConv(nfeat, nhid)
            self.conv2 = GCNConv(nhid, nclass)
        self.dropout = dropout

    def forward(self, x, edge_index):
        # conv -> ReLU -> dropout -> conv
        hidden = F.relu(self.conv1(x, edge_index))
        hidden = F.dropout(hidden, p=self.dropout, training=self.training)
        return self.conv2(hidden, edge_index)
| 1,045
| 27.27027
| 69
|
py
|
SubGNN
|
SubGNN-main/prepare_dataset/config_prepare_dataset.py
|
from pathlib import Path
import sys
sys.path.insert(0, '..') # add config to path
import config as general_config
# Output directory ('density' as an example)
DATASET_DIR = Path(general_config.PROJECT_ROOT) / "density"
# Flags controlling which pipeline stages run
GENERATE_SYNTHETIC_G = True # whether to generate synthetic graph with below specified properties
GENERATE_NODE_EMB = True # whether to generate node embeddings
# Random Seed
RANDOM_SEED = 42
# Parameters for generating synthetic subgraphs with specific properties
DESIRED_PROPERTY = "density"
BASE_GRAPH_TYPE = "barabasi_albert"
SUBGRAPH_TYPE = "bfs"
N_SUBGRAPHS = 250            # number of subgraphs to generate
N_CONNECTED_COMPONENTS = 1   # connected components per subgraph
N_SUBGRAPH_NODES = 20        # nodes per subgraph
FEATURES_TYPE = "one_hot"
N = 1000                     # nodes in the base graph
P = 0.5
Q = 0
M = 5                        # barabasi-albert attachment parameter
N_BINS = 3                   # number of property bins / class labels
SUBGRAPH_GENERATOR = "complete"
MODIFY_GRAPH_FOR_PROPERTIES = True
# Target property ranges (one entry per bin) with matching tolerances
DENSITY_EPSILON = 0.01
DENSITY_RANGE = [0.05, 0.25, 0.45]
CUT_RATIO_EPSILON = 0.001
CUT_RATIO_RANGE = [0.005, 0.0125, 0.02]
K_HOPS_RANGE = [0.12, 0.5, 1.0]
BA_P_RANGE = [0.1, 0.5, 0.9]
CC_RANGE = [1, 1, 1, 1, 5, 6, 7, 8, 9, 10]
MAX_TRIES = 100              # attempts before giving up on a property target
# Parameters for training node embeddings for base graph
# (lists define the hyperparameter search space used by train_node_emb)
CONV = "graphsaint_gcn"
MINIBATCH = "GraphSaint"
POSSIBLE_BATCH_SIZES = [512, 1024]
POSSIBLE_HIDDEN = [128, 256]
POSSIBLE_OUTPUT = [64]
POSSIBLE_LR = [0.001, 0.005]
POSSIBLE_WD = [5e-4, 5e-5]
POSSIBLE_DROPOUT = [0.4, 0.5]
POSSIBLE_NB_SIZE = [-1]      # NeighborSampler only
POSSIBLE_NUM_HOPS = [1]
POSSIBLE_WALK_LENGTH = [32]  # GraphSaint only
POSSIBLE_NUM_STEPS = [32]    # GraphSaint only
EPOCHS = 100
# Flags for precomputing similarity metrics
CALCULATE_SHORTEST_PATHS = True # Calculate pairwise shortest paths between all nodes in the graph
CALCULATE_DEGREE_SEQUENCE = True # Create a dictionary containing degrees of the nodes in the graph
CALCULATE_EGO_GRAPHS = True # Calculate the 1-hop ego graph associated with each node in the graph
OVERRIDE = False # Overwrite a similarity file even if it exists
N_PROCESSSES = 4 # Number of cores to use for multi-processsing when precomputing similarity metrics
| 1,940
| 28.409091
| 100
|
py
|
SubGNN
|
SubGNN-main/prepare_dataset/prepare_dataset.py
|
# General
import numpy as np
import random
import typing
import logging
from collections import Counter, defaultdict
import config_prepare_dataset as config
import os
# Create the dataset output directory up front so downstream writers can rely on it.
# exist_ok=True avoids the check-then-create race of the original os.path.exists guard.
os.makedirs(config.DATASET_DIR, exist_ok=True)
import train_node_emb
# Pytorch
import torch
from torch_geometric.data import Data
from torch_geometric.utils import from_networkx
# NetworkX
import networkx as nx
from networkx.generators.random_graphs import barabasi_albert_graph, extended_barabasi_albert_graph
from networkx.generators.duplication import duplication_divergence_graph
class SyntheticGraph():
def __init__(self, base_graph_type: str, subgraph_type: str,
        features_type: str, base_graph=None, feature_matrix=None, **kwargs):
    """
    Build a synthetic base graph with labeled subgraphs.

    Args
    - base_graph_type (str): generator for the base graph (e.g. 'barabasi_albert')
    - subgraph_type (str): subgraph sampling strategy ('random', 'bfs', 'staple', 'plant')
    - features_type (str): node feature initialization ('one_hot' or 'constant')
    - base_graph, feature_matrix: accepted but not used in this constructor
      # NOTE(review): confirm whether pre-built graphs were meant to be supported
    - kwargs: forwarded to every generation step
    """
    self.base_graph_type = base_graph_type
    self.subgraph_type = subgraph_type
    self.features_type = features_type
    self.graph = self.generate_base_graph(**kwargs)
    # Subgraph generation may grow or rewire self.graph (stapling/planting).
    self.subgraphs = self.generate_and_add_subgraphs(**kwargs)
    self.subgraph_labels = self.generate_subgraph_labels(**kwargs)
    # Features are created last so the matrix matches the final node count.
    self.feature_matrix = self.initialize_features(**kwargs)
def generate_base_graph(self, **kwargs):
    """
    Generate the base graph.
    Return
    - G (networkx object): base graph
    """
    graph_kind = self.base_graph_type
    if graph_kind == 'barabasi_albert':
        n_nodes = kwargs.get('n', 500)
        n_attach = kwargs.get('m', 5)
        return barabasi_albert_graph(n_nodes, n_attach, seed=config.RANDOM_SEED)
    if graph_kind == 'duplication_divergence_graph':
        n_nodes = kwargs.get('n', 500)
        retention_p = kwargs.get('p', 0.5)
        return duplication_divergence_graph(n_nodes, retention_p, seed=config.RANDOM_SEED)
    raise Exception('The base graph you specified is not implemented')
def initialize_features(self, **kwargs):
"""
Initialize node features in base graph.
Return
- Numpy matrix
"""
n_nodes = len(self.graph.nodes)
if self.features_type == 'one_hot':
return np.eye(n_nodes, dtype=int)
elif self.features_type == 'constant':
n_features = kwargs.pop('n_features', 20)
return np.full((n_nodes, n_features), 1)
else:
raise Exception('The feature initialization you specified is not implemented')
def generate_and_add_subgraphs(self, **kwargs):
    """
    Generate and add subgraphs to the base graph.

    Args (via kwargs)
    - n_subgraphs (int): number of subgraphs to create (default 3)
    - n_subgraph_nodes (int): nodes per connected component (default 5)
    - n_connected_components (int): connected components per subgraph (default 1)
    - modify_graph_for_properties (bool): rewire graph toward desired property
    - desired_property (str): property driving generation (e.g. 'coreness')
    Return
    - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
    """
    n_subgraphs = kwargs.pop('n_subgraphs', 3)
    n_nodes_in_subgraph = kwargs.pop('n_subgraph_nodes', 5)
    n_connected_components = kwargs.pop('n_connected_components', 1)
    modify_graph_for_properties = kwargs.pop('modify_graph_for_properties', False)
    desired_property = kwargs.get('desired_property', None)
    if self.subgraph_type == 'random':
        subgraphs = self._get_subgraphs_randomly(n_subgraphs, n_nodes_in_subgraph, **kwargs)
    elif self.subgraph_type == 'bfs':
        subgraphs = self._get_subgraphs_by_bfs(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)
    elif self.subgraph_type == 'staple':
        subgraphs = self._get_subgraphs_by_k_hops(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)
    elif self.subgraph_type == 'plant':
        if desired_property == 'coreness':
            subgraphs = self._get_subgraphs_by_coreness(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)
        else:
            subgraphs = self._get_subgraphs_by_planting(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)
    else:
        raise Exception('The subgraph generation you specified is not implemented')
    if modify_graph_for_properties:
        self._modify_graph_for_desired_subgraph_properties(subgraphs, **kwargs)
    # BUG FIX: _relabel_nodes restricts self.graph to its largest connected
    # component and renumbers the nodes consecutively; it returns the subgraph
    # node lists remapped to the new ids. The original code discarded that
    # return value, leaving self.subgraphs with stale (pre-relabel) node ids
    # that no longer match self.graph.
    subgraphs = self._relabel_nodes(subgraphs, **kwargs)
    return subgraphs
def _get_subgraphs_randomly(self, n_subgraphs, n_nodes_in_subgraph, **kwargs):
"""
Randomly generates subgraphs of size n_nodes_in_subgraph
Args
- n_subgraphs (int): number of subgraphs
- n_nodes_in_subgraph (int): number of nodes in each subgraph
Return
- subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
"""
subgraphs = []
for s in range(n_subgraphs):
sampled_nodes = random.sample(self.graph.nodes, n_nodes_in_subgraph)
subgraphs.append(sampled_nodes)
return subgraphs
def staple_component_to_graph(self, n_nodes_in_subgraph, graph_root_node, **kwargs):
    """
    Staple a connected component to a graph.
    Args
    - n_nodes_in_subgraph (int): number of nodes in each subgraph
    - graph_root_node (int): node in the base graph that the component should be "stapled" to
    Return
    - cc_node_ids (list): nodes in a connected component
    - cc_root_node (int): node in the connected component (subgraph) to connect with the graph_root_node
    """
    # Create new connected component for the node in base graph
    con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)
    # nx.disjoint_union appends the second graph's nodes after the first's,
    # so the new component will occupy exactly this consecutive id range.
    # NOTE(review): this assumes generate_subgraph returns exactly
    # n_nodes_in_subgraph nodes, which is not true for 'star' (n+1 nodes) — confirm.
    cc_node_ids = list(range(len(self.graph.nodes), len(self.graph.nodes) + n_nodes_in_subgraph ))
    # Staple the connected component to the base graph
    joined_graph = nx.disjoint_union(self.graph, con_component)
    cc_root_node = random.sample(cc_node_ids, 1)[0]
    joined_graph.add_edge(graph_root_node, cc_root_node)
    self.graph = joined_graph.copy()
    return cc_node_ids, cc_root_node
def _get_subgraphs_by_k_hops(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs):
    """
    Generate subgraphs that are k hops apart, staple each subgraph to the base graph by adding edge between a random node
    from the subgraph and a random node from the base graph
    Args
    - n_subgraphs (int): number of subgraphs
    - n_nodes_in_subgraph (int): number of nodes in each subgraph
    - n_connected_components (int): number of connected components in each subgraph
    Return
    - validated_subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
    """
    diameter = nx.diameter(self.graph)
    # Scale the configured hop fractions by the graph's diameter.
    k_hops_range = [int(diameter * k) for k in config.K_HOPS_RANGE]
    p_range = [float(p) for p in config.BA_P_RANGE]
    cc_range = [int(cc) for cc in config.CC_RANGE]
    # shuffle_cc: sample the CC count per subgraph instead of using a fixed value.
    shuffle_cc = False
    if n_connected_components == None: shuffle_cc = True
    print("DIAMETER: ", diameter)
    print("K-HOPS RANGE: ", k_hops_range)
    print("N CONNECTED COMPONENTS: ", n_connected_components)
    subgraphs = []
    # NOTE(review): this is a live NodeView — it grows as components are stapled
    # on below, so later root samples can also pick newly added nodes; confirm intended.
    # random.sample on a NodeView also relies on pre-3.11 Python behavior.
    original_node_ids = self.graph.nodes
    for s in range(n_subgraphs):
        curr_subgraph = []
        seen_nodes = []
        all_cc_start_nodes = []
        # Pick a hop distance and its paired BA rewiring probability (same index).
        k_hops = random.sample(k_hops_range, 1)[0]
        p = p_range[k_hops_range.index(k_hops)]
        kwargs['p'] = p
        # Randomly select a node from base graph
        graph_root_node = random.sample(original_node_ids, 1)[0]
        seen_nodes.append(graph_root_node)
        cc_node_ids, cc_root_node = self.staple_component_to_graph(n_nodes_in_subgraph, graph_root_node, **kwargs)
        curr_subgraph.extend(cc_node_ids)
        seen_nodes.extend(cc_node_ids)
        all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
        # Get nodes that are k hops away
        n_hops_paths = nx.single_source_shortest_path_length(self.graph, graph_root_node, cutoff=k_hops)
        candidate_nodes = [node for node in n_hops_paths if self.is_k_hops_from_all_cc(node, all_cc_start_nodes, k_hops) and node not in seen_nodes]
        # Fall back to the farthest reachable nodes when no exact-distance candidate exists.
        if len(candidate_nodes) == 0: candidate_nodes = [node for node, length in n_hops_paths.items() if length == max(n_hops_paths.values())]
        if shuffle_cc: n_connected_components = random.sample(cc_range, 1)[0]
        for c in range(n_connected_components - 1):
            new_graph_root_node = random.sample(candidate_nodes, 1)[0] # choose a random node that is k hops away
            seen_nodes.append(new_graph_root_node)
            cc_node_ids, cc_root_node = self.staple_component_to_graph(n_nodes_in_subgraph, new_graph_root_node, **kwargs)
            curr_subgraph.extend(cc_node_ids)
            seen_nodes.extend(cc_node_ids)
            all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
        if len(curr_subgraph) >= n_nodes_in_subgraph * n_connected_components:
            actual_num_cc = nx.number_connected_components(self.graph.subgraph(curr_subgraph))
            if shuffle_cc and actual_num_cc in config.CC_RANGE: subgraphs.append(curr_subgraph)
            elif not shuffle_cc and actual_num_cc > 1: subgraphs.append(curr_subgraph) # must have >1 CC
    # Validate that subgraphs have the desired number of CCs
    # (re-checked here because later stapling can merge earlier components).
    validated_subgraphs = []
    for s in subgraphs:
        actual_num_cc = nx.number_connected_components(self.graph.subgraph(s))
        if shuffle_cc and actual_num_cc in config.CC_RANGE: validated_subgraphs.append(s)
        elif not shuffle_cc and actual_num_cc > 1: validated_subgraphs.append(s) # must have >1 CC
    print(len(validated_subgraphs))
    return validated_subgraphs
def _get_subgraphs_by_coreness(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, remove_edges=False, **kwargs):
    """
    Sample nodes from the base graph that have at least n nodes with k core. Merge the edges from the generated
    subgraph with the edges from the base graph. Optionally, remove all other edges in the subgraphs
    Args
    - n_subgraphs (int): number of subgraphs
    - n_nodes_in_subgraph (int): number of nodes in each subgraph
    - n_connected_components (int): number of connected components in each subgraph
    - remove_edges (bool): true if should remove unmerged edges in subgraphs, false otherwise
    Return
    - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
    """
    subgraphs = []
    # Core number of every node, computed once on the current graph.
    k_core_dict = nx.core_number(self.graph)
    nodes_per_k_core = Counter(list(k_core_dict.values()))
    print(nodes_per_k_core)
    # NOTE(review): defaultdict() without a factory behaves like a plain dict;
    # the explicit membership check below is what makes this work.
    nodes_with_core_number = defaultdict()
    for n, k in k_core_dict.items():
        if k in nodes_with_core_number: nodes_with_core_number[k].append(n)
        else: nodes_with_core_number[k] = [n]
    for k in nodes_with_core_number:
        # Get nodes with core number k that have not been sampled already
        nodes_with_k_cores = nodes_with_core_number[k]
        # Sample n_subgraphs subgraphs per core number
        for s in range(n_subgraphs):
            curr_subgraph = []
            for c in range(n_connected_components):
                # Stop once the remaining pool for this core number is too small.
                if len(nodes_with_k_cores) < n_nodes_in_subgraph: break
                con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)
                cc_node_ids = random.sample(nodes_with_k_cores, n_nodes_in_subgraph)
                # Relabel subgraph to have the same ids as the randomly sampled nodes
                cc_id_mapping = {curr_id:new_id for curr_id, new_id in zip(con_component.nodes, cc_node_ids)}
                nx.relabel_nodes(con_component, cc_id_mapping, copy=False)
                if remove_edges:
                    # Remove the existing edges between nodes in the planted subgraph (except the ones to be added)
                    self.graph.remove_edges_from(self.graph.subgraph(cc_node_ids).edges)
                # Combine the base graph & subgraph. Nodes with the same ID are merged
                joined_graph = nx.compose(self.graph, con_component) #NOTE: attributes from subgraph take precedent over attributes from self.graph
                self.graph = joined_graph.copy()
                curr_subgraph.extend(cc_node_ids) # add nodes to subgraph
                # Sampled nodes are removed from the pool so they are not reused.
                nodes_with_k_cores = list(set(nodes_with_k_cores).difference(set(cc_node_ids)))
                nodes_with_core_number[k] = nodes_with_k_cores
            if len(curr_subgraph) > 0: subgraphs.append(curr_subgraph)
    return subgraphs
def _get_subgraphs_by_bfs(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs):
    """
    Sample n_connected_components number of start nodes from the base graph. Perform BFS to create subgraphs
    of size n_nodes_in_subgraph.
    Args
    - n_subgraphs (int): number of subgraphs
    - n_nodes_in_subgraph (int): number of nodes in each subgraph
    - n_connected_components (int): number of connected components in each subgraph
    Return
    - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
    """
    max_depth = kwargs.pop('max_depth', 3)
    subgraphs = []
    for s in range(n_subgraphs):
        #randomly select start nodes. # of start nodes == n connected components
        curr_subgraph = []
        start_nodes = random.sample(self.graph.nodes, n_connected_components)
        for start_node in start_nodes:
            # BFS edges come out in traversal order, so truncating keeps a
            # connected prefix rooted at start_node.
            edges = nx.bfs_edges(self.graph, start_node, depth_limit=max_depth)
            nodes = [start_node] + [v for u, v in edges]
            nodes = nodes[:n_nodes_in_subgraph] #limit nodes to n_nodes_in_subgraph
            if max(nodes) > max(self.graph.nodes): print(max(nodes), max(self.graph.nodes))
            assert max(nodes) <= max(self.graph.nodes)
            assert nx.is_connected(self.graph.subgraph(nodes)) #check to see if selected nodes represent a connected component
            curr_subgraph.extend(nodes)
        subgraphs.append(curr_subgraph)
    # Sanity check: every sampled node id must exist in the base graph.
    seen = []
    for g in subgraphs:
        seen += g
    assert max(seen) <= max(self.graph.nodes)
    return subgraphs
def generate_subgraph(self, n_nodes_in_subgraph, **kwargs):
    """
    Generate a subgraph with specified properties.
    Args
    - n_nodes_in_subgraph (int): number of nodes in each subgraph
    Return
    - G (networkx object): subgraph
    """
    generator_name = kwargs.pop('subgraph_generator', 'path')
    # Fixed-shape generators that only need the node count.
    simple_builders = {
        'cycle': nx.cycle_graph,
        'path': nx.path_graph,
        'complete': nx.complete_graph,
        'star': nx.star_graph,
    }
    if generator_name in simple_builders:
        return simple_builders[generator_name](n_nodes_in_subgraph)
    if generator_name == 'house':
        return nx.house_graph()
    if generator_name == 'barabasi_albert':
        attach_m = kwargs.get('m', 5)
        return barabasi_albert_graph(n_nodes_in_subgraph, attach_m, seed=config.RANDOM_SEED)
    if generator_name == 'extended_barabasi_albert':
        attach_m = kwargs.get('m', 5)
        add_p = kwargs.get('p', 0.5)
        rewire_q = kwargs.get('q', 0)
        return extended_barabasi_albert_graph(n_nodes_in_subgraph, attach_m, add_p, rewire_q, seed=config.RANDOM_SEED)
    if generator_name == 'duplication_divergence_graph':
        retention_p = kwargs.get('p', 0.5)
        return duplication_divergence_graph(n_nodes_in_subgraph, retention_p)
    raise Exception('The subgraph generator you specified is not implemented.')
def is_k_hops_away(self, start, end, n_hops):
    """
    Check whether the start node is k hops away from the end node.
    Args
    - start (int): start node
    - end (int): end node
    - n_hops (int): k hops
    Return
    - True if the start node is k hops away from the end node, false otherwise
    """
    # Exact-distance test: compare the shortest path length to n_hops.
    return nx.shortest_path_length(self.graph, start, end) == n_hops
def is_k_hops_from_all_cc(self, cand, all_cc_start_nodes, k_hops):
"""
Check whether the candidate node is k hops away from all CC start nodes.
Args
- cand (int): candidate node
- all_cc_start_nodes (list): cc start nodes
- k_hops (int): k hops
Return
- True if the candidate node is k hops away from all CC start nodes, false otherwise
"""
for cc_start in all_cc_start_nodes:
if not self.is_k_hops_away(cc_start, cand, k_hops):
return False
return True
def _get_subgraphs_by_stapling(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs):
    """
    Generate n subgraphs, staple each subgraph to the base graph by adding an edge between random node
    from the subgraph and a random node from the base graph.
    Args
    - n_subgraphs (int): number of subgraphs
    - n_nodes_in_subgraph (int): number of nodes in each subgraph
    - n_connected_components (int): number of connected components in each subgraph
    Return
    - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
    """
    # NOTE(review): this method appears unused — subgraph_type 'staple' routes to
    # _get_subgraphs_by_k_hops in generate_and_add_subgraphs; confirm before relying on it.
    k_core_to_sample = kwargs.pop('k_core_to_sample', -1)
    k_hops = kwargs.pop('k_hops', -1)
    subgraphs = []
    original_node_ids = self.graph.nodes
    for s in range(n_subgraphs):
        curr_subgraph = []
        all_cc_start_nodes = []
        for c in range(n_connected_components):
            con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)
            graph_root_node = random.sample(original_node_ids, 1)[0]
            if c > 0 and k_hops != -1:
                # make sure to sample the next node k hops away from the previously sampled root node
                # and check to see that the selected start node is k hops away from all previous start nodes
                n_hops_paths = nx.single_source_shortest_path_length(self.graph, cc_root_node, cutoff=k_hops)
                candidate_nodes = [node for node,length in n_hops_paths.items()]
                random.shuffle(candidate_nodes)
                candidate_nodes = [cand for cand in candidate_nodes if self.is_k_hops_from_all_cc(cand, all_cc_start_nodes, k_hops)]
                if len(candidate_nodes) == 0:
                    raise Exception('There are no nodes that are k hops away from all other CC start nodes.')
                cc_root_node = random.sample(candidate_nodes, 1)[0]
                all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
                # NOTE(review): cc_node_ids is NOT recomputed in this branch, so
                # curr_subgraph.extend(cc_node_ids) below reuses the ids from a
                # previous iteration instead of the freshly stapled component — likely a bug.
            elif k_core_to_sample != -1:
                k_core_dict = nx.core_number(self.graph)
                nodes_with_core_number = [node for node, core_num in k_core_dict.items()if core_num == k_core_to_sample]
                cc_root_node = random.sample(nodes_with_core_number, 1)[0]
                all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
                # NOTE(review): same issue here — cc_node_ids is undefined on the first
                # iteration of this branch (NameError) and stale afterwards.
            else: # if we're not trying to sample each CC k hops away OR if it's the first time we sample a CC,
                  # just randomly sample a start node from the graph
                #randomly sample root node where the CC will be attached
                cc_node_ids = list(range(len(self.graph.nodes), len(self.graph.nodes) + n_nodes_in_subgraph ))
                cc_root_node = random.sample(cc_node_ids, 1)[0]
                all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs
            #combine the generated subgraph & the graph
            joined_graph = nx.disjoint_union(self.graph, con_component)
            # add an edge between one node in the graph & subgraph
            joined_graph.add_edge(graph_root_node, cc_root_node)
            self.graph = joined_graph.copy()
            #add connected component to IDs for current subgraph
            curr_subgraph.extend(cc_node_ids)
        subgraphs.append(curr_subgraph)
    return subgraphs
def _get_subgraphs_by_planting(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, remove_edges=False, **kwargs):
    """
    Randomly sample nodes from base graph that will be in each subgraph. Merge the edges from the generated
    subgraph with the edges from the base graph. Optionally, remove all other edges in the subgraphs
    Args
    - n_subgraphs (int): number of subgraphs
    - n_nodes_in_subgraph (int): number of nodes in each subgraph
    - n_connected_components (int): number of connected components in each subgraph
    - remove_edges (bool): true if should remove unmerged edges in subgraphs, false otherwise
    Return
    - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
    """
    k_core_to_sample = kwargs.pop('k_core_to_sample', -1)
    subgraphs = []
    for s in range(n_subgraphs):
        curr_subgraph = []
        for c in range(n_connected_components):
            con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)
            #randomly sample which nodes from the base graph will be the subgraph
            if k_core_to_sample != -1:
                # Restrict sampling to nodes with exactly the requested core number.
                k_core_dict = nx.core_number(self.graph)
                nodes_with_core_number = [node for node, core_num in k_core_dict.items()if core_num == k_core_to_sample]
                cc_node_ids = random.sample(nodes_with_core_number, n_nodes_in_subgraph)
            else:
                cc_node_ids = random.sample(self.graph.nodes, n_nodes_in_subgraph)
            #relabel subgraph to have the same ids as the randomly sampled nodes
            cc_id_mapping = {curr_id:new_id for curr_id, new_id in zip(con_component.nodes, cc_node_ids)}
            nx.relabel_nodes(con_component, cc_id_mapping, copy=False)
            if remove_edges:
                #remove the existing edges between nodes in the planted subgraph (except the ones to be added)
                self.graph.remove_edges_from(self.graph.subgraph(cc_node_ids).edges)
            # combine the base graph & subgraph. Nodes with the same ID are merged
            joined_graph = nx.compose(self.graph, con_component) #NOTE: attributes from subgraph take precedent over attributes from self.graph
            self.graph = joined_graph.copy()
            curr_subgraph.extend(cc_node_ids)
        subgraphs.append(curr_subgraph)
    return subgraphs
def _get_property(self, subgraph, subgraph_property):
    """
    Compute the value of a specified property.
    Args
    - subgraph (networkx object): subgraph
    - subgraph_property (str): desired property ('density', 'cut_ratio', 'coreness', or 'cc')
    Return
    - Value of the property for this subgraph
    """
    if subgraph_property == 'density':
        return nx.density(subgraph)
    elif subgraph_property == 'cut_ratio':
        # Boundary edges normalized by the maximum possible number of edges
        # between the subgraph and the rest of the graph.
        nodes_except_subgraph = set(self.graph.nodes).difference(set(subgraph.nodes))
        n_boundary_edges = len(list(nx.edge_boundary(self.graph, subgraph.nodes, nodes_except_subgraph)))
        n_nodes = len(list(self.graph.nodes))
        n_subgraph_nodes = len(list(subgraph.nodes))
        return n_boundary_edges / (n_subgraph_nodes * (n_nodes - n_subgraph_nodes))
    elif subgraph_property == 'coreness':
        # Average core number over the subgraph's nodes.
        all_cores = nx.core_number(subgraph)
        avg_coreness = np.average(list(all_cores.values()))
        return avg_coreness
    elif subgraph_property == 'cc':
        return nx.number_connected_components(self.graph.subgraph(subgraph))
    else:
        # BUG FIX: corrected typo in the original error message ("specificed").
        raise Exception('The subgraph property you specified is not implemented.')
def _modify_graph_for_desired_subgraph_properties(self, subgraphs, **kwargs):
    """
    Modify the graph to achieve the desired subgraph property.
    Args
    - subgraphs (list of lists): list of subgraphs, where each subgraph is a list of nodes
    """
    desired_property = kwargs.get('desired_property', 'density')
    # Iterate through subgraphs
    for s in subgraphs:
        # NOTE(review): in networkx 2.x subgraph() returns a live view, so edge
        # add/remove on self.graph below is reflected when the property is
        # re-read from `subgraph` each loop iteration — confirm nx version.
        subgraph = self.graph.subgraph(s)
        # DENSITY
        if desired_property == 'density':
            # Randomly select a density value
            desired_prop_value = random.sample(config.DENSITY_RANGE, 1)[0]
            n_tries = 0
            # Greedy rewiring: add/remove edges until within epsilon or out of tries.
            while True:
                curr_subg_property = self._get_property(subgraph, desired_property)
                if abs(curr_subg_property - desired_prop_value) < config.DENSITY_EPSILON: break
                if n_tries >= config.MAX_TRIES: break
                if curr_subg_property > desired_prop_value: #remove edges to decrease density
                    sampled_edge = random.sample(subgraph.edges, 1)[0]
                    self.graph.remove_edge(*sampled_edge)
                else: # add edges to increase density
                    # NOTE(review): the sampled pair may already be connected,
                    # in which case this try is a no-op.
                    sampled_nodes = random.sample(subgraph.nodes, 2)
                    self.graph.add_edge(*sampled_nodes)
                n_tries += 1
        # CUT RATIO
        elif desired_property == 'cut_ratio':
            # Randomly select a cut ratio value
            desired_prop_value = random.sample(config.CUT_RATIO_RANGE, 1)[0]
            n_tries = 0
            while True:
                curr_subg_property = self._get_property(subgraph, desired_property)
                if abs(curr_subg_property - desired_prop_value) < config.CUT_RATIO_EPSILON: break
                if n_tries >= config.MAX_TRIES: break
                # get edges on boundary
                nodes_except_subgraph = set(self.graph.nodes).difference(set(subgraph.nodes))
                subgraph_boundary_edges = list(nx.edge_boundary(self.graph, subgraph.nodes, nodes_except_subgraph))
                if curr_subg_property > desired_prop_value: # high cut ratio -> too many edges
                    edge_to_remove = random.sample(subgraph_boundary_edges, 1)[0]
                    self.graph.remove_edge(*edge_to_remove)
                else: # low cut ratio -> too few edges -> add edge
                    sampled_subgraph_node = random.sample(subgraph.nodes, 1)[0]
                    sampled_rest_graph_node = random.sample(nodes_except_subgraph,1)[0]
                    self.graph.add_edge(sampled_subgraph_node, sampled_rest_graph_node)
                n_tries += 1
        elif desired_property == 'coreness' or desired_property == 'cc':
            # These properties are enforced at generation time, not by rewiring.
            continue
        else:
            raise Exception('Other properties have not yet been implemented')
def _relabel_nodes(self, subgraphs, **kwargs):
    """
    Relabel nodes in the graph and subgraphs to ensure that all nodes are indexed consecutively

    Restricts self.graph to its largest connected component, renumbers the
    surviving nodes 0..n-1, and returns the subgraph node lists remapped to
    the new ids (nodes outside the largest component are dropped).
    """
    largest_cc = max(nx.connected_components(self.graph), key=len)
    removed_nodes = set(list(self.graph.nodes)).difference(set(largest_cc))
    print("Original graph: %d, Largest cc: %d, Removed nodes: %d" % (len(self.graph.nodes), len(largest_cc), len(removed_nodes)))
    self.graph = self.graph.subgraph(largest_cc)
    # Old-id -> new consecutive id, in the subgraph view's iteration order.
    mapping = {k: v for k, v in zip(list(self.graph.nodes), range(len(self.graph.nodes)))}
    self.graph = nx.relabel_nodes(self.graph, mapping)
    new_subgraphs = []
    for s in subgraphs:
        new_s = [mapping[n] for n in s if n not in removed_nodes]
        new_subgraphs.append(new_s)
    return new_subgraphs
def generate_subgraph_labels(self, **kwargs):
    """
    Generate subgraph labels
    Return
    - labels (list): subgraph labels
    """
    # Make sure base graph is connected
    if nx.is_connected(self.graph) == False:
        max_cc = max(nx.connected_components(self.graph), key=len)
        self.graph = self.graph.subgraph(max_cc)
    # Setup: only the list matching desired_property is actually filled below.
    densities = []
    cut_ratios = []
    coreness = []
    cc = []
    desired_property = kwargs.get('desired_property', 'density')
    for subgraph_nodes in self.subgraphs:
        subgraph = self.graph.subgraph(subgraph_nodes).copy()
        if desired_property == 'density':
            value = self._get_property(subgraph, desired_property)
            densities.append(value)
        elif desired_property == 'cut_ratio':
            value = self._get_property(subgraph, desired_property)
            cut_ratios.append(value)
        elif desired_property == 'coreness':
            value = self._get_property(subgraph, desired_property)
            coreness.append(value)
        elif desired_property == 'cc':
            value = self._get_property(subgraph, desired_property)
            cc.append(value)
    # Bin the per-subgraph property values and convert bin indices to letters.
    if desired_property == 'density':
        bins = self.generate_bins(sorted(densities), len(config.DENSITY_RANGE))
        labels = np.digitize(densities, bins = bins)
        labels = self.convert_number_to_chr(labels)
        print(Counter(labels))
        return labels
    elif desired_property == 'cut_ratio':
        bins = self.generate_bins(sorted(cut_ratios), len(config.CUT_RATIO_RANGE))
        labels = np.digitize(cut_ratios, bins = bins)
        labels = self.convert_number_to_chr(labels)
        print(Counter(labels))
        return labels
    elif desired_property == 'coreness':
        n_bins = kwargs.pop('n_bins', 5)
        bins = self.generate_bins(sorted(coreness), n_bins)
        labels = np.digitize(coreness, bins = bins)
        labels = self.convert_number_to_chr(labels)
        print(Counter(labels))
        return labels
    elif desired_property == 'cc':
        print(Counter(cc))
        bins = [1, 5] # 1 CC vs. >1 CC
        labels = np.digitize(cc, bins = bins)
        labels = self.convert_number_to_chr(labels)
        print(Counter(labels))
        # Both label classes must be represented.
        assert len(list(Counter(labels).keys())) == len(bins)
        return labels
    else:
        raise Exception('Other properties have not yet been implemented')
def generate_bins(self, values, n_bins):
    """
    Generate bins for given subgraph values.
    Args
    - values (list): sorted values for each subgraph
    - n_bins (int): number of bins to split the subgraph values into
    Return
    - bins (list): cutoff values for each bin
    """
    # Quantile-style cutoffs: take the value sitting at each 1/n_bins fraction
    # of the sorted list, dedupe, then drop the final (max) cutoff so
    # np.digitize assigns the top values to the last bin.
    step = len(values) / float(n_bins)
    cutoffs = [values[int(step * rank) - 1] for rank in np.arange(1, n_bins + 1)]
    bins = np.unique(np.array(cutoffs))
    bins = np.delete(bins, len(bins) - 1)
    print("Bins: ", bins, "Min: ", min(values), "Max: ", max(values))
    return bins
def convert_number_to_chr(self, labels):
    """
    Convert label bins from int to str.
    Args
    - labels (list): integer subgraph labels (bin indices)
    Return
    - new_labels (list): converted subgraph labels as strings ('A', 'B', ...)
    """
    # Sort the distinct labels before assigning letters so the mapping is
    # deterministic (set iteration order is not guaranteed in general);
    # the smallest bin index always becomes 'A'.
    types = {}
    alpha_int = 65  # ASCII 'A'
    for t in sorted(set(labels)):
        types[t] = chr(alpha_int)
        alpha_int += 1
    # Convert labels through the mapping.
    return [types[l] for l in labels]
def generate_mask(n_subgraphs):
    """
    Generate train/val/test masks for the subgraphs (roughly 80/10/10 split).
    Args
    - n_subgraphs (int): number of subgraphs
    Return
    - mask (list): 0 if subgraph is in train set, 1 if in val set, 2 if in test set
    """
    # BUG FIX: random.sample() on a set is deprecated and raises TypeError on
    # Python 3.11+. Shuffle an index list instead and slice it into splits.
    indices = list(range(n_subgraphs))
    random.shuffle(indices)
    n_train = int(n_subgraphs * 0.8)
    n_val = (n_subgraphs - n_train) // 2
    # Sets give O(1) membership tests in the loop below.
    train_idx = set(indices[:n_train])
    val_idx = set(indices[n_train:n_train + n_val])
    mask = []
    for i in range(n_subgraphs):
        if i in train_idx: mask.append(0)
        elif i in val_idx: mask.append(1)
        else: mask.append(2)  # remaining indices form the test split
    return mask
def write_f(sub_f, sub_G, sub_G_label, mask):
    """
    Write subgraph information into the appropriate format for SubGNN (tab-delimited file where each row
    has dash-delimited nodes, subgraph label, and train/val/test label).
    Args
    - sub_f (str): file directory to save subgraph information
    - sub_G (list of lists): list of subgraphs, where each subgraph is a list of nodes
    - sub_G_label (list): subgraph labels
    - mask (list): 0 if subgraph is in train set, 1 if in val set, 2 if in test set
    """
    split_names = {0: "train", 1: "val", 2: "test"}
    with open(sub_f, "w") as fout:
        for nodes, label, split_id in zip(sub_G, sub_G_label, mask):
            node_strs = [str(v) for v in nodes]
            if not node_strs:
                continue  # skip empty subgraphs
            if split_id not in split_names:
                continue  # unknown split flag: write nothing for this row
            fout.write("\t".join(["-".join(node_strs), str(label), split_names[split_id], "\n"]))
def main():
    """Generate the synthetic dataset (edge list + subgraph file) and, optionally, node embeddings."""
    if config.GENERATE_SYNTHETIC_G:
        synthetic_graph = SyntheticGraph(base_graph_type = config.BASE_GRAPH_TYPE,
            subgraph_type = config.SUBGRAPH_TYPE,
            n_subgraphs = config.N_SUBGRAPHS,
            n_connected_components = config.N_CONNECTED_COMPONENTS,
            n_subgraph_nodes = config.N_SUBGRAPH_NODES,
            features_type = config.FEATURES_TYPE,
            n = config.N,
            p = config.P,
            q = config.Q,
            m = config.M,
            n_bins = config.N_BINS,
            subgraph_generator = config.SUBGRAPH_GENERATOR,
            modify_graph_for_properties = config.MODIFY_GRAPH_FOR_PROPERTIES,
            desired_property = config.DESIRED_PROPERTY)
        # Persist the base graph as a plain edge list (no attributes).
        nx.write_edgelist(synthetic_graph.graph, str(config.DATASET_DIR / "edge_list.txt"), data=False)
        sub_G = synthetic_graph.subgraphs
        sub_G_label = synthetic_graph.subgraph_labels
        mask = generate_mask(len(sub_G_label))
        # NOTE(review): despite the .pth extension this is a tab-delimited text
        # file written by write_f, not a torch checkpoint.
        write_f(str(config.DATASET_DIR / "subgraphs.pth"), sub_G, sub_G_label, mask)
    if config.GENERATE_NODE_EMB: train_node_emb.generate_emb()
| 36,300
| 42.63101
| 152
|
py
|
SubGNN
|
SubGNN-main/prepare_dataset/precompute_graph_metrics.py
|
# General
import networkx as nx
import sys
import argparse
import snap
from pathlib import Path
import numpy as np
import json
import os
import multiprocessing
# Our methods
import config_prepare_dataset as config
'''
Use this script to precompute information about the underlying base graph.
'''
def get_shortest_path(node_id):
    """
    Compute shortest-path distances from node_id to all reachable nodes.

    Relies on the module-level globals `snap_graph` and `node_ids` defined at
    the bottom of this file (shared with multiprocessing workers).
    Returns a dense vector of length max(node_ids)+1.
    NOTE(review): unreachable nodes keep distance 0, which is indistinguishable
    from the distance to node_id itself — confirm downstream handling.
    """
    NIdToDistH = snap.TIntH()
    path_len = snap.GetShortPath(snap_graph, int(node_id), NIdToDistH)
    paths = np.zeros((max(node_ids) + 1)) #previously was n_nodes
    for dest_node in NIdToDistH:
        paths[dest_node] = NIdToDistH[dest_node]
    return paths
def calculate_stats():
    """
    Precompute similarity inputs for SubGNN: 1-hop ego graphs, node degree
    sequence, and the all-pairs shortest-path matrix. Each artifact is written
    to config.DATASET_DIR and skipped if it already exists (unless OVERRIDE).
    """
    # create similarities folder
    if not os.path.exists(config.DATASET_DIR / 'similarities'):
        os.makedirs(config.DATASET_DIR / 'similarities')
    if config.CALCULATE_EGO_GRAPHS:
        print(f'Calculating ego graphs for {config.DATASET_DIR }...')
        if not (config.DATASET_DIR / 'ego_graphs.txt').exists() or config.OVERRIDE:
            ego_graph_dict = {}
            for node in snap_graph.Nodes():
                node_id = int(node.GetId())
                # 1-hop neighborhood of node_id (hop distance exactly 1).
                nodes_vec = snap.TIntV()
                snap.GetNodesAtHop(snap_graph, node_id, 1, nodes_vec, False)
                ego_graph_dict[node_id] = list(nodes_vec)
            with open(str(config.DATASET_DIR / 'ego_graphs.txt'), 'w') as f:
                json.dump(ego_graph_dict, f)
    if config.CALCULATE_DEGREE_SEQUENCE:
        print(f'Calculating degree sequences for {config.DATASET_DIR}...')
        if not (config.DATASET_DIR / 'degree_sequence.txt').exists() or config.OVERRIDE:
            n_nodes = len(list(snap_graph.Nodes()))
            degrees = {}
            InDegV = snap.TIntPrV()
            snap.GetNodeInDegV(snap_graph, InDegV)
            OutDegV = snap.TIntPrV()
            snap.GetNodeOutDegV(snap_graph, OutDegV)
            # NOTE(review): only the in-degree value is stored; OutDegV is
            # computed but unused beyond the zip pairing (the graph is loaded
            # as undirected PUNGraph, where in == out) — confirm.
            for item1, item2 in zip(InDegV,OutDegV) :
                degrees[item1.GetVal1()] = item1.GetVal2()
            with open(str(config.DATASET_DIR / 'degree_sequence.txt'), 'w') as f:
                json.dump(degrees, f)
    if config.CALCULATE_SHORTEST_PATHS:
        print(f'Calculating shortest paths for {config.DATASET_DIR}...')
        if not (config.DATASET_DIR /'shortest_path_matrix.npy').exists() or config.OVERRIDE:
            # One BFS per source node, fanned out across worker processes.
            with multiprocessing.Pool(processes=config.N_PROCESSSES) as pool:
                shortest_paths = pool.map(get_shortest_path, node_ids)
            all_shortest_paths = np.stack(shortest_paths)
            np.save(str(config.DATASET_DIR / 'shortest_path_matrix.npy'), all_shortest_paths)
# get SNAP graph for the specified dataset
snap_graph = snap.LoadEdgeList(snap.PUNGraph, str(config.DATASET_DIR / 'edge_list.txt'), 0, 1)
# Sorted node ids; module-level so get_shortest_path (run in pool workers) can read them.
# NOTE(review): with the 'spawn' multiprocessing start method each worker
# re-imports this module and re-loads the graph — confirm 'fork' is assumed.
node_ids = np.sort([node.GetId() for node in snap_graph.Nodes()])
# calculate graph metrics
calculate_stats()
| 2,858
| 35.189873
| 94
|
py
|
SubGNN
|
SubGNN-main/prepare_dataset/preprocess.py
|
# General
import numpy as np
import random
import pickle
from collections import Counter
# Pytorch
import torch
from torch_geometric.data import Data
from torch_geometric.utils import from_networkx, negative_sampling
from torch_geometric.utils.convert import to_networkx
# NetworkX
import networkx as nx
from networkx.relabel import convert_node_labels_to_integers, relabel_nodes
from networkx.generators.random_graphs import barabasi_albert_graph
# Sklearn
from sklearn.feature_extraction.text import CountVectorizer
import sys
sys.path.insert(0, '../') # add config to path
import config_prepare_dataset as config
import utils
def read_graphs(edge_f):
    """
    Read in base graph and create a Data object for Pytorch geometric
    Args
    - edge_f (str): directory of edge list
    Return
    - all_data (Data object): Data object of base graph
    """
    nx_G = nx.read_edgelist(edge_f, nodetype = int)
    # One-hot identity features, one row per node.
    # NOTE(review): np.eye(len(nodes)) assumes node ids are consecutive
    # 0..n-1; _relabel_nodes upstream is what guarantees this — confirm.
    feat_mat = np.eye(len(nx_G.nodes), dtype=int)
    print("Graph density", nx.density(nx_G))
    all_data = create_dataset(nx_G, feat_mat)
    print(all_data)
    assert nx.is_connected(nx_G)
    assert len(nx_G) == all_data.x.shape[0]
    return all_data
def create_dataset(G, feat_mat, split=False):
    """
    Build a PyTorch Geometric Data object for the base graph with a random
    80/10/10 train/val/test split over the edges.

    Args
        - G (object): NetworkX graph
        - feat_mat (tensor): per-node feature matrix
        - split (bool): accepted for API compatibility; not used in this body
    Return
        - new_G (Data object): Data object with features, edge_index and
          boolean train/val/test edge masks
    """
    edge_index = torch.tensor(list(G.edges)).t().contiguous()
    x = torch.tensor(feat_mat, dtype=torch.float)  # node feature matrix
    y = torch.ones(edge_index.shape[1])  # one (positive) label per edge
    num_classes = len(torch.unique(y))
    # Random permutation of edge positions, carved into 80%/10%/10% chunks.
    n_edges = len(y)
    perm = np.arange(n_edges)
    np.random.shuffle(perm)
    cut80 = 8 * n_edges // 10
    cut90 = 9 * n_edges // 10
    masks = []
    for chosen in (perm[:cut80], perm[cut80:cut90], perm[cut90:]):
        mask = torch.zeros(n_edges, dtype=torch.bool)
        mask[chosen] = 1
        masks.append(mask)
    train_mask, val_mask, test_mask = masks
    new_G = Data(x=x, y=y, num_classes=num_classes, edge_index=edge_index,
                 train_mask=train_mask, val_mask=val_mask, test_mask=test_mask)
    return new_G
def set_data(data, all_data, minibatch):
    """
    Build a per-minibatch Data object from one NeighborSampler batch.

    Args
        - data (tuple): (batch_size, n_id, adjs) yielded by the sampler
        - all_data (Data object): full dataset to gather features/masks from
        - minibatch (str): NeighborSampler (not used in this body)
    Return
        - data (Data object): minibatch graph with gathered node features,
          train/val edge masks and all-ones edge labels
    """
    _, node_ids, adjs = data
    batch = Data(edge_index=adjs[0], n_id=node_ids, e_id=adjs[1])
    # Gather node features and per-edge masks for the sampled subgraph.
    batch.x = all_data.x[batch.n_id]
    batch.train_mask = all_data.train_mask[batch.e_id]
    batch.val_mask = all_data.val_mask[batch.e_id]
    # NOTE(review): test_mask is not propagated here — presumably sampling is
    # only used at train/val time; confirm against callers.
    batch.y = torch.ones(len(batch.e_id))  # sampled edges are all positives
    return batch
| 3,076
| 27.490741
| 152
|
py
|
B-SOID
|
B-SOID-master/bsoid_app.py
|
from streamlit import caching
from bsoid_app import data_preprocess, extract_features, clustering, machine_learner, \
export_training, video_creator, predict
from bsoid_app.bsoid_utilities import visuals
from bsoid_app.bsoid_utilities.load_css import local_css
from bsoid_app.bsoid_utilities.load_workspace import *
def streamlit_run(pyfile):
    """Launch *pyfile* (given without its .py suffix) as a separate streamlit app."""
    command = "streamlit run " + pyfile + ".py"
    os.system(command)
# ---- Streamlit page configuration and header ----
st.set_page_config(page_title='B-SOiD v2.0', page_icon="🐁",
                   layout='wide', initial_sidebar_state='auto')
local_css("bsoid_app/bsoid_utilities/style.css")
title = "<div> <span class='bold'><span class='h1'>B-SOID</span></span> " \
        " <span class='h2'>--version 2.0 🐁</span> </div>"
st.markdown(title, unsafe_allow_html=True)
st.markdown('Step 1: Select Load Data and Preprocess. Complete the step.')
st.markdown('Step 2: Deselect Load Data and Preprocess, and select Load Previous Iteration. Fill in prompts.')
st.markdown('Step 3: Starting with Extract Features and Identify Clusters, select single procedure and progress.')
st.text('')
# ---- Sidebar workflow: each checkbox runs one stage of the B-SOiD pipeline ----
# NOTE(review): working_dir/prefix are only bound when this first box is
# checked; every later stage reads them and relies either on the NameError
# fallback below or on the user following the documented step order — confirm
# that this is the intended UX.
if st.sidebar.checkbox('Load previous iteration', False, key='l'):
    working_dir, prefix = query_workspace()
if st.sidebar.checkbox('Load data and preprocess', False, key='d'):
    try:
        # Reuse previously processed data when a workspace was loaded above.
        [_, _, _, _, _, raw_input_data, processed_input_data, sub_threshold] = load_data(working_dir, prefix)
        st.markdown('**_CHECK POINT_**: Processed a total of **{}** data files, '
                    'and compiled into a **{}** data list. Move on to '
                    '__Extract and embed features__.'.format(len(raw_input_data), processed_input_data.shape))
        if st.checkbox('Redo?', False):
            caching.clear_cache()
            processor = data_preprocess.preprocess()
            processor.compile_data()
        if st.checkbox('Show % time (possibly) occluded?', True):
            visuals.plot_bar(sub_threshold)
        if st.checkbox("Show raw vs processed data?", False):
            visuals.show_data_table(raw_input_data, processed_input_data)
    except NameError:
        # No workspace loaded yet: run preprocessing from scratch.
        processor = data_preprocess.preprocess()
        processor.compile_data()
if st.sidebar.checkbox('Extract and embed features', False, key='f'):
    [_, _, framerate, _, _, _, processed_input_data, _] = load_data(working_dir, prefix)
    extractor = extract_features.extract(working_dir, prefix, processed_input_data, framerate)
    extractor.main()
if st.sidebar.checkbox('Identify and tweak number of clusters', False, key='c'):
    [_, sampled_embeddings] = load_embeddings(working_dir, prefix)
    clusterer = clustering.cluster(working_dir, prefix, sampled_embeddings)
    clusterer.main()
if st.sidebar.checkbox('(Optional) What did B-SOiD learn?', False, key='e'):
    # Export the clustered training data for inspection.
    [sampled_features, _] = load_embeddings(working_dir, prefix)
    [_, assignments, assign_prob, soft_assignments] = load_clusters(working_dir, prefix)
    exporter = export_training.export(working_dir, prefix, sampled_features,
                                      assignments, assign_prob, soft_assignments)
    exporter.save_csv()
if st.sidebar.checkbox('Create a model', False, key='t'):
    [features, _] = load_feats(working_dir, prefix)
    [sampled_features, _] = load_embeddings(working_dir, prefix)
    [_, assignments, _, _] = load_clusters(working_dir, prefix)
    learning_protocol = machine_learner.protocol(working_dir, prefix, features, sampled_features, assignments)
    learning_protocol.main()
if st.sidebar.checkbox('Generate video snippets for interpretation', False, key='g'):
    [root_path, data_directories, framerate, pose_chosen, input_filenames, _, processed_input_data, _] \
        = load_data(working_dir, prefix)
    [_, _, _, clf, _, _] = load_classifier(working_dir, prefix)
    creator = video_creator.creator(root_path, data_directories, processed_input_data, pose_chosen,
                                    working_dir, prefix, framerate, clf, input_filenames)
    creator.main()
if st.sidebar.checkbox('Predict old/new files using a model', False, key='p'):
    [root_path, data_directories, framerate, pose_chosen, input_filenames, _, processed_input_data, _] \
        = load_data(working_dir, prefix)
    [_, _, _, clf, _, predictions] = load_classifier(working_dir, prefix)
    predictor = predict.prediction(root_path, data_directories, input_filenames, processed_input_data, working_dir,
                                   prefix, framerate, pose_chosen, predictions, clf)
    predictor.main()
if st.sidebar.checkbox('Load up analysis app (please close current browser when new browser pops up)', False):
    streamlit_run('./bsoid_app/bsoid_analysis')
| 4,644
| 57.797468
| 115
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/fig3.py
|
# Figure 3 panels: neural-data quantification.
# Renders the neural matrix twice via the neural_plot subroutine — once with
# non-frameshifted and once with frameshifted labels. List-valued arguments
# are serialized with str() for argv; presumably parsed back downstream.
import sys
import subprocess
print('\n \n \n B-SOID QUANTIFICATION \n \n \n')
# Hard-coded input workspace (.mat) and output directory for the panels.
path = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/workspace/l5neural5ms_.mat'
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/neural_data/'
print('\n DATA FROM {} \n'.format(path))
print('-' * 50)
# FIG3A
print('\n' * 1)
print('Preparing neural matrix as xxx.{} found in fig3a...'.format(fig_format))
print('\n' * 1)
algorithm = 'non-frameshifted'
c = 'Orange'
c_range = str([0, 3])
cline = 'black'
n = '4'
colorbar = '0'
p = subprocess.Popen([sys.executable, './subroutines/neural_plot.py',
                      '-p', path, '-a', algorithm,
                      '-c', c, '-r', c_range, '-n', n, '-l', cline, '-m', fig_format, '-o', outpath, '-b', colorbar])
p.communicate()
p.kill()
# Same plot with frameshift-aligned labels.
algorithm = 'frameshifted'
p = subprocess.Popen([sys.executable, './subroutines/neural_plot.py',
                      '-p', path, '-a', algorithm,
                      '-c', c, '-r', c_range, '-n', n, '-l', cline, '-m', fig_format, '-o', outpath, '-b', colorbar])
p.communicate()
p.kill()
| 1,115
| 31.823529
| 117
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/fig2.py
|
# Figure 2 panels: model performance.
# Each panel is produced by a subroutine script run in a child interpreter;
# list-valued arguments are serialized with str() for argv.
import sys
import subprocess
import os
print('\n \n \n MODEL PERFORMANCE \n \n \n')
path = '/Volumes/Elements/B-SOID/output3/'
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/model_performance/'
print('\n DATA FROM {} \n'.format(path))
print('-' * 50)
# FIG2A
# Extract still images from every group/example snippet that exists on disk;
# missing mp4s are silently skipped by the existence check.
vidpath = '/Volumes/Elements/B-SOID/datasets/'
mp4name = '080219/mp4s/'
projectname = '2019-08-02_10-56-50cut30min_1hrDeepCut_resnet50_OpenFieldHighResApr8shuffle1_1030000/'
for g in range(11):
    for e in range(5):
        mp4file = 'group_{}_example_{}.mp4'.format(g, e)
        if os.path.exists(str.join('', (vidpath, mp4name, projectname, mp4file))):
            var = str.join('', (mp4file.partition('.')[0], '_images'))
            p = subprocess.Popen([sys.executable, './subroutines/extract_images.py',
                                  '-p', vidpath, '-n', mp4name, '-f', projectname,
                                  '-g', mp4file, '-v', var])
            p.communicate()
            p.kill()
# FIG2B
print('\n' * 1)
print('Preparing confusion matrix heatmap as xxx.{} found in fig2b...'.format(fig_format))
print('\n' * 1)
# FIG2C
print('\n' * 1)
print('Preparing accuracy boxplot as xxx.{} found in fig2c...'.format(fig_format))
print('\n' * 1)
name = 'openfield_60min_N6'
k = '10'
order = str([4, 5, 7, 0, 3, 2, 1, 6, 8, 9, 10])
var = 'accuracy_kf'
algorithm = 'Randomforests'
c = str(['indianred', 'indianred',
         'goldenrod', 'goldenrod',
         'royalblue', 'royalblue', 'royalblue', 'royalblue',
         'mediumseagreen', 'mediumseagreen', 'mediumseagreen'])
# The k-fold accuracy computation is disabled; the boxplot below re-uses
# previously saved results.
# p = subprocess.Popen([sys.executable, './subroutines/kfold_accuracy.py',
#                       '-p', path, '-f', name,
#                       '-o', order, '-k', k, '-v', var])
# p.communicate()
# p.kill()
p = subprocess.Popen([sys.executable, './subroutines/accuracy_boxplot.py',
                      '-p', path, '-f', name, '-v', var,
                      '-a', algorithm, '-c', c, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
# FIG2D/G
print('\n' * 1)
print('Preparing limb trajectories as xxx.{} found in fig2d/g right...'.format(fig_format))
print('\n' * 1)
animal_index = '0'
bodyparts = str([1, 2, 3, 4])
# Frame windows expressed as minutes*60 seconds * 60 fps.
time_range = str([(20*60+42) * 60 - 1, (20*60+44) * 60])  # headgroom 2 scratch
order1 = str([0, 2])
order2 = str([1, 3])
c = str(['coral', 'cyan'])
p = subprocess.Popen([sys.executable, './subroutines/trajectory_plot.py',
                      '-p', path, '-f', name, '-i', animal_index, '-b', bodyparts, '-t', time_range,
                      '-r', order1, '-R', order2, '-c', c, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
print('\n' * 1)
print('Preparing limb trajectories as xxx.{} found in fig2d/g left...'.format(fig_format))
print('\n' * 1)
time_range = str([int((11*60+19.5)*60-1), int((11*60+21.5)*60)])
p = subprocess.Popen([sys.executable, './subroutines/trajectory_plot.py',
                      '-p', path, '-f', name, '-i', animal_index, '-b', bodyparts, '-t', time_range,
                      '-r', order1, '-R', order2, '-c', c, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
# FIG2F
print('\n' * 1)
print('Preparing coherence boxplot as xxx.{} found in fig2f...'.format(fig_format))
print('\n' * 1)
name = 'openfield_200fps'
fps = '200'
target_fps = '600'
frame_skips = str([60, 30, 12, 6, 4])
animal_index = '0'
# order = str([4, 5, 7, 0, 3, 1, 6, 8, 9, 10])
order = str([4, 5, 7, 0, 1, 6, 8, 9, 10])
time = '300000'
var = 'coherence_data'
p = subprocess.Popen([sys.executable, './subroutines/frameshift_coherence.py',
                      '-p', path, '-n', name, '-f', fps, '-F', target_fps, '-s', frame_skips,
                      '-i', animal_index, '-o', order, '-t', time, '-v', var])
p.communicate()
p.kill()
algorithm = 'Randomforests'
c = 'k'
p = subprocess.Popen([sys.executable, './subroutines/coherence_boxplot.py',
                      '-p', path, '-f', name, '-v', var,
                      '-a', algorithm, '-c', c, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
print("All xxx.{}s generated. see {} for results".format(fig_format, outpath))
print('-' * 50)
| 4,182
| 33.570248
| 101
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/figS6.py
|
# Supplementary figure S6: A2A caspase locomotion kinematics CDF panels
# (bout length, peak speed, duration) via the kinematics_cdf subroutine.
# List-valued arguments are serialized with str() for argv.
import sys
import subprocess
print('\n \n \n A2A CASPASE KINEMATICS ANALYSIS \n \n \n')
path = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/workspace/a2a_loc_Rhkin.mat'
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/kinematics_cdf/'
print('\n DATA FROM {} \n'.format(path))
print('-' * 50)
# FIGS6
print('\n' * 1)
print('Preparing head groom kinematics cdf curves as xxx.{} found in fig6b...'.format(fig_format))
print('\n' * 1)
# Locomotion bout length, control vs. A2A-caspase variables from the .mat file.
variables = str(['c_loc_len_N4_1', 'a_loc_len_N4_1'])
c = str(['deepskyblue', 'red'])
x_range = str([1, 7])
leg = '0'
p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
# Locomotion speed.
variables = str(['c_loc_speed_N4_1', 'a_loc_speed_N4_1'])
x_range = str([10, 50])
leg = '0'
p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
# Locomotion bout duration.
variables = str(['c_loc_dur_N4', 'a_loc_dur_N4'])
x_range = str([0, 4])
p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
# FIGS6
# Face-groom variant of the panels, currently disabled.
# print('\n' * 1)
# print('Preparing face groom kinematics cdf curves as xxx.{} found in fig6d...'.format(fig_format))
# print('\n' * 1)
# variables = str(['c_it_len_N4_1', 'a_it_len_N4_1'])
# c = str(['deepskyblue', 'red'])
# x_range = str([1, 5])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
#                       '-p', path, '-v', variables,
#                       '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# variables = str(['c_it_speed_N4_1', 'a_it_speed_N4_1'])
# x_range = str([7, 35])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
#                       '-p', path, '-v', variables,
#                       '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# variables = str(['c_it_dur_N4', 'a_it_dur_N4'])
# x_range = str([0, 2])
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
#                       '-p', path, '-v', variables,
#                       '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
| 2,703
| 30.08046
| 100
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/fig5.py
|
# Figure 5 panels: motion-energy comparison (MotionMapper vs. B-SOiD).
# Heatmaps and CDF curves are produced by child-interpreter subroutines;
# list-valued arguments are serialized with str() for argv.
import sys
import subprocess
print('\n \n \n B-SOID QUANTIFICATION \n \n \n')
path = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/workspace/MvsBvsS_zscore_mse3.mat'
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/motion_energy/'
print('\n DATA FROM {} \n'.format(path))
print('-' * 50)
# FIG5B
print('\n' * 1)
print('Preparing image MSE comparison matrix as xxx.{} found in fig5b...'.format(fig_format))
print('\n' * 1)
algorithm = 'MotionMapper'
c = 'Orange'
c_range = str([0, 3])
cline = 'deepskyblue'
n = '4'
colorbar = '0'
p = subprocess.Popen([sys.executable, './subroutines/immse_heatmap.py',
                      '-p', path, '-a', algorithm,
                      '-c', c, '-r', c_range, '-n', n, '-l', cline, '-m', fig_format, '-o', outpath, '-b', colorbar])
p.communicate()
p.kill()
# Same heatmap for the B-SOiD segmentation, different line color.
algorithm = 'B-SOiD'
cline = 'hotpink'
p = subprocess.Popen([sys.executable, './subroutines/immse_heatmap.py',
                      '-p', path, '-a', algorithm,
                      '-c', c, '-r', c_range, '-n', n, '-l', cline, '-m', fig_format, '-o', outpath, '-b', colorbar])
p.communicate()
p.kill()
# FIG5C
print('\n' * 1)
print('Preparing image MSE cdf curves as xxx.{} found in fig5c...'.format(fig_format))
print('\n' * 1)
# c = str(['deepskyblue', 'hotpink', 'gray'])
c = str(['deepskyblue', 'hotpink', 'black'])
x_range = str([0, 3])
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/motion_energy/'
p = subprocess.Popen([sys.executable, './subroutines/immse_cdf.py',
                      '-p', path,
                      '-c', c, '-r', x_range, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
print("All xxx.{}s generated. see {} for results".format(fig_format, outpath))
print('-' * 50)
| 1,812
| 32.574074
| 117
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/github_hist.py
|
# Pose-relationship histograms for the GitHub documentation.
# The earlier side-view dataset run is kept below, disabled; the active run
# uses the top-down dataset.
import sys
import subprocess
# print('\n \n \n GENERATE POSE RELATIONSHIPS HISTOGRAMS \n \n \n')
# path = '/Volumes/Elements/B-SOID/output3/'
# fig_format = 'png'
# outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/pose_relationships/'
# print('\n DATA FROM {} \n'.format(path))
# print('-' * 50)
#
#
# # HISTOGRAMS
# print('\n' * 1)
# print('Preparing pose relationships histograms as xxx.{} found on GitHub...'.format(fig_format))
# print('\n' * 1)
# name = 'openfield_60min_N6'
# order = str([-1, 4, 5, 7, 0, 3, 2, 1, 6, 8, 9, 10])
#
# p = subprocess.Popen([sys.executable, './subroutines/pose_relationships_hist.py',
#                       '-p', path, '-f', name,
#                       '-r', order, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
print('\n \n \n GENERATE POSE RELATIONSHIPS HISTOGRAMS \n \n \n')
path = '/Volumes/Elements/Manuscripts/TopDown/output/'
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/pose_relationships_topdown/'
print('\n DATA FROM {} \n'.format(path))
print('-' * 50)
# HISTOGRAMS
print('\n' * 1)
print('Preparing TOPDOWN pose relationships histograms as xxx.{} found on GitHub...'.format(fig_format))
print('\n' * 1)
name = 'openfield_60min_N1'
# Group display order; -1 presumably denotes the noise/unassigned group —
# confirm against the subroutine.
order = str([-1, 4, 0, 2, 1, 3, 5, 6, 7])
p = subprocess.Popen([sys.executable, './subroutines/pose_relationships_hist2.py',
                      '-p', path, '-f', name,
                      '-r', order, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
| 1,550
| 30.02
| 104
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/fig6_v2.py
|
# Figure 6 (v2): kinematics extraction for the C57 locomotion dataset.
# Only the extraction step is active; the CDF plotting and the A2A-caspase
# runs are disabled in the commented region below this block.
import sys
import subprocess
print('\n \n \n KINEMATICS ANALYSIS \n \n \n')
path = '/Volumes/Elements/B-SOID/output3/'
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/kinematics_py/'
print('\n DATA FROM {} \n'.format(path))
print('-' * 50)
print('\n' * 1)
print('Preparing kinematics cdf as xxx.{} found on GitHub...'.format(fig_format))
print('\n' * 1)
name = 'openfield_60min_N6'
group_num = '10'
body_parts = str([0, 1, 2, 3, 4, 5])
# Experiment groupings (animal indices); serialized with str() for argv.
exp = str([[0, 2, 4, 6, 8, 10], [1, 3, 5, 7, 9, 11]])
var = 'locRf_kin_C57_N6'
p = subprocess.Popen([sys.executable, './subroutines/extract_kinematics.py',
                      '-p', path, '-n', name,
                      '-g', group_num, '-b', body_parts, '-e', exp, '-v', var])
p.communicate()
p.kill()
#
# vname = 'Distance'
# bp = '1'
# c = str(['deepskyblue', 'red'])
# x_range = str([0.5, 6.5])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
# vname = 'Speed'
# bp = '1'
# x_range = str([5, 45])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
# vname = 'Duration'
# bp = '1'
# x_range = str([0, 2])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# print('\n \n \n A2A CASPASE KINEMATICS ANALYSIS \n \n \n')
# path = '/Volumes/Elements/B-SOID/output3/'
# fig_format = 'png'
# outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/kinematics_py/'
# print('\n DATA FROM {} \n'.format(path))
# print('-' * 50)
#
#
# print('\n' * 1)
# print('Preparing kinematics cdf as xxx.{} found on GitHub...'.format(fig_format))
# print('\n' * 1)
# name = 'a2a_60min_N4'
# group_num = '10'
# body_parts = str([0, 1])
# exp = str([[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15]])
# var = 'locRf_kin_A2ACaspase_N4'
#
# p = subprocess.Popen([sys.executable, './subroutines/extract_kinematics.py',
# '-p', path, '-n', name,
# '-g', group_num, '-b', body_parts, '-e', exp, '-v', var])
# p.communicate()
# p.kill()
#
#
# vname = 'Distance'
# bp = '1'
# c = str(['deepskyblue', 'red'])
# x_range = str([0.5, 6.5])
# leg = '1'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# vname = 'Speed'
# bp = '1'
# x_range = str([5, 45])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# vname = 'Duration'
# bp = '1'
# x_range = str([0, 2])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
#
# group_num = '3'
# body_parts = str([0, 1])
# exp = str([[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15]])
# var = 'facegrRf_kin_A2ACaspase_N4'
#
# p = subprocess.Popen([sys.executable, './subroutines/extract_kinematics.py',
# '-p', path, '-n', name,
# '-g', group_num, '-b', body_parts, '-e', exp, '-v', var])
# p.communicate()
# p.kill()
#
#
# vname = 'Distance'
# bp = '1'
# x_range = str([0, 4])
# leg = '1'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# vname = 'Speed'
# bp = '1'
# x_range = str([5, 15])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# vname = 'Duration'
# bp = '1'
# x_range = str([0, 4])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
#
#
# group_num = '2'
# body_parts = str([0, 1])
# exp = str([[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15]])
# var = 'headgrRf_kin_A2ACaspase_N4'
#
# p = subprocess.Popen([sys.executable, './subroutines/extract_kinematics.py',
# '-p', path, '-n', name,
# '-g', group_num, '-b', body_parts, '-e', exp, '-v', var])
# p.communicate()
# p.kill()
#
#
# vname = 'Distance'
# bp = '1'
# x_range = str([0, 4])
# leg = '1'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# vname = 'Speed'
# bp = '1'
# x_range = str([5, 15])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# vname = 'Duration'
# bp = '1'
# x_range = str([0, 4])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# group_num = '6'
# body_parts = str([0, 1, 2, 3, 4, 5])
# exp = str([[0, 1, 2, 3, 4, 5, 6, 7], [8, 9, 10, 11, 12, 13, 14, 15]])
# var = 'itchRf_kin_A2ACaspase_N4'
#
# p = subprocess.Popen([sys.executable, './subroutines/extract_kinematics.py',
# '-p', path, '-n', name,
# '-g', group_num, '-b', body_parts, '-e', exp, '-v', var])
# p.communicate()
# p.kill()
#
#
# vname = 'Distance'
# bp = '3'
# x_range = str([0.5, 4.5])
# leg = '1'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# vname = 'Speed'
# bp = '3'
# x_range = str([5, 37])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
#
#
# vname = 'Duration'
# bp = '3'
# x_range = str([0, 2])
# leg = '0'
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf_v2.py',
# '-p', path, '-n', name, '-v', var, '-V', vname, '-b', bp,
# '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
| 8,235
| 29.6171
| 93
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/figS4.py
|
# Supplementary figure S4: frameshift neural-behavior duration differences,
# rendered via the fsdiff_hist subroutine. List-valued arguments are
# serialized with str() for argv.
import sys
import subprocess
print('\n \n \n FRAMSHIFT NEURAL DIFFERNCES \n \n \n')
path = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/workspace/neuralbehavior_durs.mat'
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/neural_data/'
print('\n DATA FROM {} \n'.format(path))
print('-' * 50)
# FIG6B
print('\n' * 1)
print('Preparing head groom kinematics cdf curves as xxx.{} found in fig6b...'.format(fig_format))
print('\n' * 1)
variables = str(['L5neuralbehavioral'])
c = str(['black', 'magenta'])
x_range = str([0, 1.5])
order = str([4, 5, 7, 0, 3, 2, 1, 6, 8, 9, 10])
p = subprocess.Popen([sys.executable, './subroutines/fsdiff_hist.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-O', order, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
| 868
| 32.423077
| 98
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/figS2.py
|
# Supplementary figure S2: human exercise dataset — UMAP/HDBSCAN panel plus
# still-image extraction from the TEST and TRAIN video snippet folders.
import sys
import subprocess
import os
print('\n \n \n HUMAN EXERCISE UMAP \n \n \n')
path = '/Volumes/Elements/exercise_data/output/'
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/human_exercise/'
print('\n DATA FROM {} \n'.format(path))
print('-' * 50)
# FIG S2
print('\n' * 1)
print('Preparing UMAP + HDBSCAN xxx.{} found in fig S2...'.format(fig_format))
print('\n' * 1)
name = 'exercise_30fps_n2fixed'
p = subprocess.Popen([sys.executable, './subroutines/umap_clustering_plot.py',
                      '-p', path, '-f', name, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
# FIG S2
# Extract stills from every group/example snippet that exists in the TEST
# folder; missing mp4s are skipped by the existence check.
vidpath = '/Volumes/Elements/exercise_data/'
mp4name = 'TEST/mp4s/'
projectname = 'eric_exercise_fixed/'
for g in range(20):
    for e in range(10):
        mp4file = 'group_{}_example_{}.mp4'.format(g, e)
        if os.path.exists(str.join('', (vidpath, mp4name, projectname, mp4file))):
            var = str.join('', (mp4file.partition('.')[0], '_images'))
            p = subprocess.Popen([sys.executable, './subroutines/extract_images.py',
                                  '-p', vidpath, '-n', mp4name, '-f', projectname,
                                  '-g', mp4file, '-v', var])
            p.communicate()
            p.kill()
# FIG S2
# Same extraction for the TRAIN folder.
vidpath = '/Volumes/Elements/exercise_data/'
mp4name = 'TRAIN/mp4s/'
projectname = 'Andy_exercise2_fixed/'
for g in range(20):
    for e in range(10):
        mp4file = 'group_{}_example_{}.mp4'.format(g, e)
        if os.path.exists(str.join('', (vidpath, mp4name, projectname, mp4file))):
            var = str.join('', (mp4file.partition('.')[0], '_images'))
            p = subprocess.Popen([sys.executable, './subroutines/extract_images.py',
                                  '-p', vidpath, '-n', mp4name, '-f', projectname,
                                  '-g', mp4file, '-v', var])
            p.communicate()
            p.kill()
| 1,947
| 32.586207
| 92
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/fig6.py
|
# Figure 6 panels: A2A caspase grooming kinematics CDF curves (head groom and
# face groom: bout length, speed, duration). List-valued arguments are
# serialized with str() for argv.
import sys
import subprocess
print('\n \n \n A2A CASPASE KINEMATICS ANALYSIS \n \n \n')
path = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/workspace/a2a_gr_Rkin.mat'
fig_format = 'png'
outpath = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/kinematics_cdf/'
print('\n DATA FROM {} \n'.format(path))
print('-' * 50)
# FIG6B
print('\n' * 1)
print('Preparing head groom kinematics cdf curves as xxx.{} found in fig6b...'.format(fig_format))
print('\n' * 1)
# Head-groom bout length, control vs. A2A-caspase variables from the .mat file.
variables = str(['c_hg_len_N4_1', 'a_hg_len_N4_1'])
c = str(['deepskyblue', 'red'])
x_range = str([0, 2])
leg = '0'
p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
variables = str(['c_hg_speed_N4_1', 'a_hg_speed_N4_1'])
x_range = str([0, 10])
leg = '0'
p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
variables = str(['c_hg_dur_N4', 'a_hg_dur_N4'])
x_range = str([0, 4])
p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
# FIG6D
print('\n' * 1)
print('Preparing face groom kinematics cdf curves as xxx.{} found in fig6d...'.format(fig_format))
print('\n' * 1)
variables = str(['c_fg_len_N4_1', 'a_fg_len_N4_1'])
c = str(['deepskyblue', 'red'])
x_range = str([0, 2])
leg = '0'
p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
variables = str(['c_fg_speed_N4_1', 'a_fg_speed_N4_1'])
x_range = str([0, 10])
leg = '0'
p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
variables = str(['c_fg_dur_N4', 'a_fg_dur_N4'])
x_range = str([0, 4])
p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
                      '-p', path, '-v', variables,
                      '-c', c, '-r', x_range, '-l', leg, '-m', fig_format, '-o', outpath])
p.communicate()
p.kill()
# # FIG6F
# Itch-kinematics panel, disabled.
# print('\n' * 1)
# print('Preparing itch kinematics cdf curves as xxx.{} found in fig6f...'.format(fig_format))
# print('\n' * 1)
# variables = str(['c_it_len_N4_1', 'a_it_len_N4_1'])
# c = str(['deepskyblue', 'red'])
# x_range = str([1, 5])
#
# p = subprocess.Popen([sys.executable, './subroutines/kinematics_cdf.py',
#                       '-p', path, '-v', variables,
#                       '-c', c, '-r', x_range, '-m', fig_format, '-o', outpath])
# p.communicate()
# p.kill()
# print("All xxx.{}s generated. see {} for results".format(fig_format, outpath))
# print('-' * 50)
| 3,226
| 30.637255
| 98
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/kfold_accuracy.py
|
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold
from operator import itemgetter
from utilities.load_data import appdata
from utilities.save_data import results
import sys, getopt
from ast import literal_eval
def generate_kfold(path, name, k):
    """
    Evaluate a random forest with k-fold cross-validation on the clustered
    feature data and collect per-class accuracies.

    Args
        - path (str): app data directory
        - name (str): dataset/session name
        - k (int): number of folds
    Return
        - accuracy_data (ndarray): per-fold arrays of per-class accuracy
    """
    appdata_ = appdata(path, name)
    f_10fps_sub, train_embeddings = appdata_.load_embeddings()
    min_cluster_range, assignments, soft_clusters, soft_assignments = appdata_.load_clusters()
    # Keep only points that received a cluster assignment (label >= 0).
    assigned = assignments >= 0
    y = assignments[assigned]
    X = f_10fps_sub[assigned, :]
    kf = KFold(n_splits=k)
    kf.get_n_splits(X)
    accuracy_data = []
    for train_index, test_index in kf.split(X):
        print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        classifier = RandomForestClassifier(random_state=42)
        classifier.fit(X_train, y_train)
        predictions = classifier.predict(X_test)
        # Per-class accuracy: correct predictions of class i over instances
        # of class i in this fold's test split.
        # NOTE(review): assumes each class 0..n-1 appears in every test fold;
        # a missing class would divide by zero — confirm upstream guarantees.
        fold_acc = []
        for i in range(len(np.unique(y_test))):
            correct = len(np.argwhere((predictions - y_test == 0) & (y_test == i)))
            total = len(np.argwhere(y_test == i))
            fold_acc.append(correct / total)
        accuracy_data.append(np.array(fold_acc))
    return np.array(accuracy_data)
def reorganize_accuracy(accuracy_data, order):
    """
    Reorder the per-class entries of each fold's accuracy vector.

    Args
        - accuracy_data (sequence): per-fold accuracy vectors, each indexable
          by the *order* list (e.g. numpy arrays)
        - order (list): class indices in the desired display order
    Return
        - list: one reordered accuracy vector per fold
    """
    pick = itemgetter(order)
    return [pick(fold_accuracies) for fold_accuracies in accuracy_data]
def main(argv):
    """
    CLI entry point: parse flags, run the k-fold evaluation, save results.

    Recognized flags: -p/--path, -f/--file, -k/--kfold, -o/--order,
    -v/--variable (list-valued flags arrive as str() text and are parsed
    with ast.literal_eval).
    """
    # Dispatch table from command-line flag to option slot.
    flag_to_key = {'-p': 'path', '--path': 'path',
                   '-f': 'name', '--file': 'name',
                   '-k': 'k', '--kfold': 'k',
                   '-o': 'order', '--order': 'order',
                   '-v': 'vname', '--variable': 'vname'}
    opts = {'path': None, 'name': None, 'k': None, 'order': None, 'vname': None}
    options, args = getopt.getopt(
        argv[1:],
        'p:f:k:o:v:',
        ['path=', 'file=', 'kfold=', 'order=', 'variable='])
    for flag, value in options:
        opts[flag_to_key[flag]] = value
    path, name, k = opts['path'], opts['name'], opts['k']
    order, vname = opts['order'], opts['vname']
    banner = '*' * 50
    print(banner)
    print('PATH :', path)
    print('NAME :', name)
    print('K-FOLD :', k)
    print('ORDER :', order)
    print('VARIABLE :', vname)
    print(banner)
    print('Computing...')
    # Run the evaluation, reorder per-class accuracies into the requested
    # display order and persist both raw and reordered results.
    accuracy_data = generate_kfold(path, name, int(k))
    accuracy_ordered = reorganize_accuracy(accuracy_data, literal_eval(order))
    results_ = results(path, name)
    results_.save_sav([accuracy_data, accuracy_ordered], vname)


if __name__ == '__main__':
    main(sys.argv)
| 2,753
| 32.585366
| 94
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/extract_kinematics.py
|
import numpy as np
from numpy import trapz
import os
from scipy.signal import find_peaks, resample, peak_widths
from utilities.detect_peaks import _plot
import matplotlib.pyplot as plt
from tqdm import tqdm
from utilities.load_data import appdata
from utilities.save_data import results
from utilities.statistics import rle
from utilities.processing import data_processing
import sys, getopt
from ast import literal_eval
def get_kinematics(path, name, group_num, bp, FPS):
    """Extract per-bout kinematics (displacement, peak speed, duration) for one behavior.

    For every animal, finds contiguous runs of frames labeled *group_num*,
    smooths the pose estimates with a boxcar filter, and computes frame-to-frame
    Euclidean displacements per body part in *bp*. Peaks in the displacement
    signal define movement events; their integrated displacement, peak speed
    and bout length are collected per body part.

    :param path: app data directory
    :param name: prediction file name
    :param group_num: behavior group label to analyze
    :param bp: iterable of body-part indices (x/y pairs at columns 2*b, 2*b+1)
    :param FPS: camera frame rate (frames per second)
    :return: (pose_all_animal, eu_all_animal, all_bouts_disp,
              all_bouts_peak_speed, all_bouts_dur), each a per-animal list
    """
    appdata_ = appdata(path, name)
    _, _, filenames2, data_new, fs_labels = appdata_.load_predictions()
    # odd-length smoothing window covering ~100 ms; builtin int() replaces the
    # np.int alias that was removed in NumPy 1.24
    win_len = int(np.round(0.05 / (1 / FPS)) * 2 - 1)
    bout_frames = []
    term_frame = []
    pose_all_animal = []
    eu_all_animal = []
    all_bouts_disp = []
    all_bouts_peak_speed = []
    all_bouts_dur = []
    for an in range(len(data_new)):
        # frames labeled as the requested behavior; breaks in consecutive
        # frame numbers mark bout boundaries (run-length encoded below)
        bout_frames.append(np.array(np.where(fs_labels[an] == group_num)))
        term_f = np.diff(bout_frames[an]) != 1
        term_frame.append(np.array(term_f*1))
        lengths, pos, grp = rle(term_frame[an].T)
        endpos = np.where(np.diff(pos) < 1)[0][0] + 1
        pos = pos[:endpos]
        poses = data_new[an]
        proc_pose = []
        for col in range(poses.shape[1]):
            pose = data_processing(poses[:, col])
            proc_pose.append(pose.boxcar_center(win_len))
        proc_pose = np.array(proc_pose, dtype=object).T
        pose_all_bp = []
        eu_all_bp = []
        for b in bp:
            pose_single_bp = []
            bt = 0
            eu_dist_bout = []
            for bout in range(0, len(pos) - 1, 2):
                eu_dist_ = []
                pose_single_bp.append(proc_pose[int(bout_frames[an][:, pos[bout]]):
                                                int(bout_frames[an][:, pos[bout+1]])+1,
                                                2 * b:2 * b + 2])
                # frame-to-frame Euclidean displacement within the bout
                for row in range(len(pose_single_bp[bt]) - 1):
                    eu_dist_ = np.hstack((eu_dist_, np.linalg.norm(pose_single_bp[bt][row + 1, :] -
                                                                   pose_single_bp[bt][row, :])))
                eu_dist_bout.append(eu_dist_)
                bt += 1
            pose_all_bp.append(pose_single_bp)  # all body parts pose estimation in one animal
            eu_all_bp.append(eu_dist_bout)  # all body parts euclidean distance in one animal
        pose_all_animal.append(pose_all_bp)  # all body parts pose estimations for all animals
        eu_all_animal.append(eu_all_bp)  # all body parts euclidean distances for all animals
        bps_bouts_disp = []
        bps_bouts_peak_speed = []
        bps_bouts_dur = []
        for i in tqdm(range(len(eu_all_bp))):
            bouts_disp = []
            bouts_pk_speed = []
            bouts_dur = []
            for j in range(len(eu_all_bp[i])):
                # suppress sub-threshold jitter before peak detection
                newsig = eu_all_bp[i][j].copy()
                newsig = np.array([0 if a_ < 0.05 * np.max(eu_all_bp[i][j]) else a_ for a_ in newsig])
                pk, info = find_peaks(newsig, prominence=2, distance=int(FPS/10))  # prominence 2 was better than 1
                try:
                    os.mkdir('/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/'
                             'kinematics_analysis/{}'.format(name))
                except FileExistsError:
                    pass
                try:
                    os.mkdir('/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/'
                             'kinematics_analysis/{}/behavior{}'.format(name, group_num))
                except FileExistsError:
                    pass
                # NOTE(review): the directory created above is ".../behavior{group_num}"
                # but output_path below points at ".../group{group_num}" — savefig will
                # fail unless the group{} directory already exists; confirm intended name.
                output_path = '/Volumes/Elements/Manuscripts/B-SOiD/bsoid_natcomm/figure_panels/' \
                              'kinematics_analysis/{}/group{}/file{}'.format(name, group_num, an)
                if pk.size:
                    bout_disp = []
                    # integrate displacement between each peak's left/right bases
                    for k in range(len(info['left_bases'])):
                        bout_disp.append(np.sum((
                            eu_all_bp[i][j][int(round(info['left_bases'][k])):int(round(info['right_bases'][k]))])))
                    if pk.size > 3:
                        _plot(eu_all_bp[i][j], None, pk)
                        R = np.linspace(0, 1, len(info['left_bases']))
                        cm = plt.cm.Spectral(R)
                        for k in range(len(info['left_bases'])):
                            plt.fill_between(
                                np.arange(int(round(info['left_bases'][k])),
                                          int(round(info['right_bases'][k]))),
                                eu_all_bp[i][j][int(round(info['left_bases'][k])):int(round(info['right_bases'][k]))],
                                alpha=0.5, color=cm[k])
                        plt.savefig(str.join('', (output_path, 'pose{}_bout{}_kinematics_extraction.'.format(i, j),
                                                  'png')), format='png', transparent=True)
                        plt.close('all')
                    bouts_disp.append(np.array(bout_disp, dtype=object))
                    bouts_pk_speed.append(eu_all_bp[i][j][pk])
                    bouts_dur.append(len(eu_all_bp[i][j]))
                # else:
                #     bouts_disp.append(np.array(0, dtype=object))
                #     bouts_pk_speed.append(np.array(0, dtype=object))
                #     bouts_dur.append(np.array(0, dtype=object))
            bps_bouts_disp.append(np.array(bouts_disp, dtype=object))
            bps_bouts_peak_speed.append(np.array(bouts_pk_speed, dtype=object))
            bps_bouts_dur.append(np.array(bouts_dur, dtype=object))
        all_bouts_disp.append(np.array(bps_bouts_disp, dtype=object))
        all_bouts_peak_speed.append(np.array(bps_bouts_peak_speed, dtype=object))
        all_bouts_dur.append(np.array(bps_bouts_dur, dtype=object))
    return pose_all_animal, eu_all_animal, all_bouts_disp, all_bouts_peak_speed, all_bouts_dur
def _pool_metric_by_group(per_animal_metric, exp):
    """Concatenate one bout metric across animals, split by experimental group, per body part.

    :param per_animal_metric: list over animals; each entry indexable by body part,
        each body-part entry a numpy array of per-bout values
    :param exp: two disjoint session-index lists, [group1_sessions, group2_sessions]
    :return: (group1_per_bp, group2_per_bp) lists, one pooled array per body part
    """
    bps_exp1 = []
    bps_exp2 = []
    for j in range(len(per_animal_metric[0])):
        exp1_vals = []
        exp2_vals = []
        for i in range(len(per_animal_metric)):
            if any(i == sess for sess in exp[0]):
                if per_animal_metric[i][j].size:
                    try:
                        exp1_vals = np.concatenate((exp1_vals, per_animal_metric[i][j]))
                    except ValueError:
                        # skip entries whose shapes cannot be concatenated
                        pass
            elif any(i == sess for sess in exp[1]):
                if per_animal_metric[i][j].size:
                    try:
                        exp2_vals = np.concatenate((exp2_vals, per_animal_metric[i][j]))
                    except ValueError:
                        pass
        bps_exp1.append(exp1_vals)
        bps_exp2.append(exp2_vals)
    return bps_exp1, bps_exp2
def group_kinematics(all_bouts_disp, all_bouts_peak_speed, all_bouts_dur, exp):
    """Pool per-bout kinematics across animals into two experimental groups, per body part.

    Bug fix: the original reused the loop variable ``j`` for both the outer
    per-body-part loop and an inner loop over body parts, so every body part
    received the identical pooled-over-everything result. Each output entry now
    pools only its own body part, matching how callers index the result
    (e.g. ``kin_data[0][bp]``). The duration section also used ``if`` where the
    other sections used ``elif``; groups are treated as disjoint everywhere now.

    :param all_bouts_disp: per-animal, per-body-part bout displacements
    :param all_bouts_peak_speed: per-animal, per-body-part bout peak speeds
    :param all_bouts_dur: per-animal, per-body-part bout durations
    :param exp: [group1_session_indices, group2_session_indices]
    :return: six lists (group1/group2 for displacement, peak speed, duration),
             each with one pooled array per body part
    """
    bps_exp1_bout_disp, bps_exp2_bout_disp = _pool_metric_by_group(all_bouts_disp, exp)
    bps_exp1_bout_peak_speed, bps_exp2_bout_peak_speed = _pool_metric_by_group(all_bouts_peak_speed, exp)
    bps_exp1_bout_dur, bps_exp2_bout_dur = _pool_metric_by_group(all_bouts_dur, exp)
    return bps_exp1_bout_disp, bps_exp2_bout_disp, bps_exp1_bout_peak_speed, bps_exp2_bout_peak_speed, \
           bps_exp1_bout_dur, bps_exp2_bout_dur
def main(argv):
    """CLI entry point: extract and group per-bout kinematics, then save to .sav.

    Options: -p/--path, -n/--file, -g/--group_num (behavior label, int),
    -b/--bodyparts (Python literal list), -e/--experiment (literal: two
    session-index lists) and -v/--variable (output variable name).
    """
    path = None
    name = None
    group_num = None
    bodyparts = None
    exp = None
    vname = None
    options, args = getopt.getopt(
        argv[1:],
        'p:n:g:b:e:v:',
        ['path=', 'file=', 'group_num=', 'bodyparts=', 'experiment=', 'variable='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-n', '--file'):
            name = option_value
        elif option_key in ('-g', '--group_num'):
            group_num = option_value
        elif option_key in ('-b', '--bodyparts'):
            bodyparts = option_value
        elif option_key in ('-e', '--experiment'):
            exp = option_value
        elif option_key in ('-v', '--variable'):
            vname = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('BEHAVIOR :', group_num)
    print('BODY PARTS :', bodyparts)
    print('EXPERIMENT ORDER :', exp)
    print('VARIABLES :', vname)
    print('*' * 50)
    print('Computing...')
    # frame rate is hard-coded to 60 fps here
    _, _, all_bouts_disp, all_bouts_peak_speed, all_bouts_dur = \
        get_kinematics(path, name, int(group_num), literal_eval(bodyparts), 60)
    bps_exp1_bout_disp, bps_exp2_bout_disp, bps_exp1_bout_peak_speed, bps_exp2_bout_peak_speed, \
        bps_exp1_bout_dur, bps_exp2_bout_dur = group_kinematics(all_bouts_disp, all_bouts_peak_speed, all_bouts_dur,
                                                                literal_eval(exp))
    results_ = results(path, name)
    results_.save_sav([bps_exp1_bout_disp, bps_exp2_bout_disp, bps_exp1_bout_peak_speed, bps_exp2_bout_peak_speed, \
                       bps_exp1_bout_dur, bps_exp2_bout_dur], vname)
if __name__ == '__main__':
    main(sys.argv)
| 11,164
| 45.327801
| 118
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/extract_images.py
|
import os
import ffmpeg
import cv2
from utilities.save_data import results
from utilities.processing import sort_nicely
import sys, getopt
def get_images(pathname, group_num):
    """Extract every frame of a video to PNGs via ffmpeg and load them with OpenCV.

    :param pathname: directory containing the video file
    :param group_num: the video file name (also used as the PNG subdirectory name)
    :return: (im, image_files) — loaded frames and their sorted file names
    """
    # NOTE(review): the first mkdir joins pathname + '/pngs' but frame_dir joins
    # pathname + 'pngs' — these only refer to the same directory if pathname
    # already ends with '/'; confirm against how callers build pathname.
    try:
        os.mkdir(str.join('', (pathname, '/pngs')))
    except FileExistsError:
        pass
    try:
        os.mkdir(str.join('', (pathname, 'pngs', '/', group_num)))
    except FileExistsError:
        pass
    frame_dir = str.join('', (pathname, 'pngs', '/', group_num))
    print('You have created {} as your PNG directory for video {}.'.format(frame_dir, group_num))
    # probe the video stream for geometry, frame count, bitrate and frame rate
    probe = ffmpeg.probe(os.path.join(pathname, group_num))
    video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')
    width = int(video_info['width'])
    height = int(video_info['height'])
    num_frames = int(video_info['nb_frames'])
    bit_rate = int(video_info['bit_rate'])
    # avg_frame_rate is a "num/den" string; compute and round it
    avg_frame_rate = round(
        int(video_info['avg_frame_rate'].rpartition('/')[0]) / int(video_info['avg_frame_rate'].rpartition('/')[2]))
    print('Extracting {} frames at {} frames per second...'.format(num_frames, avg_frame_rate))
    try:
        (ffmpeg.input(os.path.join(pathname, group_num))
         .filter('fps', fps=avg_frame_rate)
         .output(str.join('', (frame_dir, '/frame%01d.png')), video_bitrate=bit_rate,
                 s=str.join('', (str(int(width)), 'x', str(int(height)))), sws_flags='bilinear',
                 start_number=0)
         .run(capture_stdout=True, capture_stderr=True))
        print('Done extracting {} frames from video {}.'.format(num_frames, group_num))
    except ffmpeg.Error as e:
        # surface ffmpeg's own output on failure; execution continues to the
        # listing below regardless
        print('stdout:', e.stdout.decode('utf8'))
        print('stderr:', e.stderr.decode('utf8'))
    image_files = [img for img in os.listdir(frame_dir) if img.endswith(".png")]
    sort_nicely(image_files)
    im = []
    for i in range(len(image_files)):
        im.append(cv2.imread(os.path.join(frame_dir, image_files[i])))
    return im, image_files
def main(argv):
    """CLI entry point: extract video frames to PNGs and save them to a .sav file.

    Options: -p/--path, -n/--mp4_name, -f/--project_name, -g/--group_num
    (the video file name) and -v/--variable (output variable name).
    """
    path = None
    mp4name = None
    projectname = None
    group_num = None
    var = None
    options, args = getopt.getopt(
        argv[1:],
        'p:n:f:g:v:',
        ['path=', 'mp4_name=', 'project_name=', 'group_num=', 'variable='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-n', '--mp4_name'):
            mp4name = option_value
        elif option_key in ('-f', '--project_name'):
            projectname = option_value
        elif option_key in ('-g', '--group_num'):
            group_num = option_value
        elif option_key in ('-v', '--variable'):
            var = option_value
    print('*' * 50)
    print('PATH :', path)
    print('MP4 NAME :', mp4name)
    print('PROJECT :', projectname)
    print('BEHAVIOR :', group_num)
    print('VARIABLES :', var)
    print('*' * 50)
    print('Computing...')
    # the three CLI pieces are concatenated verbatim into one directory path
    pathname = str.join('', (path, mp4name, projectname))
    im, image_files = get_images(pathname, group_num)
    results_ = results(pathname, '')
    results_.save_sav([im, image_files], var)
if __name__ == '__main__':
    main(sys.argv)
| 3,172
| 35.895349
| 116
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/trajectory_plot.py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from utilities.load_data import appdata
from utilities.processing import data_processing
import sys, getopt
from ast import literal_eval
def limb_trajectory(path, name, animal_idx, bp, t_range):
    """Compute per-frame displacement magnitudes for selected body parts.

    Loads predictions for one animal and, for each body part index in *bp*,
    measures the Euclidean distance between consecutive frames over
    ``t_range[0]..t_range[1]``. Body part *b* occupies pose columns
    ``2*b`` and ``2*b + 1``.

    NOTE(review): at ``t == t_range[0]`` the previous frame is ``t - 1``; if
    ``t_range[0]`` is 0 this wraps to the last frame — confirm callers always
    pass a positive start.

    :return: (labels, limbs) — behavior labels over the window and one
             displacement array per body part
    """
    loader = appdata(path, name)
    _, _, _, data_new, fs_labels = loader.load_predictions()
    start, end = t_range[0], t_range[1]
    labels = fs_labels[animal_idx][start:end]
    pose = data_new[animal_idx]
    limbs = []
    for part in bp:
        cols = slice(part * 2, part * 2 + 2)
        frame_steps = [np.linalg.norm(pose[t, cols] - pose[t - 1, cols])
                       for t in range(start, end)]
        limbs.append(np.array(frame_steps))
    return labels, limbs
def plot_trajectory(limbs, labels, t_range, ord1, ord2, c, fig_size, fig_format, outpath):
    """Plot smoothed limb displacement traces in two stacked panels and save the figure.

    :param limbs: per-body-part displacement arrays (smoothed here with a
        5-sample boxcar)
    :param labels: behavior labels (accepted but not used by this function)
    :param t_range: (start, end) frames, used only in the output file name
    :param ord1: limb indices drawn in the top panel (first is opaque, rest faded)
    :param ord2: limb indices drawn in the bottom panel (same convention)
    :param c: two colors, one per panel
    :param fig_size, fig_format, outpath: figure geometry and output location
    """
    proc_limb = []
    for l in range(len(limbs)):
        proc_data = data_processing(limbs[l])
        proc_limb.append(proc_data.boxcar_center(5))
    figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    plt.subplot()
    plt.subplot(211)
    ax1 = plt.subplot(2, 1, 1)
    # first trace in each panel is fully opaque; the rest are drawn faded
    for o in range(len(ord1)):
        if o > 0:
            a = 0.3
        else:
            a = 1
        ax1.plot(proc_limb[ord1[o]], linewidth=8, color=c[0], alpha=a)
    ax1 = plt.gca()
    # ax.set_xlim(43 - 30, 43 + 30)
    # ax.set_ylim(0, max(
    #     np.concatenate((boxcar_center(Rforelimb, 5)[43 - 30:43 + 30], boxcar_center(Lforelimb, 5)[43 - 30:43 + 30],
    #                     boxcar_center(Rhindlimb, 5)[43 - 30:43 + 30], boxcar_center(Lhindlimb, 5)[43 - 30:43 + 30]))))
    ax1.set_axis_off()
    ax2 = plt.subplot(2, 1, 2)
    for o in range(len(ord2)):
        if o > 0:
            a = 0.3
        else:
            a = 1
        ax2.plot(proc_limb[ord2[o]], linewidth=8, color=c[1], alpha=a)
    ax2 = plt.gca()
    ax1.spines['top'].set_visible(False)
    ax1.spines['right'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax2.spines['right'].set_visible(False)
    ax1.spines['bottom'].set_visible(False)
    ax2.spines['bottom'].set_visible(True)
    ax2.spines['bottom'].set_linewidth(4)
    ax1.spines['left'].set_visible(True)
    ax2.spines['left'].set_visible(True)
    # invert the y axis of the bottom (current) panel
    plt.gca().invert_yaxis()
    # plt.axvline(x=6 * 8, linewidth=4, color='k')
    # plt.axvline(x=43, linewidth=4, color='k')
    ax1.tick_params(length=24, width=4)
    ax2.tick_params(length=24, width=4)
    ax2.xaxis.set_ticklabels([])
    # ax.set_xticks(range(43 - 30, 43 + 30, 15))
    plt.savefig(str.join('', (outpath, 'start{}_end{}_limb_trajectory.'.format(*t_range), fig_format)),
                format=fig_format, transparent=True)
def main(argv):
    """CLI entry point: compute limb trajectories for one animal and plot them.

    Options: -p/--path, -f/--file, -i/--animal_idx, -b/--bodypart (literal
    list), -t/--timerange (literal tuple), -r/--order1 and -R/--order2
    (literal lists of limb indices for the top/bottom panels), -c/--colors,
    -m/--format, -o/--outpath.

    Bug fix: the ``-R`` branch previously matched ``'--order1'`` (a duplicate
    of the ``-r`` branch, which always wins), so ``--order2`` — declared in
    the getopt long-option list — was silently ignored. It now matches
    ``'--order2'``.
    """
    path = None
    name = None
    animal_idx = None
    bp = None
    t_range = None
    order1 = None
    order2 = None
    c = None
    fig_format = None
    outpath = None
    options, args = getopt.getopt(
        argv[1:],
        'p:f:i:b:t:r:R:c:m:o:',
        ['path=', 'file=', 'animal_idx=', 'bodypart=', 'timerange=', 'order1=', 'order2=', 'colors=',
         'format=', 'outpath='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-f', '--file'):
            name = option_value
        elif option_key in ('-i', '--animal_idx'):
            animal_idx = option_value
        elif option_key in ('-b', '--bodypart'):
            bp = option_value
        elif option_key in ('-t', '--timerange'):
            t_range = option_value
        elif option_key in ('-r', '--order1'):
            order1 = option_value
        elif option_key in ('-R', '--order2'):
            order2 = option_value
        elif option_key in ('-c', '--colors'):
            c = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('ANIMAL INDEX :', animal_idx)
    print('BODYPARTS :', bp)
    print('TIME RANGE :', t_range)
    print('TOP PLOT :', order1)
    print('BOTTOM PLOT :', order2)
    print('COLORS :', c)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    labels, limbs = limb_trajectory(path, name, int(animal_idx), literal_eval(bp), literal_eval(t_range))
    plot_trajectory(limbs, labels, literal_eval(t_range), literal_eval(order1), literal_eval(order2), literal_eval(c),
                    (8.5, 16), fig_format, outpath)
if __name__ == '__main__':
    main(sys.argv)
| 4,750
| 33.427536
| 120
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/umap_clustering_plot.py
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from utilities.load_data import appdata
import sys, getopt
from matplotlib.axes._axes import _log as matplotlib_axes_logger
matplotlib_axes_logger.setLevel('ERROR')
def plot_enhanced_umap(path, name, fig_size, fig_format, outpath):
    """Scatter-plot the 2D UMAP embedding colored by cluster assignment and save it.

    Noise points (assignment < 0) are excluded from the plot; cluster labels
    are shown 1-based in the legend.
    """
    appdata_ = appdata(path, name)
    f_10fps_sub, train_embeddings = appdata_.load_embeddings()
    min_cluster_range, assignments, soft_clusters, soft_assignments = appdata_.load_clusters()
    uk = list(np.unique(assignments))
    # one colormap sample per non-noise cluster (len(uk) - 1 excludes the -1 label)
    R = np.linspace(0, 1, len(uk) - 1)
    cmap = plt.cm.get_cmap("Spectral")(R)
    umap_x, umap_y = train_embeddings[:, 0], train_embeddings[:, 1]
    fig = figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = fig.add_subplot(111)
    for g in np.unique(assignments):
        if g >= 0:  # skip HDBSCAN-style noise points labeled -1
            idx = np.where(np.array(assignments) == g)
            ax.scatter(umap_x[idx], umap_y[idx], c=cmap[g],
                       label=g+1, s=50, marker='o', alpha=0.6)
    plt.legend(ncol=3, loc=0, prop={'family': 'Helvetica', 'size': 28})
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    ax.spines['top'].set_visible(True)
    ax.spines['top'].set_linewidth(3)
    ax.spines['right'].set_visible(True)
    ax.spines['right'].set_linewidth(3)
    ax.spines['bottom'].set_visible(True)
    ax.spines['bottom'].set_linewidth(3)
    ax.spines['left'].set_visible(True)
    ax.spines['left'].set_linewidth(3)
    ax.tick_params(length=10, width=3)
    # NOTE(review): "enahnced" in the output file name looks like a typo for
    # "enhanced"; left unchanged since downstream tooling may expect it.
    plt.savefig(str.join('', (outpath, '{}'.format(name), '_umap_enahnced_clustering.', fig_format)),
                format=fig_format, transparent=True)
def main(argv):
    """CLI entry point: plot the UMAP embedding with cluster colors.

    Options: -p/--path, -f/--file, -m/--format (figure format, e.g. 'png')
    and -o/--outpath.
    """
    path = None
    name = None
    fig_format = None
    outpath = None
    options, args = getopt.getopt(
        argv[1:],
        'p:f:m:o:',
        ['path=', 'file=', 'format=', 'outpath='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-f', '--file'):
            name = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    # figure size is fixed at 16x11 inches
    plot_enhanced_umap(path, name, (16, 11), fig_format, outpath)
if __name__ == '__main__':
    main(sys.argv)
| 2,591
| 35
| 101
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/accuracy_boxplot.py
|
import seaborn as sns
import matplotlib.colors as mc
import colorsys
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
from utilities.load_data import load_sav
import sys, getopt
from ast import literal_eval
def lighten_color(color, amount=0):
    """Lighten (amount < 1) or darken (amount > 1) a matplotlib color.

    Works with a named color, a hex string, or an RGB tuple. The lightness is
    remapped as ``1 - amount * (1 - L)`` in HLS space, so ``amount=1`` returns
    the original color and ``amount=0`` yields white.

    Bug fix: the bare ``except:`` (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to the two exceptions a failed name lookup can
    actually raise.
    """
    # --------------------- SOURCE: @IanHincks ---------------------
    try:
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # not a named color (or not hashable): assume hex string / RGB(A) tuple
        c = color
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def plot_boxplot(algo, data, c, fig_size, fig_format, outpath):
    """Draw horizontal boxplots of k-fold accuracies (x fixed to [0.8, 1]) and save.

    :param algo: algorithm name, used in the output file name
    :param data: per-class accuracy sequences (boxplot rows)
    :param c: color palette passed to seaborn
    """
    figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = plt.subplot()
    sns.set_palette(sns.color_palette(c))
    sns.boxplot(data=np.array(data), orient='h', width=0.7, ax=ax)
    # recolor each box's whiskers/caps/median with a lightened box color;
    # each boxplot artist owns 6 line objects
    # NOTE(review): relies on boxes appearing in ax.artists — true for the
    # matplotlib versions this repo targets; newer releases moved them to
    # ax.patches.
    for i, artist in enumerate(ax.artists):
        col = lighten_color(artist.get_facecolor(), 1.4)
        artist.set_edgecolor('k')
        for j in range(i * 6, i * 6 + 6):
            line = ax.lines[j]
            line.set_color(col)
            line.set_mfc(col)
            line.set_mec(col)
            line.set_linewidth(3)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(True)
    ax.spines['bottom'].set_linewidth(3)
    ax.spines['left'].set_visible(True)
    ax.spines['left'].set_linewidth(3)
    ax.tick_params(length=24, width=3)
    ax.set_xlim(0.8, 1)
    ax.set_xticks(np.arange(0.80, 1.01, 0.1))
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    plt.savefig(str.join('', (outpath, algo, '_Kfold_accuracy.', fig_format)), format=fig_format, transparent=True)
def main(argv):
    """CLI entry point: load saved k-fold accuracies and draw the boxplot.

    Options: -p/--path, -f/--file, -v/--variable (saved variable name),
    -a/--algorithm (label for the file name), -c/--colors (literal palette),
    -m/--format, -o/--outpath.
    """
    path = None
    name = None
    vname = None
    algorithm = None
    c = None
    fig_format = None
    outpath = None
    options, args = getopt.getopt(
        argv[1:],
        'p:f:v:a:c:m:o:',
        ['path=', 'file=', 'variable=', 'algorithm=', 'colors=', 'format=', 'outpath='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-f', '--file'):
            name = option_value
        elif option_key in ('-v', '--variable'):
            vname = option_value
        elif option_key in ('-a', '--algorithm'):
            algorithm = option_value
        elif option_key in ('-c', '--colors'):
            c = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('VARIABLE :', vname)
    print('ALGORITHM :', algorithm)
    print('COLORS :', c)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    # the saved tuple is (accuracy_data, accuracy_ordered); only the ordered
    # version is plotted
    _, accuracy_ordered = load_sav(path, name, vname)
    plot_boxplot(algorithm, accuracy_ordered, literal_eval(c), (6, 16), fig_format, outpath)
if __name__ == '__main__':
    main(sys.argv)
| 3,084
| 32.172043
| 115
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/immse_cdf.py
|
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
from utilities.load_data import load_mat
import sys, getopt
from ast import literal_eval
def plot_cdf(data, c, x_range, fig_size, fig_format, outpath):
    """Plot empirical CDFs of six MSE distributions and save the figure.

    :param data: six column-vector arrays in the order
        [MM same, MM diff, B-SOiD same, B-SOiD diff, shuffled same, shuffled diff]
    :param c: three colors (MM, B-SOiD, shuffled)
    :param x_range: (xmin, xmax) axis limits
    :param fig_size, fig_format, outpath: figure geometry and output location

    Refactor: the six identical histogram/CDF stanzas are collapsed into a loop
    and a plot-spec table; bins, weight normalization, draw order, labels and
    line widths are unchanged.
    """
    figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = plt.axes()
    cdfs = []
    for d in data:
        # weights normalize each histogram to a probability mass; the (n, 1)
        # reshape matches the column-vector shape of the .mat-sourced data
        values, base = np.histogram(d, bins=np.arange(0, 3, 0.01),
                                    weights=np.ones(len(d)).reshape(len(d), 1) / len(d),
                                    density=False)
        values = np.append(values, 0)  # pad so the curve spans every bin edge
        cdfs.append(np.cumsum(values) / np.cumsum(values)[-1])
    # (data index, color index, linestyle, label, linewidth); shuffled curves
    # are drawn first so the algorithm curves sit on top
    series = [(4, 2, '-', "Shuff. same", 8),
              (5, 2, '--', "Shuff. diff.", 8),
              (0, 0, '-', "MM same", 6),
              (1, 0, '--', "MM diff.", 6),
              (2, 1, '-', "BSOiD same", 6),
              (3, 1, '--', "BSOiD diff.", 6)]
    for d_idx, c_idx, ls, label, lw in series:
        ax.plot(base, cdfs[d_idx],
                color=c[c_idx], marker='None', linestyle=ls,
                label=label, linewidth=lw)
    ax.set_xlim(x_range[0], x_range[1])
    ax.set_ylim(0, 1)
    ax.set_axisbelow(True)
    ax.grid(False)
    ax.set_xticks(np.arange(x_range[0], x_range[1]+0.1, (x_range[1]-x_range[0])/3))
    ax.set_yticks(np.arange(0, 1.1, 0.2))
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    # legend lists the algorithm curves before the shuffled baselines
    order = [2, 3, 4, 5, 0, 1]
    handles, labels = plt.gca().get_legend_handles_labels()
    lgnd = plt.legend([handles[idx] for idx in order], [labels[idx] for idx in order],
                      loc=0, prop={'family': 'Helvetica', 'size': 48})
    lgnd.legendHandles[0]._legmarker.set_markersize(2)
    lgnd.legendHandles[1]._legmarker.set_markersize(2)
    ax.spines['top'].set_visible(True)
    ax.spines['top'].set_linewidth(5)
    ax.spines['right'].set_visible(True)
    ax.spines['right'].set_linewidth(5)
    ax.spines['bottom'].set_visible(True)
    ax.spines['bottom'].set_linewidth(5)
    ax.spines['left'].set_visible(True)
    ax.spines['left'].set_linewidth(5)
    ax.tick_params(length=20, width=5)
    plt.savefig(str.join('', (outpath, 'mse_cdf.', fig_format)), format=fig_format, transparent=True)
def main(argv):
    """CLI entry point: load within/between MSE vectors from a .mat file and plot CDFs.

    Options: -p/--path (.mat file path), -c/--color (literal list of three
    colors), -r/--range (literal x-axis limits), -m/--format, -o/--outpath.
    """
    path = None
    c = None
    x_range = None
    fig_format = None
    outpath = None
    options, args = getopt.getopt(
        argv[1:],
        'p:c:r:m:o:',
        ['path=', 'color=', 'range=', 'format=', 'outpath='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-c', '--color'):
            c = option_value
        elif option_key in ('-r', '--range'):
            x_range = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
    print('*' * 50)
    print('PATH :', path)
    print('COLOR :', c)
    print('RANGE :', x_range)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    mat = load_mat(path)
    # six vectors in the order plot_cdf expects: MM, B-SOiD, shuffled
    # (within/"same" before between/"diff." for each)
    data = [mat['mm_within_vec2'], mat['mm_between_vec2'],
            mat['bsf_within_vec2'], mat['bsf_between_vec2'],
            mat['sbsf_within_vec2'], mat['sbsf_between_vec2']]
    plot_cdf(data, literal_eval(c), literal_eval(x_range), (16, 13), fig_format, outpath)
if __name__ == '__main__':
    main(sys.argv)
| 5,045
| 43.263158
| 118
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/kinematics_cdf_v2.py
|
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
from utilities.load_data import load_sav
import sys, getopt
from ast import literal_eval
def plot_cdf(var, vname, data, c, x_range, bnct, tk, leg, fig_size, fig_format, outpath):
    """Plot two empirical CDFs (control vs. caspase groups) of one kinematic measure.

    :param var, vname: strings used in the output file name
    :param data: two 1-D value arrays [group1, group2]
    :param c: two plot colors
    :param x_range: (xmin, xmax) axis limits; bins are fixed-width 0.05 steps
    :param bnct: bin count — only referenced by the commented-out variant below
    :param tk: number of x-axis tick intervals
    :param leg: truthy to draw the legend
    """
    figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = plt.axes()
    # histograms normalized to probability mass; fixed 0.05-wide bins
    values1, base = np.histogram(data[0], bins=np.arange(x_range[0], x_range[1]+0.5, 0.05),
                                 # np.percentile(data[0], 95),
                                 # num=bnct),
                                 weights=np.ones(len(data[0])) / len(data[0]), density=False)
    values2, base = np.histogram(data[1], bins=np.arange(x_range[0], x_range[1]+0.5, 0.05),
                                 # np.percentile(data[0], 95),
                                 # num=bnct),
                                 weights=np.ones(len(data[1])) / len(data[1]), density=False)
    # pad so each curve spans every bin edge
    values1 = np.append(values1, 0)
    values2 = np.append(values2, 0)
    # figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    # ax = plt.axes()
    # values1, base = np.histogram(data1[0], bins=np.linspace(x_range[0], data1[0].max(),
    #                                                         # np.percentile(data[0], 95),
    #                                                         num=bnct),
    #                              weights=np.ones(len(data1[0])) / len(data1[0]), density=False)
    # values2, base = np.histogram(data1[1], bins=np.linspace(x_range[0], data1[1].max(),
    #                                                         # np.percentile(data[0], 95),
    #                                                         num=bnct),
    #                              weights=np.ones(len(data1[1])) / len(data1[1]), density=False)
    # values1 = np.append(values1, 0)
    # values2 = np.append(values2, 0)
    ax.plot(base, np.cumsum(values1) / np.cumsum(values1)[-1],
            color=c[0], marker='None', linestyle='-',
            label="A2A Ctrl.", linewidth=8)
    ax.plot(base, np.cumsum(values2) / np.cumsum(values2)[-1],
            color=c[1], marker='None', linestyle='-',
            label="A2A Casp.", linewidth=8)
    # ax.set_xlim(np.percentile(data[0], 5), np.percentile(data[0], 95))
    ax.set_xlim(x_range[0], x_range[1])
    ax.set_ylim(0, 1)
    ax.set_axisbelow(True)
    ax.grid(linestyle='-', linewidth=5, axis='both')
    ax.set_xticks(np.arange(x_range[0], x_range[1]+0.1, (x_range[1]-x_range[0])/tk))
    if leg:
        lgnd = plt.legend(loc=0, prop={'family': 'Helvetica', 'size': 60})
        lgnd.legendHandles[0]._legmarker.set_markersize(8)
        lgnd.legendHandles[1]._legmarker.set_markersize(8)
    ax.set_yticks(np.arange(0, 1.1, 0.2))
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    ax.spines['top'].set_visible(True)
    ax.spines['top'].set_linewidth(5)
    ax.spines['right'].set_visible(True)
    ax.spines['right'].set_linewidth(5)
    ax.spines['bottom'].set_visible(True)
    ax.spines['bottom'].set_linewidth(5)
    ax.spines['left'].set_visible(True)
    ax.spines['left'].set_linewidth(5)
    ax.spines['top'].set_color('k')
    ax.spines['right'].set_color('k')
    ax.spines['bottom'].set_color('k')
    ax.spines['left'].set_color('k')
    ax.tick_params(length=25, width=5)
    plt.savefig(str.join('', (outpath, '{}_{}_cdf.'.format(var, vname), fig_format)),
                format=fig_format, transparent=True)
def main(argv):
    """CLI entry point: load saved kinematics and plot a group-comparison CDF.

    Options: -p/--path, -n/--file, -v/--variables (saved variable name),
    -V/--variable_name (one of 'Distance', 'Speed', 'Duration'),
    -b/--bodypart (index), -c/--colors, -r/--range, -l/--legend (0/1),
    -m/--format, -o/--outpath.

    Bug fixes: the getopt long option was declared as 'variable_name' without
    the trailing '=', so ``--variable_name`` consumed no argument; and an
    unrecognized -V value previously fell through to a NameError on ``conv`` —
    it now raises a descriptive ValueError.
    """
    path = None
    name = None
    var = None
    vname = None
    bp = None
    c = None
    x_range = None
    leg = None
    fig_format = None
    outpath = None
    options, args = getopt.getopt(
        argv[1:],
        'p:n:v:V:b:c:r:l:m:o:',
        ['path=', 'file=', 'variables=', 'variable_name=', 'bodypart=',
         'colors=', 'range=', 'legend=', 'format=', 'outpath='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-n', '--file'):
            name = option_value
        elif option_key in ('-v', '--variables'):
            var = option_value
        elif option_key in ('-V', '--variable_name'):
            vname = option_value
        elif option_key in ('-b', '--bodypart'):
            bp = option_value
        elif option_key in ('-c', '--colors'):
            c = option_value
        elif option_key in ('-r', '--range'):
            x_range = option_value
        elif option_key in ('-l', '--legend'):
            leg = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('VARIABLES :', var)
    print('VARIABLE NAME :', vname)
    print('BODYPART :', bp)
    print('COLOR :', c)
    print('RANGE :', x_range)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('*' * 50)
    print('Plotting...')
    kin_data = load_sav(path, name, var)
    if vname == 'Distance':
        conv = 0
    elif vname == 'Speed':
        conv = 1
    elif vname == 'Duration':
        conv = 2
    else:
        raise ValueError("variable_name must be 'Distance', 'Speed' or 'Duration', got {!r}".format(vname))
    # unit conversions: 23.5126 pixels per cm, 60 frames per second
    if conv == 0:
        data = [np.concatenate(kin_data[0][int(bp)] / 23.5126),
                np.concatenate(kin_data[1][int(bp)] / 23.5126)]
        # data1 = [data[0][data[0] > 0.5], data[1][data[1] > 0.5]]
    elif conv == 1:
        data = [np.concatenate(kin_data[2][int(bp)] * 60 / 23.5126),
                np.concatenate(kin_data[3][int(bp)] * 60 / 23.5126)]
        # data1 = [data[0][data[0] > 3], data[1][data[1] > 3]]
    elif conv == 2:
        data = [kin_data[4][int(bp)] / 60,
                kin_data[5][int(bp)] / 60]
        # data1 = data
    plot_cdf(var, vname, data, literal_eval(c), literal_eval(x_range),
             50, 4, int(leg), (16, 16), fig_format, outpath)
if __name__ == '__main__':
    main(sys.argv)
| 6,124
| 40.385135
| 97
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/neural_plot.py
|
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
from utilities.load_data import load_mat
import sys, getopt
from ast import literal_eval
def plot_neural_heatmap(algo, data, c, c_range, discrete_n, delim, cl, fig_size, fig_format, outpath, cb=False):
    """Render a diverging heatmap of neural activity with a dashed event marker.

    :param algo: label used in the output file name
    :param data: 2-D activity matrix for seaborn's heatmap
    :param c: unused here (superseded by the diverging palette below);
        kept for interface compatibility with callers
    :param c_range: (vmin, vmax) color limits
    :param discrete_n: unused here (only the commented-out palette used it)
    :param delim: column index for the vertical dashed line (event onset)
    :param cl: color of the dashed line
    :param cb: draw the colorbar when True
    """
    figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = plt.subplot()
    # fixed purple-to-orange diverging palette centered at zero
    cm = sns.diverging_palette(275, 35, center='light', s=100, l=70, n=5)
    # cm = sns.light_palette(c, n_colors=discrete_n)
    # for i in range(len(data)):
    sns.heatmap(data, vmin=c_range[0], vmax=c_range[1], center=0, cmap=cm, cbar=cb, ax=ax)
    # sns.heatmap(data, vmin=c_range[0], vmax=c_range[1], cmap=cm, cbar=cb, ax=ax)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)
    plt.axvline(x=delim - 1, linewidth=4, linestyle='--', color=cl)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    if cb:
        # style the colorbar axis added by seaborn (last axes in the figure)
        cax = plt.gcf().axes[-1]
        cax.tick_params(length=20, width=5, color='k')
    plt.savefig(str.join('', (outpath, algo, '_neuralheatmap.', fig_format)), format=fig_format, transparent=True)
def main(argv):
    """CLI entry point: load neural activity from a .mat file and plot a heatmap.

    Options: -p/--path (.mat file), -a/--algorithm ('frameshifted' or
    'non-frameshifted'), -c/--color, -r/--range (literal color limits),
    -n/--discrete_n, -l/--cline (marker line color), -m/--format,
    -o/--outpath, -b/--colorbar (0/1).
    """
    path = None
    algorithm = None
    c = None
    c_range = None
    discrete_n = None
    cline = None
    fig_format = None
    outpath = None
    cb = None
    options, args = getopt.getopt(
        argv[1:],
        'p:a:c:r:n:l:m:o:b:',
        ['path=', 'algorithm=', 'color=', 'range=', 'discrete_n=', 'cline=', 'format=', 'outpath=', 'colorbar='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-a', '--algorithm'):
            algorithm = option_value
        elif option_key in ('-c', '--color'):
            c = option_value
        elif option_key in ('-r', '--range'):
            c_range = option_value
        elif option_key in ('-n', '--discrete_n'):
            discrete_n = option_value
        elif option_key in ('-l', '--cline'):
            cline = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
        elif option_key in ('-b', '--colorbar'):
            cb = option_value
    print('*' * 50)
    print('PATH :', path)
    print('ALGORITHM :', algorithm)
    print('COLOR :', c)
    print('RANGE :', c_range)
    print('DISCRETE COLORS :', discrete_n)
    print('LINE COLOR :', cline)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('COLORBAR :', cb)
    print('*' * 50)
    print('Plotting...')
    mat = load_mat(path)
    # NOTE(review): any other -a value leaves `data` undefined and the
    # plot call below raises NameError.
    if algorithm == 'non-frameshifted':
        data = mat['l5neural']['nonfs'][0]
    elif algorithm == 'frameshifted':
        data = mat['l5neural']['fs'][0]
    # event marker column and session index 10 are hard-coded
    delim = 200
    plot_neural_heatmap(algorithm, data[10], c, literal_eval(c_range), int(discrete_n), delim, cline,
                        (16, 14), fig_format, outpath, bool(int(cb)))
if __name__ == '__main__':
    main(sys.argv)
| 3,251
| 34.736264
| 114
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/coherence_boxplot.py
|
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
from utilities.load_data import load_sav
import sys, getopt
def plot_boxplot(algo, data, c, fig_size, fig_format, outpath):
    """Draw a horizontal boxplot of coherence values and save it to disk.

    :param algo: algorithm label, used only in the output file name
    :param data: 2-D array-like; each column/row per seaborn's 'h' orientation
        becomes one horizontal box
    :param c: box fill color (medians drawn in white for contrast)
    :param fig_size: (width, height) of the figure in inches
    :param fig_format: matplotlib savefig format string, e.g. 'png'
    :param outpath: directory/prefix the figure file name is appended to
    """
    figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = plt.subplot()
    sns.boxplot(data=np.array(data), orient='h', width=0.7, medianprops={'color': 'white'}, color=c, ax=ax)
    # Keep only the bottom/left spines, thickened for print quality.
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(True)
    ax.spines['bottom'].set_linewidth(3)
    ax.spines['left'].set_visible(True)
    ax.spines['left'].set_linewidth(3)
    ax.tick_params(length=24, width=3)
    # X axis fixed to the 0.7-1.0 coherence range; tick labels are stripped
    # deliberately (ticks remain as unlabeled marks).
    ax.set_xlim(0.7, 1)
    ax.set_xticks(np.arange(0.70, 1.01, 0.1))
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    plt.savefig(str.join('', (outpath, algo, '_frameshift_coherence.', fig_format)), format=fig_format, transparent=True)
def main(argv):
    """Command-line entry point: parse options, load the saved coherence
    table, and render the frameshift-coherence boxplot."""
    # Map every accepted flag spelling onto one canonical variable name.
    dest = {'-p': 'path', '--path': 'path',
            '-f': 'name', '--file': 'name',
            '-v': 'vname', '--variable': 'vname',
            '-a': 'algorithm', '--algorithm': 'algorithm',
            '-c': 'c', '--color': 'c',
            '-m': 'fig_format', '--format': 'fig_format',
            '-o': 'outpath', '--outpath': 'outpath'}
    parsed = dict.fromkeys(dest.values())
    options, _ = getopt.getopt(
        argv[1:],
        'p:f:v:a:c:m:o:',
        ['path=', 'file=', 'variable=', 'algorithm=', 'color=', 'format=', 'outpath='])
    for flag, value in options:
        parsed[dest[flag]] = value
    banner = '*' * 50
    print(banner)
    print('PATH :', parsed['path'])
    print('NAME :', parsed['name'])
    print('VARIABLE :', parsed['vname'])
    print('ALGORITHM :', parsed['algorithm'])
    print('FIG FORMAT :', parsed['fig_format'])
    print('OUT PATH :', parsed['outpath'])
    print(banner)
    print('Plotting...')
    coherence_reordered = load_sav(parsed['path'], parsed['name'], parsed['vname'])
    plot_boxplot(parsed['algorithm'], np.array(coherence_reordered).T, parsed['c'],
                 (6, 16), parsed['fig_format'], parsed['outpath'])


if __name__ == '__main__':
    main(sys.argv)
| 2,367
| 32.352113
| 121
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/pose_relationships_hist.py
|
import numpy as np
import itertools
from collections import OrderedDict
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from utilities.load_data import appdata
import sys, getopt
from ast import literal_eval
def plot_pose_relationships(path, name, order, fig_size, fig_format, outpath):
    """Plot one histogram figure per extracted pose feature, one subplot per
    cluster assignment, and save each figure to disk.

    Features are grouped as pairwise distances, pairwise angular changes, and
    per-point displacements; distances/displacements are converted to
    centimeters with a hard-coded factor (23.5126 — presumably pixels per cm,
    TODO confirm), angles are plotted in degrees per the axis label.

    :param path: app-data location passed to the project `appdata` loader
    :param name: dataset name (also used in output file names)
    :param order: iterable of cluster ids fixing the subplot order
    :param fig_size: (width, height) in inches for each figure
    :param fig_format: matplotlib savefig format string
    :param outpath: directory/prefix for the saved figures
    """
    appdata_ = appdata(path, name)
    f_10fps_sub, _ = appdata_.load_embeddings()
    _, assignments, _, _ = appdata_.load_clusters()
    length_nm = []
    angle_nm = []
    disp_nm = []
    # Feature layout assumes shape[1] = p**2 for p tracked points:
    # C(p,2) distances, C(p,2) angles, then p displacements.
    for i, j in itertools.combinations(range(0, int(np.sqrt(f_10fps_sub.shape[1]))), 2):
        length_nm.append(['distance between points:', i + 1, j + 1])
        angle_nm.append(['angular change for points:', i + 1, j + 1])
    for i in range(int(np.sqrt(f_10fps_sub.shape[1]))):
        disp_nm.append(['displacement for point:', i + 1, i + 1])
    keys = np.arange(len(length_nm) + len(angle_nm) + len(disp_nm))
    POSE_RELATIONSHIPS = OrderedDict({key: [] for key in keys})
    for m, feat_name in enumerate(length_nm):
        POSE_RELATIONSHIPS[m] = feat_name
    for n, feat_name in enumerate(angle_nm):
        POSE_RELATIONSHIPS[m + n + 1] = feat_name
    for o, feat_name in enumerate(disp_nm):
        POSE_RELATIONSHIPS[m + n + o + 2] = feat_name
    # One colormap sample per cluster.
    R = np.linspace(0, 1, len(np.unique(assignments)))
    cm = plt.cm.get_cmap("Spectral")(R)
    for f in range(f_10fps_sub.shape[1]):
        fig = figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
        fig.suptitle("{}".format(POSE_RELATIONSHIPS[f]), fontsize=30)
        k = 0
        for i in order:
            k += 1
            ax = plt.subplot(len(np.unique(assignments)), 1, k)
            # NOTE: `m` and `n` here are the loop variables leaked from the
            # POSE_RELATIONSHIPS loops above; the test selects distance and
            # displacement features (cm scale) vs. angle features (degrees).
            if f <= m or f > m + n + 1:
                values, base = np.histogram(f_10fps_sub[assignments == i, f] / 23.5126,
                                            bins=np.linspace(0, np.mean(f_10fps_sub[assignments == i, f] / 23.5126) +
                                                             3 * np.std(f_10fps_sub[assignments == i, f] / 23.5126),
                                                             num=50),
                                            weights=np.ones(len(f_10fps_sub[assignments == i, f])) /
                                                    len(f_10fps_sub[assignments == i, f]),
                                            density=False)
                values = np.append(values, 0)
                ax.plot(base, values,
                        color=cm[k-1], marker='None', linestyle='-', linewidth=5)
                ax.set_xlim(0, np.mean(f_10fps_sub[:, f] / 23.5126) + 3 * np.std(f_10fps_sub[:, f] / 23.5126))
                # Only the bottom subplot keeps x tick labels.
                if i < len(np.unique(assignments)) - 2:
                    ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False, labelsize=16)
                    ax.tick_params(axis='y', which='both', right=False, labelright=False, labelsize=16)
                else:
                    ax.tick_params(labelsize=16)
                    ax.set_xticks(np.linspace(0, np.mean(f_10fps_sub[:, f] / 23.5126) +
                                              3 * np.std(f_10fps_sub[:, f] / 23.5126), num=5))
                fig.text(0.5, 0.07, 'Centimeters', ha='center', fontsize=16)
                fig.text(0.03, 0.5, 'Probability', va='center', rotation='vertical', fontsize=16)
            else:
                values, base = np.histogram(f_10fps_sub[assignments == i, f],
                                            bins=np.linspace(np.mean(f_10fps_sub[assignments == i, f]) -
                                                             3 * np.std(f_10fps_sub[assignments == i, f]),
                                                             np.mean(f_10fps_sub[assignments == i, f]) +
                                                             3 * np.std(f_10fps_sub[assignments == i, f]), num=50),
                                            weights=np.ones(len(f_10fps_sub[assignments == i, f])) /
                                                    len(f_10fps_sub[assignments == i, f]),
                                            density=False)
                values = np.append(values, 0)
                ax.plot(base, values,
                        color=cm[k-1], marker='None', linestyle='-', linewidth=5)
                ax.set_xlim(np.mean(f_10fps_sub[:, f]) - 3 * np.std(f_10fps_sub[:, f]),
                            np.mean(f_10fps_sub[:, f]) + 3 * np.std(f_10fps_sub[:, f]))
                if i < len(np.unique(assignments)) - 2:
                    ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False, labelsize=16)
                    ax.tick_params(axis='y', which='both', right=False, labelright=False, labelsize=16)
                else:
                    ax.tick_params(labelsize=16)
                    ax.set_xticks(np.linspace(np.mean(f_10fps_sub[:, f]) -
                                              3 * np.std(f_10fps_sub[:, f]),
                                              np.mean(f_10fps_sub[:, f]) +
                                              3 * np.std(f_10fps_sub[:, f]), num=5))
                fig.text(0.5, 0.07, 'Degrees', ha='center', fontsize=16)
                fig.text(0.03, 0.5, 'Probability', va='center', rotation='vertical', fontsize=16)
        plt.savefig(str.join('', (outpath, '{}_{}_histogram.'.format(name, POSE_RELATIONSHIPS[f]), fig_format)),
                    format=fig_format, transparent=True)
        plt.close()
    return
def main(argv):
    """CLI wrapper: collect options and render the per-feature pose histograms."""
    flag_to_name = {'-p': 'path', '--path': 'path',
                    '-f': 'name', '--file': 'name',
                    '-r': 'order', '--order': 'order',
                    '-m': 'fig_format', '--format': 'fig_format',
                    '-o': 'outpath', '--outpath': 'outpath'}
    values = dict.fromkeys(('path', 'name', 'order', 'fig_format', 'outpath'))
    parsed, _ = getopt.getopt(
        argv[1:],
        'p:f:r:m:o:',
        ['path=', 'file=', 'order=', 'format=', 'outpath='])
    for flag, arg in parsed:
        values[flag_to_name[flag]] = arg
    banner = '*' * 50
    print(banner)
    print('PATH :', values['path'])
    print('NAME :', values['name'])
    print('ORDER :', values['order'])
    print('FIG FORMAT :', values['fig_format'])
    print('OUT PATH :', values['outpath'])
    print(banner)
    print('Plotting...')
    plot_pose_relationships(values['path'], values['name'], literal_eval(values['order']),
                            (11, 16), values['fig_format'], values['outpath'])


if __name__ == '__main__':
    main(sys.argv)
| 6,572
| 49.953488
| 117
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/frameshift_coherence.py
|
import numpy as np
from utilities.load_data import appdata
from kfold_accuracy import reorganize_accuracy
from utilities.save_data import results
import sys, getopt
from ast import literal_eval
def generate_coherence(path, name, fps, target_fps, frame_skips, animal_index, t, order):
    """Measure, per behavior group, how well labels subsampled at increasing
    strides agree with the full-rate frameshifted labels.

    :param path: app-data location passed to the project `appdata` loader
    :param name: dataset name for the loader
    :param fps: native framerate of the stored predictions
    :param target_fps: framerate the labels are upsampled to before striding
    :param frame_skips: iterable of integer strides to evaluate
    :param animal_index: index of the animal's label stream in fs_labels
    :param t: number of native-rate frames to analyze
    :param order: group reordering passed to `reorganize_accuracy`
    :return: np.ndarray, one row per frame skip, one column per group
    """
    appdata_ = appdata(path, name)
    flders, flder, filenames, data_new, fs_labels = appdata_.load_predictions()
    coherence_data = []
    # np.floor returns a float; np.repeat requires an integer repeat count on
    # modern NumPy, so cast explicitly (behavior unchanged for valid inputs).
    upsample = int(np.floor(target_fps / fps))
    labels = np.repeat(fs_labels[animal_index], upsample)
    t = int(t * upsample)
    for i in frame_skips:
        # Simulate a slower camera: keep every i-th label, then hold each
        # kept label for i frames.
        downsampled_labels = labels[0:t:i]
        filled_labels = np.repeat(downsampled_labels, i)
        coh_vec = []
        for j in range(len(np.unique(fs_labels[0][0:t]))):
            # Fraction of group-j frames whose held label matches the
            # full-rate label.
            coh_vec.append(
                len(np.argwhere((filled_labels[0:t] - labels[0:t] == 0) & (labels[0:t] == j)))
                / len(np.argwhere(labels[0:t] == j)))
        coherence_data.append(np.array(coh_vec))
    coherence_data = np.array(coherence_data)
    coherence_reordered = reorganize_accuracy(coherence_data, order)
    return np.array(coherence_reordered)
def main(argv):
    """CLI entry point: compute frameshift coherence and persist it via `results`.

    Options:
        -p/--path, -n/--file   location/name for the project data loaders
        -f/--fps               native prediction framerate (int)
        -F/--target_fps        framerate to upsample labels to (int)
        -s/--frame_skips       python literal list of integer strides
        -i/--animal_idx        index of the animal to analyze (int)
        -t/--time              number of native-rate frames to use (int)
        -o/--order             python literal group ordering
        -v/--variable          name under which the result is saved
    """
    path = None
    name = None
    fps = None
    target_fps = None
    frame_skips = None
    animal_index = None
    t = None
    order = None
    vname = None
    options, args = getopt.getopt(
        argv[1:],
        'p:n:f:F:s:i:t:o:v:',
        ['path=', 'file=', 'fps=', 'target_fps=', 'frame_skips=', 'animal_idx=', 'time=', 'order=', 'variable='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-n', '--file'):
            name = option_value
        # BUGFIX: the long option is declared as 'fps=' above, so getopt
        # returns '--fps'; the original test against '--framerate' never
        # matched and silently left `fps` as None (crashing on int(None)
        # below) whenever the long form was used.
        elif option_key in ('-f', '--fps'):
            fps = option_value
        elif option_key in ('-F', '--target_fps'):
            target_fps = option_value
        elif option_key in ('-s', '--frame_skips'):
            frame_skips = option_value
        elif option_key in ('-i', '--animal_idx'):
            animal_index = option_value
        elif option_key in ('-t', '--time'):
            t = option_value
        elif option_key in ('-o', '--order'):
            order = option_value
        elif option_key in ('-v', '--variable'):
            vname = option_value
    print('*' * 50)
    print('PATH :', path)
    print('NAME :', name)
    print('FRAMERATE :', fps)
    print('TARGET FRAMERATE :', target_fps)
    print('FRAME SKIPS :', frame_skips)
    print('ANIMAL INDEX :', animal_index)
    print('TIME :', t)
    print('ORDER :', order)
    print('VARIABLE :', vname)
    print('*' * 50)
    print('Computing...')
    coherence_reordered = generate_coherence(path, name, int(fps), int(target_fps), literal_eval(frame_skips),
                                             int(animal_index), int(t), literal_eval(order))
    results_ = results(path, name)
    results_.save_sav(coherence_reordered, vname)


if __name__ == '__main__':
    main(sys.argv)
| 3,008
| 34.821429
| 113
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/__init__.py
| 0
| 0
| 0
|
py
|
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/fsdiff_hist.py
|
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
from utilities.load_data import load_mat
import sys, getopt
from ast import literal_eval
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import seaborn as sns
def add_subplot_axes(ax, rect, axisbg='w'):
    """Embed a child Axes inside *ax* at the fractional rectangle *rect*.

    :param ax: parent matplotlib Axes
    :param rect: (x0, y0, width, height) in the parent's axes coordinates
    :param axisbg: face color of the inset axes
    :return: the newly created inset Axes, with tick-label sizes scaled by
        the square root of the inset's width/height fractions
    """
    parent_fig = plt.gcf()
    parent_box = ax.get_position()
    # Map the rect origin from parent-axes coordinates into figure coordinates.
    origin_display = ax.transAxes.transform(rect[0:2])
    origin_figure = parent_fig.transFigure.inverted().transform(origin_display)
    inset_width = parent_box.width * rect[2]
    inset_height = parent_box.height * rect[3]
    subax = parent_fig.add_axes(
        [origin_figure[0], origin_figure[1], inset_width, inset_height],
        facecolor=axisbg)
    # Shrink tick labels roughly in proportion to the inset's relative size.
    scaled_x = subax.get_xticklabels()[0].get_size() * rect[2] ** 0.5
    scaled_y = subax.get_yticklabels()[0].get_size() * rect[3] ** 0.5
    subax.xaxis.set_tick_params(labelsize=scaled_x)
    subax.yaxis.set_tick_params(labelsize=scaled_y)
    return subax
def plot_neural_fs_adv(var, data1, data2, order, c, x_range, bn, tk, fig_size, fig_format, outpath):
    """Plot, per group, overlaid duration histograms for non-frameshifted
    (data1) vs. frameshifted (data2) results, each with an inset bar chart of
    the two groups' event counts, on a 3x4 subplot grid; saves one figure.

    :param var: sequence whose first element names the output file
    :param data1, data2: sequences of 2-D arrays, one per group; columns are
        events (shape[1] is used as the event count)
    :param order: subplot position (0-based) for each group on the 3x4 grid
    :param c: pair of colors, used for the inset count bars
    :param x_range: (xmin, xmax) shared x-limits for all subplots
    :param bn: histogram bin width
    :param tk: number of x-tick intervals
    :param fig_size: (width, height) in inches
    :param fig_format: matplotlib savefig format string
    :param outpath: directory/prefix for the saved figure
    """
    fig = figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    axes = []
    Values1 = []
    Values2 =[]
    count1 = []
    count2 = []
    # Inset rectangle (axes coords) for the count bar chart.
    subpos = [0.5, 0.6, 0.3, 0.3]
    for i in range(len(data1)):
        axes.append(fig.add_subplot(3, 4, order[i]+1))
        plt.subplots_adjust(wspace=0.5, hspace=0.5)
        # Pre-computed normalized histograms (only the plt.hist calls below
        # actually draw; Values1/Values2 feed the commented-out line plots).
        values1, base = np.histogram(np.concatenate(data1[i].T), bins=np.arange(0, x_range[1]+0.01, bn),
                                     weights=np.ones(len(data1[i].T)) / len(data1[i].T), density=False)
        values2, base = np.histogram(np.concatenate(data2[i].T), bins=np.arange(0, x_range[1]+0.01, bn),
                                     weights=np.ones(len(data2[i].T)) / len(data2[i].T), density=False)
        Values1.append(np.append(values1, 0))
        Values2.append(np.append(values2, 0))
        count1.append(data1[i].shape[1])
        count2.append(data2[i].shape[1])
    for i, axis in enumerate(axes):
        axis.set_xlim(x_range[0], x_range[1])
        # axis.plot(base, Values1[i], color=c[0], marker='None', linestyle='-', linewidth=2)
        # axis.plot(base, Values2[i], color=c[1], marker='None', linestyle='-', linewidth=2)
        plt.hist(np.concatenate(data1[i].T), bins=np.arange(0, x_range[1]+0.01, bn),
                 weights=np.ones(len(data1[i].T)) / len(data1[i].T), density=False)
        plt.hist(np.concatenate(data2[i].T), bins=np.arange(0, x_range[1]+0.01, bn),
                 weights=np.ones(len(data2[i].T)) / len(data2[i].T), density=False)
        # Inset bar chart comparing event counts of the two conditions.
        subax1 = add_subplot_axes(axis, subpos)
        subax1.bar(np.arange(0, 2), [count1[i], count2[i]], color=c)
        axis.set_xticks(np.arange(x_range[0], x_range[1]+0.1, (x_range[1]-x_range[0])/tk))
        axis.set_yticks(np.arange(0, 0.55, 0.5))
        # All tick labels stripped; insets keep no ticks at all.
        axis.xaxis.set_ticklabels([])
        axis.yaxis.set_ticklabels([])
        subax1.xaxis.set_ticklabels([])
        subax1.set_xticks([])
        subax1.yaxis.set_ticklabels([])
        subax1.set_yticks([])
        axis.spines['top'].set_visible(False)
        axis.spines['top'].set_linewidth(3)
        axis.spines['right'].set_visible(False)
        axis.spines['right'].set_linewidth(3)
        axis.spines['bottom'].set_visible(True)
        axis.spines['bottom'].set_linewidth(3)
        axis.spines['left'].set_visible(True)
        axis.spines['left'].set_linewidth(3)
        axis.spines['top'].set_color('k')
        axis.spines['right'].set_color('k')
        axis.spines['bottom'].set_color('k')
        axis.spines['left'].set_color('k')
        axis.tick_params(length=10, width=3)
    plt.savefig(str.join('', (outpath, '{}_fsvsnonfs_duration_counts.'.format(var[0]), fig_format)),
                format=fig_format, transparent=True)
def main(argv):
    """CLI wrapper: parse options, load the duration .mat file, and draw the
    frameshift-vs-non-frameshift duration/count figure."""
    dest = {'-p': 'path', '--path': 'path',
            '-v': 'var', '--variables': 'var',
            '-c': 'c', '--colors': 'c',
            '-r': 'x_range', '--range': 'x_range',
            '-O': 'order', '--order': 'order',
            '-m': 'fig_format', '--format': 'fig_format',
            '-o': 'outpath', '--outpath': 'outpath'}
    values = dict.fromkeys(dest.values())
    options, _ = getopt.getopt(
        argv[1:],
        'p:v:c:r:O:m:o:',
        ['path=', 'variables=', 'colors=', 'range=', 'order=', 'format=', 'outpath='])
    for flag, arg in options:
        values[dest[flag]] = arg
    banner = '*' * 50
    print(banner)
    print('PATH :', values['path'])
    print('VARIABLES :', values['var'])
    print('COLOR :', values['c'])
    print('RANGE :', values['x_range'])
    print('ORDER :', values['order'])
    print('FIG FORMAT :', values['fig_format'])
    print('OUT PATH :', values['outpath'])
    print(banner)
    print('Plotting...')
    mat = load_mat(values['path'])
    plot_neural_fs_adv(literal_eval(values['var']), mat['L5nb_nonfsdurs'][0], mat['L5nb_fsdurs'][0],
                       literal_eval(values['order']), literal_eval(values['c']),
                       literal_eval(values['x_range']), 0.01, 3, (16, 12),
                       values['fig_format'], values['outpath'])


if __name__ == '__main__':
    main(sys.argv)
| 5,406
| 39.350746
| 116
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/immse_heatmap.py
|
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
from utilities.load_data import load_mat
import sys, getopt
from ast import literal_eval
def plot_heatmap(algo, data, c, c_range, discrete_n, delim, cl, fig_size, fig_format, outpath, cb=False):
    """Render the MSE matrix as a diverging-palette heatmap with dashed
    lines marking cumulative group boundaries, and save it.

    :param algo: algorithm label, used in the output file name
    :param data: 2-D matrix for sns.heatmap
    :param c: base color — currently unused (only referenced by the
        commented-out light_palette alternative)
    :param c_range: (vmin, vmax) color-scale limits
    :param discrete_n: palette size — currently unused (see comment below)
    :param delim: 2-D array of group sizes; cumulative sums place the lines
    :param cl: color of the dashed delimiter lines
    :param fig_size: (width, height) in inches
    :param fig_format: matplotlib savefig format string
    :param outpath: directory/prefix for the saved figure
    :param cb: whether to draw (and style) a colorbar
    """
    figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = plt.subplot()
    # Fixed 5-step purple/orange diverging palette centered at 1.
    cm = sns.diverging_palette(275, 35, center='light', s=100, l=70, n=5)
    # cm = sns.light_palette(c, n_colors=discrete_n)
    sns.heatmap(data, vmin=c_range[0], vmax=c_range[1], center=1, cmap=cm, cbar=cb, ax=ax)
    # sns.heatmap(data, vmin=c_range[0], vmax=c_range[1], cmap=cm, cbar=cb, ax=ax)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.spines['bottom'].set_visible(False)
    ax.spines['left'].set_visible(False)
    # Dashed lines at each cumulative group boundary (both axes).
    for i in range(delim.shape[1]):
        plt.axvline(x=np.cumsum(delim)[i] - 1, linewidth=4, linestyle='--', color=cl)
        plt.axhline(y=np.cumsum(delim)[i], linewidth=4, linestyle='--', color=cl)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    if cb:
        # The colorbar axes is the last one seaborn added to the figure.
        cax = plt.gcf().axes[-1]
        cax.tick_params(length=20, width=5, color='k')
    plt.savefig(str.join('', (outpath, algo, '_mse_matrix.', fig_format)), format=fig_format, transparent=True)
def main(argv):
    """CLI entry point: parse options, load the .mat MSE matrices, and draw
    the heatmap for the requested algorithm.

    Options:
        -p/--path        input .mat file handed to load_mat
        -a/--algorithm   'MotionMapper' or 'B-SOiD'
        -c/--color, -r/--range, -n/--discrete_n, -l/--cline,
        -m/--format, -o/--outpath  — forwarded to plot_heatmap
        -b/--colorbar    '1' to draw a colorbar; omitted or '0' means none

    Raises:
        ValueError: if --algorithm is not one of the two recognized values.
    """
    path = None
    algorithm = None
    c = None
    c_range = None
    discrete_n = None
    cline = None
    fig_format = None
    outpath = None
    cb = None
    options, args = getopt.getopt(
        argv[1:],
        'p:a:c:r:n:l:m:o:b:',
        ['path=', 'algorithm=', 'color=', 'range=', 'discrete_n=', 'cline=', 'format=', 'outpath=', 'colorbar='])
    for option_key, option_value in options:
        if option_key in ('-p', '--path'):
            path = option_value
        elif option_key in ('-a', '--algorithm'):
            algorithm = option_value
        elif option_key in ('-c', '--color'):
            c = option_value
        elif option_key in ('-r', '--range'):
            c_range = option_value
        elif option_key in ('-n', '--discrete_n'):
            discrete_n = option_value
        elif option_key in ('-l', '--cline'):
            cline = option_value
        elif option_key in ('-m', '--format'):
            fig_format = option_value
        elif option_key in ('-o', '--outpath'):
            outpath = option_value
        elif option_key in ('-b', '--colorbar'):
            cb = option_value
    print('*' * 50)
    print('PATH :', path)
    print('ALGORITHM :', algorithm)
    print('COLOR :', c)
    print('RANGE :', c_range)
    print('DISCRETE COLORS :', discrete_n)
    print('LINE COLOR :', cline)
    print('FIG FORMAT :', fig_format)
    print('OUT PATH :', outpath)
    print('COLORBAR :', cb)
    print('*' * 50)
    print('Plotting...')
    mat = load_mat(path)
    if algorithm == 'MotionMapper':
        data = mat['mm_mat_norm2_']
        delim = mat['mm_mse_counts']
    elif algorithm == 'B-SOiD':
        data = mat['bsf_mat_norm2_']
        delim = mat['bsf_mse_counts']
    else:
        # Fail fast with a clear message instead of a NameError on `data`/`delim`.
        raise ValueError(
            "algorithm must be 'MotionMapper' or 'B-SOiD', got {!r}".format(algorithm))
    if cb is None:
        # -b/--colorbar not supplied: default to no colorbar instead of
        # crashing on int(None) below.
        cb = '0'
    plot_heatmap(algorithm, data, c, literal_eval(c_range), int(discrete_n), delim, cline,
                 (16, 14), fig_format, outpath, bool(int(cb)))


if __name__ == '__main__':
    main(sys.argv)
| 3,372
| 35.268817
| 113
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/kinematics_cdf.py
|
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import numpy as np
from utilities.load_data import load_mat
import sys, getopt
from ast import literal_eval
def plot_cdf(var, data, c, x_range, bn, tk, leg, fig_size, fig_format, outpath):
    """Plot the empirical CDFs of two kinematics samples on one axes and
    save the figure.

    :param var: sequence whose first element names the output file
    :param data: pair of 1-D samples; data[0] is labeled "A2A Ctrl." and
        data[1] "A2A Casp." (legend labels are hard-coded)
    :param c: pair of line colors
    :param x_range: (xmin, xmax) for the shared x axis
    :param bn: histogram bin width used to build the CDF
    :param tk: number of x-tick intervals
    :param leg: truthy to draw the legend
    :param fig_size: (width, height) in inches
    :param fig_format: matplotlib savefig format string
    :param outpath: directory/prefix for the saved figure
    """
    figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
    ax = plt.axes()
    # Normalized histograms; the running sum divided by the final total
    # yields the empirical CDF plotted below.
    values1, base = np.histogram(data[0], bins=np.arange(0, x_range[1]+0.5, bn),
                                 weights=np.ones(len(data[0])) / len(data[0]), density=False)
    values2, base = np.histogram(data[1], bins=np.arange(0, x_range[1]+0.5, bn),
                                 weights=np.ones(len(data[1])) / len(data[1]), density=False)
    values1 = np.append(values1, 0)
    values2 = np.append(values2, 0)
    ax.plot(base, np.cumsum(values1) / np.cumsum(values1)[-1],
            color=c[0], marker='None', linestyle='-',
            label="A2A Ctrl.", linewidth=8)
    ax.plot(base, np.cumsum(values2) / np.cumsum(values2)[-1],
            color=c[1], marker='None', linestyle='-',
            label="A2A Casp.", linewidth=8)
    ax.set_xlim(x_range[0], x_range[1])
    ax.set_ylim(0, 1)
    ax.set_axisbelow(True)
    ax.grid(linestyle='-', linewidth=5, axis='both')
    ax.set_xticks(np.arange(x_range[0], x_range[1]+0.1, (x_range[1]-x_range[0])/tk))
    if leg:
        # NOTE: legendHandles/_legmarker are private matplotlib internals;
        # may break on newer matplotlib versions.
        lgnd = plt.legend(loc=0, prop={'family': 'Helvetica', 'size': 60})
        lgnd.legendHandles[0]._legmarker.set_markersize(8)
        lgnd.legendHandles[1]._legmarker.set_markersize(8)
    ax.set_yticks(np.arange(0, 1.1, 0.2))
    # Tick labels stripped deliberately; heavy spines kept on all four sides.
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])
    ax.spines['top'].set_visible(True)
    ax.spines['top'].set_linewidth(5)
    ax.spines['right'].set_visible(True)
    ax.spines['right'].set_linewidth(5)
    ax.spines['bottom'].set_visible(True)
    ax.spines['bottom'].set_linewidth(5)
    ax.spines['left'].set_visible(True)
    ax.spines['left'].set_linewidth(5)
    ax.spines['top'].set_color('k')
    ax.spines['right'].set_color('k')
    ax.spines['bottom'].set_color('k')
    ax.spines['left'].set_color('k')
    ax.tick_params(length=25, width=5)
    plt.savefig(str.join('', (outpath, '{}_kinematics_cdf.'.format(var[0]), fig_format)),
                format=fig_format, transparent=True)
def main(argv):
    """CLI wrapper: parse options, pull both kinematics samples from the
    .mat file, and plot their CDFs."""
    dest = {'-p': 'path', '--path': 'path',
            '-v': 'var', '--variables': 'var',
            '-c': 'c', '--colors': 'c',
            '-r': 'x_range', '--range': 'x_range',
            '-l': 'leg', '--legend': 'leg',
            '-m': 'fig_format', '--format': 'fig_format',
            '-o': 'outpath', '--outpath': 'outpath'}
    values = dict.fromkeys(dest.values())
    options, _ = getopt.getopt(
        argv[1:],
        'p:v:c:r:l:m:o:',
        ['path=', 'variables=', 'colors=', 'range=', 'legend=', 'format=', 'outpath='])
    for flag, arg in options:
        values[dest[flag]] = arg
    banner = '*' * 50
    print(banner)
    print('PATH :', values['path'])
    print('VARIABLES :', values['var'])
    print('COLOR :', values['c'])
    print('RANGE :', values['x_range'])
    print('FIG FORMAT :', values['fig_format'])
    print('OUT PATH :', values['outpath'])
    print(banner)
    print('Plotting...')
    mat = load_mat(values['path'])
    spec = literal_eval(values['var'])
    data = [mat[spec[0]][0], mat[spec[1]][0]]
    plot_cdf(spec, data, literal_eval(values['c']), literal_eval(values['x_range']),
             0.01, 4, int(values['leg']), (16, 12), values['fig_format'], values['outpath'])


if __name__ == '__main__':
    main(sys.argv)
| 3,747
| 36.48
| 93
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/pose_relationships_hist2.py
|
import numpy as np
import itertools
from collections import OrderedDict
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from utilities.load_data import appdata
import sys, getopt
from ast import literal_eval
def plot_pose_relationships(path, name, order, fig_size, fig_format, outpath):
    """Plot one histogram figure per extracted pose feature, one subplot per
    cluster assignment, and save each figure to disk.

    Variant of the sibling script using a different pixel-to-centimeter
    factor (14.7553 — presumably pixels per cm, TODO confirm); otherwise the
    structure matches: distance/displacement features are plotted in cm,
    angle features in degrees.

    :param path: app-data location passed to the project `appdata` loader
    :param name: dataset name (also used in output file names)
    :param order: iterable of cluster ids fixing the subplot order
    :param fig_size: (width, height) in inches for each figure
    :param fig_format: matplotlib savefig format string
    :param outpath: directory/prefix for the saved figures
    """
    appdata_ = appdata(path, name)
    f_10fps_sub, _ = appdata_.load_embeddings()
    _, assignments, _, _ = appdata_.load_clusters()
    length_nm = []
    angle_nm = []
    disp_nm = []
    # Feature layout assumes shape[1] = p**2 for p tracked points:
    # C(p,2) distances, C(p,2) angles, then p displacements.
    for i, j in itertools.combinations(range(0, int(np.sqrt(f_10fps_sub.shape[1]))), 2):
        length_nm.append(['distance between points:', i + 1, j + 1])
        angle_nm.append(['angular change for points:', i + 1, j + 1])
    for i in range(int(np.sqrt(f_10fps_sub.shape[1]))):
        disp_nm.append(['displacement for point:', i + 1, i + 1])
    keys = np.arange(len(length_nm) + len(angle_nm) + len(disp_nm))
    POSE_RELATIONSHIPS = OrderedDict({key: [] for key in keys})
    for m, feat_name in enumerate(length_nm):
        POSE_RELATIONSHIPS[m] = feat_name
    for n, feat_name in enumerate(angle_nm):
        POSE_RELATIONSHIPS[m + n + 1] = feat_name
    for o, feat_name in enumerate(disp_nm):
        POSE_RELATIONSHIPS[m + n + o + 2] = feat_name
    # One colormap sample per cluster.
    R = np.linspace(0, 1, len(np.unique(assignments)))
    cm = plt.cm.get_cmap("Spectral")(R)
    for f in range(f_10fps_sub.shape[1]):
        fig = figure(num=None, figsize=fig_size, dpi=300, facecolor='w', edgecolor='k')
        fig.suptitle("{}".format(POSE_RELATIONSHIPS[f]), fontsize=30)
        k = 0
        for i in order:
            k += 1
            ax = plt.subplot(len(np.unique(assignments)), 1, k)
            # NOTE: `m` and `n` are the loop variables leaked from the
            # POSE_RELATIONSHIPS loops above; this selects distance and
            # displacement features (cm scale) vs. angle features (degrees).
            if f <= m or f > m + n + 1:
                values, base = np.histogram(f_10fps_sub[assignments == i, f] / 14.7553,
                                            bins=np.linspace(0, np.mean(f_10fps_sub[assignments == i, f] / 14.7553) +
                                                             3 * np.std(f_10fps_sub[assignments == i, f] / 14.7553),
                                                             num=50),
                                            weights=np.ones(len(f_10fps_sub[assignments == i, f])) /
                                                    len(f_10fps_sub[assignments == i, f]),
                                            density=False)
                values = np.append(values, 0)
                ax.plot(base, values,
                        color=cm[k-1], marker='None', linestyle='-', linewidth=5)
                ax.set_xlim(0, np.mean(f_10fps_sub[:, f] / 14.7553) + 3 * np.std(f_10fps_sub[:, f] / 14.7553))
                # Only the bottom subplot keeps x tick labels.
                if i < len(np.unique(assignments)) - 2:
                    ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False, labelsize=16)
                    ax.tick_params(axis='y', which='both', right=False, labelright=False, labelsize=16)
                else:
                    ax.tick_params(labelsize=16)
                    ax.set_xticks(np.linspace(0, np.mean(f_10fps_sub[:, f] / 14.7553) +
                                              3 * np.std(f_10fps_sub[:, f] / 14.7553), num=5))
                fig.text(0.5, 0.07, 'Centimeters', ha='center', fontsize=16)
                fig.text(0.03, 0.5, 'Probability', va='center', rotation='vertical', fontsize=16)
            else:
                values, base = np.histogram(f_10fps_sub[assignments == i, f],
                                            bins=np.linspace(np.mean(f_10fps_sub[assignments == i, f]) -
                                                             3 * np.std(f_10fps_sub[assignments == i, f]),
                                                             np.mean(f_10fps_sub[assignments == i, f]) +
                                                             3 * np.std(f_10fps_sub[assignments == i, f]), num=50),
                                            weights=np.ones(len(f_10fps_sub[assignments == i, f])) /
                                                    len(f_10fps_sub[assignments == i, f]),
                                            density=False)
                values = np.append(values, 0)
                ax.plot(base, values,
                        color=cm[k-1], marker='None', linestyle='-', linewidth=5)
                ax.set_xlim(np.mean(f_10fps_sub[:, f]) - 3 * np.std(f_10fps_sub[:, f]),
                            np.mean(f_10fps_sub[:, f]) + 3 * np.std(f_10fps_sub[:, f]))
                if i < len(np.unique(assignments)) - 2:
                    ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False, labelsize=16)
                    ax.tick_params(axis='y', which='both', right=False, labelright=False, labelsize=16)
                else:
                    ax.tick_params(labelsize=16)
                    ax.set_xticks(np.linspace(np.mean(f_10fps_sub[:, f]) -
                                              3 * np.std(f_10fps_sub[:, f]),
                                              np.mean(f_10fps_sub[:, f]) +
                                              3 * np.std(f_10fps_sub[:, f]), num=5))
                fig.text(0.5, 0.07, 'Degrees', ha='center', fontsize=16)
                fig.text(0.03, 0.5, 'Probability', va='center', rotation='vertical', fontsize=16)
        plt.savefig(str.join('', (outpath, '{}_{}_histogram.'.format(name, POSE_RELATIONSHIPS[f]), fig_format)),
                    format=fig_format, transparent=True)
        plt.close()
    return
def main(argv):
    """Parse the command line and generate the pose-relationship histograms."""
    spellings = {'-p': 'path', '--path': 'path',
                 '-f': 'name', '--file': 'name',
                 '-r': 'order', '--order': 'order',
                 '-m': 'fig_format', '--format': 'fig_format',
                 '-o': 'outpath', '--outpath': 'outpath'}
    args = dict.fromkeys(('path', 'name', 'order', 'fig_format', 'outpath'))
    options, _ = getopt.getopt(
        argv[1:],
        'p:f:r:m:o:',
        ['path=', 'file=', 'order=', 'format=', 'outpath='])
    for flag, value in options:
        args[spellings[flag]] = value
    print('*' * 50)
    print('PATH :', args['path'])
    print('NAME :', args['name'])
    print('ORDER :', args['order'])
    print('FIG FORMAT :', args['fig_format'])
    print('OUT PATH :', args['outpath'])
    print('*' * 50)
    print('Plotting...')
    plot_pose_relationships(args['path'], args['name'], literal_eval(args['order']),
                            (11, 16), args['fig_format'], args['outpath'])


if __name__ == '__main__':
    main(sys.argv)
| 6,572
| 49.953488
| 117
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/utilities/load_json.py
|
import pandas as pd
import numpy as np
import glob
from tqdm import tqdm

# Source directory of per-frame OpenPose JSON output (BODY_25 model).
path = '/Volumes/Elements/Drive/Data/Nahom/output/exercise2/'
POSE_BODY_25_BODY_PARTS = {0: "Nose", 1: "Neck", 2: "RShoulder", 3: "RElbow", 4: "RWrist", 5: "LShoulder", 6: "LElbow",
                           7: "LWrist", 8: "MidHip", 9: "RHip", 10: "RKnee", 11: "RAnkle", 12: "LHip", 13: "LKnee",
                           14: "LAnkle", 15: "REye", 16: "LEye", 17: "REar", 18: "LEar", 19: "LBigToe", 20: "LSmallToe",
                           21: "LHeel", 22: "RBigToe", 23: "RSmallToe", 24: "RHeel", 25: "Background"}
# NOTE(review): glob returns files in arbitrary order — if frame order matters
# downstream, the list should be naturally sorted first; preserved as-is here.
filenames = glob.glob(path + '/*.json')
# 25 body parts x (x, y, likelihood) = 75 columns, one row per frame.
xyl_array = np.empty((len(filenames), 75))
empty_count = 0
for j, f in enumerate(tqdm(filenames)):
    df = pd.read_json(f)
    data_arr = df['people']
    try:
        # Keypoints come flattened as [x0, y0, l0, x1, y1, l1, ...];
        # de-interleave, then re-interleave per point.
        data_length = len(data_arr[0]['pose_keypoints_2d'])
        x_val = data_arr[0]['pose_keypoints_2d'][0:data_length:3]
        y_val = data_arr[0]['pose_keypoints_2d'][1:data_length:3]
        l_val = data_arr[0]['pose_keypoints_2d'][2:data_length:3]
        xyl = []
        for i in range(int(data_length / 3)):
            xyl.extend([x_val[i], y_val[i], l_val[i]])
        xyl_array[j, :] = np.array(xyl).reshape(1, len(xyl))
    except IndexError:
        # No person detected in this frame: carry the previous frame forward.
        # (If the very first frame is empty this copies the uninitialized
        # last row — a pre-existing quirk, preserved.)
        xyl_array[j, :] = xyl_array[j - 1, :]
        empty_count += 1
# BUGFIX: divide by the number of files, not the last loop index `j`
# (which undercounted by one and raised ZeroDivisionError for a single file).
empty_ratio = empty_count / len(filenames) if filenames else 0.0
print('There are {}% empty jsons'.format(empty_ratio * 100))
a = []
# NOTE(review): 'Openpose' vs 'OpenPose' capitalization is inconsistent in the
# original and is preserved so downstream level-based selection is unchanged;
# the third index level holds the coordinate kind (x/y/likelihood) despite
# being named 'Frame number'.
for i in range(len(POSE_BODY_25_BODY_PARTS)-1):
    a.extend((('Openpose', POSE_BODY_25_BODY_PARTS[i], 'x'), ('OpenPose', POSE_BODY_25_BODY_PARTS[i], 'y'),
              ('Openpose', POSE_BODY_25_BODY_PARTS[i], 'likelihood')))
micolumns = pd.MultiIndex.from_tuples(a, names=['Algorithm', 'Body parts', 'Frame number'])
df = pd.DataFrame(xyl_array, columns=micolumns)
df.to_csv(str.join('', (path, 'exercise2.csv')), index=True, chunksize=10000, encoding='utf-8')
| 1,888
| 48.710526
| 120
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/utilities/processing.py
|
import numpy as np
import pandas as pd
import re
def convert_int(s):
    """Return int(s) when *s* consists purely of digits, otherwise *s* unchanged."""
    return int(s) if s.isdigit() else s
def alphanum_key(s):
    """Split *s* into alternating text and integer chunks.

    "z23a" -> ["z", 23, "a"]; used as a natural-sort key.
    """
    return [int(chunk) if chunk.isdigit() else chunk
            for chunk in re.split('([0-9]+)', s)]
def sort_nicely(l):
    """Sort *l* in place in natural (human) order, e.g. "a2" before "a10"."""
    def natural_key(s):
        # Alternating text/number chunks; numbers compare numerically.
        return [int(c) if c.isdigit() else c for c in re.split('([0-9]+)', s)]
    l.sort(key=natural_key)
class data_processing:
    """Thin wrapper around a 1-D signal offering smoothing utilities."""

    def __init__(self, data):
        # Raw 1-D sequence to be smoothed.
        self.data = data

    def boxcar_center(self, n):
        """Centered moving average of window width *n*.

        Partial windows at the edges are averaged over the available
        samples (min_periods=1). Returns a numpy array.
        """
        series = pd.Series(self.data)
        smoothed = series.rolling(window=n, min_periods=1, center=True).mean()
        return np.array(smoothed)
| 755
| 20
| 86
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/utilities/detect_peaks.py
|
"""Detect peaks in data based on their amplitude and other features."""
from __future__ import division, print_function
import numpy as np
__author__ = "Marcos Duarte, https://github.com/demotu/BMC"
__version__ = "1.0.4"
__license__ = "MIT"
def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising',
                 kpsh=False, valley=False, show=False, ax=None):
    """Detect peaks in data based on their amplitude and other features.

    Parameters
    ----------
    x : 1D array_like
        data.
    mph : {None, number}, optional (default = None)
        detect peaks that are greater than minimum peak height.
    mpd : positive integer, optional (default = 1)
        detect peaks that are at least separated by minimum peak distance (in
        number of data points).
    threshold : positive number, optional (default = 0)
        detect peaks (valleys) that are greater (smaller) than `threshold`
        in relation to their immediate neighbors.
    edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising')
        for a flat peak, keep only the rising edge ('rising'), only the
        falling edge ('falling'), both edges ('both'), or don't detect a
        flat peak (None).
    kpsh : bool, optional (default = False)
        keep peaks with same height even if they are closer than `mpd`.
    valley : bool, optional (default = False)
        if True (1), detect valleys (local minima) instead of peaks.
    show : bool, optional (default = False)
        if True (1), plot data in matplotlib figure.
    ax : a matplotlib.axes.Axes instance, optional (default = None).

    Returns
    -------
    ind : 1D array_like
        indices of the peaks in `x`.

    Notes
    -----
    The detection of valleys instead of peaks is performed internally by
    simply negating the data: `ind_valleys = detect_peaks(-x)`.
    The function can handle NaN's.
    See this IPython Notebook [1]_.

    References
    ----------
    .. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb
    """
    x = np.atleast_1d(x).astype('float64')
    if x.size < 3:
        return np.array([], dtype=int)
    if valley:
        x = -x
    # First-order differences locate candidate rising/falling edges.
    dx = x[1:] - x[:-1]
    # Handle NaN's: substitute +inf so they never qualify as peaks.
    indnan = np.where(np.isnan(x))[0]
    if indnan.size:
        x[indnan] = np.inf
        dx[np.where(np.isnan(dx))[0]] = np.inf
    ine, ire, ife = np.array([[], [], []], dtype=int)
    if not edge:
        ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0]
    else:
        if edge.lower() in ['rising', 'both']:
            ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0]
        if edge.lower() in ['falling', 'both']:
            ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0]
    ind = np.unique(np.hstack((ine, ire, ife)))
    # NaN's and values adjacent to NaN's cannot be peaks.
    if ind.size and indnan.size:
        # np.isin replaces the deprecated np.in1d; identical result here.
        ind = ind[np.isin(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)]
    # First and last values of x cannot be peaks.
    if ind.size and ind[0] == 0:
        ind = ind[1:]
    if ind.size and ind[-1] == x.size-1:
        ind = ind[:-1]
    # Remove peaks below the minimum peak height.
    if ind.size and mph is not None:
        ind = ind[x[ind] >= mph]
    # Remove peaks whose smaller prominence over its two neighbors is below
    # `threshold`.
    if ind.size and threshold > 0:
        dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0)
        ind = np.delete(ind, np.where(dx < threshold)[0])
    # Enforce the minimum peak distance, keeping the tallest peaks.
    if ind.size and mpd > 1:
        ind = ind[np.argsort(x[ind])][::-1]  # sort ind by peak height (desc)
        idel = np.zeros(ind.size, dtype=bool)
        for i in range(ind.size):
            if not idel[i]:
                # Flag peaks within mpd of the current one; with kpsh, peaks
                # of the same height survive.
                idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \
                       & (x[ind[i]] > x[ind] if kpsh else True)
                idel[i] = 0  # keep current peak
        # Remove the small peaks and sort back the indices by occurrence.
        ind = np.sort(ind[~idel])
    if show:
        if indnan.size:
            x[indnan] = np.nan
        if valley:
            x = -x
        # BUGFIX: the active `_plot` in this module takes (x, ax, ind); the
        # original call passed the old 8-argument signature (mph, mpd,
        # threshold, edge, valley, ...) and raised a TypeError whenever
        # show=True was used.
        _plot(x, ax, ind)
    return ind
# def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
# """Plot results of the detect_peaks function, see its help."""
# try:
# import matplotlib.pyplot as plt
# except ImportError:
# print('matplotlib is not available.')
# else:
# if ax is None:
# _, ax = plt.subplots(1, 1, figsize=(8, 4))
#
# ax.plot(x, 'k', lw=1)
# if ind.size:
# label = 'valley' if valley else 'peak'
# label = label + 's' if ind.size > 1 else label
# ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
# label='%d %s' % (ind.size, label))
# ax.legend(loc='best', framealpha=.5, numpoints=1)
# ax.set_xlim(-.02*x.size, x.size*1.02-1)
# ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
# yrange = ymax - ymin if ymax > ymin else 1
# ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
# ax.set_xlabel('Data #', fontsize=14)
# ax.set_ylabel('Amplitude', fontsize=14)
# mode = 'Valley detection' if valley else 'Peak detection'
# ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
# % (mode, str(mph), mpd, str(threshold), edge))
# # plt.grid()
# # plt.show()
def _plot(x, ax, ind):
    """Plot the signal *x* with detected peak indices *ind* marked.

    Companion plot for the detect_peaks function; creates a new axes when
    *ax* is None. matplotlib is imported lazily so peak detection itself
    works without it installed.
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib as mpl
    except ImportError:
        print('matplotlib is not available.')
    else:
        if ax is None:
            _, ax = plt.subplots(1, 1, figsize=(16, 8.5))
        ax.plot(x, 'k', lw=1)
        hfont = {'fontname': 'Helvetica'}
        if ind.size:
            label = 'peak'
            label = label + 's' if ind.size > 1 else label
            ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=4, ms=16,
                    label='%d %s' % (ind.size, label))
            ax.legend(loc='best', framealpha=.5, numpoints=1, prop={'family': 'Helvetica', 'size': 24})
        ax.set_xlim(-.02*x.size, x.size*1.02-1)
        ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
        # Guard against a flat signal so the y-limits below stay valid.
        yrange = ymax - ymin if ymax > ymin else 1
        ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
        ax.set_xlabel('Frame #', fontsize=24, **hfont)
        # Raw string: '\D' is an invalid escape sequence (SyntaxWarning on
        # Python >= 3.12); the rendered TeX label text is unchanged.
        ax.set_ylabel(r'$\Delta$ Pose', fontsize=24, **hfont)
        # Hide top/right spines, thicken the visible axes for figure export.
        ax.spines['top'].set_visible(False)
        ax.spines['top'].set_linewidth(4)
        ax.spines['right'].set_visible(False)
        ax.spines['right'].set_linewidth(4)
        ax.spines['bottom'].set_visible(True)
        ax.spines['bottom'].set_linewidth(4)
        ax.spines['left'].set_visible(True)
        ax.spines['left'].set_linewidth(4)
        ax.tick_params(length=20, width=4, labelsize=20)
        ticks_font = mpl.font_manager.FontProperties(family='Helvetica', size=24)
        for tick_label in ax.get_xticklabels():
            tick_label.set_fontproperties(ticks_font)
| 8,655
| 37.816143
| 103
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/utilities/statistics.py
|
import numpy as np
import pandas as pd
def transition_matrix(labels, n):
    """Build an n x n state-transition matrix from a label sequence.

    :param labels: 1D sequence of integer labels, each in range(n)
    :param n: number of distinct states (matrix dimension)
    :return B: ndarray, raw transition counts (B[i, j] = # of i->j steps)
    :return df_tm: pandas DataFrame view of the same counts
    :return B_norm: ndarray, counts normalized so each ROW sums to 1
    """
    tm = [[0] * n for _ in range(n)]
    for (i, j) in zip(labels, labels[1:]):
        tm[i][j] += 1
    B = np.array(tm)
    df_tm = pd.DataFrame(tm)
    # keepdims=True makes the row-sum vector (n, 1) so each row is divided
    # by its own sum; without it the (n,) vector broadcasts across columns
    # and normalizes by the WRONG row's total.
    B_norm = B / B.sum(axis=1, keepdims=True)
    return B, df_tm, B_norm
def rle(inarray):
    """Run-length encode *inarray* (partial credit to the R rle function).

    Handles multiple datatypes, including non-NumPy sequences.
    Returns the tuple (runlengths, startpositions, values); all three are
    None for empty input.
    """
    arr = np.asarray(inarray)  # coerce to ndarray (string-safe comparisons)
    size = len(arr)
    if size == 0:
        return (None, None, None)
    change = np.array(arr[1:] != arr[:-1])  # True where a new run begins
    run_ends = np.append(np.nonzero(change)[0], size - 1)  # last index of each run
    run_lengths = np.diff(np.append(-1, run_ends))
    run_starts = np.cumsum(np.append(0, run_lengths))[:-1]
    return run_lengths, run_starts, arr[run_ends]
| 1,066
| 29.485714
| 75
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/utilities/load_data.py
|
import scipy.io
import os
import joblib
def load_mat(file):
    """Read a MATLAB .mat file and return its variable dictionary."""
    contents = scipy.io.loadmat(file)
    return contents
def load_sav(path, name, fname):
    """Load the joblib-pickled file '<name>_<fname>.sav' under *path* as a list."""
    sav_file = os.path.join(path, str.join('', (name, '_', fname, '.sav')))
    with open(sav_file, 'rb') as fh:
        payload = joblib.load(fh)
    return list(payload)
class appdata:
    """Loader for the saved B-SOiD app workspace files sharing one path/name prefix."""

    def __init__(self, path, name):
        self.path = path  # directory holding the .sav files
        self.name = name  # filename prefix shared by all workspace files

    def _read_sav(self, suffix):
        # Open '<name><suffix>' under self.path and unpickle it with joblib.
        with open(os.path.join(self.path, str.join('', (self.name, suffix))), 'rb') as fh:
            return joblib.load(fh)

    def load_data(self):
        """Return the preprocessing-stage workspace contents."""
        BASE_PATH, TRAIN_FOLDERS, FPS, BODYPARTS, filenames, \
            rawdata_li, training_data, perc_rect_li = self._read_sav('_data.sav')
        return BASE_PATH, TRAIN_FOLDERS, FPS, BODYPARTS, filenames, rawdata_li, training_data, perc_rect_li

    def load_feats(self):
        """Return the extracted features and their scaled counterpart."""
        f_10fps, f_10fps_sc = self._read_sav('_feats.sav')
        return f_10fps, f_10fps_sc

    def load_embeddings(self):
        """Return the subsampled features and their UMAP embeddings."""
        f_10fps_sub, train_embeddings = self._read_sav('_embeddings.sav')
        return f_10fps_sub, train_embeddings

    def load_clusters(self):
        """Return the HDBSCAN sweep results and assignments."""
        min_cluster_range, assignments, soft_clusters, soft_assignments = self._read_sav('_clusters.sav')
        return min_cluster_range, assignments, soft_clusters, soft_assignments

    def load_classifier(self):
        """Return the trained random-forest classifier artifacts."""
        feats_test, labels_test, classifier, clf, scores, nn_assignments = self._read_sav('_randomforest.sav')
        return feats_test, labels_test, classifier, clf, scores, nn_assignments

    def load_predictions(self):
        """Return the frame-wise prediction outputs."""
        flders, flder, filenames, data_new, fs_labels = self._read_sav('_predictions.sav')
        return flders, flder, filenames, data_new, fs_labels
| 2,087
| 38.396226
| 107
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/utilities/__init__.py
| 0
| 0
| 0
|
py
|
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/utilities/save_data.py
|
import scipy.io
import os
import joblib
class results:
    """Writer for B-SOiD analysis results saved as joblib .sav files."""

    def __init__(self, path, name):
        self.path = path  # output directory
        self.name = name  # filename prefix

    def save_sav(self, datalist, fname):
        """Dump *datalist* to '<name>_<fname>.sav' under self.path."""
        out_file = os.path.join(self.path, str.join('', (self.name, '_', fname, '.sav')))
        with open(out_file, 'wb') as fh:
            joblib.dump(datalist, fh)
| 325
| 20.733333
| 101
|
py
|
B-SOID
|
B-SOID-master/bsoid_figs/subroutines/utilities/discrete_cmap.py
|
import numpy as np
import matplotlib.pyplot as plt
def discrete_cmap(N, base_cmap=None):
    """Create an N-bin discrete colormap from the specified input map"""
    # Going through plt.cm.get_cmap works for a string name, None, or a
    # colormap instance, unlike the simpler plt.cm.get_cmap(base_cmap, N).
    source = plt.cm.get_cmap(base_cmap)
    sampled_colors = source(np.linspace(0, 1, N))
    return source.from_list(source.name + str(N), sampled_colors, N)
| 516
| 33.466667
| 72
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/extract_features.py
|
import itertools
import math
import os
import joblib
import numpy as np
import randfacts
import streamlit as st
import umap
from psutil import virtual_memory
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from streamlit import caching
from bsoid_app.bsoid_utilities.likelihoodprocessing import boxcar_center
from bsoid_app.bsoid_utilities.load_workspace import load_feats, load_embeddings
from bsoid_app.config import *
class extract:
    """Streamlit step: extract pose features and embed them with UMAP.

    Per-frame displacement, pairwise-distance, and angle features are
    computed from the preprocessed pose data, integrated to 10 Hz, scaled,
    randomly subsampled, and non-linearly embedded with UMAP.
    """

    def __init__(self, working_dir, prefix, processed_input_data, framerate):
        """Store run configuration and initialize result holders."""
        st.subheader('EXTRACT AND EMBED FEATURES')
        self.working_dir = working_dir
        self.prefix = prefix
        self.processed_input_data = processed_input_data
        self.framerate = framerate
        self.train_size = []
        self.features = []
        self.scaled_features = []
        self.sampled_features = []
        self.sampled_embeddings = []

    def subsample(self):
        """Ask the user for a training fraction and set self.train_size (10 Hz samples)."""
        data_size = 0
        for n in range(len(self.processed_input_data)):
            data_size += len(range(round(self.framerate / 10), self.processed_input_data[n].shape[0],
                                   round(self.framerate / 10)))
        fraction = st.number_input('Enter training input __fraction__ (do not change this value if you wish '
                                   'to generate the side-by-side video seen on our GitHub page):',
                                   min_value=0.1, max_value=1.0, value=1.0)
        if fraction == 1.0:
            self.train_size = data_size
        else:
            self.train_size = int(data_size * fraction)
        st.markdown('You have opted to train on a cumulative of **{} minutes** total. '
                    'If this does not sound right, the framerate might be wrong.'.format(self.train_size / 600))

    def compute(self):
        """Compute (or reload) per-file features, scale them, then embed."""
        if st.button("__Extract Features__"):
            funfacts = randfacts.getFact()
            st.info(str.join('', ('Extracting... Here is a random fact: ', funfacts)))
            try:
                # Reuse previously saved features when a checkpoint exists.
                [self.features, self.scaled_features] = load_feats(self.working_dir, self.prefix)
            except Exception:
                # Odd-length boxcar window of roughly 50 ms per side.
                # np.int was removed in NumPy 1.24; the builtin int is equivalent.
                window = int(np.round(0.05 / (1 / self.framerate)) * 2 - 1)
                f = []
                my_bar = st.progress(0)
                for n in range(len(self.processed_input_data)):
                    data_n_len = len(self.processed_input_data[n])
                    dxy_list = []
                    disp_list = []
                    for r in range(data_n_len):
                        if r < data_n_len - 1:
                            # Frame-to-frame displacement of each (x, y) point pair.
                            disp = []
                            for c in range(0, self.processed_input_data[n].shape[1], 2):
                                disp.append(
                                    np.linalg.norm(self.processed_input_data[n][r + 1, c:c + 2] -
                                                   self.processed_input_data[n][r, c:c + 2]))
                            disp_list.append(disp)
                        # Pairwise (x, y) difference vectors between body parts.
                        dxy = []
                        for i, j in itertools.combinations(range(0, self.processed_input_data[n].shape[1], 2), 2):
                            dxy.append(self.processed_input_data[n][r, i:i + 2] -
                                       self.processed_input_data[n][r, j:j + 2])
                        dxy_list.append(dxy)
                    disp_r = np.array(disp_list)
                    dxy_r = np.array(dxy_list)
                    disp_boxcar = []
                    dxy_eu = np.zeros([data_n_len, dxy_r.shape[1]])
                    ang = np.zeros([data_n_len - 1, dxy_r.shape[1]])
                    dxy_boxcar = []
                    ang_boxcar = []
                    for l in range(disp_r.shape[1]):
                        disp_boxcar.append(boxcar_center(disp_r[:, l], window))
                    for k in range(dxy_r.shape[1]):
                        for kk in range(data_n_len):
                            dxy_eu[kk, k] = np.linalg.norm(dxy_r[kk, k, :])
                            if kk < data_n_len - 1:
                                # Signed angle (degrees) between consecutive
                                # difference vectors, via the 3D cross product.
                                b_3d = np.hstack([dxy_r[kk + 1, k, :], 0])
                                a_3d = np.hstack([dxy_r[kk, k, :], 0])
                                c = np.cross(b_3d, a_3d)
                                ang[kk, k] = np.dot(np.dot(np.sign(c[2]), 180) / np.pi,
                                                    math.atan2(np.linalg.norm(c),
                                                               np.dot(dxy_r[kk, k, :], dxy_r[kk + 1, k, :])))
                        dxy_boxcar.append(boxcar_center(dxy_eu[:, k], window))
                        ang_boxcar.append(boxcar_center(ang[:, k], window))
                    disp_feat = np.array(disp_boxcar)
                    dxy_feat = np.array(dxy_boxcar)
                    ang_feat = np.array(ang_boxcar)
                    f.append(np.vstack((dxy_feat[:, 1:], ang_feat, disp_feat)))
                    my_bar.progress(round((n + 1) / len(self.processed_input_data) * 100))
                # Integrate the per-frame features into 10 Hz bins:
                # means for distances, sums for angles and displacements.
                for m in range(0, len(f)):
                    f_integrated = np.zeros(len(self.processed_input_data[m]))
                    for k in range(round(self.framerate / 10), len(f[m][0]), round(self.framerate / 10)):
                        if k > round(self.framerate / 10):
                            f_integrated = np.concatenate(
                                (f_integrated.reshape(f_integrated.shape[0], f_integrated.shape[1]),
                                 np.hstack((np.mean((f[m][0:dxy_feat.shape[0],
                                                     range(k - round(self.framerate / 10), k)]), axis=1),
                                            np.sum((f[m][dxy_feat.shape[0]:f[m].shape[0],
                                                    range(k - round(self.framerate / 10), k)]), axis=1)
                                            )).reshape(len(f[0]), 1)), axis=1
                            )
                        else:
                            f_integrated = np.hstack(
                                (np.mean((f[m][0:dxy_feat.shape[0], range(k - round(self.framerate / 10), k)]), axis=1),
                                 np.sum((f[m][dxy_feat.shape[0]:f[m].shape[0],
                                         range(k - round(self.framerate / 10), k)]), axis=1))).reshape(len(f[0]), 1)
                    if m > 0:
                        self.features = np.concatenate((self.features, f_integrated), axis=1)
                        scaler = StandardScaler()
                        scaler.fit(f_integrated.T)
                        scaled_f_integrated = scaler.transform(f_integrated.T).T
                        self.scaled_features = np.concatenate((self.scaled_features, scaled_f_integrated), axis=1)
                    else:
                        self.features = f_integrated
                        scaler = StandardScaler()
                        scaler.fit(f_integrated.T)
                        scaled_f_integrated = scaler.transform(f_integrated.T).T
                        self.scaled_features = scaled_f_integrated
                self.features = np.array(self.features)
                self.scaled_features = np.array(self.scaled_features)
                # Distinct handle name so the feature list 'f' is not shadowed.
                with open(os.path.join(self.working_dir, str.join('', (self.prefix, '_feats.sav'))), 'wb') as fh:
                    joblib.dump([self.features, self.scaled_features], fh)
            st.info('Done extracting features from a total of **{}** training data files. '
                    'Now reducing dimensions...'.format(len(self.processed_input_data)))
            self.learn_embeddings()

    def learn_embeddings(self):
        """UMAP-embed a random subsample of the scaled features and save the result."""
        input_feats = self.scaled_features.T
        pca = PCA()
        pca.fit(self.scaled_features.T)
        # Keep enough UMAP dimensions to explain >= 70% of the variance.
        num_dimensions = np.argwhere(np.cumsum(pca.explained_variance_ratio_) >= 0.7)[0][0] + 1
        if self.train_size > input_feats.shape[0]:
            self.train_size = input_feats.shape[0]
        # Same seed for both draws so features and scaled features stay paired.
        np.random.seed(0)
        sampled_input_feats = input_feats[np.random.choice(input_feats.shape[0], self.train_size, replace=False)]
        features_transposed = self.features.T
        np.random.seed(0)
        self.sampled_features = features_transposed[np.random.choice(features_transposed.shape[0],
                                                                     self.train_size, replace=False)]
        st.info('Randomly sampled **{} minutes**... '.format(self.train_size / 600))
        mem = virtual_memory()
        available_mb = mem.available >> 20
        st.write('You have {} MB RAM 🐏 available'.format(available_mb))
        if available_mb > (sampled_input_feats.shape[0] * sampled_input_feats.shape[1] * 32 * 60) / 1024 ** 2 + 64:
            st.write('RAM 🐏 available is sufficient')
            try:
                learned_embeddings = umap.UMAP(n_neighbors=60, n_components=num_dimensions,
                                               **UMAP_PARAMS).fit(sampled_input_feats)
            except Exception:
                st.error('Failed on feature embedding. Try again by unchecking sidebar and rerunning extract features.')
        else:
            st.info(
                'Detecting that you are running low on available memory for this computation, '
                'setting low_memory so will take longer.')
            try:
                learned_embeddings = umap.UMAP(n_neighbors=60, n_components=num_dimensions, low_memory=True,
                                               **UMAP_PARAMS).fit(sampled_input_feats)
            except Exception:
                st.error('Failed on feature embedding. Try again by unchecking sidebar and rerunning extract features.')
        # NOTE(review): if UMAP raised above, learned_embeddings is unbound here and
        # the next line raises NameError — confirm whether st.error should abort instead.
        self.sampled_embeddings = learned_embeddings.embedding_
        st.info(
            'Done non-linear embedding of {} instances from **{}** D into **{}** D.'.format(
                *self.sampled_features.shape, self.sampled_embeddings.shape[1]))
        with open(os.path.join(self.working_dir, str.join('', (self.prefix, '_embeddings.sav'))), 'wb') as fh:
            joblib.dump([self.sampled_features, self.sampled_embeddings], fh)
        st.balloons()

    def main(self):
        """Entry point: reload a prior embedding checkpoint or run the pipeline."""
        try:
            [self.sampled_features, self.sampled_embeddings] = load_embeddings(self.working_dir, self.prefix)
            st.markdown('**_CHECK POINT_**: Done non-linear transformation of **{}** instances '
                        'from **{}** D into **{}** D. Move on to __Identify and '
                        'tweak number of clusters__'.format(*self.sampled_features.shape, self.sampled_embeddings.shape[1]))
            if st.checkbox('Redo?', False, key='er'):
                caching.clear_cache()
                self.subsample()
                self.compute()
        except FileNotFoundError:
            self.subsample()
            self.compute()
| 10,846
| 55.202073
| 124
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/bsoid_analysis.py
|
import streamlit as st
from analysis_subroutines import video_analysis, machine_performance, trajectory_analysis, \
kinematics_analysis, directed_graph_analysis
from analysis_subroutines.analysis_utilities.cache_workspace import load_data
from analysis_subroutines.analysis_utilities.visuals import *
from bsoid_utilities.load_css import local_css
def streamlit_run(pyfile):
    """Launch *pyfile* as a fresh streamlit app via a shell command."""
    command = "streamlit run {}.py".format(pyfile)
    os.system(command)
# --- Page setup ---
st.set_page_config(page_title='B-SOiD anaylsis', page_icon="📊",
                   layout='wide', initial_sidebar_state='auto')
local_css("./bsoid_app/bsoid_utilities/style.css")
# NOTE(review): 'anaylsis' is a typo in the user-facing title; fixing it would
# change the displayed text, so it is left as-is here.
title = "<div> <span class='bold'><span class='h1'>B-SOID</span></span> " \
        " <span class='h2'>anaylsis 📊</span></span> </div>"
st.markdown(title, unsafe_allow_html=True)
st.markdown('Step 1: Pick the directory and workspace to analyze.')
st.markdown('Step 2: Once input, select the type of results to analyze using the sidebar modules.')
st.text('')
# --- Workspace selection: root directory and saved-run prefix ---
working_dir = st.text_input('Enter B-SOiD __output directory__ from using the B-SOiD --version 2.0 App')
try:
    os.listdir(working_dir)
    st.markdown(
        'You have selected **{}** as your B-SOiD App results run root directory.'.format(working_dir))
except FileNotFoundError:
    st.error('No such directory')
# Collect '<prefix>_data.sav' workspace files, excluding accuracy/coherence dumps.
files = [i for i in os.listdir(working_dir) if os.path.isfile(os.path.join(working_dir, i)) and \
         '_data.sav' in i and not '_accuracy' in i and not '_coherence' in i]
bsoid_variables = [files[i].partition('_data.sav')[0] for i in range(len(files))]
# De-duplicate prefixes while preserving order.
bsoid_prefix = []
for var in bsoid_variables:
    if var not in bsoid_prefix:
        bsoid_prefix.append(var)
prefix = st.selectbox('Select prior B-SOiD prefix', bsoid_prefix)
try:
    st.markdown('You have selected **{}_XXX.sav** for prior prefix.'.format(prefix))
except TypeError:
    st.error('Please input a prior prefix to load workspace.')
# Load everything the analysis modules below need from the cached workspace.
[framerate, features, sampled_features, sampled_embeddings, assignments, soft_assignments,
 folders, folder, filenames, new_data, new_predictions] = load_data(working_dir, prefix)
# --- Sidebar-selected analysis modules; each runs only when its box is ticked ---
if st.sidebar.checkbox('Synchronized B-SOiD video (paper Supp. Video 1)', False, key='v'):
    video_generator = video_analysis.bsoid_video(working_dir, prefix, features, sampled_features,
                                                 sampled_embeddings, soft_assignments, framerate,
                                                 filenames, new_data)
    video_generator.main()
if st.sidebar.checkbox('K-fold accuracy boxplot (paper fig2c)', False, key='a'):
    performance_eval = machine_performance.performance(working_dir, prefix, soft_assignments)
    performance_eval.main()
if st.sidebar.checkbox('Limb trajectories (paper fig2d/g)', False, key='t'):
    st.write(filenames[0].partition('.')[-1])
    trajectory_mapper = trajectory_analysis.trajectory(working_dir, prefix, soft_assignments, framerate,
                                                       filenames, new_data, new_predictions)
    trajectory_mapper.main()
if st.sidebar.checkbox('(beta) Kinematics (paper fig6b/d)', False, key='k'):
    kinematics_analyzer = kinematics_analysis.kinematics(working_dir, prefix, framerate, soft_assignments, filenames)
    kinematics_analyzer.main()
if st.sidebar.checkbox('(alpha) Behavioral directed graph', False, key='d'):
    network = directed_graph_analysis.directed_graph(working_dir, prefix, soft_assignments,
                                                     folders, folder, new_predictions)
    network.main()
# Hands control back to the main B-SOiD app in a new streamlit process.
if st.sidebar.checkbox('Return computation to main app (please close current browser when new browser pops up)', False):
    streamlit_run('./bsoid_app')
| 3,674
| 53.044118
| 120
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/clustering.py
|
import os
import hdbscan
import joblib
import numpy as np
import streamlit as st
import randfacts
from bsoid_app.config import *
from bsoid_app.bsoid_utilities import visuals
from bsoid_app.bsoid_utilities.load_workspace import load_clusters
from streamlit import caching
class cluster:
    """Streamlit step: interactive HDBSCAN clustering of the UMAP embeddings."""

    def __init__(self, working_dir, prefix, sampled_embeddings):
        """Store run configuration and initialize result holders."""
        st.subheader('IDENTIFY AND TWEAK NUMBER OF CLUSTERS.')
        self.working_dir = working_dir
        self.prefix = prefix
        self.sampled_embeddings = sampled_embeddings
        self.cluster_range = []
        self.min_cluster_size = []
        self.assignments = []
        self.assign_prob = []
        self.soft_assignments = []

    def hierarchy(self):
        """Sweep minimum cluster sizes; keep the HDBSCAN run yielding the most clusters."""
        if st.button("__Identify Clusters__"):
            funfacts = randfacts.getFact()
            st.info(str.join('', ('Identifying... Here is a random fact: ', funfacts)))
            # np.infty was removed in NumPy 2.0; np.inf is the canonical spelling.
            max_num_clusters = -np.inf
            num_clusters = []
            self.min_cluster_size = np.linspace(self.cluster_range[0], self.cluster_range[1], 25)
            for min_c in self.min_cluster_size:
                learned_hierarchy = hdbscan.HDBSCAN(
                    prediction_data=True, min_cluster_size=int(round(min_c * 0.01 * self.sampled_embeddings.shape[0])),
                    **HDBSCAN_PARAMS).fit(self.sampled_embeddings)
                num_clusters.append(len(np.unique(learned_hierarchy.labels_)))
                if num_clusters[-1] > max_num_clusters:
                    max_num_clusters = num_clusters[-1]
                    retained_hierarchy = learned_hierarchy
            self.assignments = retained_hierarchy.labels_
            # Soft assignment: most probable cluster for every point, incl. noise.
            self.assign_prob = hdbscan.all_points_membership_vectors(retained_hierarchy)
            self.soft_assignments = np.argmax(self.assign_prob, axis=1)
            st.info('Done assigning labels for **{}** instances ({} minutes) '
                    'in **{}** D space'.format(self.assignments.shape,
                                               round(self.assignments.shape[0] / 600),
                                               self.sampled_embeddings.shape[1]))
            st.balloons()

    def show_classes(self):
        """Scatter-plot the confidently assigned embeddings colored by cluster."""
        st.write('Showing {}% data that were confidently assigned.'
                 ''.format(round(self.assignments[self.assignments >= 0].shape[0] /
                                 self.assignments.shape[0] * 100)))
        fig1, plt1 = visuals.plot_classes(self.sampled_embeddings[self.assignments >= 0],
                                          self.assignments[self.assignments >= 0])
        plt1.suptitle('HDBSCAN assignment')
        col1, col2 = st.beta_columns([2, 2])
        col1.pyplot(fig1)

    def slider(self, min_=0.5, max_=1.0):
        """Expose a range slider for the minimum-cluster-size sweep (% of samples)."""
        st.markdown('The following slider allows you to tweak number of groups based on minimum size requirements.')
        st.text('')
        self.cluster_range = st.slider('Select range of __minimum cluster size__ in %', 0.01, 5.0, (min_, max_))
        st.markdown('Your minimum cluster size ranges between **{}%** and **{}%**, '
                    'which is equivalent to roughly {} seconds for the '
                    'smallest cluster.'.format(self.cluster_range[0], self.cluster_range[1],
                                               round(self.cluster_range[0] * 0.001 * self.sampled_embeddings.shape[0])))

    def save(self):
        """Optionally autosave the clustering results to '<prefix>_clusters.sav'."""
        save_ = st.radio('Autosave the clustering as you go? This will overwrite the previous saved clustering.',
                         options=['Yes', 'No'], index=0)
        if save_ == 'Yes':
            with open(os.path.join(self.working_dir, str.join('', (self.prefix, '_clusters.sav'))), 'wb') as f:
                joblib.dump([self.min_cluster_size, self.assignments, self.assign_prob, self.soft_assignments], f)
        st.text('')
        st.text('')

    def main(self):
        """Entry point: reload a prior clustering checkpoint or run a fresh sweep."""
        try:
            caching.clear_cache()
            [self.min_cluster_size, self.assignments, self.assign_prob, self.soft_assignments] = \
                load_clusters(self.working_dir, self.prefix)
            st.markdown(
                '**_CHECK POINT_**: Done assigning labels for **{}** instances in **{}** D space. Move on to __create '
                'a model__.'.format(self.assignments.shape, self.sampled_embeddings.shape[1]))
            st.markdown('Your last saved run range was __{}%__ to __{}%__'.format(self.min_cluster_size[0],
                                                                                  self.min_cluster_size[-1]))
            if st.checkbox('Redo?', False, key='cr'):
                caching.clear_cache()
                self.slider(min_=float(self.min_cluster_size[0]), max_=float(self.min_cluster_size[-1]))
                self.hierarchy()
                self.save()
            if st.checkbox("Show first 3D UMAP enhanced clustering plot?", True, key='cs'):
                self.show_classes()
        except (AttributeError, FileNotFoundError):
            # No (or incompatible) prior checkpoint: run the full pipeline.
            self.slider()
            self.hierarchy()
            self.save()
            if st.checkbox("Show first 3D UMAP enhanced clustering plot?", True, key='cs'):
                self.show_classes()
| 5,206
| 47.663551
| 120
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/data_preprocess.py
|
import os
from datetime import date
import h5py
import joblib
import randfacts
import streamlit as st
from bsoid_app.bsoid_utilities import visuals
from bsoid_app.bsoid_utilities.likelihoodprocessing import *
from bsoid_app.bsoid_utilities.load_json import *
class preprocess:
    """Streamlit step that loads pose-estimation output files and preprocesses them.

    Supports DeepLabCut (csv/h5), SLEAP (h5), and OpenPose (json) outputs;
    collects run settings in __init__ and compiles filtered data in compile_data.
    """
    def __init__(self):
        """Gather all preprocessing settings interactively via streamlit widgets."""
        st.subheader('LOAD DATA and PREPROCESS')
        self.pose_chosen = []
        self.input_filenames = []
        self.raw_input_data = []
        self.processed_input_data = []
        self.sub_threshold = []
        # Pose-estimation software determines the accepted file type below.
        self.software = st.selectbox('What type of __pose-estimation software__?',
                                     ('DeepLabCut', 'SLEAP', 'OpenPose'))
        if self.software == 'DeepLabCut':
            self.ftype = st.selectbox('What type of __output file__?',
                                      ('csv', 'h5'))
        if self.software == 'SLEAP':
            self.ftype = 'h5'
            st.write('Currently only supporting {} type files'.format(self.ftype))
        if self.software == 'OpenPose':
            self.ftype = 'json'
            st.write('Currently only supporting {} type files'.format(self.ftype))
        st.write('Select the pose estimate root directory containing '
                 '1 or more xxx.{} containing sub-directories.'.format(self.ftype))
        self.root_path = st.text_input('Enter a __root directory__, e.g. __/Users/projectX__', os.getcwd())
        try:
            # os.listdir is used purely as an existence check here.
            os.listdir(self.root_path)
            st.markdown(
                'You have selected **{}** as your _root directory_'.format(self.root_path))
        except FileNotFoundError:
            st.error('No such directory')
        st.write(
            'Select the pose estimate containing sub-directories, e.g. /control, under root directory. '
            'Currently supporting _2D_ and _single_ animal.')
        self.data_directories = []
        no_dir = int(st.number_input('How many __data containing directories__ '
                                     'under {} for training?'.format(self.root_path), value=3))
        st.markdown('Your will be training on *{}* data file containing sub-directories.'.format(no_dir))
        for i in range(no_dir):
            # Each chosen sub-directory is stored with a leading '/'.
            d = str.join('', ('/', st.selectbox('Enter # {} __data file containing directory__ under {}, '
                                                'e.g. __/control__ for /Users/projectX/control/xxx.{}'
                                                ''.format(i + 1, self.root_path, self.ftype),
                                                (os.listdir(self.root_path)), index=0)))
            try:
                os.listdir(str.join('', (self.root_path, d)))
            except FileNotFoundError:
                st.error('No such directory')
            if not d in self.data_directories:
                self.data_directories.append(d)
        st.markdown('You have selected **{}** as your _sub-directory(ies)_.'.format(self.data_directories))
        st.write('Average video frame-rate for xxx.{} pose estimate files.'.format(self.ftype))
        self.framerate = int(st.number_input('What is your frame-rate?', value=60))
        st.markdown('You have selected **{} frames per second**.'.format(self.framerate))
        st.write('Select a working directory for B-SOiD')
        self.working_dir = st.text_input('Enter a __working directory__, e.g. __/Users/projectX/output__',
                                         str.join('', (self.root_path, '/output')))
        try:
            os.listdir(self.working_dir)
            st.markdown('You have selected **{}** for B-SOiD working directory.'.format(self.working_dir))
        except FileNotFoundError:
            st.error('Cannot access working directory, was there a typo or did you forget to create one?')
        st.write('Input a prefix name for B-SOiD variables.')
        st.text('')
        st.write('*CAUTION*: It will OVERWRITE same prefix.')
        # Default prefix is today's date, e.g. 'Jan-01-2021'.
        today = date.today()
        d4 = today.strftime("%b-%d-%Y")
        self.prefix = st.text_input('Enter a __variable filename__ prefix, e.g. __control_sessions_2020__', d4)
        if self.prefix:
            st.markdown('You have decided on **{}** as the prefix.'.format(self.prefix))
        else:
            st.error('Please enter a prefix.')
    def compile_data(self):
        """Let the user pick body parts, then filter and compile all training files.

        Branches on (software, ftype); each branch reads every file under the
        chosen sub-directories, adaptively filters low-likelihood points, and
        saves the compiled arrays to '<prefix>_data.sav' in the working dir.
        """
        st.write('Identify pose to include in clustering.')
        # --- DeepLabCut csv output ---
        if self.software == 'DeepLabCut' and self.ftype == 'csv':
            data_files = glob.glob(self.root_path + self.data_directories[0] + '/*.csv')
            file0_df = pd.read_csv(data_files[0], low_memory=False)
            file0_array = np.array(file0_df)
            # Body-part names sit every third column starting at column 1.
            p = st.multiselect('Identified __pose__ to include:', [*file0_array[0, 1:-1:3]], [*file0_array[0, 1:-1:3]])
            for a in p:
                index = [i for i, s in enumerate(file0_array[0, 1:]) if a in s]
                if not index in self.pose_chosen:
                    self.pose_chosen += index
            self.pose_chosen.sort()
            if st.button("__Preprocess__"):
                funfacts = randfacts.getFact()
                st.info(str.join('', ('Preprocessing... Here is a random fact: ', funfacts)))
                for i, fd in enumerate(self.data_directories):  # Loop through folders
                    f = get_filenames(self.root_path, fd)
                    my_bar = st.progress(0)
                    for j, filename in enumerate(f):
                        file_j_df = pd.read_csv(filename, low_memory=False)
                        file_j_processed, p_sub_threshold = adp_filt(file_j_df, self.pose_chosen)
                        self.raw_input_data.append(file_j_df)
                        self.sub_threshold.append(p_sub_threshold)
                        self.processed_input_data.append(file_j_processed)
                        self.input_filenames.append(filename)
                        my_bar.progress(round((j + 1) / len(f) * 100))
                with open(os.path.join(self.working_dir, str.join('', (self.prefix, '_data.sav'))), 'wb') as f:
                    joblib.dump(
                        [self.root_path, self.data_directories, self.framerate, self.pose_chosen, self.input_filenames,
                         self.raw_input_data, np.array(self.processed_input_data), self.sub_threshold], f
                    )
                st.info('Processed a total of **{}** .{} files, and compiled into a '
                        '**{}** data list.'.format(len(self.processed_input_data), self.ftype,
                                                   np.array(self.processed_input_data).shape))
                st.balloons()
        # --- DeepLabCut h5 output ---
        elif self.software == 'DeepLabCut' and self.ftype == 'h5':
            data_files = glob.glob(self.root_path + self.data_directories[0] + '/*.h5')
            # NOTE(review): pandas.read_hdf does not document a low_memory
            # keyword — confirm this kwarg is accepted by the installed pandas.
            file0_df = pd.read_hdf(data_files[0], low_memory=False)
            p = st.multiselect('Identified __pose__ to include:',
                               [*np.array(file0_df.columns.get_level_values(1)[1:-1:3])],
                               [*np.array(file0_df.columns.get_level_values(1)[1:-1:3])])
            for a in p:
                index = [i for i, s in enumerate(np.array(file0_df.columns.get_level_values(1))) if a in s]
                if not index in self.pose_chosen:
                    self.pose_chosen += index
            self.pose_chosen.sort()
            if st.button("__Preprocess__"):
                funfacts = randfacts.getFact()
                st.info(str.join('', ('Preprocessing... Here is a random fact: ', funfacts)))
                for i, fd in enumerate(self.data_directories):
                    f = get_filenamesh5(self.root_path, fd)
                    my_bar = st.progress(0)
                    for j, filename in enumerate(f):
                        file_j_df = pd.read_hdf(filename, low_memory=False)
                        file_j_processed, p_sub_threshold = adp_filt_h5(file_j_df, self.pose_chosen)
                        self.raw_input_data.append(file_j_df)
                        self.sub_threshold.append(p_sub_threshold)
                        self.processed_input_data.append(file_j_processed)
                        self.input_filenames.append(filename)
                        my_bar.progress(round((j + 1) / len(f) * 100))
                with open(os.path.join(self.working_dir, str.join('', (self.prefix, '_data.sav'))), 'wb') as f:
                    joblib.dump(
                        [self.root_path, self.data_directories, self.framerate, self.pose_chosen, self.input_filenames,
                         self.raw_input_data, np.array(self.processed_input_data), self.sub_threshold], f
                    )
                st.info('Processed a total of **{}** .{} files, and compiled into a '
                        '**{}** data list.'.format(len(self.processed_input_data), self.ftype,
                                                   np.array(self.processed_input_data).shape))
                st.balloons()
        # --- SLEAP h5 output ---
        elif self.software == 'SLEAP' and self.ftype == 'h5':
            data_files = glob.glob(self.root_path + self.data_directories[0] + '/*.h5')
            file0_df = h5py.File(data_files[0], 'r')
            p = st.multiselect('Identified __pose__ to include:',
                               [*np.array(file0_df['node_names'][:])],
                               [*np.array(file0_df['node_names'][:])])
            for a in p:
                index = [i for i, s in enumerate(np.array(file0_df['node_names'][:])) if a in s]
                if not index in self.pose_chosen:
                    self.pose_chosen += index
            self.pose_chosen.sort()
            if st.button("__Preprocess__"):
                funfacts = randfacts.getFact()
                st.info(str.join('', ('Preprocessing... Here is a random fact: ', funfacts)))
                for i, fd in enumerate(self.data_directories):
                    f = get_filenamesh5(self.root_path, fd)
                    my_bar = st.progress(0)
                    for j, filename in enumerate(f):
                        file_j_df = h5py.File(filename, 'r')
                        file_j_processed, p_sub_threshold = adp_filt_sleap_h5(file_j_df, self.pose_chosen)
                        # Keep only the first animal's tracks (single-animal support).
                        self.raw_input_data.append(file_j_df['tracks'][:][0])
                        self.sub_threshold.append(p_sub_threshold)
                        self.processed_input_data.append(file_j_processed)
                        self.input_filenames.append(filename)
                        my_bar.progress(round((j + 1) / len(f) * 100))
                with open(os.path.join(self.working_dir, str.join('', (self.prefix, '_data.sav'))), 'wb') as f:
                    joblib.dump(
                        [self.root_path, self.data_directories, self.framerate, self.pose_chosen, self.input_filenames,
                         self.raw_input_data, np.array(self.processed_input_data), self.sub_threshold], f
                    )
                st.info('Processed a total of **{}** .{} files, and compiled into a '
                        '**{}** data list.'.format(len(self.processed_input_data), self.ftype,
                                                   np.array(self.processed_input_data).shape))
                st.balloons()
        # --- OpenPose json output (converted to one csv per directory) ---
        elif self.software == 'OpenPose' and self.ftype == 'json':
            data_files = glob.glob(self.root_path + self.data_directories[0] + '/*.json')
            file0_df = read_json_single(data_files[0])
            file0_array = np.array(file0_df)
            p = st.multiselect('Identified __pose__ to include:', [*file0_array[0, 1:-1:3]], [*file0_array[0, 1:-1:3]])
            for a in p:
                index = [i for i, s in enumerate(file0_array[0, 1:]) if a in s]
                if not index in self.pose_chosen:
                    self.pose_chosen += index
            self.pose_chosen.sort()
            if st.button("__Preprocess__"):
                funfacts = randfacts.getFact()
                st.info(str.join('', ('Preprocessing... Here is a random fact: ', funfacts)))
                for i, fd in enumerate(self.data_directories):
                    f = get_filenamesjson(self.root_path, fd)
                    # Merge the per-frame json files into a single csv first.
                    json2csv_multi(f)
                    filename = f[0].rpartition('/')[-1].rpartition('_')[0].rpartition('_')[0]
                    file_j_df = pd.read_csv(str.join('', (f[0].rpartition('/')[0], '/', filename, '.csv')),
                                            low_memory=False)
                    file_j_processed, p_sub_threshold = adp_filt(file_j_df, self.pose_chosen)
                    self.raw_input_data.append(file_j_df)
                    self.sub_threshold.append(p_sub_threshold)
                    self.processed_input_data.append(file_j_processed)
                    self.input_filenames.append(str.join('', (f[0].rpartition('/')[0], '/', filename, '.csv')))
                with open(os.path.join(self.working_dir, str.join('', (self.prefix, '_data.sav'))), 'wb') as f:
                    joblib.dump(
                        [self.root_path, self.data_directories, self.framerate, self.pose_chosen, self.input_filenames,
                         self.raw_input_data, np.array(self.processed_input_data), self.sub_threshold], f
                    )
                st.info('Processed a total of **{}** .{} files, and compiled into a '
                        '**{}** data list.'.format(len(self.processed_input_data), self.ftype,
                                                   np.array(self.processed_input_data).shape))
                st.balloons()
    def show_bar(self):
        """Plot the fraction of sub-threshold (filtered) points per file."""
        visuals.plot_bar(self.sub_threshold)
    def show_data_table(self):
        """Display raw vs. processed input data side by side."""
        visuals.show_data_table(self.raw_input_data, self.processed_input_data)
| 13,767
| 59.920354
| 119
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/machine_learner.py
|
import os
import joblib
import randfacts
import streamlit as st
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from bsoid_app.bsoid_utilities import visuals
from bsoid_app.bsoid_utilities.load_workspace import load_classifier
from streamlit import caching
class protocol:
    def __init__(self, working_dir, prefix, features, sampled_features, assignments):
        """Store inputs from the clustering step and init classifier result holders."""
        st.subheader('CREATE A MODEL')
        self.working_dir = working_dir
        self.prefix = prefix
        self.features = features
        self.sampled_features = sampled_features
        self.assignments = assignments
        self.part = 0.2  # held-out fraction for the validation split
        self.it = 10  # number of cross-validation folds
        # Populated by randomforest():
        self.x_test = []
        self.y_test = []
        self.validate_clf = []
        self.clf = []
        self.validate_score = []
        self.predictions = []
def randomforest(self):
if st.button("Start training a random forest classifier"):
try:
x = self.sampled_features[self.assignments >= 0, :]
y = self.assignments[self.assignments >= 0]
x_train, self.x_test, y_train, self.y_test = train_test_split(x, y.T, test_size=self.part, random_state=42)
funfacts = randfacts.getFact()
st.info(str.join('', ('Training random forest classifier on randomly partitioned '
'{}%...'.format((1 - self.part) * 100), 'Here is a random fact: ', funfacts)))
self.validate_clf = RandomForestClassifier(random_state=42)
self.validate_clf.fit(x_train, y_train)
self.clf = RandomForestClassifier(random_state=42)
self.clf.fit(x, y.T)
self.predictions = self.clf.predict(self.features.T)
st.info('Done training random forest classifier mapping '
'**{}** features to **{}** assignments.'.format(self.features.T.shape, self.predictions.shape))
self.validate_score = cross_val_score(self.validate_clf, self.x_test, self.y_test, cv=self.it, n_jobs=-1)
with open(os.path.join(self.working_dir, str.join('', (self.prefix, '_randomforest.sav'))), 'wb') as f:
joblib.dump([self.x_test, self.y_test, self.validate_clf, self.clf,
self.validate_score, self.predictions], f)
st.balloons()
except AttributeError:
st.error('Sometimes this takes a bit to update, recheck identify clusters (previous step) '
'and rerun this in 30 seconds.')
def show_confusion_matrix(self):
fig = visuals.plot_confusion(self.validate_clf, self.x_test, self.y_test)
col1, col2 = st.beta_columns([2, 2])
col1.pyplot(fig[0])
col2.pyplot(fig[1])
st.write('To improve, either _increase_ minimum cluster size, or include _more data_')
def show_crossval_score(self):
fig, plt = visuals.plot_accuracy(self.validate_score)
col1, col2 = st.beta_columns([2, 2])
col1.pyplot(fig)
st.write('To improve, either _increase_ minimum cluster size, or include _more data_')
def main(self):
try:
[self.x_test, self.y_test, self.validate_clf, self.clf, self.validate_score, self.predictions] = \
load_classifier(self.working_dir, self.prefix)
st.markdown('**_CHECK POINT_**: Done training random forest classifier '
'mapping **{}** features to **{}** assignments. Move on to '
'__Generate video snippets for interpretation__.'.format(self.features.shape[0],
self.predictions.shape[0]))
if st.checkbox('Redo?', False, key='mr'):
caching.clear_cache()
self.randomforest()
if st.checkbox("Show confusion matrix on test?", False, key='ms'):
self.show_confusion_matrix()
if st.checkbox("Show cross-validated accuracy on test?", False, key='mss'):
self.show_crossval_score()
except FileNotFoundError:
self.randomforest()
if st.checkbox("Show confusion matrix on test?", False, key='ms'):
self.show_confusion_matrix()
if st.checkbox("Show cross-validated accuracy on test?", False, key='mss'):
self.show_crossval_score()
| 4,516
| 48.637363
| 123
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/__init__.py
| 0
| 0
| 0
|
py
|
|
B-SOID
|
B-SOID-master/bsoid_app/predict.py
|
import os
from datetime import date
import h5py
import joblib
import streamlit as st
from bsoid_app.bsoid_utilities import statistics
from bsoid_app.bsoid_utilities.bsoid_classification import *
from bsoid_app.bsoid_utilities.likelihoodprocessing import *
from bsoid_app.bsoid_utilities.load_json import *
class prediction:
    """Streamlit step that applies a trained classifier to old or new pose files.

    Collects pose-estimate files (csv/h5/json) from user-selected directories,
    extracts features, frameshift-predicts behavioral labels with the trained
    random forest, writes per-file label/duration/transition CSVs into a BSOID
    sub-folder, and checkpoints everything with joblib.
    """

    def __init__(self, root_path, data_directories, input_filenames, processed_input_data, working_dir, prefix,
                 framerate, pose_chosen, predictions, clf):
        st.subheader('PREDICT OLD/NEW FILES USING A MODEL')
        st.markdown('This could take some time for large datasets.')
        self.options = st.multiselect('What csv files to export?',
                                      ['Labels tagged onto pose files', 'Group durations (in frames)',
                                       'Transition matrix'],
                                      ['Labels tagged onto pose files', 'Transition matrix'])
        # Context carried over from training.
        self.root_path = root_path
        self.data_directories = data_directories
        self.input_filenames = input_filenames
        self.processed_input_data = processed_input_data
        self.working_dir = working_dir
        self.prefix = prefix
        self.framerate = framerate
        self.pose_chosen = pose_chosen
        self.predictions = predictions
        self.clf = clf
        # Prediction-time state, filled in by setup() and predict().
        self.use_train = []
        self.new_root_path = []
        self.new_directories = []
        self.filetype = []
        self.new_prefix = []
        self.new_framerate = []
        self.new_data = []
        self.new_features = []
        self.nonfs_predictions = []
        self.folders = []          # one entry per data directory
        self.folder = []           # one entry per processed file (parallel to filenames)
        self.filenames = []
        self.new_predictions = []
        self.all_df = []

    def setup(self):
        """Gather root path, data directories, file type, frame rate and output prefix.

        Either reuses the training configuration wholesale, or prompts the user
        for a new root/directories/framerate/prefix for fresh data.
        """
        if st.checkbox('All {} training folders containing '
                       'a total of {} files?'.format(len(self.data_directories),
                                                     self.processed_input_data.shape[0]), False, key='pt'):
            # Reuse everything from training.
            self.new_root_path = self.root_path
            self.filetype = [s for i, s in enumerate(['csv', 'h5', 'json'])
                             if s in self.input_filenames[0].partition('.')[-1]][0]
            self.new_directories = self.data_directories
            self.new_framerate = self.framerate
            self.new_prefix = self.prefix
        else:
            st.write(
                'Select the pose estimate containing sub-directories, e.g. /control, under root directory. '
                'Currently supporting _2D_ and _single_ animal.')
            if st.checkbox('New root directory (not {}) for these new files?'.format(self.root_path), False, key='pn'):
                self.new_root_path = st.text_input('Enter a __root directory__, e.g. __/Users/projectX__', os.getcwd())
            else:
                self.new_root_path = self.root_path
            num_dir = int(st.number_input('How many __data containing directories__ '
                                          'under {} for B-SOiD predictions?'.format(self.new_root_path), value=3))
            st.markdown('Your will predict on data files in *{}* directories.'.format(num_dir))
            # Default the selector to the file type used during training.
            self.filetype = st.selectbox('What type of file are these new data?', ('csv', 'h5', 'json'),
                                         index=int([i for i, s in enumerate(['csv', 'h5', 'json'])
                                                    if s in self.input_filenames[0].partition('.')[-1]][0]))
            for i in range(num_dir):
                new_directory = st.text_input('Enter # {} __data file containing directory__ under {}, '
                                              'e.g. __/control__ for /Users/projectX/control/xxx.{}'
                                              ''.format(i + 1, self.new_root_path, self.filetype))
                try:
                    os.listdir(str.join('', (self.new_root_path, new_directory)))
                except FileNotFoundError:
                    st.error('No such directory')
                if new_directory not in self.new_directories:
                    self.new_directories.append(new_directory)
            st.markdown('You have selected **{}** as your _sub-directory(ies)_.'.format(self.new_directories))
            st.write('Average video frame-rate for xxx.{} pose estimate files.'.format(self.filetype))
            self.new_framerate = int(st.number_input('What is your frame-rate?', value=self.framerate))
            st.markdown('You have selected **{} frames per second**.'.format(self.new_framerate))
            if st.checkbox('For every new dataset, you would want to change the prefix so it does not overwrite previous '
                           'predictions. Would you like to change the prefix? Currently it is set to save'
                           ' as **{}/{}_predictions.sav**.'.format(self.working_dir, self.prefix), False, key='pp'):
                today = date.today()
                d4 = today.strftime("%b-%d-%Y")
                self.new_prefix = st.text_input('Enter new prediction variable prefix:', d4)
                if self.new_prefix:
                    st.markdown('You have chosen **{}_predictions.sav** for new predictions.'.format(self.new_prefix))
                else:
                    st.error('Please enter a name for your new prediction variable prefix.')
            else:
                self.new_prefix = self.prefix

    def predict(self):
        """Load the selected files, frameshift-predict labels, and export CSVs.

        Appends, per file: the raw dataframe (all_df), the filtered pose array
        (new_data), the filename, and its directory (folder); folders collects
        each data directory once.
        """
        if st.button("Predict labels"):
            st.markdown('These files will be saved in {}/_your_data_folder_x_/BSOID'.format(self.new_root_path))
            if self.filetype == 'csv':
                for i, fd in enumerate(self.new_directories):
                    f = get_filenames(self.new_root_path, fd)
                    for j, filename in enumerate(f):
                        file_j_df = pd.read_csv(filename, low_memory=False)
                        file_j_processed, _ = adp_filt(file_j_df, self.pose_chosen)
                        self.all_df.append(file_j_df)
                        self.new_data.append(file_j_processed)
                        self.filenames.append(filename)
                        self.folder.append(fd)
                    self.folders.append(fd)
            elif self.filetype == 'h5':
                try:
                    for i, fd in enumerate(self.new_directories):
                        f = get_filenamesh5(self.new_root_path, fd)
                        for j, filename in enumerate(f):
                            file_j_df = pd.read_hdf(filename, low_memory=False)
                            file_j_processed, _ = adp_filt_h5(file_j_df, self.pose_chosen)
                            self.all_df.append(file_j_df)
                            self.new_data.append(file_j_processed)
                            self.filenames.append(filename)
                            self.folder.append(fd)
                        self.folders.append(fd)
                except Exception:  # was a bare except; DLC read failed, so try SLEAP layout
                    st.info('Detecting SLEAP .h5 files...')
                    for i, fd in enumerate(self.new_directories):
                        f = get_filenamesh5(self.new_root_path, fd)
                        for j, filename in enumerate(f):
                            file_j_df = h5py.File(filename, 'r')
                            file_j_processed, p_sub_threshold = adp_filt_sleap_h5(file_j_df, self.pose_chosen)
                            df = no_filt_sleap_h5(file_j_df, self.pose_chosen)
                            self.all_df.append(df)
                            self.new_data.append(file_j_processed)
                            self.filenames.append(filename)
                            self.folder.append(fd)
                        self.folders.append(fd)
            elif self.filetype == 'json':
                for i, fd in enumerate(self.new_directories):
                    # FIX: search under new_root_path (was root_path) so json prediction
                    # respects a newly chosen root directory, like the csv/h5 branches.
                    f = get_filenamesjson(self.new_root_path, fd)
                    json2csv_multi(f)
                    # json2csv_multi compiles each directory's jsons into a single csv.
                    filename = f[0].rpartition('/')[-1].rpartition('_')[0].rpartition('_')[0]
                    file_j_df = pd.read_csv(str.join('', (f[0].rpartition('/')[0], '/', filename, '.csv')),
                                            low_memory=False)
                    file_j_processed, p_sub_threshold = adp_filt(file_j_df, self.pose_chosen)
                    self.all_df.append(file_j_df)
                    self.new_data.append(file_j_processed)
                    self.filenames.append(str.join('', (f[0].rpartition('/')[0], '/', filename, '.csv')))
                    self.folder.append(fd)
                    self.folders.append(fd)
            st.info('Extracting features and predicting labels... ')
            labels_fs = []
            bar = st.progress(0)
            for i in range(0, len(self.new_data)):
                feats_new = bsoid_extract([self.new_data[i]], self.new_framerate)
                labels = bsoid_predict(feats_new, self.clf)
                # Reverse each shifted stream, right-pad with -1 to a common length,
                # then borrow the first n labels from the previous stream so every
                # row covers the full recording.
                for m in range(0, len(labels)):
                    labels[m] = labels[m][::-1]
                labels_pad = -1 * np.ones([len(labels), len(max(labels, key=lambda x: len(x)))])
                for n, l in enumerate(labels):
                    labels_pad[n][0:len(l)] = l
                    labels_pad[n] = labels_pad[n][::-1]
                    if n > 0:
                        labels_pad[n][0:n] = labels_pad[n - 1][0:n]
                labels_fs.append(labels_pad.astype(int))
                bar.progress(round((i + 1) / len(self.new_data) * 100))
            st.info('Frameshift arrangement of predicted labels...')
            # Interleave the shifted streams column-wise to recover framerate-resolution labels.
            for k in range(0, len(labels_fs)):
                labels_fs2 = []
                for l in range(math.floor(self.new_framerate / 10)):
                    labels_fs2.append(labels_fs[k][l])
                self.new_predictions.append(np.array(labels_fs2).flatten('F'))
            st.info('Done frameshift-predicting a total of **{}** files.'.format(len(self.new_data)))
            for i in range(0, len(self.new_predictions)):
                filename_i = os.path.basename(self.filenames[i]).rpartition('.')[0]
                # Pad the label stream (edge-repeat) to the raw dataframe's length.
                fs_labels_pad = np.pad(self.new_predictions[i], (0, len(self.all_df[i]) -
                                                                 len(self.new_predictions[i])), 'edge')
                df2 = pd.DataFrame(fs_labels_pad, columns=['B-SOiD labels'])  # was a set literal; list is deterministic
                frames = [df2, self.all_df[i]]
                xyfs_df = pd.concat(frames, axis=1)
                runlen_df, dur_stats, tm_array, tm_df, tm_norm = statistics.main(self.new_predictions[i],
                                                                                 len(np.unique(self.predictions)))
                try:
                    os.mkdir(str.join('', (self.new_root_path, self.folder[i], '/BSOID')))
                except FileExistsError:
                    pass
                if any('Labels tagged onto pose files' in o for o in self.options):
                    xyfs_df.to_csv(os.path.join(
                        str.join('', (self.new_root_path, self.folder[i], '/BSOID')),
                        str.join('', (self.new_prefix, 'labels_pose_', str(self.new_framerate),
                                      'Hz', filename_i, '.csv'))),
                        index=True, chunksize=10000, encoding='utf-8')
                    st.info('Saved Labels .csv in {}'.format(
                        str.join('', (self.new_root_path, self.folder[i], '/BSOID'))))
                if any('Group durations (in frames)' in o for o in self.options):
                    runlen_df.to_csv(os.path.join(
                        str.join('', (self.new_root_path, self.folder[i], '/BSOID')),
                        str.join('', (self.new_prefix, 'bout_lengths_', str(self.new_framerate),
                                      'Hz', filename_i, '.csv'))),
                        index=True, chunksize=10000, encoding='utf-8')
                    st.info('Saved Group durations .csv in {}'.format(
                        str.join('', (self.new_root_path, self.folder[i], '/BSOID'))))
                if any('Transition matrix' in o for o in self.options):
                    tm_df.to_csv(os.path.join(
                        str.join('', (self.new_root_path, self.folder[i], '/BSOID')),
                        str.join('', (self.new_prefix, 'transitions_mat_',
                                      str(self.new_framerate), 'Hz', filename_i, '.csv'))),
                        index=True, chunksize=10000, encoding='utf-8')
                    st.info('Saved transition matrix .csv in {}'.format(
                        str.join('', (self.new_root_path, self.folder[i], '/BSOID'))))
            with open(os.path.join(self.working_dir, str.join('', (self.new_prefix, '_predictions.sav'))), 'wb') as f:
                joblib.dump([self.folders, self.folder, self.filenames, self.new_data, self.new_predictions], f)
            st.balloons()
            st.markdown('**_CHECK POINT_**: Done predicting old/new files. Move on to '
                        '__Load up analysis app (please close current browser when new browser pops up)__.')

    def main(self):
        """Entry point: configure, then predict."""
        self.setup()
        self.predict()
| 13,196
| 58.714932
| 119
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/export_training.py
|
import itertools
import os
import streamlit as st
from bsoid_app.bsoid_utilities.statistics import *
class export:
    """Streamlit step that exports what B-SOiD learned (features, mapping, probabilities) to CSV."""

    def __init__(self, working_dir, prefix, sampled_features, assignments, assign_prob, soft_assignments):
        st.subheader('WHAT DID B-SOID LEARN?')
        self.options = st.multiselect('What do you want to know?',
                                      ['List of pose-relationship features',
                                       'Mapping between features and assignment',
                                       'Assignment probabilities'],
                                      ['Mapping between features and assignment'])
        try:
            self.working_dir = working_dir
            self.prefix = prefix
            self.sampled_features = sampled_features
            self.assignments = assignments
            self.assign_prob = assign_prob
            self.soft_assignments = soft_assignments
        except FileNotFoundError:
            st.error('Cannot find training, please complete all the necessary steps')

    def _feature_distribution_csv(self):
        # Write <prefix>_pose_relationships.csv: per-feature median plus histogram edges/probabilities.
        feats_range, feats_median, feats_pct, edges = feat_dist(self.sampled_features)
        median_df = pd.DataFrame(feats_median, columns=['median'])
        pct_df = pd.DataFrame(feats_pct)
        edge_df = pd.DataFrame(edges)
        edge_df.columns = pd.MultiIndex.from_product([['histogram edge'], edge_df.columns])
        pct_df.columns = pd.MultiIndex.from_product([['histogram prob for edge'],
                                                     pct_df.columns])
        dist_df = pd.concat((median_df, edge_df, pct_df), axis=1)
        dist_df.index.name = 'Pose_relationships'
        dist_df.to_csv(
            (os.path.join(self.working_dir, str.join('', (self.prefix, '_pose_relationships.csv')))),
            index=True, chunksize=10000, encoding='utf-8')

    def _mapping_csv(self):
        # Write <prefix>_mapping.csv: sampled features side-by-side with HDBSCAN (soft) assignments.
        n_pose = int(np.sqrt(self.sampled_features.shape[1]))
        pairwise_dist_names = []
        pairwise_angle_names = []
        displacement_names = []
        for i, j in itertools.combinations(range(0, n_pose), 2):
            pairwise_dist_names.append(['Pose ', i + 1, j + 1, 'delta pixels'])
            pairwise_angle_names.append(['Pose vector ', i + 1, j + 1, 'delta degrees'])
        for i in range(n_pose):
            displacement_names.append(['Pose ', i + 1, 'vs prev. time', 'delta pixels'])
        multi_columns = np.vstack((pairwise_dist_names, pairwise_angle_names, displacement_names))
        features_df = pd.DataFrame(self.sampled_features, columns=multi_columns)
        assignments_data = np.concatenate([self.assignments.reshape(len(self.assignments), 1),
                                           self.soft_assignments.reshape(len(self.soft_assignments), 1),
                                           ], axis=1)
        multi_columns2 = pd.MultiIndex.from_tuples([('HDBSCAN', 'Assignment'),
                                                    ('HDBSCAN*SOFT', 'Assignment')],
                                                   names=['Type', 'Frame@10Hz'])
        assignments_df = pd.DataFrame(assignments_data, columns=multi_columns2)
        training_data = pd.concat((features_df, assignments_df), axis=1)
        training_data.index.name = 'Frame@10hz'
        training_data.to_csv(
            (os.path.join(self.working_dir, str.join('', (self.prefix, '_mapping.csv')))),
            index=True, chunksize=10000, encoding='utf-8')

    def _assign_prob_csv(self):
        # Write <prefix>_assign_prob.csv: per-frame soft assignment probability per group.
        prob_columns = [str.join('', ('Group', str(i), '_probability'))
                        for i in range(len(np.unique(self.soft_assignments)))]
        assign_prob_df = pd.DataFrame(self.assign_prob, columns=prob_columns)
        assign_prob_df.index.name = 'Frame@10hz'
        assign_prob_df.to_csv(
            (os.path.join(self.working_dir, str.join('', (self.prefix, '_assign_prob.csv')))),
            index=True, chunksize=10000, encoding='utf-8')

    def save_csv(self):
        """On button press, export each CSV the user selected in self.options."""
        if st.button('Generate training features and probabilities'):
            try:
                if any('List of pose-relationship features' in o for o in self.options):
                    self._feature_distribution_csv()
                if any('Mapping between features and assignment' in o for o in self.options):
                    self._mapping_csv()
                if any('Assignment probabilities' in o for o in self.options):
                    self._assign_prob_csv()
                st.balloons()
            except AttributeError:
                st.error('Sometimes this takes a bit to update, recheck identify clusters (previous step) '
                         'and rerun this in 30 seconds.')
| 5,051
| 62.949367
| 116
|
py
|
B-SOID
|
B-SOID-master/bsoid_app/video_creator.py
|
import base64
import ffmpeg
import h5py
import streamlit as st
from bsoid_app.bsoid_utilities.bsoid_classification import *
from bsoid_app.bsoid_utilities.likelihoodprocessing import *
from bsoid_app.bsoid_utilities.load_json import *
from bsoid_app.bsoid_utilities.videoprocessing import *
@st.cache(allow_output_mutation=True)
def selected_file(d_file):
    """Memoize the chosen data file name across Streamlit reruns."""
    chosen = d_file
    return chosen
@st.cache(allow_output_mutation=True)
def selected_vid(vid_file):
    """Memoize the chosen video file name across Streamlit reruns."""
    chosen = vid_file
    return chosen
class creator:
    """Streamlit step that extracts video frames and builds labeled example snippets.

    Predicts frameshifted behavioral labels for one selected pose file, then
    stitches per-group example clips (and gifs) from the extracted frames.
    """

    def __init__(self, root_path, data_directories, processed_input_data,
                 pose_chosen, working_dir, prefix, framerate, clf, input_filenames):
        st.subheader('GENERATE VIDEOS SNIPPETS FOR INTERPRETATION')
        # Context from earlier steps (paths, filtered data, trained classifier).
        self.root_path = root_path
        self.data_directories = data_directories
        self.processed_input_data = processed_input_data
        self.pose_chosen = pose_chosen
        self.working_dir = working_dir
        self.prefix = prefix
        self.framerate = framerate
        self.clf = clf
        self.input_filenames = input_filenames
        # Populated by setup(); empty-list placeholders until then.
        self.file_directory = []
        self.d_file = []
        self.vid_dir = []
        self.vid_file = []
        self.frame_dir = []
        self.filetype = []
        self.width = []
        self.height = []
        self.bit_rate = []
        self.num_frames = []
        self.avg_frame_rate = []
        self.shortvid_dir = []
        self.min_frames = []
        self.number_examples = []
        self.out_fps = []
        self.file_j_processed = []

    def setup(self):
        """Collect file/video selections, create output directories, probe the video with ffmpeg."""
        if st.checkbox("Change __root directory__ to other than **{}**? Do this if you have another project "
                       "that benefits from built classifier.".format(self.root_path), False, 'vc'):
            self.root_path = st.text_input('Enter new __root directory__, e.g. /Users/projectY')
        self.file_directory = st.text_input(str.join('', ('Enter the __data containing sub-directory__'
                                                          ' within ', self.root_path)),
                                            self.data_directories[0])
        try:
            os.listdir(str.join('', (self.root_path, self.file_directory)))
            st.markdown('You have selected **{}** as your csv/h5/json data sub-directory.'.format(self.file_directory))
        except FileNotFoundError:
            st.error('No such directory')
        st.markdown('If your input was openpose **JSON(s)**, the app has converted into a SINGLE CSV for each folder. '
                    'Hence, the following will autodetect CSV as your filetype.')
        # Default the selector to the training file type.
        self.filetype = st.selectbox('What type of file?',
                                     ('csv', 'h5', 'json'),
                                     index=int([i for i, s in enumerate(['csv', 'h5', 'json'])
                                                if s in self.input_filenames[0].partition('.')[-1]][0]))
        if self.filetype == 'csv':
            d_file = st.selectbox('Select the csv file',
                                  sorted(os.listdir(str.join('', (self.root_path, self.file_directory)))))
            self.d_file = selected_file(d_file)
        elif self.filetype == 'h5':
            d_file = st.selectbox('Select the h5 file',
                                  sorted(os.listdir(str.join('', (self.root_path, self.file_directory)))))
            self.d_file = selected_file(d_file)
        elif self.filetype == 'json':
            # Jsons were compiled into one csv per folder; build it if missing.
            d_files = get_filenamesjson(self.root_path, self.file_directory)
            fname = d_files[0].rpartition('/')[-1].rpartition('_')[0].rpartition('_')[0]
            if not os.path.isfile(str.join('', (d_files[0].rpartition('/')[0], '/', fname, '.csv'))):
                json2csv_multi(d_files)
            d_file = st.selectbox('Select the autocompiled csv file containing all jsons',
                                  sorted(os.listdir(str.join('', (self.root_path, self.file_directory)))))
            self.d_file = selected_file(d_file)
        self.vid_dir = st.text_input('Enter corresponding video directory (Absolute path):',
                                     str.join('', (self.root_path, self.data_directories[0])))
        try:
            os.listdir(self.vid_dir)
            st.markdown(
                'You have selected **{}** as your video directory.'.format(self.vid_dir))
        except FileNotFoundError:
            st.error('No such directory')
        vid_file = st.selectbox('Select the video (.mp4 or .avi)', sorted(os.listdir(self.vid_dir)))
        self.vid_file = selected_vid(vid_file)
        if self.filetype == 'csv' or self.filetype == 'h5':
            st.markdown('You have selected **{}** matching **{}**.'.format(self.vid_file, self.d_file))
            csvname = os.path.basename(self.d_file).rpartition('.')[0]
        else:
            st.markdown(
                'You have selected **{}** matching **{}** json directory.'.format(self.vid_file, self.file_directory))
            csvname = os.path.basename(self.file_directory)
        # Frame dump directory: <root><subdir>/pngs/<csvname>.
        try:
            os.mkdir(str.join('', (self.root_path, self.file_directory, '/pngs')))
        except FileExistsError:
            pass
        try:
            os.mkdir(str.join('', (self.root_path, self.file_directory, '/pngs', '/', csvname)))
        except FileExistsError:
            pass
        self.frame_dir = str.join('', (self.root_path, self.file_directory, '/pngs', '/', csvname))
        # NOTE(review): extra self.vid_file argument to .format() is unused (harmless).
        st.markdown('Created {} as your **video frames** directory.'.format(self.frame_dir, self.vid_file))
        # Probe the video stream for geometry, frame count, bit rate and fps.
        probe = ffmpeg.probe(os.path.join(self.vid_dir, self.vid_file))
        video_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')
        self.width = int(video_info['width'])
        self.height = int(video_info['height'])
        self.num_frames = int(video_info['nb_frames'])
        self.bit_rate = int(video_info['bit_rate'])
        # avg_frame_rate is a fraction string like '30000/1001'; round to nearest int fps.
        self.avg_frame_rate = round(
            int(video_info['avg_frame_rate'].rpartition('/')[0]) / int(video_info['avg_frame_rate'].rpartition('/')[2]))
        # Snippet output directory: <root><subdir>/mp4s/<csvname>.
        try:
            os.mkdir(str.join('', (self.root_path, self.file_directory, '/mp4s')))
        except FileExistsError:
            pass
        try:
            os.mkdir(str.join('', (self.root_path, self.file_directory, '/mp4s', '/', csvname)))
        except FileExistsError:
            pass
        self.shortvid_dir = str.join('', (self.root_path, self.file_directory, '/mp4s', '/', csvname))
        # NOTE(review): extra self.vid_file argument to .format() is unused (harmless).
        st.markdown('Created {} as your **behavioral snippets** directory.'.format(self.shortvid_dir, self.vid_file))
        min_time = st.number_input('Enter minimum time for bout in ms:', value=200)
        # Convert the minimum bout duration from ms to frames at the pose framerate.
        self.min_frames = round(float(min_time) * 0.001 * float(self.framerate))
        st.markdown('Entered **{} ms** as minimum duration per bout, '
                    'which is equivalent to **{} frames**.'.format(min_time, self.min_frames))
        self.number_examples = st.slider('Select number of non-repeated examples', 1, 20, 5)
        st.markdown(
            'Your will obtain a maximum of **{}** non-repeated output examples per group.'.format(self.number_examples))
        playback_speed = st.number_input('Enter playback speed:', value=0.75)
        self.out_fps = int(float(playback_speed) * float(self.framerate))
        st.markdown('Playback at **{} x speed** (rounded to {} FPS).'.format(playback_speed, self.out_fps))

    def frame_extraction(self):
        """Dump every video frame as a half-resolution png into frame_dir via ffmpeg."""
        if st.button('Start frame extraction for {} frames '
                     'at {} frames per second'.format(self.num_frames, self.avg_frame_rate)):
            st.info('Extracting frames from the video... ')
            try:
                (ffmpeg.input(os.path.join(self.vid_dir, self.vid_file))
                 .filter('fps', fps=self.avg_frame_rate)
                 .output(str.join('', (self.frame_dir, '/frame%01d.png')), video_bitrate=self.bit_rate,
                         s=str.join('', (str(int(self.width * 0.5)), 'x', str(int(self.height * 0.5)))),
                         sws_flags='bilinear', start_number=0)
                 .run(capture_stdout=True, capture_stderr=True))
                st.info('Done extracting **{}** frames from video **{}**.'.format(self.num_frames, self.vid_file))
            except ffmpeg.Error as e:
                # NOTE(review): st.error takes a single body argument; the second positional
                # argument here likely raises TypeError — confirm and switch to a formatted string.
                st.error('stdout:', e.stdout.decode('utf8'))
                st.error('stderr:', e.stderr.decode('utf8'))
            st.info('Done extracting {} frames from {}'.format(self.num_frames, self.vid_file))

    def create_videos(self):
        """Predict frameshifted labels for the selected file and write example snippet videos."""
        radio = st.radio(label='Have you extracted frames?', options=["Yes", "No"])
        if radio == 'Yes':
            if st.checkbox('Clear old videos? Uncheck after check to prevent from auto-clearing', False, key='vr'):
                # NOTE(review): bare except silently ignores any removal failure — consider OSError.
                try:
                    for file_name in glob.glob(self.shortvid_dir + "/*"):
                        os.remove(file_name)
                except:
                    pass
            if st.button("Predict labels and create example videos"):
                # Load and adaptively filter the selected pose file (DLC csv/h5, else SLEAP h5).
                if self.filetype == 'csv' or self.filetype == 'json':
                    file_j_df = pd.read_csv(
                        os.path.join(str.join('', (self.root_path, self.file_directory, '/', self.d_file))),
                        low_memory=False)
                    file_j_processed, p_sub_threshold = adp_filt(file_j_df, self.pose_chosen)
                elif self.filetype == 'h5':
                    try:
                        file_j_df = pd.read_hdf(
                            os.path.join(str.join('', (self.root_path, self.file_directory, '/', self.d_file))),
                            low_memory=False)
                        file_j_processed, p_sub_threshold = adp_filt_h5(file_j_df, self.pose_chosen)
                    except:
                        # NOTE(review): bare except; any failure falls through to the SLEAP reader.
                        st.info('Detecting a SLEAP .h5 file...')
                        file_j_df = h5py.File(
                            os.path.join(str.join('', (self.root_path, self.file_directory, '/', self.d_file))), 'r')
                        file_j_processed, p_sub_threshold = adp_filt_sleap_h5(file_j_df, self.pose_chosen)
                self.file_j_processed = [file_j_processed]
                labels_fs = []
                fs_labels = []
                st.info('Predicting labels... ')
                for i in range(0, len(self.file_j_processed)):
                    feats_new = bsoid_extract([self.file_j_processed[i]], self.framerate)
                    labels = bsoid_predict(feats_new, self.clf)
                    # Reverse each shifted stream, right-pad with -1 to a common length,
                    # then borrow the first n labels from the previous stream.
                    for m in range(0, len(labels)):
                        labels[m] = labels[m][::-1]
                    labels_pad = -1 * np.ones([len(labels), len(max(labels, key=lambda x: len(x)))])
                    for n, l in enumerate(labels):
                        labels_pad[n][0:len(l)] = l
                        labels_pad[n] = labels_pad[n][::-1]
                        if n > 0:
                            labels_pad[n][0:n] = labels_pad[n - 1][0:n]
                    labels_fs.append(labels_pad.astype(int))
                st.info('Frameshifted arrangement of labels... ')
                # Interleave the shifted streams column-wise to framerate resolution.
                for k in range(0, len(labels_fs)):
                    labels_fs2 = []
                    for l in range(math.floor(self.framerate / 10)):
                        labels_fs2.append(labels_fs[k][l])
                    fs_labels.append(np.array(labels_fs2).flatten('F'))
                st.info('Done frameshift-predicting **{}**.'.format(self.d_file))
                create_labeled_vid(fs_labels[0], int(self.min_frames), int(self.number_examples), int(self.out_fps),
                                   self.frame_dir, self.shortvid_dir)
                st.balloons()
                st.markdown('**_CHECK POINT_**: Done generating video snippets. Move on to '
                            '__Predict old/new files using a model__.')
        elif radio == 'No':
            self.frame_extraction()

    def show_snippets(self):
        """Convert example mp4s to gifs (if needed) and lay them out in a 3-column collage."""
        video_bytes = []
        grp_names = []
        files = []
        for file in os.listdir(self.shortvid_dir):
            files.append(file)
        sort_nicely(files)
        st.info('Creating gifs from mp4s...')
        for file in files:
            # Only the first example of each group ('...0.mp4') is shown.
            if file.endswith('0.mp4'):
                try:
                    example_vid_file = open(os.path.join(
                        str.join('', (self.shortvid_dir, '/', file.partition('.')[0], '.gif'))), 'rb')
                except FileNotFoundError:
                    # Gif not built yet: convert the mp4, then reopen.
                    convert2gif(str.join('', (self.shortvid_dir, '/', file)), TargetFormat.GIF)
                    example_vid_file = open(os.path.join(
                        str.join('', (self.shortvid_dir, '/', file.partition('.')[0], '.gif'))), 'rb')
                contents = example_vid_file.read()
                data_url = base64.b64encode(contents).decode("utf-8")
                video_bytes.append(data_url)
                grp_names.append('{}'.format(file.partition('.')[0]))
        col = [None] * 3
        col[0], col[1], col[2] = st.beta_columns([1, 1, 1])
        # Render three gifs per row; IndexError ends the last partial row.
        for i in range(0, len(video_bytes) + 3, 3):
            try:
                col[0].markdown(
                    f'<div class="container">'
                    f'<img src="data:image/gif;base64,{video_bytes[i]}" alt="" width="300" height="300">'
                    f'<div class="bottom-left">{grp_names[i]}</div>'
                    f'</div>',
                    unsafe_allow_html=True,
                )
                col[1].markdown(
                    f'<div class="container">'
                    f'<img src="data:image/gif;base64,{video_bytes[i + 1]}" alt="" width="300" height="300">'
                    f'<div class="bottom-left">{grp_names[i + 1]}</div>'
                    f'</div>',
                    unsafe_allow_html=True,
                )
                col[2].markdown(
                    f'<div class="container">'
                    f'<img src="data:image/gif;base64,{video_bytes[i + 2]}" alt="" width="300" height="300">'
                    f'<div class="bottom-left">{grp_names[i + 2]}</div>'
                    f'</div>',
                    unsafe_allow_html=True,
                )
            except IndexError:
                pass

    def main(self):
        """Entry point: configure, create snippets, and optionally show the collage."""
        self.setup()
        self.create_videos()
        # NOTE(review): .format(self.shortvid_dir) has no placeholder in the string (harmless).
        if st.checkbox("Show a collage of example group? "
                       "This could take some time for gifs conversions.".format(self.shortvid_dir), False, key='vs'):
            self.show_snippets()
| 14,523
| 52.007299
| 120
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.